diff --git a/.github/workflows/bloat-test.yml b/.github/workflows/bloat-test.yml deleted file mode 100644 index e6462579..00000000 --- a/.github/workflows/bloat-test.yml +++ /dev/null @@ -1,31 +0,0 @@ -#name: Check changes in binary size -# -#on: -# push: -# branches: [ main ] -# paths-ignore: -# - 'docs_src/**' -# - 'README.md' -# - 'CHANGELOG.md' -# - 'CITATION' -# - 'book.toml' -# pull_request: -# branches: [ main ] -# paths-ignore: -# - 'docs_src/**' -# - 'README.md' -# - 'CHANGELOG.md' -# - 'CITATION' -# - 'book.toml' -# -#jobs: -# bloat_test: -# name: Cargo Fmt and Clippy - Linux -# runs-on: ubuntu-latest -# steps: -# - name: Checkout repository -# uses: actions/checkout@v2 -# - name: Run cargo bloat -# uses: orf/cargo-bloat-action@v1 -# with: -# token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 8f14260f..b15bf654 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -10,6 +10,9 @@ on: - 'CITATION' - 'book.toml' - 'CONTRIBUTING.md' + - '.github/workflows/exe-release-prometheuspush.yml' + - '*.md' + - 'oranda.json' pull_request: branches: [ main, dev ] paths-ignore: @@ -18,12 +21,16 @@ on: - 'CHANGELOG.md' - 'CITATION' - 'book.toml' + - '.github/workflows/exe-release-prometheuspush.yml' + - '*.md' + - 'oranda.json' env: CARGO_TERM_COLOR: always - AWX_PUBLIC_ACCESS: "{Xs%g5/a/Si=;_[4LL" - AWX_PUBLIC_USER: "scaphandre-public" - AWX_HOST: "https://cd.hubblo.org" + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true jobs: check_pr_is_on_the_right_branch: @@ -41,34 +48,33 @@ jobs: runs-on: ubuntu-latest needs: check_pr_is_on_the_right_branch steps: - - name: Cancel Previous Runs - uses: ambimax/action-cancel-previous-runs@v1 - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Install Rust - uses: actions-rs/toolchain@v1 + uses: 
bpetit/action-toolchain@v2.0.0 with: toolchain: stable profile: minimal override: true components: rustfmt - name: Check formatting - uses: actions-rs/cargo@v1 + uses: bpetit/action-cargo@v2.0.1 with: command: fmt args: --all -- --check - name: Clippy Check - uses: actions-rs/cargo@v1 + uses: bpetit/action-cargo@v2.0.1 with: command: clippy args: -- -A clippy::upper_case_acronyms -D warnings + fmt_and_clippy_windows: name: Cargo Fmt and Clippy - Windows runs-on: windows-latest - needs: fmt_and_clippy_linux + needs: check_pr_is_on_the_right_branch steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Install Rustup uses: crazy-max/ghaction-chocolatey@v2 with: @@ -77,55 +83,56 @@ jobs: run: | rustup toolchain install stable-x86_64-pc-windows-msvc - name: Check formatting - uses: actions-rs/cargo@v1 + uses: bpetit/action-cargo@v2.0.1 with: command: fmt args: --all -- --check - name: Clippy Check - uses: actions-rs/cargo@v1 + uses: bpetit/action-cargo@v2.0.1 with: command : clippy - args: --no-default-features --features "prometheus json riemann" + args: --no-default-features --features "prometheus json riemann warpten" test_linux_x86_64: name: Test on GNU/Linux x86_64 (Bare metal worker) runs-on: ubuntu-latest - needs: fmt_and_clippy_linux steps: - name: Install dependencies (awxkit) - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: - python-version: '3.x' + python-version: '3.11' - name: Install python requirements (awxkit) run: | python -m pip install --upgrade pip - pip install awxkit + pip install awxkit setuptools - name: Log on AWX id: login run: | - export AWX_TOKEN=$(awx --conf.host "${{env.AWX_HOST}}" --conf.username "${{env.AWX_PUBLIC_USER}}" --conf.password "${{env.AWX_PUBLIC_ACCESS}}" login | jq .token | tr -d '"') - echo "::set-output name=awx_token::${AWX_TOKEN}" + export AWX_TOKEN=$(awx --conf.host "${{ secrets.AWX_HOST }}" --conf.username "${{ secrets.AWX_PUBLIC_USER }}" --conf.password "${{ 
secrets.AWX_PASSWORD }}" login | jq .token | tr -d '"') + echo "awx_token=${AWX_TOKEN}" >> $GITHUB_OUTPUT - name: Prepare Rust environment on bare metal worker id: rust run: | - awx --conf.token ${{ steps.login.outputs.awx_token }} --conf.host ${{ env.AWX_HOST }} job_templates launch --extra_vars="{\"github_repository\":\"$GITHUB_REPOSITORY\",\"github_actor\":\"${GITHUB_ACTOR}\",\"github_workflow\":\"${GITHUB_WORKFLOW}\",\"github_workspace\":\"${GITHUB_WORKSPACE}\",\"github_event_name\":\"${GITHUB_EVENT_NAME}\",\"github_event_path\":\"${GITHUB_EVENT_PATH}\",\"github_sha\":\"${GITHUB_SHA}\",\"github_ref\":\"${GITHUB_REF}\",\"github_head_ref\":\"${GITHUB_HEAD_REF}\",\"github_base_ref\":\"${GITHUB_BASE_REF}\",\"github_server_url\":\"${GITHUB_SERVER_URL}\"}" 12 --monitor + awx --conf.token ${{ steps.login.outputs.awx_token }} --conf.host ${{ secrets.AWX_HOST }} job_templates launch --extra_vars="{\"github_repository\":\"$GITHUB_REPOSITORY\",\"github_actor\":\"${GITHUB_ACTOR}\",\"github_workflow\":\"${GITHUB_WORKFLOW}\",\"github_workspace\":\"${GITHUB_WORKSPACE}\",\"github_event_name\":\"${GITHUB_EVENT_NAME}\",\"github_event_path\":\"${GITHUB_EVENT_PATH}\",\"github_sha\":\"${GITHUB_SHA}\",\"github_ref\":\"${GITHUB_REF}\",\"github_head_ref\":\"${GITHUB_HEAD_REF}\",\"github_base_ref\":\"${GITHUB_BASE_REF}\",\"github_server_url\":\"${GITHUB_SERVER_URL}\"}" 12 --monitor - name: Clone Scaphandre repository id: clone run: | - awx --conf.token ${{ steps.login.outputs.awx_token }} --conf.host ${{ env.AWX_HOST }} job_templates launch 
--extra_vars="{\"github_repository\":\"${GITHUB_REPOSITORY}\",\"github_actor\":\"${GITHUB_ACTOR}\",\"github_workflow\":\"${GITHUB_WORKFLOW}\",\"github_workspace\":\"${GITHUB_WORKSPACE}\",\"github_event_name\":\"${GITHUB_EVENT_NAME}\",\"github_event_path\":\"${GITHUB_EVENT_PATH}\",\"github_sha\":\"${GITHUB_SHA}\",\"github_ref\":\"${GITHUB_REF}\",\"github_head_ref\":\"${GITHUB_HEAD_REF}\",\"github_base_ref\":\"${GITHUB_BASE_REF}\",\"github_server_url\":\"${GITHUB_SERVER_URL}\"}" 13 --monitor + awx --conf.token ${{ steps.login.outputs.awx_token }} --conf.host ${{ secrets.AWX_HOST }} job_templates launch --extra_vars="{\"github_repository\":\"${GITHUB_REPOSITORY}\",\"github_actor\":\"${GITHUB_ACTOR}\",\"github_workflow\":\"${GITHUB_WORKFLOW}\",\"github_workspace\":\"${GITHUB_WORKSPACE}\",\"github_event_name\":\"${GITHUB_EVENT_NAME}\",\"github_event_path\":\"${GITHUB_EVENT_PATH}\",\"github_sha\":\"${GITHUB_SHA}\",\"github_ref\":\"${GITHUB_REF}\",\"github_head_ref\":\"${GITHUB_HEAD_REF}\",\"github_base_ref\":\"${GITHUB_BASE_REF}\",\"github_server_url\":\"${GITHUB_SERVER_URL}\"}" 13 --monitor - name: Run Unit Tests id: unittests run: | - awx --conf.token ${{ steps.login.outputs.awx_token }} --conf.host ${{ env.AWX_HOST }} job_templates launch --extra_vars="{\"github_repository\":\"${GITHUB_REPOSITORY}\",\"github_actor\":\"${GITHUB_ACTOR}\",\"github_workflow\":\"${GITHUB_WORKFLOW}\",\"github_workspace\":\"${GITHUB_WORKSPACE}\",\"github_event_name\":\"${GITHUB_EVENT_NAME}\",\"github_event_path\":\"${GITHUB_EVENT_PATH}\",\"github_sha\":\"${GITHUB_SHA}\",\"github_ref\":\"${GITHUB_REF}\",\"github_head_ref\":\"${GITHUB_HEAD_REF}\",\"github_base_ref\":\"${GITHUB_BASE_REF}\",\"github_server_url\":\"${GITHUB_SERVER_URL}\"}" 14 --monitor + awx --conf.token ${{ steps.login.outputs.awx_token }} --conf.host ${{ secrets.AWX_HOST }} job_templates launch 
--extra_vars="{\"github_repository\":\"${GITHUB_REPOSITORY}\",\"github_actor\":\"${GITHUB_ACTOR}\",\"github_workflow\":\"${GITHUB_WORKFLOW}\",\"github_workspace\":\"${GITHUB_WORKSPACE}\",\"github_event_name\":\"${GITHUB_EVENT_NAME}\",\"github_event_path\":\"${GITHUB_EVENT_PATH}\",\"github_sha\":\"${GITHUB_SHA}\",\"github_ref\":\"${GITHUB_REF}\",\"github_head_ref\":\"${GITHUB_HEAD_REF}\",\"github_base_ref\":\"${GITHUB_BASE_REF}\",\"github_server_url\":\"${GITHUB_SERVER_URL}\"}" 14 --monitor build_linux_x86_64: name: Build on GNU/Linux x86_64 (Bare metal worker) runs-on: ubuntu-latest - needs: test_linux_x86_64 + needs: + - fmt_and_clippy_linux + - test_linux_x86_64 steps: - name: Install dependencies (awxkit) - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: - python-version: '3.x' + python-version: '3.11' - name: Install python requirements (awxkit) run: | python -m pip install --upgrade pip @@ -133,23 +140,34 @@ jobs: - name: Log on AWX id: login run: | - export AWX_TOKEN=$(awx --conf.host "${{env.AWX_HOST}}" --conf.username "${{env.AWX_PUBLIC_USER}}" --conf.password "${{env.AWX_PUBLIC_ACCESS}}" login | jq .token | tr -d '"') - echo "::set-output name=awx_token::${AWX_TOKEN}" + export AWX_TOKEN=$(awx --conf.host "${{ secrets.AWX_HOST }}" --conf.username "${{ secrets.AWX_PUBLIC_USER }}" --conf.password "${{ secrets.AWX_PASSWORD }}" login | jq .token | tr -d '"') + echo "awx_token=${AWX_TOKEN}" >> $GITHUB_OUTPUT + - name: Build debug version + id: builddebug + run: | + awx --conf.token ${{ steps.login.outputs.awx_token }} --conf.host ${{ secrets.AWX_HOST }} job_templates launch 
--extra_vars="{\"github_repository\":\"${GITHUB_REPOSITORY}\",\"github_actor\":\"${GITHUB_ACTOR}\",\"github_workflow\":\"${GITHUB_WORKFLOW}\",\"github_workspace\":\"${GITHUB_WORKSPACE}\",\"github_event_name\":\"${GITHUB_EVENT_NAME}\",\"github_event_path\":\"${GITHUB_EVENT_PATH}\",\"github_sha\":\"${GITHUB_SHA}\",\"github_ref\":\"${GITHUB_REF}\",\"github_head_ref\":\"${GITHUB_HEAD_REF}\",\"github_base_ref\":\"${GITHUB_BASE_REF}\",\"github_server_url\":\"${GITHUB_SERVER_URL}\"}" 17 --monitor + - name: Test JSON exporter + id: jsonexporter + run: | + awx --conf.token ${{ steps.login.outputs.awx_token }} --conf.host ${{ secrets.AWX_HOST }} job_templates launch --extra_vars="{\"github_repository\":\"${GITHUB_REPOSITORY}\",\"github_actor\":\"${GITHUB_ACTOR}\",\"github_workflow\":\"${GITHUB_WORKFLOW}\",\"github_workspace\":\"${GITHUB_WORKSPACE}\",\"github_event_name\":\"${GITHUB_EVENT_NAME}\",\"github_event_path\":\"${GITHUB_EVENT_PATH}\",\"github_sha\":\"${GITHUB_SHA}\",\"github_ref\":\"${GITHUB_REF}\",\"github_head_ref\":\"${GITHUB_HEAD_REF}\",\"github_base_ref\":\"${GITHUB_BASE_REF}\",\"github_server_url\":\"${GITHUB_SERVER_URL}\"}" 18 --monitor - name: Build Docker image id: dockerbuild run: | - awx --conf.token ${{ steps.login.outputs.awx_token }} --conf.host ${{ env.AWX_HOST }} job_templates launch --extra_vars="{\"github_repository\":\"${GITHUB_REPOSITORY}\",\"github_actor\":\"${GITHUB_ACTOR}\",\"github_workflow\":\"${GITHUB_WORKFLOW}\",\"github_workspace\":\"${GITHUB_WORKSPACE}\",\"github_event_name\":\"${GITHUB_EVENT_NAME}\",\"github_event_path\":\"${GITHUB_EVENT_PATH}\",\"github_sha\":\"${GITHUB_SHA}\",\"github_ref\":\"${GITHUB_REF}\",\"github_head_ref\":\"${GITHUB_HEAD_REF}\",\"github_base_ref\":\"${GITHUB_BASE_REF}\",\"github_server_url\":\"${GITHUB_SERVER_URL}\"}" 15 --monitor + awx --conf.token ${{ steps.login.outputs.awx_token }} --conf.host ${{ secrets.AWX_HOST }} job_templates launch 
--extra_vars="{\"github_repository\":\"${GITHUB_REPOSITORY}\",\"github_actor\":\"${GITHUB_ACTOR}\",\"github_workflow\":\"${GITHUB_WORKFLOW}\",\"github_workspace\":\"${GITHUB_WORKSPACE}\",\"github_event_name\":\"${GITHUB_EVENT_NAME}\",\"github_event_path\":\"${GITHUB_EVENT_PATH}\",\"github_sha\":\"${GITHUB_SHA}\",\"github_ref\":\"${GITHUB_REF}\",\"github_head_ref\":\"${GITHUB_HEAD_REF}\",\"github_base_ref\":\"${GITHUB_BASE_REF}\",\"github_server_url\":\"${GITHUB_SERVER_URL}\"}" 15 --monitor - name: Test Scaphandre + Prometheus in docker-compose id: promtest run: | - awx --conf.token ${{ steps.login.outputs.awx_token }} --conf.host ${{ env.AWX_HOST }} job_templates launch --extra_vars="{\"github_repository\":\"${GITHUB_REPOSITORY}\",\"github_actor\":\"${GITHUB_ACTOR}\",\"github_workflow\":\"${GITHUB_WORKFLOW}\",\"github_workspace\":\"${GITHUB_WORKSPACE}\",\"github_event_name\":\"${GITHUB_EVENT_NAME}\",\"github_event_path\":\"${GITHUB_EVENT_PATH}\",\"github_sha\":\"${GITHUB_SHA}\",\"github_ref\":\"${GITHUB_REF}\",\"github_head_ref\":\"${GITHUB_HEAD_REF}\",\"github_base_ref\":\"${GITHUB_BASE_REF}\",\"github_server_url\":\"${GITHUB_SERVER_URL}\"}" 16 --monitor + awx --conf.token ${{ steps.login.outputs.awx_token }} --conf.host ${{ secrets.AWX_HOST }} job_templates launch --extra_vars="{\"github_repository\":\"${GITHUB_REPOSITORY}\",\"github_actor\":\"${GITHUB_ACTOR}\",\"github_workflow\":\"${GITHUB_WORKFLOW}\",\"github_workspace\":\"${GITHUB_WORKSPACE}\",\"github_event_name\":\"${GITHUB_EVENT_NAME}\",\"github_event_path\":\"${GITHUB_EVENT_PATH}\",\"github_sha\":\"${GITHUB_SHA}\",\"github_ref\":\"${GITHUB_REF}\",\"github_head_ref\":\"${GITHUB_HEAD_REF}\",\"github_base_ref\":\"${GITHUB_BASE_REF}\",\"github_server_url\":\"${GITHUB_SERVER_URL}\"}" 16 --monitor test_windows_x86_64: name: Test on Windows x86_64 (Virtual machine worker) - runs-on: "windows-2019" - needs: fmt_and_clippy_windows + runs-on: "windows-latest" steps: - name: Checkout - uses: actions/checkout@v2 + 
uses: actions/checkout@v3 + - name: Install openssl for Windows with vcpkg + run: | + echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append + vcpkg install openssl:x64-windows-static-md - name: Install Rustup uses: crazy-max/ghaction-chocolatey@v2 with: @@ -157,6 +175,9 @@ jobs: - name: Install Rust toolchain run: | rustup toolchain install stable-x86_64-pc-windows-msvc + - name: Tests + run: | + cargo test --no-default-features --features "prometheus prometheuspush json riemann" exporters - name: Build (debug mode) run: | - cargo build --no-default-features --features "prometheus json riemann" + cargo build --no-default-features --features "prometheus prometheuspush json riemann" diff --git a/.github/workflows/codesee-arch-diagram.yml b/.github/workflows/codesee-arch-diagram.yml deleted file mode 100644 index 1ec93fd3..00000000 --- a/.github/workflows/codesee-arch-diagram.yml +++ /dev/null @@ -1,87 +0,0 @@ -on: - push: - branches: - - main - pull_request_target: - types: [opened, synchronize, reopened] - -name: CodeSee Map - -jobs: - test_map_action: - runs-on: ubuntu-latest - continue-on-error: true - name: Run CodeSee Map Analysis - steps: - - name: checkout - id: checkout - uses: actions/checkout@v2 - with: - repository: ${{ github.event.pull_request.head.repo.full_name }} - ref: ${{ github.event.pull_request.head.ref }} - fetch-depth: 0 - - # codesee-detect-languages has an output with id languages. - - name: Detect Languages - id: detect-languages - uses: Codesee-io/codesee-detect-languages-action@latest - - - name: Configure JDK 16 - uses: actions/setup-java@v2 - if: ${{ fromJSON(steps.detect-languages.outputs.languages).java }} - with: - java-version: '16' - distribution: 'zulu' - - # CodeSee Maps Go support uses a static binary so there's no setup step required. 
- - - name: Configure Node.js 14 - uses: actions/setup-node@v2 - if: ${{ fromJSON(steps.detect-languages.outputs.languages).javascript }} - with: - node-version: '14' - - - name: Configure Python 3.x - uses: actions/setup-python@v2 - if: ${{ fromJSON(steps.detect-languages.outputs.languages).python }} - with: - python-version: '3.10' - architecture: 'x64' - - - name: Configure Ruby '3.x' - uses: ruby/setup-ruby@v1 - if: ${{ fromJSON(steps.detect-languages.outputs.languages).ruby }} - with: - ruby-version: '3.0' - - # We need the rust toolchain because it uses rustc and cargo to inspect the package - - name: Configure Rust 1.x stable - uses: actions-rs/toolchain@v1 - if: ${{ fromJSON(steps.detect-languages.outputs.languages).rust }} - with: - toolchain: stable - - - name: Generate Map - id: generate-map - uses: Codesee-io/codesee-map-action@latest - with: - step: map - api_token: ${{ secrets.CODESEE_ARCH_DIAG_API_TOKEN }} - github_ref: ${{ github.ref }} - languages: ${{ steps.detect-languages.outputs.languages }} - - - name: Upload Map - id: upload-map - uses: Codesee-io/codesee-map-action@latest - with: - step: mapUpload - api_token: ${{ secrets.CODESEE_ARCH_DIAG_API_TOKEN }} - github_ref: ${{ github.ref }} - - - name: Insights - id: insights - uses: Codesee-io/codesee-map-action@latest - with: - step: insights - api_token: ${{ secrets.CODESEE_ARCH_DIAG_API_TOKEN }} - github_ref: ${{ github.ref }} diff --git a/.github/workflows/debian-release.yml b/.github/workflows/debian-release.yml new file mode 100644 index 00000000..c0d27dae --- /dev/null +++ b/.github/workflows/debian-release.yml @@ -0,0 +1,107 @@ +name: Build Debian package on release + +on: + push: + paths-ignore: + - 'docs_src/**' + - 'README.md' + - 'CITATION' + - 'book.toml' + - 'CONTRIBUTING.md' + - '*.md' + - 'oranda.json' + tags: [ 'v*.*.*', 'dev*.*.*' ] + + +jobs: + create_debian_pkg_with_tag: + name: Create Debian package associated to version tag + runs-on: ubuntu-latest + outputs: + deb11output: 
${{ steps.deb11pkgname.outputs.deb11pkg }} + deb12output: ${{ steps.deb12pkgname.outputs.deb12pkg }} + steps: + - name: Install s3cmd + run: sudo apt install python3-pip -y && sudo pip3 install s3cmd awxkit + - name: Checkout scaphandre repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Get latest tag of scaphandre repository + id: latest-scaphandre-tag + uses: "WyriHaximus/github-action-get-previous-tag@v1.3.0" + with: + fallback: dev0.5.18 + - name: Checkout scaphandre-debian repository + uses: actions/checkout@v4 + with: + repository: hubblo-org/scaphandre-debian + - name: Build package with version tag and Debian 11 Bullseye + run : | + ./build.sh -v ${{ steps.latest-scaphandre-tag.outputs.tag }} + - name: Modify name of package to include tag version for Debian 11 + id: deb11pkgname + run: | + cd target + PKG_NAME=$(ls | sed "s/\([0-9]\+\.\)\{2\}[0-9]\+\-[0-9]\+\?/${{ steps.latest-scaphandre-tag.outputs.tag }}-deb11/") + mv *.deb $PKG_NAME + echo "deb11pkg=$PKG_NAME" >> "$GITHUB_OUTPUT" + - name: Upload to scw s3 and remove package + run: | + cd target + s3cmd --access_key="${{ secrets.S3_ACCESS_KEY_ID }}" --secret_key="${{ secrets.S3_SECRET_ACCESS_KEY }}" --region="fr-par" --acl-public --host="s3.fr-par.scw.cloud" --host-bucket="%(bucket).s3.fr-par.scw.cloud" put ${{ steps.deb11pkgname.outputs.deb11pkg }} s3://scaphandre/x86_64/ + rm *.deb + - name: Build package with version tag and Debian 12 Bookworm + run: | + ./build.sh -i debian:bookworm-slim -v ${{ steps.latest-scaphandre-tag.outputs.tag }} + - name: Modify name of package to include tag version for Debian 12 + id: deb12pkgname + run: | + cd target + PKG_NAME=$(ls | sed "s/\([0-9]\+\.\)\{2\}[0-9]\+\-[0-9]\+\?/${{ steps.latest-scaphandre-tag.outputs.tag }}-deb12/") + mv *.deb $PKG_NAME + echo "deb12pkg=$PKG_NAME" >> "$GITHUB_OUTPUT" + - name: Upload to scw s3 + run: | + cd target + s3cmd --access_key="${{ secrets.S3_ACCESS_KEY_ID }}" --secret_key="${{ secrets.S3_SECRET_ACCESS_KEY 
}}" --region="fr-par" --acl-public --host="s3.fr-par.scw.cloud" --host-bucket="%(bucket).s3.fr-par.scw.cloud" put ${{ steps.deb12pkgname.outputs.deb12pkg }} s3://scaphandre/x86_64/ + deb11-container-install-scaphandre: + name: Create Debian 11 container and install scaphandre with URL + needs: create_debian_pkg_with_tag + env: + DEB11PKG: ${{ needs.create_debian_pkg_with_tag.outputs.deb11output }} + runs-on: ubuntu-latest + container: + image: debian:buster-slim + steps: + - name: Install dependencies + run: | + apt update + apt install -y curl + - name: Download Debian 11 scaphandre package + run: | + curl -O https://s3.fr-par.scw.cloud/scaphandre/x86_64/${{ env.DEB11PKG }} + - name: Install and show scaphandre version + run: | + apt install -y ./${{ env.DEB11PKG }} + scaphandre -V + deb12-container-install-scaphandre: + name: Create Debian 12 container and install scaphandre with URL + needs: create_debian_pkg_with_tag + runs-on: ubuntu-latest + env: + DEB12PKG: ${{ needs.create_debian_pkg_with_tag.outputs.deb12output }} + container: + image: debian:bookworm-slim + steps: + - name: Install dependencies + run: | + apt update + apt install -y curl + - name: Download Debian 12 scaphandre package + run: | + curl -O https://s3.fr-par.scw.cloud/scaphandre/x86_64/${{ env.DEB12PKG }} + - name: Install and show scaphandre version + run: | + apt install -y ./${{ env.DEB12PKG }} + scaphandre -V diff --git a/.github/workflows/docker-release.yml b/.github/workflows/docker-release.yml index cd2e829f..d39ecab5 100644 --- a/.github/workflows/docker-release.yml +++ b/.github/workflows/docker-release.yml @@ -10,6 +10,8 @@ on: - 'CITATION' - 'book.toml' - 'CONTRIBUTING.md' + - '*.md' + - 'oranda.json' tags: [ 'v*.*.*' ] jobs: diff --git a/.github/workflows/exe-release-prometheuspush.yml b/.github/workflows/exe-release-prometheuspush.yml new file mode 100644 index 00000000..7dc86883 --- /dev/null +++ b/.github/workflows/exe-release-prometheuspush.yml @@ -0,0 +1,76 @@ +name: Build 
exe installer for windows for prometheus-push only version + +on: + push: + paths-ignore: + - 'docs_src/**' + - 'README.md' + - 'CITATION' + - 'book.toml' + - 'CONTRIBUTING.md' + - '*.md' + - 'oranda.json' + tags: [ 'v*.*.*', 'dev*.*.*' ] + branches: [ '336-proper-handling-of-windows-service-management' ] + +env: + WRD_VERSION: v0.0.4 + WRD_BASE_URL: https://github.com/hubblo-org/windows-rapl-driver/releases/download + +jobs: + build_exe_win1011: + name: Build exe installer for windows 10/11/server 2016/server 2019/server 2022 + runs-on: "windows-latest" + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Install Innosoft + run: | + $url = "https://jrsoftware.org/download.php/is.exe" + $dest = "is.exe" + Invoke-WebRequest -Uri $url -OutFile $dest + ls + & "D:\a\scaphandre\scaphandre\$dest" /verysilent /suppressmsgbox + ls "C:\Program Files (x86)\Inno Setup 6\" + - name: Get windows-rapl-driver + shell: pwsh + run: | + $dest = "DriverLoader.exe" + $url = "${{ env.WRD_BASE_URL }}/${{ env.WRD_VERSION }}/DriverLoader.exe" + Invoke-WebRequest -Uri ($url -replace '"', "") -OutFile $dest + $dest = "ScaphandreDrv.cat" + $url = "${{ env.WRD_BASE_URL }}/${{ env.WRD_VERSION }}/ScaphandreDrv.cat" + Invoke-WebRequest -Uri ($url -replace '"', "") -OutFile $dest + $dest = "ScaphandreDrv.sys" + $url = "${{ env.WRD_BASE_URL }}/${{ env.WRD_VERSION }}/ScaphandreDrv.sys" + Invoke-WebRequest -Uri ($url -replace '"', "") -OutFile $dest + $dest = "ScaphandreDrv.inf" + $url = "${{ env.WRD_BASE_URL }}/${{ env.WRD_VERSION }}/ScaphandreDrv.inf" + Invoke-WebRequest -Uri ($url -replace '"', "") -OutFile $dest + - name: Install Rustup + uses: crazy-max/ghaction-chocolatey@v2 + with: + args: install rustup.install --ignore-checksums + - name: Install Rust toolchain + run: | + rustup toolchain install stable-x86_64-pc-windows-msvc + - name: Build Scaphandre + run: | + cargo build --release --no-default-features --features "prometheuspush json" + - name: Build package + run: | + & 
"C:\Program Files (x86)\Inno Setup 6\ISCC.exe" packaging/windows/installer.iss + - name: Upload artifact #Install-PackageProvider -Name NuGet -MinimumVersion 2.8.5.201 -Force + run: | + Set-PSRepository -Name 'PSGallery' -InstallationPolicy Trusted + Install-Module -Confirm:$False -Name AWS.Tools.Installer + Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope LocalMachine + Import-Module AWS.Tools.Installer + Install-AWSToolsModule AWS.Tools.EC2,AWS.Tools.S3 -CleanUp -Confirm:$False -AllowClobber + Set-AWSCredential -AccessKey ${{ secrets.S3_ACCESS_KEY_ID }} -SecretKey ${{ secrets.S3_SECRET_ACCESS_KEY }} -StoreAs default + mv packaging/windows/Output/scaphandre_installer.exe scaphandre_${{ github.ref_name }}_prometheuspush_installer.exe + $clientconfig=@{ + SignatureVersion="s3v4" + ServiceUrl="https://s3.fr-par.scw.cloud" + } + Write-S3Object -EndpointUrl "https://s3.fr-par.scw.cloud" -Region "fr-par" -BucketName "scaphandre" -File scaphandre_${{ github.ref_name }}_prometheuspush_installer.exe -key "x86_64/scaphandre_${{ github.ref_name }}_prometheuspush_installer.exe" -PublicReadOnly -ClientConfig $clientconfig \ No newline at end of file diff --git a/.github/workflows/exe-release.yml b/.github/workflows/exe-release.yml new file mode 100644 index 00000000..afb01c0b --- /dev/null +++ b/.github/workflows/exe-release.yml @@ -0,0 +1,76 @@ +name: Build exe installer for windows for prometheus-push only version + +on: + push: + paths-ignore: + - 'docs_src/**' + - 'README.md' + - 'CITATION' + - 'book.toml' + - 'CONTRIBUTING.md' + - '*.md' + - 'oranda.json' + tags: [ 'v*.*.*', 'dev*.*.*' ] + branches: [ '336-proper-handling-of-windows-service-management' ] + +env: + WRD_VERSION: v0.0.4 + WRD_BASE_URL: https://github.com/hubblo-org/windows-rapl-driver/releases/download + +jobs: + build_exe_win1011: + name: Build exe installer for windows 10/11/server 2016/server 2019/server 2022 + runs-on: "windows-latest" + steps: + - name: Checkout + uses: actions/checkout@v3 + - 
name: Install Innosoft + run: | + $url = "https://jrsoftware.org/download.php/is.exe" + $dest = "is.exe" + Invoke-WebRequest -Uri $url -OutFile $dest + ls + & "D:\a\scaphandre\scaphandre\$dest" /verysilent /suppressmsgbox + ls "C:\Program Files (x86)\Inno Setup 6\" + - name: Get windows-rapl-driver + shell: pwsh + run: | + $dest = "DriverLoader.exe" + $url = "${{ env.WRD_BASE_URL }}/${{ env.WRD_VERSION }}/DriverLoader.exe" + Invoke-WebRequest -Uri ($url -replace '"', "") -OutFile $dest + $dest = "ScaphandreDrv.cat" + $url = "${{ env.WRD_BASE_URL }}/${{ env.WRD_VERSION }}/ScaphandreDrv.cat" + Invoke-WebRequest -Uri ($url -replace '"', "") -OutFile $dest + $dest = "ScaphandreDrv.sys" + $url = "${{ env.WRD_BASE_URL }}/${{ env.WRD_VERSION }}/ScaphandreDrv.sys" + Invoke-WebRequest -Uri ($url -replace '"', "") -OutFile $dest + $dest = "ScaphandreDrv.inf" + $url = "${{ env.WRD_BASE_URL }}/${{ env.WRD_VERSION }}/ScaphandreDrv.inf" + Invoke-WebRequest -Uri ($url -replace '"', "") -OutFile $dest + - name: Install Rustup + uses: crazy-max/ghaction-chocolatey@v2 + with: + args: install rustup.install --ignore-checksums + - name: Install Rust toolchain + run: | + rustup toolchain install stable-x86_64-pc-windows-msvc + - name: Build Scaphandre + run: | + cargo build --release --no-default-features --features "prometheus json" + - name: Build package + run: | + & "C:\Program Files (x86)\Inno Setup 6\ISCC.exe" packaging/windows/installer.iss + - name: Upload artifact #Install-PackageProvider -Name NuGet -MinimumVersion 2.8.5.201 -Force + run: | + Set-PSRepository -Name 'PSGallery' -InstallationPolicy Trusted + Install-Module -Confirm:$False -Name AWS.Tools.Installer + Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope LocalMachine + Import-Module AWS.Tools.Installer + Install-AWSToolsModule AWS.Tools.EC2,AWS.Tools.S3 -CleanUp -Confirm:$False -AllowClobber + Set-AWSCredential -AccessKey ${{ secrets.S3_ACCESS_KEY_ID }} -SecretKey ${{ secrets.S3_SECRET_ACCESS_KEY }} -StoreAs 
default + mv packaging/windows/Output/scaphandre_installer.exe scaphandre_${{ github.ref_name }}_installer.exe + $clientconfig=@{ + SignatureVersion="s3v4" + ServiceUrl="https://s3.fr-par.scw.cloud" + } + Write-S3Object -EndpointUrl "https://s3.fr-par.scw.cloud" -Region "fr-par" -BucketName "scaphandre" -File scaphandre_${{ github.ref_name }}_installer.exe -key "x86_64/scaphandre_${{ github.ref_name }}_installer.exe" -PublicReadOnly -ClientConfig $clientconfig \ No newline at end of file diff --git a/.github/workflows/oranda.yml b/.github/workflows/oranda.yml index 865c3792..0fa63632 100644 --- a/.github/workflows/oranda.yml +++ b/.github/workflows/oranda.yml @@ -30,7 +30,7 @@ on: # If you only want docs to update with releases, disable this one. push: branches: - - main + - dev # Whenever a workflow called "Release" completes, update the docs! # @@ -75,7 +75,7 @@ jobs: - name: Deploy to Github Pages uses: JamesIves/github-pages-deploy-action@v4.4.1 # ONLY if we're on dev (so no PRs or feature branches allowed!) 
- if: ${{ github.ref == 'refs/heads/main' }} + if: ${{ github.ref == 'refs/heads/dev' }} with: branch: gh-pages # Gotta tell the action where to find oranda's output diff --git a/.github/workflows/python_build.yml b/.github/workflows/python_build.yml deleted file mode 100644 index 1a93f310..00000000 --- a/.github/workflows/python_build.yml +++ /dev/null @@ -1,69 +0,0 @@ -name: python_build - -on: - push: - branches: [main] - pull_request: - branches: [main] - -defaults: - run: - working-directory: ./python - -jobs: - lint: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Setup Python - uses: actions/setup-python@v2 - with: - python-version: 3.7 - - name: Check Python - run: | - pip install black isort mypy - make check-python - - name: Install minimal stable with clippy and rustfmt - uses: actions-rs/toolchain@v1 - with: - profile: default - toolchain: stable - override: true - - name: Check Rust - run: make check-rust - - test: - name: Python Build - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - name: Install latest nightly - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - components: rustfmt, clippy - - - uses: Swatinem/rust-cache@v1 - - - uses: actions/setup-python@v3 - with: - python-version: "3.7" - - - name: Build and install scaphandre - run: | - pip install virtualenv - virtualenv venv - source venv/bin/activate - make develop - - - name: Run tests - run: | - source venv/bin/activate - make unit-test - - - name: Build Sphinx documentation - run: | - source venv/bin/activate - make build-documentation \ No newline at end of file diff --git a/.github/workflows/rpm-release-prometheuspush.yml b/.github/workflows/rpm-release-prometheuspush.yml new file mode 100644 index 00000000..ce37bf8f --- /dev/null +++ b/.github/workflows/rpm-release-prometheuspush.yml @@ -0,0 +1,211 @@ +name: Build RPM package for prometheus-push only version + +on: + push: + paths-ignore: + - 'docs_src/**' + - 
'README.md' + - 'CITATION' + - 'book.toml' + - 'CONTRIBUTING.md' + - '*.md' + - 'oranda.json' + tags: [ 'v*.*.*', 'dev*.*.*' ] + +jobs: + build_rpm_rhel9: + name: Build RPM package for RHEL9 + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + - uses: Swatinem/rust-cache@v2 + with: + # The prefix cache key, this can be changed to start a new cache manually. + # default: "v0-rust" + prefix-key: "" + + # A cache key that is used instead of the automatic `job`-based key, + # and is stable over multiple jobs. + # default: empty + shared-key: "" + + # An additional cache key that is added alongside the automatic `job`-based + # cache key and can be used to further differentiate jobs. + # default: empty + key: "" + + # A whitespace separated list of env-var *prefixes* who's value contributes + # to the environment cache key. + # The env-vars are matched by *prefix*, so the default `RUST` var will + # match all of `RUSTC`, `RUSTUP_*`, `RUSTFLAGS`, `RUSTDOC_*`, etc. + # default: "CARGO CC CFLAGS CXX CMAKE RUST" + env-vars: "" + + # The cargo workspaces and target directory configuration. + # These entries are separated by newlines and have the form + # `$workspace -> $target`. The `$target` part is treated as a directory + # relative to the `$workspace` and defaults to "target" if not explicitly given. + # default: ". -> target" + workspaces: "" + + # Additional non workspace directories to be cached, separated by newlines. + cache-directories: "" + + # Determines whether workspace `target` directories are cached. + # If `false`, only the cargo registry will be cached. + # default: "true" + cache-targets: "" + + # Determines if the cache should be saved even when the workflow has failed. + # default: "false" + cache-on-failure: "" + + # Determines which crates are cached. + # If `true` all crates will be cached, otherwise only dependent crates will be cached. + # Useful if additional crates are used for CI tooling. 
+ # default: "false" + cache-all-crates: "" + + # Determiners whether the cache should be saved. + # If `false`, the cache is only restored. + # Useful for jobs where the matrix is additive e.g. additional Cargo features. + # default: "true" + save-if: "" + - name: Install s3cmd + run: sudo apt install python3-pip -y && sudo pip3 install s3cmd awxkit + - name: Get tag + id: tag + uses: devops-actions/action-get-tag@v1.0.2 + with: + strip_v: true # Optional: Remove 'v' character from version + default: "v0.0.0" # Optional: Default version when tag not found + - name: Override version + run: "sed -i 's/Version: .*/Version: ${{steps.tag.outputs.tag}}/' packaging/linux/redhat/rpmbuild/SPECS/scaphandre-prometheuspush-only.spec" + - name: Debug + run: grep Version packaging/linux/redhat/rpmbuild/SPECS/scaphandre-prometheuspush-only.spec + - name: Extract release notes + id: extract-release-notes + uses: ffurrer2/extract-release-notes@v1 + #- name: Display release notes + # run: "echo ${{ steps.extract-release-notes.outputs.release_notes }}" + - name: Edit changelog #TODO commit and push to increase changelog + run: date=$(date "+%a %b %d %Y - "); sed -i "/%changelog/ a * ${date}${{steps.tag.outputs.tag}}/" packaging/linux/redhat/rpmbuild/SPECS/scaphandre-prometheuspush-only.spec + - name: Edit changelog + run: echo " Packaging for version ${{steps.tag.outputs.tag}}" >> packaging/linux/redhat/rpmbuild/SPECS/scaphandre-prometheuspush-only.spec + - name: build RPM package + id: rpm + uses: bpetit/rpmbuild@master + with: + spec_file: "packaging/linux/redhat/rpmbuild/SPECS/scaphandre-prometheuspush-only.spec" + - name: Upload to scw s3 + run: | + s3cmd --access_key="${{ secrets.S3_ACCESS_KEY_ID }}" --secret_key="${{ secrets.S3_SECRET_ACCESS_KEY }}" --region="fr-par" --acl-public --host="s3.fr-par.scw.cloud" --host-bucket="%(bucket).s3.fr-par.scw.cloud" put --recursive ${{ steps.rpm.outputs.rpm_dir_path }} s3://scaphandre/ + - name: Log on AWX + id: login + run: | + 
RAW_RESULT=$(awx --conf.host "${{ secrets.AWX_HOST }}" --conf.username "${{ secrets.AWX_PUBLIC_USER }}" --conf.password "${{ secrets.AWX_PASSWORD }}" login) + export AWX_TOKEN=$(echo $RAW_RESULT | jq .token | tr -d '"') + echo "awx_token=${AWX_TOKEN}" >> $GITHUB_OUTPUT + - name: Install and test RPM package + id: rpmtest + run: | + awx --conf.token ${{ steps.login.outputs.awx_token }} --conf.host ${{ secrets.AWX_HOST }} job_templates launch --extra_vars="{\"github_repository\":\"${GITHUB_REPOSITORY}\",\"github_actor\":\"${GITHUB_ACTOR}\",\"github_workflow\":\"${GITHUB_WORKFLOW}\",\"github_workspace\":\"${GITHUB_WORKSPACE}\",\"github_event_name\":\"${GITHUB_EVENT_NAME}\",\"github_event_path\":\"${GITHUB_EVENT_PATH}\",\"github_sha\":\"${GITHUB_SHA}\",\"github_ref\":\"${GITHUB_REF}\",\"github_head_ref\":\"${GITHUB_HEAD_REF}\",\"github_base_ref\":\"${GITHUB_BASE_REF}\",\"github_server_url\":\"${GITHUB_SERVER_URL}\",\"github_rpm_url\":\"https://scaphandre.s3.fr-par.scw.cloud/x86_64/scaphandre-prometheuspush-${{steps.tag.outputs.tag}}-1.el9.x86_64.rpm\",\"github_package_name\":\"scaphandre-prometheuspush\"}" 19 --monitor + build_rpm_rhel8: + name: Build RPM package for RHEL8 + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + - uses: Swatinem/rust-cache@v2 + with: + # The prefix cache key, this can be changed to start a new cache manually. + # default: "v0-rust" + prefix-key: "" + + # A cache key that is used instead of the automatic `job`-based key, + # and is stable over multiple jobs. + # default: empty + shared-key: "" + + # An additional cache key that is added alongside the automatic `job`-based + # cache key and can be used to further differentiate jobs. + # default: empty + key: "" + + # A whitespace separated list of env-var *prefixes* who's value contributes + # to the environment cache key. + # The env-vars are matched by *prefix*, so the default `RUST` var will + # match all of `RUSTC`, `RUSTUP_*`, `RUSTFLAGS`, `RUSTDOC_*`, etc. 
+ # default: "CARGO CC CFLAGS CXX CMAKE RUST" + env-vars: "" + + # The cargo workspaces and target directory configuration. + # These entries are separated by newlines and have the form + # `$workspace -> $target`. The `$target` part is treated as a directory + # relative to the `$workspace` and defaults to "target" if not explicitly given. + # default: ". -> target" + workspaces: "" + + # Additional non workspace directories to be cached, separated by newlines. + cache-directories: "" + + # Determines whether workspace `target` directories are cached. + # If `false`, only the cargo registry will be cached. + # default: "true" + cache-targets: "" + + # Determines if the cache should be saved even when the workflow has failed. + # default: "false" + cache-on-failure: "" + + # Determines which crates are cached. + # If `true` all crates will be cached, otherwise only dependent crates will be cached. + # Useful if additional crates are used for CI tooling. + # default: "false" + cache-all-crates: "" + + # Determines whether the cache should be saved. + # If `false`, the cache is only restored. + # Useful for jobs where the matrix is additive e.g. additional Cargo features. 
+ # default: "true" + save-if: "" + - name: Install s3cmd + run: sudo apt install python3-pip -y && sudo pip3 install s3cmd awxkit + - name: Get tag + id: tag + uses: devops-actions/action-get-tag@v1.0.2 + with: + strip_v: true # Optional: Remove 'v' character from version + default: "v0.0.0" # Optional: Default version when tag not found + - name: Override version + run: "sed -i 's/Version: .*/Version: ${{steps.tag.outputs.tag}}/' packaging/linux/redhat/rpmbuild/SPECS/scaphandre-prometheuspush-only.spec" + - name: Debug + run: grep Version packaging/linux/redhat/rpmbuild/SPECS/scaphandre-prometheuspush-only.spec + - name: Extract release notes + id: extract-release-notes + uses: ffurrer2/extract-release-notes@v1 + #- name: Display release notes + # run: "echo ${{ steps.extract-release-notes.outputs.release_notes }}" + - name: Edit changelog #TODO commit and push to increase changelog + run: date=$(date "+%a %b %d %Y - "); sed -i "/%changelog/ a * ${date}${{steps.tag.outputs.tag}}/" packaging/linux/redhat/rpmbuild/SPECS/scaphandre-prometheuspush-only.spec + - name: Edit changelog + run: echo " Packaging for version ${{steps.tag.outputs.tag}}" >> packaging/linux/redhat/rpmbuild/SPECS/scaphandre-prometheuspush-only.spec + - name: build RPM package + id: rpm + uses: bpetit/rpmbuild@rhel8 + with: + spec_file: "packaging/linux/redhat/rpmbuild/SPECS/scaphandre-prometheuspush-only.spec" + - name: Upload to scw s3 + run: | + s3cmd --access_key="${{ secrets.S3_ACCESS_KEY_ID }}" --secret_key="${{ secrets.S3_SECRET_ACCESS_KEY }}" --region="fr-par" --acl-public --host="s3.fr-par.scw.cloud" --host-bucket="%(bucket).s3.fr-par.scw.cloud" put --recursive ${{ steps.rpm.outputs.rpm_dir_path }} s3://scaphandre/ + #- name: Log on AWX + # id: login + # run: | + # RAW_RESULT=$(awx --conf.host "${{ secrets.AWX_HOST }}" --conf.username "${{ secrets.AWX_PUBLIC_USER }}" --conf.password "${{ secrets.AWX_PASSWORD }}" login) + # export AWX_TOKEN=$(echo $RAW_RESULT | jq .token | tr -d '"') + # 
echo "awx_token=${AWX_TOKEN}" >> $GITHUB_OUTPUT + #- name: Install and test RPM package + # id: rpmtest + # run: | + # awx --conf.token ${{ steps.login.outputs.awx_token }} --conf.host ${{ secrets.AWX_HOST }} job_templates launch --extra_vars="{\"github_repository\":\"${GITHUB_REPOSITORY}\",\"github_actor\":\"${GITHUB_ACTOR}\",\"github_workflow\":\"${GITHUB_WORKFLOW}\",\"github_workspace\":\"${GITHUB_WORKSPACE}\",\"github_event_name\":\"${GITHUB_EVENT_NAME}\",\"github_event_path\":\"${GITHUB_EVENT_PATH}\",\"github_sha\":\"${GITHUB_SHA}\",\"github_ref\":\"${GITHUB_REF}\",\"github_head_ref\":\"${GITHUB_HEAD_REF}\",\"github_base_ref\":\"${GITHUB_BASE_REF}\",\"github_server_url\":\"${GITHUB_SERVER_URL}\",\"github_rpm_url\":\"https://scaphandre.s3.fr-par.scw.cloud/x86_64/scaphandre-prometheuspush-${{steps.tag.outputs.tag}}-1.el8.x86_64.rpm\",\"github_package_name\":\"scaphandre-prometheuspush\"}" 19 --monitor diff --git a/.github/workflows/rpm-release.yml b/.github/workflows/rpm-release.yml new file mode 100644 index 00000000..0bcdc99e --- /dev/null +++ b/.github/workflows/rpm-release.yml @@ -0,0 +1,113 @@ +name: Build RPM package + +on: + push: + paths-ignore: + - 'docs_src/**' + - 'README.md' + - 'CITATION' + - 'book.toml' + - 'CONTRIBUTING.md' + - '*.md' + - 'oranda.json' + tags: [ 'v*.*.*', 'dev*.*.*' ] + +jobs: + build_rpm: + name: Build RPM package + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + - uses: Swatinem/rust-cache@v2 + with: + # The prefix cache key, this can be changed to start a new cache manually. + # default: "v0-rust" + prefix-key: "" + + # A cache key that is used instead of the automatic `job`-based key, + # and is stable over multiple jobs. + # default: empty + shared-key: "" + + # An additional cache key that is added alongside the automatic `job`-based + # cache key and can be used to further differentiate jobs. 
+ # default: empty + key: "" + + # A whitespace separated list of env-var *prefixes* whose value contributes + # to the environment cache key. + # The env-vars are matched by *prefix*, so the default `RUST` var will + # match all of `RUSTC`, `RUSTUP_*`, `RUSTFLAGS`, `RUSTDOC_*`, etc. + # default: "CARGO CC CFLAGS CXX CMAKE RUST" + env-vars: "" + + # The cargo workspaces and target directory configuration. + # These entries are separated by newlines and have the form + # `$workspace -> $target`. The `$target` part is treated as a directory + # relative to the `$workspace` and defaults to "target" if not explicitly given. + # default: ". -> target" + workspaces: "" + + # Additional non workspace directories to be cached, separated by newlines. + cache-directories: "" + + # Determines whether workspace `target` directories are cached. + # If `false`, only the cargo registry will be cached. + # default: "true" + cache-targets: "" + + # Determines if the cache should be saved even when the workflow has failed. + # default: "false" + cache-on-failure: "" + + # Determines which crates are cached. + # If `true` all crates will be cached, otherwise only dependent crates will be cached. + # Useful if additional crates are used for CI tooling. + # default: "false" + cache-all-crates: "" + + # Determines whether the cache should be saved. + # If `false`, the cache is only restored. + # Useful for jobs where the matrix is additive e.g. additional Cargo features. 
+ # default: "true" + save-if: "" + - name: Install s3cmd + run: sudo apt install python3-pip -y && sudo pip3 install s3cmd awxkit + - name: Get tag + id: tag + uses: devops-actions/action-get-tag@v1.0.2 + with: + strip_v: true # Optional: Remove 'v' character from version + default: "v0.0.0" # Optional: Default version when tag not found + - name: Override version + run: "sed -i 's/Version: .*/Version: ${{steps.tag.outputs.tag}}/' packaging/linux/redhat/rpmbuild/SPECS/scaphandre.spec" + - name: Debug + run: grep Version packaging/linux/redhat/rpmbuild/SPECS/scaphandre.spec + - name: Extract release notes + id: extract-release-notes + uses: ffurrer2/extract-release-notes@v1 + #- name: Display release notes + # run: "echo ${{ steps.extract-release-notes.outputs.release_notes }}" + - name: Edit changelog #TODO commit and push to increase changelog + run: date=$(date "+%a %b %d %Y - "); sed -i "/%changelog/ a * ${date}${{steps.tag.outputs.tag}}/" packaging/linux/redhat/rpmbuild/SPECS/scaphandre.spec + - name: Edit changelog + run: echo " Packaging for version ${{steps.tag.outputs.tag}}" >> packaging/linux/redhat/rpmbuild/SPECS/scaphandre.spec + - name: build RPM package + id: rpm + uses: bpetit/rpmbuild@master + with: + spec_file: "packaging/linux/redhat/rpmbuild/SPECS/scaphandre.spec" + - name: Upload to scw s3 + run: | + s3cmd --access_key="${{ secrets.S3_ACCESS_KEY_ID }}" --secret_key="${{ secrets.S3_SECRET_ACCESS_KEY }}" --region="fr-par" --acl-public --host="s3.fr-par.scw.cloud" --host-bucket="%(bucket).s3.fr-par.scw.cloud" put --recursive ${{ steps.rpm.outputs.rpm_dir_path }} s3://scaphandre/ + - name: Log on AWX + id: login + run: | + RAW_RESULT=$(awx --conf.host "${{ secrets.AWX_HOST }}" --conf.username "${{ secrets.AWX_PUBLIC_USER }}" --conf.password "${{ secrets.AWX_PASSWORD }}" login) + export AWX_TOKEN=$(echo $RAW_RESULT | jq .token | tr -d '"') + echo "awx_token=${AWX_TOKEN}" >> $GITHUB_OUTPUT + - name: Install and test RPM package + id: rpmtest + run: | 
+ awx --conf.token ${{ steps.login.outputs.awx_token }} --conf.host ${{ secrets.AWX_HOST }} job_templates launch --extra_vars="{\"github_repository\":\"${GITHUB_REPOSITORY}\",\"github_actor\":\"${GITHUB_ACTOR}\",\"github_workflow\":\"${GITHUB_WORKFLOW}\",\"github_workspace\":\"${GITHUB_WORKSPACE}\",\"github_event_name\":\"${GITHUB_EVENT_NAME}\",\"github_event_path\":\"${GITHUB_EVENT_PATH}\",\"github_sha\":\"${GITHUB_SHA}\",\"github_ref\":\"${GITHUB_REF}\",\"github_head_ref\":\"${GITHUB_HEAD_REF}\",\"github_base_ref\":\"${GITHUB_BASE_REF}\",\"github_server_url\":\"${GITHUB_SERVER_URL}\",\"github_rpm_url\":\"https://scaphandre.s3.fr-par.scw.cloud/x86_64/scaphandre-${{steps.tag.outputs.tag}}-1.el9.x86_64.rpm\"}" 19 --monitor \ No newline at end of file diff --git a/.gitignore b/.gitignore index ecc91c21..98738b5b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ # Generated by Cargo # will have compiled files and executables /target/ +**/Output/ # These are backup files generated by rustfmt **/*.rs.bk @@ -24,3 +25,11 @@ # Used by integration tests /integration_tests + +# Packaging folder : avoir build artefacts +packaging/*/BUILD/* +packaging/*/BUILDROOT/* +*.rpm +*.tar.gz +*.swp +packaging/*/BUILDROOT/* diff --git a/CHANGELOG.md b/CHANGELOG.md index d1a4fa95..1ab181ea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,43 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 Please check dev branch. 
+## [1.0.0](https://github.com/hubblo-org/scaphandre/releases/tag/v1.0.0) + +### Added + +- Host resources consumption metrics : scaph_host_swap_total_bytes, scaph_host_swap_free_bytes, scaph_host_memory_free_bytes, scaph_host_memory_available_bytes, scaph_host_memory_total_bytes, scaph_host_disk_total_bytes, scaph_host_disk_available_bytes, scaph_host_cpu_frequency, scaph_host_load_avg_fifteen, scaph_host_load_avg_five, scaph_host_load_avg_one - see https://hubblo-org.github.io/scaphandre-documentation/references/metrics.html for details, https://github.com/hubblo-org/scaphandre/issues/271 and https://github.com/hubblo-org/scaphandre/pull/278 for reference +- Per-process resource consumption metrics : scaph_process_cpu_usage_percentage, scaph_process_memory_bytes, scaph_process_memory_virtual_bytes, scaph_process_disk_total_write_bytes, scaph_process_disk_write_bytes, scaph_process_disk_read_bytes, scaph_process_disk_total_read_bytes - see https://hubblo-org.github.io/scaphandre-documentation/references/metrics.html for details, https://github.com/hubblo-org/scaphandre/issues/141 and https://github.com/hubblo-org/scaphandre/pull/274 for reference +- Added service monitor to helm chart, see https://github.com/hubblo-org/scaphandre/pull/230, thanks @mmadoo +- Added packaging folder with sample systemd services files, see https://github.com/hubblo-org/scaphandre/pull/317 and https://github.com/hubblo-org/scaphandre/issues/261, thanks @jcaesar +- Added prometheus push mode exporter, see https://github.com/hubblo-org/scaphandre/issues/269 +- Added RPM build github action workflow, see https://github.com/hubblo-org/scaphandre/issues/310 +- Added RAPL mmio metric, when the domain is present, see https://github.com/hubblo-org/scaphandre/issues/318 and https://github.com/hubblo-org/scaphandre/pull/329 +- Added specific RAPL PSYS metric, when available, see https://github.com/hubblo-org/scaphandre/issues/316 and https://github.com/hubblo-org/scaphandre/pull/329 +- Filtering 
per process in JSON exporter, see https://github.com/hubblo-org/scaphandre/issues/216 +- Github action workflow to build Windows EXE installer on each release, see https://github.com/hubblo-org/scaphandre/pull/333 +- Github action workflow to build DEB package on each release, see https://github.com/hubblo-org/scaphandre/pull/352, thanks @bdromard +- Added warning messages when powercap files permissions won't allow Scaphandre to read RAPL data, see https://github.com/hubblo-org/scaphandre/issues/214 + +### Changed + +- Global power metrics have changed and could give higher numbers than previously. Please have a look at the [documentation](https://hubblo-org.github.io/scaphandre-documentation/explanations/host_metrics.html). +- `scaph_self_mem_total_program_size`, `scaph_self_mem_resident_set_size` and `scaph_self_mem_shared_resident_size` are replaced by `scaph_self_memory_bytes` and `scaph_self_memory_virtual_bytes`, see https://github.com/hubblo-org/scaphandre/pull/274/files +- Refactored warp10 exporter, see https://github.com/hubblo-org/scaphandre/pull/291 and https://github.com/hubblo-org/scaphandre/issues/105 +- Refactored exporters creation with clap4, see https://github.com/hubblo-org/scaphandre/pull/292, thanks @TheElectronWill +- Default docker-compose sets a privileged container now, otherwise it doesn't work in an apparmor context, see https://github.com/hubblo-org/scaphandre/issues/135 and https://github.com/hubblo-org/scaphandre/commit/a1a06ea280b8e66067b2c3b73ac08a377604eb61 +- Moved from procfs, to sysinfo, see https://github.com/hubblo-org/scaphandre/issues/267 + +### Fixed + +- Fixed doc broken links, see https://github.com/hubblo-org/scaphandre/pull/259 and https://github.com/hubblo-org/scaphandre/issues/288, thanks @homersimpsons +- Now works on more than 1 vcpu Qemu/KVM virtual machines, see https://github.com/hubblo-org/scaphandre/issues/133 and https://github.com/hubblo-org/scaphandre/pull/207, thanks @tawalaya +- Fix for Kubernetes, don't 
create PSP if version of kubernetes is above 1.25, see https://github.com/hubblo-org/scaphandre/pull/250, thanks @rossf7 +- Fixed bug in --containers flag, see https://github.com/hubblo-org/scaphandre/pull/326, thanks rossf7 +- Fix on qemu exporter, see https://github.com/hubblo-org/scaphandre/issues/260 +- Fixed panics on regex filters, see https://github.com/hubblo-org/scaphandre/issues/295 +- Fixed invalid escape sequence in Prometheus exporter, see https://github.com/hubblo-org/scaphandre/issues/204, thanks @demeringo +- Removed broken python bindings, until it is fixed, see https://github.com/hubblo-org/scaphandre/pull/315 and https://github.com/hubblo-org/scaphandre/issues/296 + ## [0.5.0](https://github.com/hubblo-org/scaphandre/releases/tag/v0.5.0) ### Changed diff --git a/CITATION b/CITATION index b61b20ed..f150cd67 100644 --- a/CITATION +++ b/CITATION @@ -1,7 +1,7 @@ @software{scaphandre, author = {Benoit Petit}, title = {scaphandre}, - year = 2021, - version = {v0.3}, + year = 2023, + version = {v1.0}, url = {https://github.com/hubblo-org/scaphandre} } diff --git a/Cargo.lock b/Cargo.lock index 8a359b91..94da456c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10,20 +10,20 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aho-corasick" -version = "0.7.15" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" +checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04" dependencies = [ "memchr", ] [[package]] -name = "ansi_term" -version = "0.11.0" +name = "android_system_properties" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" dependencies = [ - "winapi", + "libc", ] [[package]] @@ -35,11 
+35,60 @@ dependencies = [ "winapi", ] +[[package]] +name = "anstream" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e579a7752471abc2a8268df8b20005e3eadd975f585398f17efcfd8d4927371" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is-terminal", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41ed9a86bf92ae6580e0a31281f65a1b1d867c0cc68d5346e2ae128dddfa6a7d" + +[[package]] +name = "anstyle-parse" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e765fd216e48e067936442276d1d57399e37bce53c264d6fefbe298080cb57ee" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +dependencies = [ + "windows-sys 0.48.0", +] + +[[package]] +name = "anstyle-wincon" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bcd8291a340dd8ac70e18878bc4501dd7b4ff970cfa21c207d36ece51ea88fd" +dependencies = [ + "anstyle", + "windows-sys 0.48.0", +] + [[package]] name = "async-channel" -version = "1.6.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2114d64672151c0c5eaa5e131ec84a74f06e1e559830dabba01ca30605d66319" +checksum = "cf46fee83e5ccffc220104713af3292ff9bc7c64c7de289f66dae8e38d826833" dependencies = [ "concurrent-queue", "event-listener", @@ -52,40 +101,40 @@ version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ - "hermit-abi", + "hermit-abi 0.1.19", "libc", "winapi", ] [[package]] name = "autocfg" -version = "1.0.1" +version = 
"1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] -name = "base-x" -version = "0.2.8" +name = "base64" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] -name = "base64" -version = "0.13.0" +name = "bit_field" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "dc827186963e592360843fb5ba4b973e145841266c1357f7180c43526f2e5b61" [[package]] name = "bitflags" -version = "1.2.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bumpalo" -version = "3.6.1" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe" +checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535" [[package]] name = "byteorder" @@ -95,21 +144,21 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.0.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" [[package]] -name = "cache-padded" -version = "1.1.1" +name = "castaway" +version = "0.1.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" +checksum = "a2698f953def977c68f935bb0dfa959375ad4638570e969e2f1e9f433cbf1af6" [[package]] name = "cc" -version = "1.0.67" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" [[package]] name = "cfg-if" @@ -119,33 +168,79 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.19" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" dependencies = [ - "libc", + "iana-time-zone", + "js-sys", "num-integer", "num-traits", "serde", - "time 0.1.44", + "time 0.1.45", + "wasm-bindgen", "winapi", ] [[package]] name = "clap" -version = "2.33.3" +version = "4.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" +checksum = "956ac1f6381d8d82ab4684768f89c0ea3afe66925ceadb4eeb3fc452ffc55d62" dependencies = [ - "ansi_term 0.11.0", - "atty", + "clap_builder", + "clap_derive", + "once_cell", +] + +[[package]] +name = "clap_builder" +version = "4.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84080e799e54cff944f4b4a4b0e71630b0e0443b25b985175c7dddc1a859b749" +dependencies = [ + "anstream", + "anstyle", "bitflags", - "strsim 0.8.0", - "textwrap", + "clap_lex", + "once_cell", + "strsim", +] + +[[package]] +name = "clap_derive" +version = "4.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3f9644cd56d6b87dbe899ef8b053e331c0637664e9e21a33dfcdc36093f5c5c4" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.15", +] + +[[package]] +name = "clap_lex" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a2dd5a6fe8c6e3502f568a6353e5273bbb15193ad9a89e457b9970798efbea1" + +[[package]] +name = "codespan-reporting" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" +dependencies = [ + "termcolor", "unicode-width", - "vec_map", ] +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + [[package]] name = "colored" version = "2.0.0" @@ -159,39 +254,44 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "1.2.2" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3" +checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" dependencies = [ - "cache-padded", + "crossbeam-utils", ] [[package]] -name = "const_fn" -version = "0.4.6" +name = "core-foundation-sys" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076a6803b0dacd6a88cfe64deba628b01533ff5ef265687e6938280c1afd0a28" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] -name = "core-foundation-sys" -version = "0.8.3" +name = "core_affinity" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "622892f5635ce1fc38c8f16dfc938553ed64af482edb5e150bf4caedbfcb2304" +dependencies = [ + "libc", + "num_cpus", + "winapi", +] [[package]] name = "crc32fast" 
-version = "1.3.0" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "738c290dfaea84fc1ca15ad9c168d083b05a714e1efddd8edaab678dc28d2836" +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ "cfg-if", ] [[package]] name = "crossbeam-channel" -version = "0.5.1" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if", "crossbeam-utils", @@ -199,9 +299,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if", "crossbeam-epoch", @@ -210,32 +310,31 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.5" +version = "0.9.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" +checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" dependencies = [ + "autocfg", "cfg-if", "crossbeam-utils", - "lazy_static", "memoffset", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.5" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d82cfc11ce7f2c3faef78d8a684447b40d503d9681acebed6cb728d45940c4db" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if", - "lazy_static", ] [[package]] name = "curl" -version = "0.4.38" +version = "0.4.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"003cb79c1c6d1c93344c7e1201bb51c2148f24ec2bd9c253709d6b2efb796515" +checksum = "509bd11746c7ac09ebd19f0b17782eae80aadee26237658a6b4808afb5c11a22" dependencies = [ "curl-sys", "libc", @@ -248,9 +347,9 @@ dependencies = [ [[package]] name = "curl-sys" -version = "0.4.47+curl-7.79.0" +version = "0.4.61+curl-8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ab94a47d9b61f2d905beb7a3d46aba7704c9f1dfcf84e7d178998d9e95f7989" +checksum = "14d05c10f541ae6f3bc5b3d923c20001f47db7d5f0b2bc6ad16490133842db79" dependencies = [ "cc", "libc", @@ -262,6 +361,50 @@ dependencies = [ "winapi", ] +[[package]] +name = "cxx" +version = "1.0.94" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f61f1b6389c3fe1c316bf8a4dccc90a38208354b330925bce1f74a6c4756eb93" +dependencies = [ + "cc", + "cxxbridge-flags", + "cxxbridge-macro", + "link-cplusplus", +] + +[[package]] +name = "cxx-build" +version = "1.0.94" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12cee708e8962df2aeb38f594aae5d827c022b6460ac71a7a3e2c3c2aae5a07b" +dependencies = [ + "cc", + "codespan-reporting", + "once_cell", + "proc-macro2", + "quote", + "scratch", + "syn 2.0.15", +] + +[[package]] +name = "cxxbridge-flags" +version = "1.0.94" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7944172ae7e4068c533afbb984114a56c46e9ccddda550499caa222902c7f7bb" + +[[package]] +name = "cxxbridge-macro" +version = "1.0.94" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.15", +] + [[package]] name = "dirs" version = "3.0.2" @@ -273,21 +416,15 @@ dependencies = [ [[package]] name = "dirs-sys" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03d86534ed367a67548dc68113a0f5db55432fdfbb6e6f9d77704397d95d5780" 
+checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" dependencies = [ "libc", "redox_users", "winapi", ] -[[package]] -name = "discard" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" - [[package]] name = "docker-sync" version = "0.1.2" @@ -310,54 +447,67 @@ dependencies = [ "lazy_static", "regex", "serde", - "strsim 0.10.0", + "strsim", ] -[[package]] -name = "dtoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" - [[package]] name = "either" -version = "1.6.1" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91" [[package]] name = "encoding_rs" -version = "0.8.28" +version = "0.8.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" +checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" dependencies = [ "cfg-if", ] +[[package]] +name = "errno" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" +dependencies = [ + "errno-dragonfly", + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "event-listener" -version = "2.5.1" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "fastrand" -version = "1.4.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca5faf057445ce5c9d4329e382b2ce7ca38550ef3b73a5348362d5f24e0c7fe3" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" dependencies = [ "instant", ] [[package]] name = "flate2" -version = "1.0.22" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f" +checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" dependencies = [ - "cfg-if", "crc32fast", - "libc", "miniz_oxide", ] @@ -384,40 +534,39 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" dependencies = [ - "matches", "percent-encoding", ] [[package]] name = "futures-channel" -version = "0.3.13" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c2dd2df839b57db9ab69c2c9d8f3e8c81984781937fe2807dc6dcf3b2ad2939" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", ] [[package]] name = "futures-core" -version = "0.3.13" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15496a72fabf0e62bdc3df11a59a3787429221dd0710ba8ef163d6f7a9112c94" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" [[package]] name = "futures-io" -version = "0.3.13" +version = "0.3.28" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71c2c65c57704c32f5241c1223167c2c3294fd34ac020c807ddbe6db287ba59" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-lite" -version = "1.11.3" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4481d0cd0de1d204a4fa55e7d45f07b1d958abcb06714b3446438e2eff695fb" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" dependencies = [ "fastrand", "futures-core", @@ -430,21 +579,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.13" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85754d98985841b7d4f5e8e6fbfa4a4ac847916893ec511a2917ccd8525b8bb3" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.13" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa189ef211c15ee602667a6fcfe1c1fd9e07d42250d2156382820fba33c9df80" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-util" -version = "0.3.13" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1812c7ab8aedf8d6f2701a43e1243acdbcc2b36ab26e2ad421eb99ac963d96d1" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures-core", "futures-task", @@ -465,20 +614,20 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.3" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" dependencies = [ "cfg-if", "libc", - "wasi 0.10.0+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", ] [[package]] name 
= "h2" -version = "0.3.4" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7f3675cfef6a30c8031cf9e6493ebdc3bb3272a3fea3923c4210d1830e6a472" +checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" dependencies = [ "bytes", "fnv", @@ -495,19 +644,40 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.9.1" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "hermit-abi" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] [[package]] name = "hermit-abi" -version = "0.1.18" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" +checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" dependencies = [ "libc", ] +[[package]] +name = "hermit-abi" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286" + [[package]] name = "hex" version = "0.4.3" @@ -527,9 +697,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.4" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ "bytes", "fnv", @@ -538,9 +708,9 
@@ dependencies = [ [[package]] name = "http-body" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "399c583b2979440c60be0821a6199eca73bc3c8dcd9d070d75ac726e2c6186e5" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", @@ -549,21 +719,21 @@ dependencies = [ [[package]] name = "httparse" -version = "1.5.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.13" +version = "0.14.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15d1cfb9e4f68655fa04c01f59edb405b6074a0f7118ea881e5026e4a1cd8593" +checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" dependencies = [ "bytes", "futures-channel", @@ -583,22 +753,45 @@ dependencies = [ "want", ] +[[package]] +name = "iana-time-zone" +version = "0.1.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0722cd7114b7de04316e7ea5456a0bbb20e4adb46fd27a3697adb812cff0f37c" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows 0.48.0", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +dependencies = [ + "cxx", + "cxx-build", +] + [[package]] name = "idna" 
-version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89829a5d69c23d348314a7ac337fe39173b61149a9864deabd260983aed48c21" +checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" dependencies = [ - "matches", "unicode-bidi", "unicode-normalization", ] [[package]] name = "indexmap" -version = "1.6.2" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824845a0bf897a9042383849b02c1bc219c2383772efcd5c6f9766fa4b81aef3" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown", @@ -606,20 +799,44 @@ dependencies = [ [[package]] name = "instant" -version = "0.1.9" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if", ] +[[package]] +name = "io-lifetimes" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c66c74d2ae7e79a5a8f7ac924adbe38ee42a859c6539ad869eb51f0b52dc220" +dependencies = [ + "hermit-abi 0.3.1", + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "is-terminal" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" +dependencies = [ + "hermit-abi 0.3.1", + "io-lifetimes", + "rustix 0.37.13", + "windows-sys 0.48.0", +] + [[package]] name = "isahc" -version = "1.5.0" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "431445cb4ba85a80cb1438a9ae8042dadb78ae4046ecee89ad027b614aa0ddb7" +checksum = "334e04b4d781f436dc315cb1e7515bd96826426345d498149e4bde36b67f8ee9" dependencies = [ "async-channel", + "castaway", "crossbeam-utils", "curl", "curl-sys", @@ -643,24 
+860,24 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.7" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "js-sys" -version = "0.3.49" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc15e39392125075f60c95ba416f5381ff6c3a948ff02ab12464715adf56c821" +checksum = "445dde2150c55e483f3d8416706b97ec8e8237c307e5b7b4b8dd15e6af2a0730" dependencies = [ "wasm-bindgen", ] [[package]] name = "k8s-openapi" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "748acc444200aa3528dc131a8048e131a9e75a611a52d152e276e99199313d1a" +checksum = "4f8de9873b904e74b3533f77493731ee26742418077503683db44e1b3c54aa5c" dependencies = [ "base64", "bytes", @@ -700,15 +917,15 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.112" +version = "0.2.142" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125" +checksum = "6a987beff54b60ffa6d51982e1aa1146bc42f19bd26be28b0586f252fccf5317" [[package]] name = "libnghttp2-sys" -version = "0.1.6+1.43.0" +version = "0.1.7+1.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0af55541a8827e138d59ec9e5877fb6095ece63fb6f4da45e7491b4fbd262855" +checksum = "57ed28aba195b38d5ff02b9170cbff627e336a20925e43b4945390401c5dc93f" dependencies = [ "cc", "libc", @@ -716,9 +933,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.2" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655" +checksum = 
"9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" dependencies = [ "cc", "libc", @@ -726,26 +943,48 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "link-cplusplus" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" +dependencies = [ + "cc", +] + [[package]] name = "linked-hash-map" -version = "0.5.4" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" + +[[package]] +name = "linux-raw-sys" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" + +[[package]] +name = "linux-raw-sys" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +checksum = "9b085a4f2cde5781fc4b1717f2e86c62f5cda49de7ba99a7c2eae02b61c9064c" [[package]] name = "lock_api" -version = "0.4.2" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312" +checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" dependencies = [ + "autocfg", "scopeguard", ] [[package]] name = "log" -version = "0.4.14" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ "cfg-if", ] @@ -756,7 +995,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60d8de15ae71e760bce7f05447f85f73624fe0d3b1e4c5a63ba5d4cb0748d374" dependencies = [ - "ansi_term 0.12.1", + "ansi_term", "atty", "log", ] @@ 
-767,79 +1006,62 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" -[[package]] -name = "matches" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" - [[package]] name = "memchr" -version = "2.3.4" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" -version = "0.6.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" dependencies = [ "autocfg", ] [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "miniz_oxide" -version = "0.4.4" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" +checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" dependencies = [ "adler", - "autocfg", ] [[package]] name = "mio" -version = "0.7.13" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c2bdb6314ec10835cd3293dd268473a835c02b7b352e788be788b3c6ca6bb16" +checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9" dependencies = [ "libc", "log", - "miow", - "ntapi", - "winapi", -] - -[[package]] -name = "miow" 
-version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" -dependencies = [ - "winapi", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.45.0", ] [[package]] name = "ntapi" -version = "0.3.6" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" dependencies = [ "winapi", ] [[package]] name = "num-integer" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ "autocfg", "num-traits", @@ -847,56 +1069,67 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", ] [[package]] name = "num_cpus" -version = "1.13.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" dependencies = [ - "hermit-abi", + "hermit-abi 0.2.6", "libc", ] [[package]] name = "once_cell" -version = "1.7.2" +version = "1.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" +checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "openssl" -version = "0.10.36" +version = 
"0.10.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d9facdb76fec0b73c406f125d44d86fdad818d66fef0531eec9233ca425ff4a" +checksum = "97ea2d98598bf9ada7ea6ee8a30fb74f9156b63bbe495d64ec2b87c269d2dda3" dependencies = [ "bitflags", "cfg-if", "foreign-types", "libc", "once_cell", + "openssl-macros", "openssl-sys", ] +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.15", +] + [[package]] name = "openssl-probe" -version = "0.1.2" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.66" +version = "0.9.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1996d2d305e561b70d1ee0c53f1542833f4e1ac6ce9a6708b6ff2738ca67dc82" +checksum = "992bac49bdbab4423199c654a5515bd2a6c6a23bf03f2dd3bdb7e5ae6259bc69" dependencies = [ - "autocfg", "cc", "libc", "pkg-config", @@ -905,75 +1138,73 @@ dependencies = [ [[package]] name = "ordered-float" -version = "2.8.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97c9d06878b3a851e8026ef94bf7fef9ba93062cd412601da4d9cf369b1cc62d" +checksum = "7940cf2ca942593318d07fcf2596cdca60a85c9e7fab408a5e21a4f9dcd40d87" dependencies = [ "num-traits", ] [[package]] name = "parking" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" +checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" [[package]] name = "parking_lot" -version = "0.11.1" +version = "0.12.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ - "instant", "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" -version = "0.8.3" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" +checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ "cfg-if", - "instant", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", - "winapi", + "windows-sys 0.45.0", ] [[package]] name = "percent-encoding" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pin-project" -version = "1.0.6" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc174859768806e91ae575187ada95c91a29e96a98dc5d2cd9a1fed039501ba6" +checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.6" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a490329918e856ed1b083f244e3bfe2d8c4f336407e4ea9e1a9f479ff09049e5" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "pin-project-lite" -version = "0.2.6" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc0e1f259c92177c30a4c9d177246edd0a3568b25756a977d0632cf8fa37e905" +checksum = 
"e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" [[package]] name = "pin-utils" @@ -983,49 +1214,46 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.19" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" +checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" [[package]] name = "polling" -version = "2.0.3" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fc12d774e799ee9ebae13f4076ca003b40d18a11ac0f3641e6f899618580b7b" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" dependencies = [ + "autocfg", + "bitflags", "cfg-if", + "concurrent-queue", "libc", "log", - "wepoll-sys", - "winapi", + "pin-project-lite", + "windows-sys 0.48.0", ] [[package]] name = "ppv-lite86" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" - -[[package]] -name = "proc-macro-hack" -version = "0.5.19" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro2" -version = "1.0.24" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" +checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" dependencies = [ - "unicode-xid", + "unicode-ident", ] [[package]] name = "procfs" -version = "0.12.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0941606b9934e2d98a3677759a971756eb821f75764d0e0d26946d08e74d9104" +checksum = "943ca7f9f29bab5844ecd8fdb3992c5969b6622bb9609b9502fef9b4310e3f1f" dependencies = [ "bitflags", "byteorder", @@ -1033,20 +1261,20 @@ dependencies = [ "flate2", "hex", "lazy_static", - "libc", + "rustix 0.36.12", ] [[package]] name = "protobuf" -version = "2.22.1" +version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b7f4a129bb3754c25a4e04032a90173c68f85168f77118ac4cb4936e7f06f92" +checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" [[package]] name = "quote" -version = "1.0.9" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" +checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ "proc-macro2", ] @@ -1059,21 +1287,9 @@ checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ "getrandom 0.1.16", "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc 0.2.0", -] - -[[package]] -name = "rand" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" -dependencies = [ - "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.3", - "rand_hc 0.3.1", + "rand_chacha", + "rand_core", + "rand_hc", ] [[package]] @@ -1083,17 +1299,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" dependencies = [ "ppv-lite86", - "rand_core 0.5.1", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core 0.6.3", + "rand_core", ] [[package]] @@ -1105,82 +1311,80 @@ 
dependencies = [ "getrandom 0.1.16", ] -[[package]] -name = "rand_core" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" -dependencies = [ - "getrandom 0.2.3", -] - [[package]] name = "rand_hc" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" dependencies = [ - "rand_core 0.5.1", + "rand_core", ] [[package]] -name = "rand_hc" -version = "0.3.1" +name = "raw-cpuid" +version = "10.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" +checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" dependencies = [ - "rand_core 0.6.3", + "bitflags", ] [[package]] name = "rayon" -version = "1.5.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90" +checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" dependencies = [ - "autocfg", - "crossbeam-deque", "either", "rayon-core", ] [[package]] name = "rayon-core" -version = "1.9.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e" +checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" dependencies = [ "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", - "lazy_static", "num_cpus", ] [[package]] name = "redox_syscall" -version = "0.2.5" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +dependencies = [ + "bitflags", +] + +[[package]] +name = "redox_syscall" +version = "0.3.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "94341e4e44e24f6b591b59e47a8a027df12e008d73fd5672dbea9cc22f4507d9" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" dependencies = [ "bitflags", ] [[package]] name = "redox_users" -version = "0.4.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" +checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.3", - "redox_syscall", + "getrandom 0.2.9", + "redox_syscall 0.2.16", + "thiserror", ] [[package]] name = "regex" -version = "1.4.5" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957056ecddbeba1b26965114e191d2e8589ce74db242b6ea25fc4062427a5c19" +checksum = "ac6cf59af1067a3fb53fbe5c88c053764e930f932be1d71d3ffe032cbe147f59" dependencies = [ "aho-corasick", "memchr", @@ -1189,18 +1393,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5f089152e60f62d28b835fbff2cd2e8dc0baf1ac13343bef92ab7eed84548" - -[[package]] -name = "remove_dir_all" -version = "0.5.3" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] +checksum = "b6868896879ba532248f33598de5181522d8b3d9d724dfd230911e1a7d4822f5" [[package]] name = "riemann_client" @@ -1234,19 +1429,38 @@ dependencies = [ ] [[package]] -name = "rustc_version" -version = "0.2.3" +name = "rustix" +version = "0.36.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0af200a3324fa5bcd922e84e9b55a298ea9f431a489f01961acdebc6e908f25" +dependencies = [ + "bitflags", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys 0.1.4", + "windows-sys 
0.45.0", +] + +[[package]] +name = "rustix" +version = "0.37.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +checksum = "f79bef90eb6d984c72722595b5b1348ab39275a5e5123faca6863bf07d75a4e0" dependencies = [ - "semver", + "bitflags", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys 0.3.3", + "windows-sys 0.48.0", ] [[package]] name = "rustls" -version = "0.19.0" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" +checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ "base64", "log", @@ -1257,18 +1471,18 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09700171bbcc799d113f2c675314d6005c3dc035f3e7307cf3e7fd459ccbe246" +checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" dependencies = [ "base64", ] [[package]] name = "ryu" -version = "1.0.5" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "scaphandre" @@ -1277,35 +1491,39 @@ dependencies = [ "chrono", "clap", "colored", + "core_affinity", "docker-sync", "hostname", "hyper", + "isahc", "k8s-sync", "log", "loggerv", "ordered-float", "procfs", "protobuf", - "rand 0.7.3", + "rand", + "raw-cpuid", "regex", "riemann_client", "serde", "serde_json", "sysinfo", - "time 0.2.26", + "time 0.3.20", "tokio", "warp10", - "windows", + "windows 0.27.0", + "windows-service", + "x86", ] [[package]] name = "schannel" -version = "0.1.19" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" dependencies = [ - "lazy_static", - "winapi", + "windows-sys 0.42.0", ] [[package]] @@ -1315,35 +1533,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] -name = "sct" -version = "0.6.0" +name = "scratch" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" -dependencies = [ - "ring", - "untrusted", -] +checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" [[package]] -name = "semver" -version = "0.9.0" +name = "sct" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" dependencies = [ - "semver-parser", + "ring", + "untrusted", ] -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - [[package]] name = "serde" -version = "1.0.130" +version = "1.0.160" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" +checksum = "bb2f3770c8bce3bcda7e149193a069a0f4365bda1fa5cd88e03bca26afc1216c" dependencies = [ "serde_derive", ] @@ -1360,20 +1569,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.130" +version = "1.0.160" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" +checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df" 
dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "serde_json" -version = "1.0.64" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" +checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" dependencies = [ "itoa", "ryu", @@ -1382,59 +1591,56 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.21" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8c608a35705a5d3cdc9fbe403147647ff34b921f8e833e49306df898f9b20af" +checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" dependencies = [ - "dtoa", "indexmap", + "ryu", "serde", "yaml-rust", ] -[[package]] -name = "sha1" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" - [[package]] name = "signal-hook-registry" -version = "1.3.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] [[package]] name = "slab" -version = "0.4.2" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" +checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" +dependencies = [ + "autocfg", +] [[package]] name = "sluice" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fa0333a60ff2e3474a6775cc611840c2a55610c831dd366503474c02f1a28f5" +checksum = "6d7400c0eff44aa2fcb5e31a5f24ba9716ed90138769e4977a2ba6014ae63eb5" dependencies = [ - "futures-channel", + "async-channel", 
"futures-core", "futures-io", ] [[package]] name = "smallvec" -version = "1.6.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" -version = "0.4.2" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dc90fe6c7be1a323296982db1836d1ea9e47b6839496dde9a541bc496df3516" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", @@ -1447,91 +1653,38 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] -name = "standback" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e113fb6f3de07a243d434a56ec6f186dfd51cb08448239fe7bcae73f87ff28ff" -dependencies = [ - "version_check", -] - -[[package]] -name = "stdweb" -version = "0.4.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" -dependencies = [ - "discard", - "rustc_version", - "stdweb-derive", - "stdweb-internal-macros", - "stdweb-internal-runtime", - "wasm-bindgen", -] - -[[package]] -name = "stdweb-derive" -version = "0.5.3" +name = "strsim" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" -dependencies = [ - "proc-macro2", - "quote", - "serde", - "serde_derive", - "syn", -] +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] -name = "stdweb-internal-macros" -version = "0.2.9" +name = "syn" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "base-x", "proc-macro2", "quote", - "serde", - "serde_derive", - "serde_json", - "sha1", - "syn", + "unicode-ident", ] -[[package]] -name = "stdweb-internal-runtime" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" - -[[package]] -name = "strsim" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" - -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - [[package]] name = "syn" -version = "1.0.65" +version = "2.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3a1d708c221c5a612956ef9f75b37e454e88d1f7b899fbd3a18d4252012d663" +checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" dependencies = [ "proc-macro2", "quote", - "unicode-xid", + "unicode-ident", ] [[package]] name = "sysinfo" -version = "0.22.4" +version = "0.28.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccb37aa4af23791c584202d286ed9420e023e9d27e49d5a76215623f4bcc2502" +checksum = "b4c2f3ca6693feb29a89724516f016488e9aafc7f37264f898593ee4b942f31b" dependencies = [ "cfg-if", "core-foundation-sys", @@ -1544,147 +1697,143 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.2.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" dependencies = [ "cfg-if", - "libc", - "rand 0.8.4", - 
"redox_syscall", - "remove_dir_all", - "winapi", + "fastrand", + "redox_syscall 0.3.5", + "rustix 0.37.13", + "windows-sys 0.45.0", ] [[package]] -name = "textwrap" -version = "0.11.0" +name = "termcolor" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" dependencies = [ - "unicode-width", + "winapi-util", ] [[package]] -name = "time" -version = "0.1.44" +name = "thiserror" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", - "winapi", + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.15", ] [[package]] name = "time" -version = "0.2.26" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08a8cbfbf47955132d0202d1662f49b2423ae35862aee471f3ba4b133358f372" +checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" dependencies = [ - "const_fn", "libc", - "standback", - "stdweb", - "time-macros", - "version_check", + "wasi 0.10.0+wasi-snapshot-preview1", "winapi", ] [[package]] -name = "time-macros" -version = "0.1.1" +name = "time" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e9c6e26f12cb6d0dd7fc776bb67a706312e7299aed74c8dd5b17ebb27e2f1" +checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" dependencies = [ - "proc-macro-hack", - "time-macros-impl", + 
"serde", + "time-core", ] [[package]] -name = "time-macros-impl" -version = "0.1.1" +name = "time-core" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5c3be1edfad6027c69f5491cf4cb310d1a71ecd6af742788c6ff8bced86b8fa" -dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - "standback", - "syn", -] +checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" [[package]] name = "tinyvec" -version = "1.1.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317cca572a0e89c3ce0ca1f1bdc9369547fe318a683418e42ac8f59d14701023" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] [[package]] name = "tinyvec_macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.11.0" +version = "1.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4efe6fc2395938c8155973d7be49fe8d03a843726e285e100a8a383cc0154ce" +checksum = "d0de47a4eecbe11f498978a9b29d792f0d2692d1dd003650c24c76510e3bc001" dependencies = [ "autocfg", "bytes", "libc", - "memchr", "mio", "num_cpus", - "once_cell", "parking_lot", "pin-project-lite", "signal-hook-registry", + "socket2", "tokio-macros", - "winapi", + "windows-sys 0.45.0", ] [[package]] name = "tokio-macros" -version = "1.3.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54473be61f4ebe4efd09cec9bd5d16fa51d70ea0192213d754d2d500457db110" +checksum = "61a573bdc87985e9d6ddeed1b3d864e8a302c847e40d647746df2f1de209d1ce" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.15", ] [[package]] name = "tokio-util" -version = "0.6.8" +version = 
"0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d3725d3efa29485e87311c5b699de63cde14b00ed4d256b8318aa30ca452cd" +checksum = "5427d89453009325de0d8f342c9490009f76e999cb7672d77e46267448f7e6b2" dependencies = [ "bytes", "futures-core", "futures-sink", - "log", "pin-project-lite", "tokio", + "tracing", ] [[package]] name = "tower-service" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.25" +version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01ebdc2bb4498ab1ab5f5b73c5803825e60199229ccba0698170e3be0e7f959f" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", "log", @@ -1695,22 +1844,22 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.15" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c42e6fa53307c8a17e4ccd4dc81cf5ec38db9209f59b222210375b54ee40d1e2" +checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "tracing-core" -version = "0.1.17" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" +checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" dependencies = [ - "lazy_static", + "once_cell", ] [[package]] @@ -1725,39 +1874,36 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum 
= "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "unicode-bidi" -version = "0.3.4" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" -dependencies = [ - "matches", -] +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" + +[[package]] +name = "unicode-ident" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" -version = "0.1.17" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07fbfce1c8a97d547e8b5334978438d9d6ec8c20e38f56d4a4374d181493eaef" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] name = "unicode-width" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" - -[[package]] -name = "unicode-xid" -version = "0.2.1" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "untrusted" @@ -1767,33 +1913,26 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.2.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" dependencies = [ "form_urlencoded", "idna", - "matches", "percent-encoding", ] [[package]] -name = "vcpkg" -version = "0.2.11" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" - -[[package]] -name = "vec_map" -version = "0.8.2" +name = "utf8parse" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] -name = "version_check" -version = "0.9.3" +name = "vcpkg" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "waker-fn" @@ -1813,13 +1952,15 @@ dependencies = [ [[package]] name = "warp10" -version = "1.0.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7bd564482b2d4ff8d3dc9334e2602923873e71c8406a3052dda6c3b7b6fef7c" +checksum = "e45b50e49a8a42f57459d1f2875c77c0825dc10dc69720a1cd146d571de4d621" dependencies = [ "isahc", "percent-encoding", - "time 0.2.26", + "serde", + "serde_json", + "time 0.3.20", "url", ] @@ -1835,11 +1976,17 @@ version = "0.10.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + [[package]] name = "wasm-bindgen" -version = "0.2.72" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fe8f61dba8e5d645a4d8132dc7a0a66861ed5e1045d2c0ed940fab33bac0fbe" +checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" dependencies = [ "cfg-if", 
"wasm-bindgen-macro", @@ -1847,24 +1994,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.72" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046ceba58ff062da072c7cb4ba5b22a37f00a302483f7e2a6cdc18fedbdc1fd3" +checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" dependencies = [ "bumpalo", - "lazy_static", "log", + "once_cell", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.72" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef9aa01d36cda046f797c57959ff5f3c615c9cc63997a8d545831ec7976819b" +checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -1872,28 +2019,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.72" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96eb45c1b2ee33545a813a92dbb53856418bf7eb54ab34f7f7ff1448a5b3735d" +checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.72" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7148f4696fb4960a346eaa60bbfb42a1ac4ebba21f750f75fc1375b098d5ffa" +checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" [[package]] name = "web-sys" -version = "0.3.49" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59fe19d70f5dacc03f6e46777213facae5ac3801575d56ca6cbd4c93dcd12310" +checksum = "e33b99f4b23ba3eec1a53ac264e35a755f00e966e0065077d6027c0f575b0b97" dependencies = [ "js-sys", "wasm-bindgen", @@ -1911,21 +2058,18 @@ 
dependencies = [ [[package]] name = "webpki-roots" -version = "0.21.0" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82015b7e0b8bad8185994674a13a93306bea76cf5a16c5a181382fd3a5ec2376" +checksum = "aabe153544e473b775453675851ecc86863d2a81d786d741f6b76778f2a48940" dependencies = [ "webpki", ] [[package]] -name = "wepoll-sys" -version = "3.0.1" +name = "widestring" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcb14dea929042224824779fbc82d9fab8d2e6d3cbc0ac404de8edf489e77ff" -dependencies = [ - "cc", -] +checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" [[package]] name = "winapi" @@ -1943,6 +2087,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" @@ -1955,7 +2108,27 @@ version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebbc80318ebf919219a113c41deae34aa90198e4a15e93c810a9ea1aaa4c1a78" dependencies = [ - "windows-sys", + "windows-sys 0.27.0", +] + +[[package]] +name = "windows" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +dependencies = [ + "windows-targets 0.48.0", +] + +[[package]] +name = "windows-service" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd9db37ecb5b13762d95468a2fc6009d4b2c62801243223aabd44fca13ad13c8" +dependencies = [ + "bitflags", + "widestring", + "windows-sys 0.45.0", ] [[package]] @@ -1964,43 
+2137,201 @@ version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cae116ee11e4bce7c0a0425f2b0c866a91d86d209624b7707a7deea52da786" dependencies = [ - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_msvc", + "windows_aarch64_msvc 0.27.0", + "windows_i686_gnu 0.27.0", + "windows_i686_msvc 0.27.0", + "windows_x86_64_gnu 0.27.0", + "windows_x86_64_msvc 0.27.0", +] + +[[package]] +name = "windows-sys" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", ] +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.0", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" + [[package]] name = "windows_aarch64_msvc" version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec7d1649bbab232cde71148c6ef7bbe647f214d2154dd66347fada60de40cda7" +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" + [[package]] name = "windows_i686_gnu" version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4eb20b59b93fc302839f3b0df3e61de7e9606b44cb54cbeb68d71cf137309fa" +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" + 
[[package]] name = "windows_i686_msvc" version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40331d8ef3e4dcdc8982eb7de16e1f09b86f5384626a56b3a99c2a51b88ff98e" +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" + [[package]] name = "windows_x86_64_gnu" version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5937d290e39c3308147d9b877c5fa741c50f4121ea78d2d20c4a138ad365464a" +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" + [[package]] name = "windows_x86_64_msvc" version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dee1b76aec4e2bead4758a181b663c37af0de7ec56fe6837c10215b8d6a1635f" +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" + +[[package]] +name = "x86" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2781db97787217ad2a2845c396a5efe286f87467a5810836db6d74926e94a385" +dependencies = [ + "bit_field", + "bitflags", + "raw-cpuid", +] + [[package]] name = "yaml-rust" version = "0.4.5" diff --git a/Cargo.toml b/Cargo.toml index 6d63daff..2074d408 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,7 +2,7 @@ name = "scaphandre" version = "0.5.0" authors = ["Benoit Petit "] -edition = "2018" +edition = "2021" license = "Apache-2.0" description = "Electric power/energy consumption monitoring agent." repository = "https://github.com/hubblo-org/scaphandre" @@ -12,38 +12,44 @@ homepage = "https://scaphandre.hubblo.org" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -loggerv = "0.7.2" +loggerv = "0.7" log = "0.4" -clap = "2.33.3" -regex = "1" +clap = { version = "4.2", features = ["cargo", "derive"] } +regex = "1.7" riemann_client = { version = "0.9.0", optional = true } hostname = "0.3.1" -protobuf = "2.20.0" +protobuf = "2.28.0" serde = { version = "1.0", features = ["derive"], optional = true } serde_json = { version = "1.0", optional = true } ordered-float = "2.0" -warp10 = { version = "1.0.0", optional = true } +warp10 = { version = "2.0.0", optional = true } rand = { version = "0.7.3" } -time = "0.2.25" -colored = "2.0.0" -chrono = "0.4.19" +time = "0.3" +colored = "2.0" +chrono = "0.4" docker-sync = { version = "0.1.2", optional = true } k8s-sync = { version = "0.2.3", optional = true } hyper = { version = "0.14", features = ["full"], optional = true } -tokio = { version = "1", features = ["full"], 
optional = true} +tokio = { version = "1.26.0", features = ["full"], optional = true} +sysinfo = { version = "0.28.3"} +isahc = { version = "1.7.2", optional = true } [target.'cfg(target_os="linux")'.dependencies] -procfs = { version = "0.12.0" } +procfs = { version = "0.15.0" } [target.'cfg(target_os="windows")'.dependencies] -windows = { version = "0.27.0", features = ["alloc","Win32_Storage_FileSystem","Win32_Foundation","Win32_Security","Win32_System_IO","Win32_System_Ioctl"]} -sysinfo = { version = "0.22.4"} - +windows = { version = "0.27.0", features = ["alloc","Win32_Storage_FileSystem","Win32_Foundation","Win32_Security","Win32_System_IO","Win32_System_Ioctl","Win32_System_Threading", "Win32_System_SystemInformation"]} +windows-service = { version = "0.6.0" } +raw-cpuid = { version = "10.5.0" } +core_affinity = { version = "0.8.1"} +x86 = { version = "0.52.0" } [features] -default = ["prometheus", "riemann", "warpten", "json", "containers"] +default = ["prometheus", "riemann", "warpten", "json", "containers", "prometheuspush"] prometheus = ["hyper", "tokio"] riemann = ["riemann_client"] json = ["serde", "serde_json"] containers = ["docker-sync", "k8s-sync"] warpten = ["warp10"] +prometheuspush = ["isahc"] +qemu = [] diff --git a/Dockerfile b/Dockerfile index e622898e..38a3d497 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM rust:1.59 as planner +FROM rust:1.74 as planner WORKDIR app RUN cargo install cargo-chef @@ -7,7 +7,7 @@ COPY . . # Analyze dependencies RUN cargo chef prepare --recipe-path recipe.json -FROM rust:1.59 as cacher +FROM rust:1.74 as cacher WORKDIR app RUN cargo install cargo-chef COPY --from=planner /app/recipe.json recipe.json @@ -15,7 +15,7 @@ COPY --from=planner /app/recipe.json recipe.json # Cache dependencies RUN cargo chef cook --release --recipe-path recipe.json -FROM rust:1.59 as builder +FROM rust:1.74 as builder WORKDIR app COPY . . 
@@ -24,11 +24,11 @@ COPY --from=cacher /app/target target COPY --from=cacher $CARGO_HOME $CARGO_HOME RUN cargo build --release -FROM ubuntu:20.04 as runtime +FROM ubuntu:22.04 as runtime WORKDIR app RUN apt-get update \ - && DEBIAN_FRONTEND="noninteractive" apt-get install -y ca-certificates tzdata \ + && DEBIAN_FRONTEND="noninteractive" apt-get install -y ca-certificates tzdata libssl3 \ && rm -rf /var/lib/apt/lists/* COPY --from=builder /app/target/release/scaphandre /usr/local/bin diff --git a/README.md b/README.md index c27f6bff..ace3a8fc 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ --- -Scaphandre *[skafɑ̃dʁ]* is a metrology agent dedicated to electrical [power](https://en.wikipedia.org/wiki/Electric_power) consumption metrics. The goal of the project is to permit to any company or individual to **measure** the power consumption of its tech services and get this data in a convenient form, sending it through any monitoring or data analysis toolchain. +Scaphandre *[skafɑ̃dʁ]* is a metrology agent dedicated to electric [power](https://en.wikipedia.org/wiki/Electric_power) and energy consumption metrics. The goal of the project is to permit to any company or individual to **measure** the power consumption of its tech services and get this data in a convenient form, sending it through any monitoring or data analysis toolchain. **Scaphandre** means *heavy* **diving suit** in [:fr:](https://fr.wikipedia.org/wiki/Scaphandre_%C3%A0_casque). It comes from the idea that tech related services often don't track their power consumption and thus don't expose it to their clients. Most of the time the reason is a presumed bad [ROI](https://en.wikipedia.org/wiki/Return_on_investment). Scaphandre makes, for tech providers and tech users, easier and cheaper to go under the surface to bring back the desired power consumption metrics, take better sustainability focused decisions, and then show the metrics to their clients to allow them to do the same. 
@@ -29,15 +29,18 @@ Join us on [Gitter](https://gitter.im/hubblo-org/scaphandre) or [Matrix](https:/ ## ✨ Features -- measuring power consumption on **bare metal hosts** -- measuring power consumption of **qemu/kvm virtual machines** from the host -- **exposing** power consumption metrics of a virtual machine, to allow **manipulating those metrics in the VM** as if it was a bare metal machine (relies on hypervisor features) -- exposing power consumption metrics as a **[prometheus](https://prometheus.io) (HTTP) exporter** -- sending power consumption metrics to **[riemann](http://riemann.io/)** -- sending power consumption metrics to **[Warp10](http://warp10.io/)** +- measuring power/energy consumed on **bare metal hosts** +- measuring power/energy consumed of **qemu/kvm virtual machines** from the host +- **exposing** power/energy metrics of a virtual machine, to allow **manipulating those metrics in the VM** as if it was a bare metal machine (relies on hypervisor features) +- exposing metrics as a **[prometheus](https://prometheus.io) (HTTP) exporter** +- sending metrics in push mode to a **[prometheus](https://prometheus.io) [Push Gateway](https://github.com/prometheus/pushgateway)** +- sending metrics to **[riemann](http://riemann.io/)** +- sending metrics to **[Warp10](http://warp10.io/)** - works on **[kubernetes](https://kubernetes.io/)** - storing power consumption metrics in a **JSON** file - showing basic power consumption metrics **in the terminal** +- operating systems supported so far : **Gnu/Linux**, **Windows 10, 11 and Server 2016/2019/2022** +- packages available for **RHEL 8 and 9, Debian 11 and 12 and Windows**, also **NixOS** (community support) Here is an example dashboard built thanks to scaphandre: [https://metrics.hubblo.org](https://metrics.hubblo.org). 
@@ -62,3 +65,8 @@ The ongoing roadmap can be seen [here](https://github.com/hubblo-org/scaphandre/ ## ⚖️ Footprint In opposition to its name, scaphandre aims to be as light and clean as possible. One of the main focus areas of the project is to come as close as possible to a 0 overhead, both about resources consumption and power consumption. + +## 🙏 Sponsoring + +If you like this project and would like to provide financial help, here's our [sponsoring page](https://github.com/sponsors/hubblo-org). +Thanks a lot for considering it ! \ No newline at end of file diff --git a/automation/ansible/install-configure-prometheuspush-rhel.yml b/automation/ansible/install-configure-prometheuspush-rhel.yml new file mode 100644 index 00000000..f87d0a5f --- /dev/null +++ b/automation/ansible/install-configure-prometheuspush-rhel.yml @@ -0,0 +1,32 @@ +- hosts: targets + vars: + rhel_version: 9 + scaphandre_version: "dev0.5.10" + pushgateway_host: localhost + pushgateway_scheme: http + pushgateway_port: 9092 + scaphandre_config_path: /etc/scaphandre/prometheuspush + service_name: scaphandre-prometheuspush + tasks: + #- name: Ensure scaphandre package is purged + # shell: "dnf remove -y {{ }}" + - name: Install RPM package + shell: "dnf install -y https://scaphandre.s3.fr-par.scw.cloud/x86_64/scaphandre-prometheuspush-{{ scaphandre_version }}-1.el{{ rhel_version }}.x86_64.rpm" + - name: Refresh systemd config + shell: systemctl daemon-reload + - name: Configure prometheus-push exporter to target push gateway + lineinfile: + path: "{{ scaphandre_config_path }}" + regexp: '^SCAPHANDRE_ARGS=.*' + backrefs: true + line: "SCAPHANDRE_ARGS=\"prometheus-push -H {{ pushgateway_host }} -S {{ pushgateway_scheme }} -p {{ pushgateway_port }} -s 30\"" + state: present + - name: Start & enable service + shell: "systemctl restart {{ service_name }} && systemctl enable {{ service_name }}" + - name: Check service state + shell: "systemctl status {{ service_name }}" + register: result + - name: 
Display error if failed + fail: + msg: "STDOUT: {{ result.stdout }} STDERR: {{ result.stderr }}" + when: result.rc != 0 diff --git a/automation/ansible/inventory b/automation/ansible/inventory new file mode 100644 index 00000000..8151d196 --- /dev/null +++ b/automation/ansible/inventory @@ -0,0 +1,2 @@ +[targets] +nce.hopto.org ansible_ssh_port=22029 diff --git a/docker-compose/dashboards/sample-dashboard.json b/docker-compose/dashboards/sample-dashboard.json index 5b62fba5..109847c4 100644 --- a/docker-compose/dashboards/sample-dashboard.json +++ b/docker-compose/dashboards/sample-dashboard.json @@ -49,7 +49,7 @@ "repeat": null, "seriesOverrides": [ ], "spaceLength": 10, - "span": 6, + "span": 3, "stack": false, "steppedLine": false, "targets": [ @@ -59,6 +59,20 @@ "intervalFactor": 2, "legendFormat": "{{instance}}", "refId": "A" + }, + { + "expr": "sum(scaph_process_power_consumption_microwatts) / 1000000", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "sum of processes power", + "refId": "B" + }, + { + "expr": "sum(scaph_domain_power_microwatts) / 1000000", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "sum of rapl domains power", + "refId": "C" } ], "thresholds": [ ], @@ -130,7 +144,7 @@ "repeat": null, "seriesOverrides": [ ], "spaceLength": 10, - "span": 4, + "span": 3, "stack": false, "steppedLine": false, "targets": [ @@ -180,20 +194,7 @@ "show": true } ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Per hosts", - "titleSize": "h6", - "type": "row" - }, - { - "collapse": false, - "collapsed": false, - "panels": [ + }, { "aliasColors": { }, "bars": false, @@ -227,22 +228,29 @@ "repeat": null, "seriesOverrides": [ ], "spaceLength": 10, - "span": 6, + "span": 3, "stack": false, "steppedLine": false, "targets": [ { - "expr": "scaph_socket_power_microwatts / 1000000", + "expr": "scaph_host_disk_total_bytes", "format": "time_series", "intervalFactor": 2, - 
"legendFormat": "{{instance}} Socket {{socket_id}}", + "legendFormat": "{{ disk_name }} {{ disk_type }} total", "refId": "A" + }, + { + "expr": "scaph_host_disk_available_bytes", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ disk_name }} {{ disk_type }} available", + "refId": "B" } ], "thresholds": [ ], "timeFrom": null, "timeShift": null, - "title": "Socket power consumption", + "title": "Disks capacity and usage", "tooltip": { "shared": true, "sort": 0, @@ -258,7 +266,102 @@ }, "yaxes": [ { - "format": "W", + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${PROMETHEUS_DS}", + "fill": 1, + "fillGradient": 0, + "gridPos": { }, + "id": 5, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "scaph_host_load_avg_one", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "load_avg_1", + "refId": "A" + }, + { + "expr": "scaph_host_load_avg_five", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "load_avg_5", + "refId": "B" + }, + { + "expr": "scaph_host_load_avg_fifteen", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "load_avg_15", + "refId": "C" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Host load average", + "tooltip": { + 
"shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "", "label": null, "logBase": 1, "max": null, @@ -266,7 +369,7 @@ "show": true }, { - "format": "W", + "format": "", "label": null, "logBase": 1, "max": null, @@ -280,7 +383,7 @@ "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Per CPU Sockets", + "title": "Per hosts", "titleSize": "h6", "type": "row" }, @@ -289,47 +392,85 @@ "collapsed": false, "panels": [ { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, "datasource": "${PROMETHEUS_DS}", - "fieldConfig": { - "defaults": { - "links": [ ], - "mappings": [ ], - "thresholds": { - "mode": "absolute", - "steps": [ ] - }, - "unit": "none" - } - }, + "fill": 1, + "fillGradient": 0, "gridPos": { }, - "id": 5, - "links": [ ], - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - } + "id": 6, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false }, - "pluginVersion": "7", + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "span": 3, + "stack": false, + "steppedLine": false, "targets": [ { - "expr": "sort_desc(topk(3, sum by (exe) (scaph_process_power_consumption_microwatts/1000000)))", + "expr": "scaph_socket_power_microwatts / 1000000", "format": "time_series", "intervalFactor": 2, - "legendFormat": "{{exe}}", + "legendFormat": "{{instance}} Socket {{socket_id}}", "refId": "A" } ], - "title": "Top 
process consumers", - "transparent": false, - "type": "stat" + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Socket power consumption", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "W", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "W", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] }, { "aliasColors": { }, @@ -340,16 +481,16 @@ "fill": 1, "fillGradient": 0, "gridPos": { }, - "id": 6, + "id": 7, "legend": { - "alignAsTable": true, + "alignAsTable": false, "avg": false, "current": false, "max": false, "min": false, "rightSide": false, "show": true, - "sideWidth": "30%", + "sideWidth": null, "total": false, "values": false }, @@ -364,22 +505,22 @@ "repeat": null, "seriesOverrides": [ ], "spaceLength": 10, - "span": 8, - "stack": true, + "span": 3, + "stack": false, "steppedLine": false, "targets": [ { - "expr": "scaph_process_power_consumption_microwatts{exe=~\".*${process_filter}.*\"}/1000000", + "expr": "scaph_domain_power_microwatts / 1000000", "format": "time_series", "intervalFactor": 2, - "legendFormat": "{{ cmdline }}", + "legendFormat": "{{domain_name}}", "refId": "A" } ], "thresholds": [ ], "timeFrom": null, "timeShift": null, - "title": "Filtered process (process_filter) power, by exe", + "title": "scaph_domain_power", "tooltip": { "shared": true, "sort": 0, @@ -411,6 +552,512 @@ "show": true } ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${PROMETHEUS_DS}", + "fill": 1, + "fillGradient": 0, + "gridPos": { }, + "id": 8, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + 
"values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "scaph_self_cpu_usage_percent", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{__name__}}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "scaph_self_cpu", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "%", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "%", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${PROMETHEUS_DS}", + "fill": 1, + "fillGradient": 0, + "gridPos": { }, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "scaph_self_memory_bytes", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{__name__}}", + "refId": "A" + }, + { + "expr": "scaph_self_memory_virtual_bytes", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{__name__}}", + "refId": "B" + } + ], + "thresholds": [ ], + 
"timeFrom": null, + "timeShift": null, + "title": "scaph_self_mem", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Per CPU Sockets", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${PROMETHEUS_DS}", + "fill": 1, + "fillGradient": 0, + "gridPos": { }, + "id": 10, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": "30%", + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "span": 3, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "scaph_process_power_consumption_microwatts{cmdline=~\".*${process_filter}.*\"}/1000000", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ cmdline }}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "Filtered process (process_filter) power, by cmdline", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "W", + "label": 
null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "W", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${PROMETHEUS_DS}", + "fill": 1, + "fillGradient": 0, + "gridPos": { }, + "id": 11, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": "30%", + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "span": 3, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "scaph_process_cpu_usage_percentage{cmdline=~\".*${process_filter}.*\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ cmdline }}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "scaph_process_cpu", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "%", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "%", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${PROMETHEUS_DS}", + "fill": 1, + "fillGradient": 0, + "gridPos": { }, + "id": 12, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": "30%", + "total": false, + "values": false + }, + "lines": true, + "linewidth": 
1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "span": 3, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "scaph_process_memory_bytes{cmdline=~\".*${process_filter}.*\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ cmdline }}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "scaph_process_mem", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${PROMETHEUS_DS}", + "fill": 1, + "fillGradient": 0, + "gridPos": { }, + "id": 13, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": "30%", + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ ], + "spaceLength": 10, + "span": 3, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "scaph_process_memory_virtual_bytes{cmdline=~\".*${process_filter}.*\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ cmdline }}", + "refId": "A" + } + ], + "thresholds": [ ], + "timeFrom": null, + "timeShift": null, + "title": "scaph_process_mem_virtual", + "tooltip": { + "shared": true, + 
"sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] } ], "repeat": null, diff --git a/docker-compose/docker-compose-dev.yaml b/docker-compose/docker-compose-dev.yaml index 3e70a730..8c77befe 100644 --- a/docker-compose/docker-compose-dev.yaml +++ b/docker-compose/docker-compose-dev.yaml @@ -16,7 +16,9 @@ services: - type: bind source: /var/run/docker.sock target: /var/run/docker.sock - command: ["-v", "prometheus", "--containers"] + command: ["-vvvv", "prometheus", "--containers"] + environment: + RUST_BACKTRACE: "full" networks: - scaphandre-network diff --git a/docker-compose/docker-compose.yaml b/docker-compose/docker-compose.yaml index 173df237..24233a62 100644 --- a/docker-compose/docker-compose.yaml +++ b/docker-compose/docker-compose.yaml @@ -20,6 +20,7 @@ services: target: "/var/lib/grafana/dashboards/sample/sample-dashboard.json" scaphandre: image: hubblo/scaphandre + privileged: true ports: - "8080:8080" volumes: diff --git a/docker-compose/sample.jsonnet b/docker-compose/sample.jsonnet index 975a235b..96dc3309 100644 --- a/docker-compose/sample.jsonnet +++ b/docker-compose/sample.jsonnet @@ -32,7 +32,7 @@ dashboard.new( title='Hosts power consumption', datasource='${PROMETHEUS_DS}', format='W', - span=6, + span=3, min=0 ) .addTarget( @@ -41,12 +41,24 @@ dashboard.new( legendFormat='{{instance}}', ) ) + .addTarget( + grafana.prometheus.target( + 'sum(scaph_process_power_consumption_microwatts) / 1000000', + legendFormat='sum of processes power', + ) + ) + .addTarget( + grafana.prometheus.target( + 'sum(scaph_domain_power_microwatts) / 1000000', + legendFormat='sum of rapl domains power', + ) + ) ) .addPanel( 
grafana.graphPanel.new( title='Hosts power consumption total (dynamic time range)', datasource='${PROMETHEUS_DS}', - span=4, + span=3, bars=true, format='Wh', x_axis_mode='series', @@ -60,6 +72,53 @@ dashboard.new( ) ) ) + .addPanel( + grafana.graphPanel.new( + title='Disks capacity and usage', + datasource='${PROMETHEUS_DS}', + span=3, + format='bytes', + ) + .addTarget( + grafana.prometheus.target( + 'scaph_host_disk_total_bytes', + legendFormat='{{ disk_name }} {{ disk_type }} total', + ) + ) + .addTarget( + grafana.prometheus.target( + 'scaph_host_disk_available_bytes', + legendFormat='{{ disk_name }} {{ disk_type }} available', + ) + ) + ) + .addPanel( + grafana.graphPanel.new( + title='Host load average', + datasource='${PROMETHEUS_DS}', + span=3, + format='', + min=0 + ) + .addTarget( + grafana.prometheus.target( + 'scaph_host_load_avg_one', + legendFormat='load_avg_1', + ) + ) + .addTarget( + grafana.prometheus.target( + 'scaph_host_load_avg_five', + legendFormat='load_avg_5', + ) + ) + .addTarget( + grafana.prometheus.target( + 'scaph_host_load_avg_fifteen', + legendFormat='load_avg_15', + ) + ) + ) ) .addRow( row.new( @@ -70,7 +129,7 @@ dashboard.new( title='Socket power consumption', datasource='${PROMETHEUS_DS}', format='W', - span=6, + span=3, min=0 ) .addTarget( @@ -80,29 +139,125 @@ dashboard.new( ) ) ) + .addPanel( + grafana.graphPanel.new( + title='scaph_domain_power', + datasource='${PROMETHEUS_DS}', + format='W', + span=3, + min=0 + ) + .addTarget( + grafana.prometheus.target( + 'scaph_domain_power_microwatts / 1000000', + legendFormat='{{domain_name}}', + ) + ) + ) + .addPanel( + grafana.graphPanel.new( + title='scaph_self_cpu', + datasource='${PROMETHEUS_DS}', + format='%', + span=3, + min=0 + ) + .addTarget( + grafana.prometheus.target( + 'scaph_self_cpu_usage_percent', + legendFormat='{{__name__}}', + ) + ) + ) + .addPanel( + grafana.graphPanel.new( + title='scaph_self_mem', + datasource='${PROMETHEUS_DS}', + format='bytes', + span=3, + min=0 
+ ) + .addTarget( + grafana.prometheus.target( + 'scaph_self_memory_bytes', + legendFormat='{{__name__}}', + ) + ) + .addTarget( + grafana.prometheus.target( + 'scaph_self_memory_virtual_bytes', + legendFormat='{{__name__}}', + ) + ) + ) ) .addRow( row.new( title='Per process', ) .addPanel( - grafana.statPanel.new( - title='Top process consumers', + grafana.graphPanel.new( + title='Filtered process (process_filter) power, by cmdline', datasource='${PROMETHEUS_DS}', + span=3, + format='W', + legend_rightSide=false, + legend_alignAsTable=true, + legend_sideWidth='30%', + stack=true, + min=0 ) .addTarget( grafana.prometheus.target( - 'sort_desc(topk(3, sum by (exe) (scaph_process_power_consumption_microwatts/1000000)))', - legendFormat='{{exe}}', + 'scaph_process_power_consumption_microwatts{cmdline=~".*${process_filter}.*"}/1000000', + legendFormat='{{ cmdline }}', ) ) ) .addPanel( grafana.graphPanel.new( - title='Filtered process (process_filter) power, by exe', + title='scaph_process_cpu', datasource='${PROMETHEUS_DS}', - span=8, - format='W', + span=3, + format='%', + legend_rightSide=false, + legend_alignAsTable=true, + legend_sideWidth='30%', + stack=true, + min=0 + ) + .addTarget( + grafana.prometheus.target( + 'scaph_process_cpu_usage_percentage{cmdline=~".*${process_filter}.*"}', + legendFormat='{{ cmdline }}', + ) + ) + ) + .addPanel( + grafana.graphPanel.new( + title='scaph_process_mem', + datasource='${PROMETHEUS_DS}', + span=3, + format='bytes', + legend_rightSide=false, + legend_alignAsTable=true, + legend_sideWidth='30%', + stack=true, + min=0 + ) + .addTarget( + grafana.prometheus.target( + 'scaph_process_memory_bytes{cmdline=~".*${process_filter}.*"}', + legendFormat='{{ cmdline }}', + ) + ) + ) + .addPanel( + grafana.graphPanel.new( + title='scaph_process_mem_virtual', + datasource='${PROMETHEUS_DS}', + span=3, + format='bytes', legend_rightSide=false, legend_alignAsTable=true, legend_sideWidth='30%', @@ -111,7 +266,7 @@ dashboard.new( ) .addTarget( 
grafana.prometheus.target( - 'scaph_process_power_consumption_microwatts{exe=~".*${process_filter}.*"}/1000000', + 'scaph_process_memory_virtual_bytes{cmdline=~".*${process_filter}.*"}', legendFormat='{{ cmdline }}', ) ) diff --git a/docs_src/SUMMARY.md b/docs_src/SUMMARY.md index db2bd8be..fe343879 100644 --- a/docs_src/SUMMARY.md +++ b/docs_src/SUMMARY.md @@ -14,15 +14,20 @@ - [Propagate power consumption metrics from hypervisor to virtual machines (Qemu/KVM)](how-to_guides/propagate-metrics-hypervisor-to-vm_qemu-kvm.md) - [Get process-level power consumption in my grafana dashboard](how-to_guides/get-process-level-power-in-grafana.md) +- [Install Scaphandre with only Prometheus-push exporter compiled, for Prometheus Push Gateway, on RHEL 8 and 9](how-to_guides/install-prometheuspush-only-rhel.md) # Explanations +- [Explanations about host level power and energy metrics](explanations/host_metrics.md) - [How scaphandre computes per process power consumption](explanations/how-scaph-computes-per-process-power-consumption.md) - [Internal structure](explanations/internal-structure.md) - [About containers](explanations/about-containers.md) +- [About RAPL domains](explanations/rapl-domains.md) # References +- [Metrics available](references/metrics.md) + ## Exporters - [JSON exporter](references/exporter-json.md) @@ -36,6 +41,7 @@ - [MSR_RAPL sensor](references/sensor-msr_rapl.md) - [PowercapRAPL sensor](references/sensor-powercap_rapl.md) +- [MSRRAPL sensor](references/sensor-msr_rapl.md) [Why this project ?](why.md) [Compatibility](compatibility.md) diff --git a/docs_src/compatibility.md b/docs_src/compatibility.md index e9d62114..0797e320 100644 --- a/docs_src/compatibility.md +++ b/docs_src/compatibility.md @@ -6,14 +6,22 @@ To summarize, scaphandre should provide two ways to estimate the power consumpti In scaphandre, the code responsible to collect the power consumption data before any further processing is grouped in components called **sensors**. 
If you want more details about scaphandre structure, [here are the explanations](explanations/internal-structure.md). -The [PowercapRAPL sensor](references/sensor-powercap_rapl.md) enables you to measure the power consumption, it is the most precise solution, but it doesn't work in all contexts. A future sensor is to be developed to support other use cases. Here is the current state of scaphandre's compatibility: +On GNU/Linux [PowercapRAPL sensor](references/sensor-powercap_rapl.md) enables you to measure the power consumption, but it doesn't work in all contexts. + +On Windows, [the MsrRAPL sensor](references/sensor-msr_rapl.md), coupled with the [driver responsible to read RAPL MSR's](https://github.com/hubblo-org/windows-rapl-driver/) enables you to do (almost) the same. | Sensor | Intel x86 bare metal | AMD x86 bare metal | ARM bare metal | Virtual Machine | Public cloud instance | Container | | :------------- | :------------------: | :----------------: | :------------: | :-------------: | :-------------------: | :-------: | -| PowercapRAPL | [Yes](references/sensor-powercap_rapl.md) | Yes ⚠️ kernel > 5.11 required | We don't know yet | Yes, if on a qemu/KVM hypervisor that runs scaphandre and the [Qemu exporter](references/exporter-qemu.md) | No, until your cloud provider uses scaphandre on its hypervisors | [Depends on what you want](explanations/about-containers.md) | -| Future estimation based sensor | Future Yes | Future Yes | Future Yes | Future Yes | Future Yes | +| PowercapRAPL (GNU/Linux only) | [Yes](references/sensor-powercap_rapl.md) | Yes ⚠️ kernel > 5.11 required | We don't know yet | Yes, if on a qemu/KVM hypervisor that runs scaphandre and the [Qemu exporter](references/exporter-qemu.md) | No, until your cloud provider uses scaphandre on its hypervisors | [Depends on what you want](explanations/about-containers.md) | +| MsrRAPL (Windows only) | Yes | Probable yes (not tested yet, if you have windows operated AMD gear, please consider 
[contributing](contributing.md) | No | Not yet, depends on improvements on the MsrRAPL sensors and overall windows/hypervisors support in Scaphandre | No, until your cloud provider uses scaphandre on its hypervisors | Might work, not tested yet. If you want to join us in this journey, please consider [contributing](contributing.md) | +| Future estimation based sensor | Future Yes | Future Yes | Future Yes | Future Yes | Future Yes | Future Yes + +## Checking RAPL is available on your CPU + +Sensors including "RAPL" in their name rely on [RAPL](explanations/rapl-domains.md). + +The `pts` and `pln` feature flags ("Intel Package Thermal Status" and "Intel Power Limit Notification" respectively) seem to indicate that RAPL is supported on a CPU. On GNU/Linux, you can be sure of their presence if this command succeeds and matches: -| Sensor | GNU/Linux | Windows | MacOS | -| :-----------: | :--------------: | :------------------------------------: | :---: | -| PowercapRAPL | Yes (see above) | No | No | -| MsrRAPL | No | Yes (tested on windows 10/server 2019) | No | \ No newline at end of file +``` +egrep "(pts|pln)" /proc/cpuinfo +``` \ No newline at end of file diff --git a/docs_src/explanations/host_metrics.md new file mode 100644 index 00000000..ac3895e8 --- /dev/null +++ b/docs_src/explanations/host_metrics.md @@ -0,0 +1,22 @@ +# Explanations about host level power and energy metrics. 
+ +This is true starting **from Scaphandre >= 1.0.** + +There are several [metrics](../references/metrics.md) available at the host level in Scaphandre: +- `scaph_host_power_microwatts` : always returned, computed from Record structs made from `scaph_host_energy_microjoules` metric +- `scaph_host_energy_microjoules` : always returned, either one value or a sum of values coming directly from RAPL counters (`energy_uj` files or direct read from an MSR) +- `scaph_host_rapl_psys_microjoules` : is available only when the PSYS [RAPL domain](explanations/rapl-domains.md) is available on the machine. + +In addition to those metrics, you might want to build, on your time series database, the sum of process_ metrics to have a view of the weight of all processes on the host power. Using Prometheus, it would look like: `sum(scaph_process_power_consumption_microwatts{hostname="$hostname"}) / 1000000`, to get it in Watts. + +Let's explain the relationship between those metrics, and what you could expect. + +`host_power` metric will return: +1. If PSYS domain is available, a computed power coming from PSYS energy records +2. If not, a computed power which is the sum of per-socket power (PKG RAPL domain) + DRAM RAPL domain power + +Briefly explained (see [RAPL domains](explanations/rapl-domains.md) for detailed explanations), PSYS covers most components on the machine ("all components connected to the SoC / motherboard" according to most documentations), so we return this wider-ranged metric when available. If not, we use a combination of PKG domain, that includes CPU and integrated GPU power, and DRAM domain, that includes memory power. The first option gives higher figures than the second, for now. + +Summing the power of all processes, if the machine is mostly IDLE, you'll get a tiny percentage of the host machine's power, most likely. 
The difference between host power and the sum of processes power can be accounted as "power due to IDLE activity", in other words the power your machine demands for "doing nothing". The higher this difference over a long period of time (better seen as a graph), the higher the chance that there is room for improvement in moving the workloads to another machine and shutting the current machine down (and making it available for another project or to another organization to avoid buying a new machine). + +**Warning:** that being said, the way per-process power is computed is still biased and shall be improved in the following versions of Scaphandre. For now, the main key for allocation is CPU time. As host level power metrics include power usage of more and more components on the machine (work in progress), this allocation key will be more and more inaccurate. Future versions of this allocation model should include keys regarding the activity of other components than CPU. Enabling a better set of allocation keys for per-process power is part of the [roadmap](https://github.com/hubblo-org/scaphandre/projects/1). diff --git a/docs_src/explanations/rapl-domains.md new file mode 100644 index 00000000..b2a14a64 --- /dev/null +++ b/docs_src/explanations/rapl-domains.md @@ -0,0 +1,16 @@ +# Explanation on RAPL / Running Average Power Limit domains: what we (think we) know so far + +RAPL stands for "Running Average Power Limit"; it is a feature on Intel/AMD x86 CPUs (manufactured after 2012) that allows setting limits on the power used by the CPU and other components. This feature also allows one to just get "measurements" (mind the double quotes, as at least part of the numbers RAPL gives are coming from estimations/modeling) of components' power usage. + +![RAPL domains](rapl.png) + +It is composed of "domains", that, in 2023, may include: +- **Core/PP0**: Energy consumed by the CPU Cores themselves. 
+- **Uncore/PP1**: Energy consumed by components close to the CPU : most of the time it means the embedded GPU chipset. +- **Dram**: Energy consumed by the memory/RAM sticks +- **Package/PKG**: Includes "Core" and "Uncore". In some documentation and in some of our experiments it seems to include "Dram", but this doesn't seem true in every case. +- **PSys**: We don't have a clear understanding on this one (yet). But most documentation refers to it with words similar to "PSys: (introduced with Intel Skylake) monitors and controls the thermal and power specifications of the entire SoC and it is useful especially when the source of the power consumption is neither the CPU nor the GPU. For multi-socket server systems, each socket reports its own RAPL values.". To summarize, Psys seems like an interesting metric to get energy consumed by a motherboard and connected components (which includes RAPL usual suspects but also WiFi/Bluetooth cards and probably more). If you want to know more about this metric, we gathered references/sources [here](https://github.com/bpetit/awesome-energy/tree/master#rapl-psys-domain). If you want to help us understand and document this metric better, please consider contributing to the [Energizta project](https://github.com/Boavizta/Energizta/). + +RAPL documentation from Intel doesn't necessarily give very precise information about how RAPL behaves depending on the platform, or about what is included in the calculation. Actively looking for other experiments/feedback/documentation is needed. You might find some information gathered here: [awesome-energy](https://github.com/bpetit/awesome-energy#rapl).
If you have more or more precise informations and are willing to contribute, don't hesitate to open a PR to dev branch on [scaphandre's repository](https://github.com/hubblo-org/scaphandre/tree/dev) (targeting [docs_src folder](https://github.com/hubblo-org/scaphandre/tree/dev/docs_src)) and/or the [awesome-energy](https://github.com/bpetit/awesome-energy) repository. + +If you want to know if RAPL is supported by your CPU, please have a look to the end of the [Compatibility](../compatibility.md/) section. \ No newline at end of file diff --git a/docs_src/explanations/rapl.png b/docs_src/explanations/rapl.png new file mode 100644 index 00000000..22208db9 Binary files /dev/null and b/docs_src/explanations/rapl.png differ diff --git a/docs_src/favicon.ico b/docs_src/favicon.ico new file mode 100644 index 00000000..39b5a015 Binary files /dev/null and b/docs_src/favicon.ico differ diff --git a/docs_src/how-to_guides/install-prometheuspush-only-rhel.md b/docs_src/how-to_guides/install-prometheuspush-only-rhel.md new file mode 100644 index 00000000..b1eea7a5 --- /dev/null +++ b/docs_src/how-to_guides/install-prometheuspush-only-rhel.md @@ -0,0 +1,57 @@ +# Install Scaphandre with only Prometheus-push exporter compiled, for Prometheus Push Gateway, on RHEL 8 and 9 + +## Manual installation + +Scaphandre can be compiled with a limited set of features. You have the choice to only install Scaphandre with prometheus-push exporter (alongside with stdout and json exporters, which might be useful locally). 
+ +RPM packages containing only those features are provided for RHEL 8 and 9 : +- [RPM package for RHEL8](https://scaphandre.s3.fr-par.scw.cloud/x86_64/scaphandre-prometheuspush-dev0.5.18-1.el8.x86_64.rpm) +- [RPM package for RHEL9](https://scaphandre.s3.fr-par.scw.cloud/x86_64/scaphandre-prometheuspush-dev0.5.18-1.el9.x86_64.rpm) + +You can download it and install it just providing the right URL to dnf : + + dnf install -y URL + +Then you'll probably need to change its configuration to target the appropriate Push Gateway server. Edit the configuration file : + + vi /etc/scaphandre/prometheuspush + +Default options look like : + + SCAPHANDRE_ARGS="prometheus-push -H localhost -S http" + +Those are prometheus-push exporter CLI options. Run the executable to get the reference of the options : + + /usr/bin/scaphandre-prometheuspush --help + +A simple configuration to target Push Gateway reachable on https://myserver.mydomain:PORT and send data every 30 seconds would look like : + + SCAPHANDRE_ARGS="prometheus-push -H myserver.mydomain -S https -p PORT -s 30" + +Once the configuration is changed, you can restart the service and ensure it is enabled as well for next reboot : + + systemctl restart scaphandre-prometheuspush && systemctl enable scaphandre-prometheuspush + +Configuration issues or issues to reach the push gateway should be visible in the logs : + + systemctl status scaphandre-prometheuspush + +## Automatic installation with ansible + +There is a [sample Ansible playbook](https://github.com/hubblo-org/scaphandre/blob/dev/automation/ansible/install-configure-prometheuspush-rhel.yml) available in the [automation/ansible](https://github.com/hubblo-org/scaphandre/tree/dev/automation/ansible) folder of the project. 
+ +This can be used this way : + + ansible-playbook -i inventory -b -u myunprivilegeduser -K install-configure-prometheuspush-rhel.yml + +Beware of the playbook parameters : + + rhel_version: 9 + scaphandre_version: "dev0.5.10" + pushgateway_host: localhost + pushgateway_scheme: http + pushgateway_port: 9092 + scaphandre_config_path: /etc/scaphandre/prometheuspush + service_name: scaphandre-prometheuspush + +Ensure to change those to match your context, including changing rhel version if needed (8 and 9 are supported) and parameters to reach the Push Gateway on the network. \ No newline at end of file diff --git a/docs_src/how-to_guides/propagate-metrics-hypervisor-to-vm_qemu-kvm.md b/docs_src/how-to_guides/propagate-metrics-hypervisor-to-vm_qemu-kvm.md index 63ed2939..fd8d12aa 100644 --- a/docs_src/how-to_guides/propagate-metrics-hypervisor-to-vm_qemu-kvm.md +++ b/docs_src/how-to_guides/propagate-metrics-hypervisor-to-vm_qemu-kvm.md @@ -29,7 +29,7 @@ In the definition of the virtual machine (here we are using libvirt), ensure you virsh edit DOMAIN_NAME -Then add: +Then add this filesystem configuration block inside the `` block: @@ -40,6 +40,13 @@ Then add: Save and (re)start the virtual machine. +If you get this error: "error: unsupported configuration: 'virtiofs' requires shared memory", you might add this configuration section to the `` section. 
+ + + + + + Then connect to the virtual machine and mount the filesystem: mount -t 9p -o trans=virtio scaphandre /var/scaphandre diff --git a/docs_src/references/exporter-json.md b/docs_src/references/exporter-json.md index 65e51855..30fae878 100644 --- a/docs_src/references/exporter-json.md +++ b/docs_src/references/exporter-json.md @@ -26,22 +26,34 @@ To get informations about processes that are running in containers, add `--conta scaphandre --no-header json --containers --max-top-consumers=15 | jq -As always exporter's options can be displayed with `-h`: - - $ scaphandre json -h - JSON exporter allows you to output the power consumption data in a json file +Since 1.0.0 you can filter the processes, either by their process name with `--process-regex`, or by the name of the container they run in with `--container-regex` (needs the flag `--containers` to be active as well). - USAGE: - scaphandre json [FLAGS] [OPTIONS] - - FLAGS: - --containers Monitor and apply labels for processes running as containers - -h, --help Prints help information - -V, --version Prints version information +As always exporter's options can be displayed with `-h`: - OPTIONS: - -f, --file Destination file for the report. [default: ] - -m, --max-top-consumers Maximum number of processes to watch. [default: 10] - -s, --step Set measurement step duration in second. [default: 2] - -n, --step_nano Set measurement step duration in nano second. [default: 0] - -t, --timeout Maximum time spent measuring, in seconds. + Write the metrics in the JSON format to a file or to stdout + + Usage: scaphandre json [OPTIONS] + + Options: + -t, --timeout + Maximum time spent measuring, in seconds. If unspecified, runs forever + -s, --step + Interval between two measurements, in seconds [default: 2] + --step-nano + Additional step duration in _nano_ seconds. 
This is added to `step` to get the final duration [default: 0] + --max-top-consumers + Maximum number of processes to watch [default: 10] + -f, --file + Destination file for the report (if absent, print the report to stdout) + --containers + Monitor and apply labels for processes running as containers + --process-regex + Filter processes based on regular expressions (example: 'scaph\\w\\w.e') + --container-regex + Filter containers based on regular expressions + --resources + Monitor and include CPU, RAM and Disk usage per process + -h, --help + Print help + +Metrics provided by Scaphandre are documented [here](references/metrics.md). \ No newline at end of file diff --git a/docs_src/references/exporter-prometheus.md b/docs_src/references/exporter-prometheus.md index 5ca8e77a..ee3cd158 100644 --- a/docs_src/references/exporter-prometheus.md +++ b/docs_src/references/exporter-prometheus.md @@ -34,81 +34,4 @@ With default options values, the metrics are exposed on http://localhost:8080/me Use -q or --qemu option if you are running scaphandre on a hypervisor. In that case a label with the vm name will be added to all `qemu-system*` processes. This will allow to easily create charts consumption for each vm and defined which one is the top contributor. -## Metrics exposed - -All metrics have a HELP section provided on /metrics (or whatever suffix you choosed to expose them). - -Here are some key metrics that you will most probably be interested in: - -- `scaph_host_power_microwatts`: Power measurement on the whole host, in microwatts (GAUGE) - -`scaph_process_power_consumption_microwatts{exe="$PROCESS_EXE",pid="$PROCESS_PID",cmdline="path/to/exe --and-maybe-options"}`: Power consumption due to the process, measured on at the topology level, in microwatts. PROCESS_EXE being the name of the executable and PROCESS_PID being the pid of the process. (GAUGE) - -For more details on that metric labels, see [this section](#scaph_process_power_consumption_microwatts).
- -And some more deep metrics that you may want if you need to make more complex calculations and data processing: - -- `scaph_host_energy_microjoules` : Energy measurement for the whole host, as extracted from the sensor, in microjoules. (COUNTER) -- `scaph_socket_power_microwatts{socket_id="$SOCKET_ID"}`: Power measurement relative to a CPU socket, in microwatts. SOCKET_ID being the socket numerical id (GAUGE) - -If you hack scaph or just want to investigate its behavior, you may be interested in some internal metrics: - -- `scaph_self_mem_total_program_size`: Total program size, measured in pages - -- `scaph_self_mem_resident_set_size`: Resident set size, measured in pages - -- `scaph_self_mem_shared_resident_size`: Number of resident shared pages (i.e., backed by a file) - -- `scaph_self_topo_stats_nb`: Number of CPUStat traces stored for the host - -- `scaph_self_topo_records_nb`: Number of energy consumption Records stored for the host - -- `scaph_self_topo_procs_nb`: Number of processes monitored by scaph - -- `scaph_self_socket_stats_nb{socket_id="SOCKET_ID"}`: Number of CPUStat traces stored for each socket - -- `scaph_self_socket_records_nb{socket_id="SOCKET_ID"}`: Number of energy consumption Records stored for each socket, with SOCKET_ID being the id of the socket measured - -- `scaph_self_domain_records_nb{socket_id="SOCKET_ID",rapl_domain_name="RAPL_DOMAIN_NAME -"}`: Number of energy consumption Records stored for a Domain, where SOCKET_ID identifies the socket and RAPL_DOMAIN_NAME identifies the rapl domain measured on that socket - -### scaph_process_power_consumption_microwatts - -Here are available labels for the `scaph_process_power_consumption_microwatts` metric that you may need to extract the data you need: - -- `exe`: is the name of the executable that is the origin of that process. This is good to be used when your application is running one or only a few processes. 
-- `cmdline`: this contains the whole command line with the executable path and its parameters (concatenated). You can filter on this label by using prometheus `=~` operator to match a regular expression pattern. This is very practical in many situations. -- `instance`: this is a prometheus generated label to enable you to filter the metrics by the originating host. This is very useful when you monitor distributed services, so that you can not only sum the metrics for the same service on the different hosts but also see what instance of that service is consuming the most, or notice differences beteween hosts that may not have the same hardware, and so on... -- `pid`: is the process id, which is useful if you want to track a specific process and have your eyes on what's happening on the host, but not so practical to use in a more general use case - -### Get container-specific labels on scaph_process_power_consumption_microwatts metrics - -The flag --containers enables Scaphandre to collect data about the running Docker containers or Kubernetes pods on the local machine. This way, it adds specific labels to make filtering processes power consumption metrics by their encapsulation in containers easier. - -Generic labels help to identify the container runtime and scheduler used (based on the content of `/proc/PID/cgroup`): - -`container_scheduler`: possible values are `docker` or `kubernetes`. If this label is not attached to the metric, it means that scaphandre didn't manage to identify the container scheduler based on cgroups data. - -Then the label `container_runtime` could be attached. The only possible value for now is `containerd`. - -`container_id` is the ID scaphandre got from /proc/PID/cgroup for that container. 
- -For Docker containers (if `container_scheduler` is set), available labels are : - -- `container_names`: is a string containing names attached to that container, according to the docker daemon -- `container_docker_version`: version of the docker daemon -- `container_label_maintainer`: content of the maintainer field for this container - -For containers coming from a docker-compose file, there are a bunch of labels related to data coming from the docker daemon: - -- `container_label_com_docker_compose_project_working_dir` -- `container_label_com_docker_compose_container_number` -- `container_label_com_docker_compose_project_config_files` -- `container_label_com_docker_compose_version` -- `container_label_com_docker_compose_service` -- `container_label_com_docker_compose_oneoff` - -For Kubernetes pods (if `container_scheduler` is set), available labels are : - -- `kubernetes_node_name`: identifies the name of the kubernetes node scaphandre is running on -- `kubernetes_pod_name`: the name of the pod the container belongs to -- `kubernetes_pod_namespace`: the namespace of the pod the container belongs to +Metrics provided Scaphandre are documented [here](references/metrics.md). 
\ No newline at end of file diff --git a/docs_src/references/exporter-prometheuspush.md b/docs_src/references/exporter-prometheuspush.md new file mode 100644 index 00000000..5b86b6c2 --- /dev/null +++ b/docs_src/references/exporter-prometheuspush.md @@ -0,0 +1,34 @@ +# PrometheusPush Exporter for Prometheus Push Gateway + +## Usage + +You can launch the prometheus exporter this way (running the default powercap_rapl sensor): + + scaphandre prometheus-push + +As always exporter's options can be displayed with `-h`: +``` + scaphandre prometheus-push -h + Push metrics to Prometheus Push Gateway + + Usage: scaphandre prometheus-push [OPTIONS] + + Options: + -H, --host IP address (v4 or v6) of the metrics endpoint for Prometheus [default: localhost] + -p, --port TCP port of the metrics endpoint for Prometheus [default: 9091] + --suffix [default: metrics] + -S, --scheme [default: http] + -s, --step [default: 5] + --qemu Apply labels to metrics of processes that look like a Qemu/KVM virtual machine + --containers Apply labels to metrics of processes running as containers + -j, --job Job name to apply as a label for pushed metrics [default: scaphandre] + --no-tls-check Don't verify remote TLS certificate (works with --scheme="https") + -h, --help Print help +``` +With default options values, the metrics are sent to http://localhost:9091/metrics + +## Metrics exposed + +Metrics exposed are the same as the Prometheus (pull mode) exporter. + +Push gateway's grouping key for each host is in the form `job/scaphandre/instance/${HOSTNAME}` with HOSTNAME being the hostname of the host sending metrics. 
\ No newline at end of file diff --git a/docs_src/references/exporter-riemann.md b/docs_src/references/exporter-riemann.md index 8fdabae6..255089d1 100644 --- a/docs_src/references/exporter-riemann.md +++ b/docs_src/references/exporter-riemann.md @@ -9,28 +9,35 @@ You can launch the Riemann exporter this way (running the default powercap_rapl scaphandre riemann As always exporter's options can be displayed with `-h`: + ``` -scaphandre-riemann -Riemann exporter sends power consumption metrics to a Riemann server - -USAGE: - scaphandre riemann [FLAGS] [OPTIONS] - -FLAGS: - -h, --help Prints help information - --mtls Connect to a Riemann server using mTLS. Parameters address, ca, cert and key must be defined. - -q, --qemu Instruct that scaphandre is running on an hypervisor - -V, --version Prints version information - -OPTIONS: - -a, --address
Riemann ipv6 or ipv4 address. If mTLS is used then server fqdn must be - provided [default: localhost] - -d, --dispatch Duration between metrics dispatch [default: 5] - -p, --port Riemann TCP port number [default: 5555] - --ca CA certificate file (.pem format) - --cert Client certificate file (.pem format) - --key Client RSA key +Expose the metrics to a Riemann server + +Usage: scaphandre riemann [OPTIONS] + +Options: + -a, --address
+ Address of the Riemann server. If mTLS is used this must be the server's FQDN [default: localhost] + -p, --port + TCP port number of the Riemann server [default: 5555] + -d, --dispatch-interval + Duration between each metric dispatch, in seconds [default: 5] + -q, --qemu + Apply labels to metrics of processes looking like a Qemu/KVM virtual machine + --containers + Monitor and apply labels for processes running as containers + --mtls + Connect to Riemann using mTLS instead of plain TCP + --ca + CA certificate file (.pem format) + --cert + Client certificate file (.pem format) + --key + Client RSA key file + -h, --help + Print help ``` + With default options values, the metrics are sent to http://localhost:5555 every 5 seconds Use `--mtls` option to connect to a Riemann server using mTLS. In such case, you must provide the following parameters: @@ -79,7 +86,8 @@ As a reference here is a Riemann configuration: ``` ## Metrics exposed -Typically the Riemann exporter is working in the same way as the prometheus exporter regarding metrics. Please look at details in [Prometheus exporter](exporter-prometheus.md) documentations. + +Metrics provided Scaphandre are documented [here](references/metrics.md). There is only one exception about `process_power_consumption_microwatts` each process has a service name `process_power_consumption_microwatts_pid_exe`. diff --git a/docs_src/references/exporter-stdout.md b/docs_src/references/exporter-stdout.md index af569898..de37a939 100644 --- a/docs_src/references/exporter-stdout.md +++ b/docs_src/references/exporter-stdout.md @@ -30,26 +30,22 @@ Here is how to display power data for the 'scaphandre' process: scaphandre stdout -r 'scaphandre' -Note +Metrics provided Scaphandre are documented [here](references/metrics.md). 
-As always exporter's options can be displayed with `-h`: - - $ scaphandre stdout -h - scaphandre-stdout - Stdout exporter allows you to output the power consumption data in the terminal +Since 1.0.0 the flag `--raw-metrics` displays all metrics available for the host, as a parseable list. This might be useful to list metrics that you would like to fetch afterwards in your monitoring dashboard. Without this flag enabled, Stdout exporter has its own format and might not show you all available metrics. - USAGE: - scaphandre stdout [OPTIONS] +As always exporter's options can be displayed with `-h`: - FLAGS: - -h, --help Prints help information - -V, --version Prints version information + Write the metrics to the terminal - OPTIONS: - -p, --process Number of processes to display. [default: 5] - -r, --regex Filter processes based on regular expressions (e.g: 'scaph\w\wd.e'). This option - disable '-p' or '--process' one. - -s, --step Set measurement step duration in seconds. [default: 2] - -t, --timeout Maximum time spent measuring, in seconds. 0 means continuous measurement. - [default: 10] + Usage: scaphandre stdout [OPTIONS] + Options: + -t, --timeout Maximum time spent measuring, in seconds.
If negative, runs forever [default: 10] + -s, --step Interval between two measurements, in seconds [default: 2] + -p, --processes Maximum number of processes to display [default: 5] + -r, --regex-filter Filter processes based on regular expressions (example: 'scaph\\w\\w.e') + --containers Monitor and apply labels for processes running as containers + -q, --qemu Apply labels to metrics of processes looking like a Qemu/KVM virtual machine + --raw-metrics Display metrics with their names + -h, --help Print help \ No newline at end of file diff --git a/docs_src/references/exporter-warp10.md b/docs_src/references/exporter-warp10.md index 6b2a76cd..92a8be1f 100644 --- a/docs_src/references/exporter-warp10.md +++ b/docs_src/references/exporter-warp10.md @@ -13,30 +13,25 @@ The `SCAPH_WARP10_WRITE_TOKEN` env var can be used to make it available to scaph Please refer to the warp10 documentation to know how to get the token in the first place. As always exporter's options can be displayed with `-h`: + ``` -scaphandre-warp10 -Warp10 exporter sends data to a Warp10 host, through HTTP - -USAGE: - scaphandre warp10 [FLAGS] [OPTIONS] - -FLAGS: - -h, --help Prints help information - -q, --qemu Tells scaphandre it is running on a Qemu hypervisor. - -V, --version Prints version information - -OPTIONS: - -H, --host Warp10 host's FQDN or IP address to send data to [default: localhost] - -p, --port TCP port to join Warp10 on the host [default: 8080] - -s, --scheme Either 'http' or 'https' [default: http] - -S, --step Time step between measurements, in seconds. [default: 30] - -t, --write-token Auth. token to write on Warp10 +Expose the metrics to a Warp10 host, through HTTP + +Usage: scaphandre warpten [OPTIONS] + +Options: + -H, --host FQDN or IP address of the Warp10 instance [default: localhost] + -p, --port TCP port of the Warp10 instance [default: 8080] + -S, --scheme "http" or "https" [default: http] + -t, --write-token Auth token to write data to Warp10. 
If not specified, you must set the env variable SCAPH_WARP10_WRITE_TOKEN + -s, --step Interval between two measurements, in seconds [default: 2] + -q, --qemu Apply labels to metrics of processes looking like a Qemu/KVM virtual machine + -h, --help Print help ``` + With default options values, the metrics are sent to http://localhost:8080 every 60 seconds Use -q or --qemu option if you are running scaphandre on a hypervisor. In that case a label with the vm name will be added to all `qemu-system*` processes. This will allow to easily create charts consumption for each vm and defined which one is the top contributor. -## Metrics exposed - -Typically the Warp10 exporter is working the same way as the riemann and the prometheus exporters regarding metrics. Please look at details in [Prometheus exporter](exporter-prometheus.md) documentations to get the extensive list of metrics available. \ No newline at end of file +Metrics provided Scaphandre are documented [here](references/metrics.md). \ No newline at end of file diff --git a/docs_src/references/metrics.md b/docs_src/references/metrics.md new file mode 100644 index 00000000..e9db61c6 --- /dev/null +++ b/docs_src/references/metrics.md @@ -0,0 +1,106 @@ +# Metrics exposed by Scaphandre + +With [Stdout](exporter-stdout.md) exporter, you can see all metrics available on your machine with flag `--raw-metrics`. +With [prometheus](exporter-prometheus.md) exporter, all metrics have a HELP section provided on /metrics (or whatever suffix you choosed to expose them). + +Here are some key metrics that you will most probably be interested in: + +- `scaph_host_power_microwatts`: Aggregation of several measurements to give a try on the power usage of the the whole host, in microwatts (GAUGE). It might be the same as RAPL PSYS (see [RAPL domains](../explanations/rapl-domains.md)) measurement if available, or a combination of RAPL PKG and DRAM domains + an estimation of other hardware componentes power usage. 
+- `scaph_process_power_consumption_microwatts{exe="$PROCESS_EXE",pid="$PROCESS_PID",cmdline="path/to/exe --and-maybe-options"}`: Power consumption due to the process, measured at the topology level, in microwatts. PROCESS_EXE being the name of the executable and PROCESS_PID being the pid of the process. (GAUGE) + +For more details on that metric's labels, see [this section](#getting-per-process-data-with-scaph_process_-metrics). + +And some more deep metrics that you may want if you need to make more complex calculations and data processing: + +- `scaph_host_energy_microjoules` : Energy measurement for the whole host, as extracted from the sensor, in microjoules. (COUNTER) +- `scaph_socket_power_microwatts{socket_id="$SOCKET_ID"}`: Power measurement relative to a CPU socket, in microwatts. SOCKET_ID being the socket numerical id (GAUGE) + +If your machine provides RAPL PSYS domain (see [RAPL domains](../explanations/rapl-domains.md)), you can get the raw energy counter for PSYS/platform with `scaph_host_rapl_psys_microjoules`. Note that `scaph_host_power_microwatts` is based on this PSYS counter if it is available. + +Since 1.0.0 the following host metrics are available as well: + +- `scaph_host_swap_total_bytes`: Total swap space on the host, in bytes. +- `scaph_host_swap_free_bytes`: Swap space free to be used on the host, in bytes. +- `scaph_host_memory_free_bytes`: Random Access Memory free to be used (not reused) on the host, in bytes. +- `scaph_host_memory_available_bytes`: Random Access Memory available to be re-used on the host, in bytes. +- `scaph_host_memory_total_bytes`: Random Access Memory installed on the host, in bytes. +- `scaph_host_disk_total_bytes`: Total disk size, in bytes. +- `scaph_host_disk_available_bytes`: Available disk space, in bytes. + +Disk metrics have the following labels : disk_file_system, disk_is_removable, disk_type, disk_mount_point, disk_name + +- `scaph_host_cpu_frequency`: Global frequency of all the cpus.
In MegaHertz +- `scaph_host_load_avg_fifteen`: Load average on 15 minutes. +- `scaph_host_load_avg_five`: Load average on 5 minutes. +- `scaph_host_load_avg_one`: Load average on 1 minute. + +If you hack scaph or just want to investigate its behavior, you may be interested in some internal metrics: + +- `scaph_self_memory_bytes`: Scaphandre memory usage, in bytes + +- `scaph_self_memory_virtual_bytes`: Scaphandre virtual memory usage, in bytes + +- `scaph_self_topo_stats_nb`: Number of CPUStat traces stored for the host + +- `scaph_self_topo_records_nb`: Number of energy consumption Records stored for the host + +- `scaph_self_topo_procs_nb`: Number of processes monitored by scaph + +- `scaph_self_socket_stats_nb{socket_id="SOCKET_ID"}`: Number of CPUStat traces stored for each socket + +- `scaph_self_socket_records_nb{socket_id="SOCKET_ID"}`: Number of energy consumption Records stored for each socket, with SOCKET_ID being the id of the socket measured + +- `scaph_self_domain_records_nb{socket_id="SOCKET_ID",rapl_domain_name="RAPL_DOMAIN_NAME +"}`: Number of energy consumption Records stored for a Domain, where SOCKET_ID identifies the socket and RAPL_DOMAIN_NAME identifies the rapl domain measured on that socket + +### Getting per process data with scaph_process_* metrics + +Here are available labels for the `scaph_process_power_consumption_microwatts` metric that you may need to extract the data you need: + +- `exe`: is the name of the executable that is the origin of that process. This is good to be used when your application is running one or only a few processes. +- `cmdline`: this contains the whole command line with the executable path and its parameters (concatenated). You can filter on this label by using prometheus `=~` operator to match a regular expression pattern. This is very practical in many situations. +- `instance`: this is a prometheus generated label to enable you to filter the metrics by the originating host. 
This is very useful when you monitor distributed services, so that you can not only sum the metrics for the same service on the different hosts but also see what instance of that service is consuming the most, or notice differences beteween hosts that may not have the same hardware, and so on... +- `pid`: is the process id, which is useful if you want to track a specific process and have your eyes on what's happening on the host, but not so practical to use in a more general use case + +Since 1.0.0 the following per-process metrics are available as well : + +- `scaph_process_cpu_usage_percentage`: CPU time consumed by the process, as a percentage of the capacity of all the CPU Cores +- `scaph_process_memory_bytes`: Physical RAM usage by the process, in bytes +- `scaph_process_memory_virtual_bytes`: Virtual RAM usage by the process, in bytes +- `scaph_process_disk_total_write_bytes`: Total data written on disk by the process, in bytes +- `scaph_process_disk_write_bytes`: Data written on disk by the process, in bytes +- `scaph_process_disk_read_bytes`: Data read on disk by the process, in bytes +- `scaph_process_disk_total_read_bytes`: Total data read on disk by the process, in bytes + +### Get container-specific labels on scaph_process_* metrics + +The flag --containers enables Scaphandre to collect data about the running Docker containers or Kubernetes pods on the local machine. This way, it adds specific labels to make filtering processes power consumption metrics by their encapsulation in containers easier. + +Generic labels help to identify the container runtime and scheduler used (based on the content of `/proc/PID/cgroup`): + +`container_scheduler`: possible values are `docker` or `kubernetes`. If this label is not attached to the metric, it means that scaphandre didn't manage to identify the container scheduler based on cgroups data. + +Then the label `container_runtime` could be attached. The only possible value for now is `containerd`. 
+ +`container_id` is the ID scaphandre got from /proc/PID/cgroup for that container. + +For Docker containers (if `container_scheduler` is set), available labels are : + +- `container_names`: is a string containing names attached to that container, according to the docker daemon +- `container_docker_version`: version of the docker daemon +- `container_label_maintainer`: content of the maintainer field for this container + +For containers coming from a docker-compose file, there are a bunch of labels related to data coming from the docker daemon: + +- `container_label_com_docker_compose_project_working_dir` +- `container_label_com_docker_compose_container_number` +- `container_label_com_docker_compose_project_config_files` +- `container_label_com_docker_compose_version` +- `container_label_com_docker_compose_service` +- `container_label_com_docker_compose_oneoff` + +For Kubernetes pods (if `container_scheduler` is set), available labels are : + +- `kubernetes_node_name`: identifies the name of the kubernetes node scaphandre is running on +- `kubernetes_pod_name`: the name of the pod the container belongs to +- `kubernetes_pod_namespace`: the namespace of the pod the container belongs to \ No newline at end of file diff --git a/docs_src/scaphandre.ico b/docs_src/scaphandre.ico new file mode 100644 index 00000000..2431e35b Binary files /dev/null and b/docs_src/scaphandre.ico differ diff --git a/docs_src/tutorials/docker-compose.md b/docs_src/tutorials/docker-compose.md index 2b6456a2..a559c63c 100644 --- a/docs_src/tutorials/docker-compose.md +++ b/docs_src/tutorials/docker-compose.md @@ -7,6 +7,8 @@ Once you have cloned the repository, just move to the docker-compose folder and cd docker-compose docker-compose up -d +Be warned: the sample stack runs scaphandre as a privileged container. Otherwise apparmor or equivalents might complain about ptrace calls on the host. See [#135](https://github.com/hubblo-org/scaphandre/issues/135). 
+ Grafana will be available at `http://localhost:3000`, the default username is `admin` and the password is `secret`. Refresh the dashboard after 30s or enable auto-refresh and you should see the data filling the graphs. diff --git a/docs_src/tutorials/installation-linux.md b/docs_src/tutorials/installation-linux.md index 074d1f59..850313aa 100644 --- a/docs_src/tutorials/installation-linux.md +++ b/docs_src/tutorials/installation-linux.md @@ -16,8 +16,9 @@ Here are some other ways to install scaphandre depending on your context: - [quickly try the project with docker-compose/docker stack](docker-compose.md) - [run scaphandre on kubernetes](kubernetes.md) +- [run scaphandre on RHEL, with prometheus-push mode](../how-to_guides/install-prometheuspush-only-rhel.md) -Brave contributors work on system packages, please have a try and/or contribute to: +Kudos to contributors who work on system packages, please have a try and/or contribute to: - [Debian package](https://github.com/barnumbirr/scaphandre-debian), maintainer: @barnumbirr - [NixOS package](https://github.com/mmai/scaphandre-flake), maintainer: @mmai diff --git a/docs_src/tutorials/installation-windows.md b/docs_src/tutorials/installation-windows.md index 6b9c15ca..ae8a3618 100644 --- a/docs_src/tutorials/installation-windows.md +++ b/docs_src/tutorials/installation-windows.md @@ -1,5 +1,50 @@ -# Install Scaphandre on Windows (experimental) +# Install Scaphandre on Windows -A better procedure and packaging should come soon. +**!! Warning: Windows version of Scaphandre is still in early stage. !!** -The release 0.5.0 of Scaphandre can be tested on windows by compiling both the kernel driver and Scaphandre. See [Compilation for Windows (experimental)](compilation-windows.md) +## Using the installer + +Download the [package](https://scaphandre.s3.fr-par.scw.cloud/x86_64/scaphandre_0.5.0_installer.exe) and install it **as an administrator**. 
+ +### Configuring a Windows service to run Scaphandre in the background + +For example, to run the prometheus-push exporter in the background and target the Prometheus Push Gateway server with ip address `198.51.100.5` using HTTPS on port 443 and a step to send metrics of 45s, without checking the certificate of the push gateway (remove that option if you have a properly signed TLS certificate): + + sc.exe create Scaphandre binPath="C:\Program Files (x86)\scaphandre\scaphandre.exe prometheus-push -H 198.51.100.5 -s 45 -S https -p 443 --no-tls-check" DisplayName=Scaphandre start=auto + +Ensure the service is started in Services.msc; if it is not, start it by right-clicking on it, then Start. + +To delete the service, you can do it in Services.msc, or: + + sc.exe delete Scaphandre + +### Using an installer including a development version of the driver + +If you are running a development version of the installer (which probably means a development version of the [driver](https://github.com/hubblo-org/windows-rapl-driver/)), you'll need to enable Test Mode on Windows prior to proceeding with this installation, then reboot. + + bcdedit.exe -set TESTSIGNING ON + bcdedit.exe -set nointegritychecks on + +Beware: in this case, activation of test mode **and a reboot** is needed beforehand. + +Once installed, you should be able to run scaphandre from Powershell, by running: + + & 'C:\Program Files (x86)\scaphandre\scaphandre.exe' stdout + +## Troubleshooting + +An error such as + + scaphandre::sensors::msr_rapl: Failed to open device : HANDLE(-1) + +means that the driver is not properly set up. Check its state by running: + + driverquery /v | findstr capha + +If there is no item returned, the installation of the driver encountered an issue. + +If the service is STOPPED, there is also something wrong. 
+ +## Compilation + +If you look for compiling Scaphandre and its driver yourself, see [Compilation for Windows](compilation-windows.md) \ No newline at end of file diff --git a/docs_src/tutorials/kubernetes.md b/docs_src/tutorials/kubernetes.md index 50dc76af..e82524ee 100644 --- a/docs_src/tutorials/kubernetes.md +++ b/docs_src/tutorials/kubernetes.md @@ -12,6 +12,14 @@ to be installed from the source code. git clone https://github.com/hubblo-org/scaphandre cd scaphandre helm install scaphandre helm/scaphandre +### Parameters +#### Service monitor parameters + +| Name | Description | Value | +| ------------------------------------------ | --------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `serviceMonitor.namespace` | The namespace in which the ServiceMonitor will be created (if not set, default to namespace on which this chart is installed) | `""` | +| `serviceMonitor.interval` | The interval at which metrics should be scraped | `1m` | ## Install Prometheus diff --git a/helm/scaphandre/templates/psp.yaml b/helm/scaphandre/templates/psp.yaml index f7d702d5..269112ee 100644 --- a/helm/scaphandre/templates/psp.yaml +++ b/helm/scaphandre/templates/psp.yaml @@ -1,3 +1,4 @@ +{{- if .Capabilities.APIVersions.Has "policy/v1beta1" }} apiVersion: policy/v1beta1 kind: PodSecurityPolicy metadata: @@ -26,3 +27,4 @@ spec: - projected hostPID: true hostIPC: true +{{- end }} diff --git a/helm/scaphandre/templates/rbac.yaml b/helm/scaphandre/templates/rbac.yaml index 73d5003c..b13df11b 100644 --- a/helm/scaphandre/templates/rbac.yaml +++ b/helm/scaphandre/templates/rbac.yaml @@ -20,6 +20,7 @@ metadata: labels: {{- include "labels.common" . 
| nindent 4 }} rules: +{{- if .Capabilities.APIVersions.Has "policy/v1beta1" }} - apiGroups: - extensions resources: @@ -28,6 +29,7 @@ rules: - {{ .Chart.Name }} verbs: - "use" +{{- end }} - apiGroups: - "" resources: diff --git a/helm/scaphandre/templates/servicemonitor.yaml b/helm/scaphandre/templates/servicemonitor.yaml new file mode 100644 index 00000000..3d343f71 --- /dev/null +++ b/helm/scaphandre/templates/servicemonitor.yaml @@ -0,0 +1,28 @@ +{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "scaphandre.name" . }}-service-monitoring + {{- if .Values.serviceMonitor.namespace }} + namespace: {{ .Values.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: + app.kubernetes.io/name: {{ template "scaphandre.name" . }} +spec: + endpoints: + - path: /metrics + port: metrics + scheme: http + {{- if .Values.serviceMonitor.interval }} + interval: {{ .Values.serviceMonitor.interval }} + {{- end }} + scrapeTimeout: 30s + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "scaphandre.name" . 
}} +{{- end }} diff --git a/helm/scaphandre/values.yaml b/helm/scaphandre/values.yaml index dec4c7ce..60f47c80 100644 --- a/helm/scaphandre/values.yaml +++ b/helm/scaphandre/values.yaml @@ -6,18 +6,25 @@ port: 8080 resources: limits: - memory: 75Mi + memory: 200Mi requests: cpu: 75m - memory: 50Mi + memory: 100Mi scaphandre: command: prometheus args: {} extraArgs: - containers: true + containers: # rustBacktrace: '1' # Run as root user to get proper permissions userID: 0 groupID: 0 + +serviceMonitor: + # Specifies whether ServiceMonitor for Prometheus operator should be created + enabled: false + interval: 1m + # Specifies namespace, where ServiceMonitor should be installed + # namespace: monitoring diff --git a/oranda.json b/oranda.json index 5953ec40..7579ccd9 100644 --- a/oranda.json +++ b/oranda.json @@ -1,19 +1,37 @@ { + "build": { + "path_prefix": "scaphandre" + }, + "project": { + "repository": "https://github.com/hubblo-org/scaphandre" + }, "styles": { "theme": "light", - "favicon": "https://raw.githubusercontent.com/hubblo-org/scaphandre/dev/docs_src/scaphandre.small.cleaned.png" + "favicon": "docs_src/favicon.ico" }, "marketing": { "social": { + "image": "https://raw.githubusercontent.com/hubblo-org/scaphandre/dev/docs_src/scaphandre.cleaned.png", "image_alt": "Hubblo's twitter/x account", "twitter_account": "@HubbloOrg" } }, "components": { "changelog": true, + "artifacts": { + "auto": false, + "cargo_dist": false, + "package_managers": { + "preferred": { + "crates.io": "cargo install scaphandre", + "DEB package": "dpkg -i scaphandre.X.deb", + "docker": "docker pull hubblo/scaphandre", + "RPM package": "rpm -ivh scaphandre.X.rpm" + } + } + }, "funding": { "preferred_funding": "github" } } } - diff --git a/packaging/linux/redhat/rpmbuild/SPECS/scaphandre-prometheuspush-only.spec b/packaging/linux/redhat/rpmbuild/SPECS/scaphandre-prometheuspush-only.spec new file mode 100644 index 00000000..f6bc21ee --- /dev/null +++ 
b/packaging/linux/redhat/rpmbuild/SPECS/scaphandre-prometheuspush-only.spec @@ -0,0 +1,57 @@ +Name: scaphandre-prometheuspush +Version: CHANGEME +Release: 1%{?dist} +Summary: Power usage / Electricity / Energy monitoring agent + +License: Apache-2.0 +URL: https://github.com/hubblo-org/scaphandre +Source0: %{name}-%{version}.tar.gz +#Source0 will be github.com url for tar gz of source + +BuildRequires: rust,cargo,systemd-rpm-macros +#Requires: + +%global debug_package %{nil} + +%description + +%prep +%autosetup + +%build +cargo build --release --no-default-features --features json,prometheuspush + +%pre + +%install +#rm -rf $RPM_BUILD_ROOT +mkdir -p $RPM_BUILD_ROOT/%{_bindir}/ +cp target/release/scaphandre $RPM_BUILD_ROOT/%{_bindir}/scaphandre-prometheuspush +chmod +x $RPM_BUILD_ROOT/%{_bindir}/scaphandre-prometheuspush +mkdir -p $RPM_BUILD_ROOT/lib/systemd/system +mkdir -p $RPM_BUILD_ROOT/etc/scaphandre +echo 'SCAPHANDRE_ARGS="prometheus-push -H localhost -S http"' > $RPM_BUILD_ROOT/etc/scaphandre/prometheuspush +mkdir -p $RPM_BUILD_ROOT/lib/systemd/system +cp packaging/linux/redhat/scaphandre-prometheuspush.service $RPM_BUILD_ROOT/lib/systemd/system/scaphandre-prometheuspush.service + +%post +%systemd_post scaphandre-prometheuspush.service + +%preun +%systemd_preun scaphandre-prometheuspush.service + +%postun +%systemd_postun_with_restart scaphandre-prometheuspush.service + +%clean +#rm -rf $RPM_BUILD_ROOT + +%files +#%doc README.md +%{_bindir}/scaphandre-prometheuspush +/lib/systemd/system/scaphandre-prometheuspush.service +/etc/scaphandre/prometheuspush + +#%license LICENSE + +%changelog diff --git a/packaging/linux/redhat/rpmbuild/SPECS/scaphandre.spec b/packaging/linux/redhat/rpmbuild/SPECS/scaphandre.spec new file mode 100644 index 00000000..8f109694 --- /dev/null +++ b/packaging/linux/redhat/rpmbuild/SPECS/scaphandre.spec @@ -0,0 +1,57 @@ +Name: scaphandre +Version: CHANGEME +Release: 1%{?dist} +Summary: Power usage / Electricity / Energy monitoring agent + 
+License: Apache-2.0 +URL: https://github.com/hubblo-org/scaphandre +Source0: %{name}-%{version}.tar.gz +#Source0 will be github.com url for tar gz of source + +BuildRequires: rust,cargo,openssl-devel,systemd-rpm-macros +#Requires: + +%global debug_package %{nil} + +%description + +%prep +%autosetup + +%build +cargo build --release + +%pre + +%install +#rm -rf $RPM_BUILD_ROOT +mkdir -p $RPM_BUILD_ROOT/%{_bindir}/ +cp target/release/scaphandre $RPM_BUILD_ROOT/%{_bindir}/ +chmod +x $RPM_BUILD_ROOT/%{_bindir}/scaphandre +mkdir -p $RPM_BUILD_ROOT/lib/systemd/system +mkdir -p $RPM_BUILD_ROOT/etc/scaphandre +echo "SCAPHANDRE_ARGS=prometheus" > $RPM_BUILD_ROOT/etc/scaphandre/default +mkdir -p $RPM_BUILD_ROOT/lib/systemd/system +cp packaging/linux/redhat/scaphandre.service $RPM_BUILD_ROOT/lib/systemd/system/scaphandre.service + +%post +%systemd_post scaphandre.service + +%preun +%systemd_preun scaphandre.service + +%postun +%systemd_postun_with_restart scaphandre.service + +%clean +#rm -rf $RPM_BUILD_ROOT + +%files +#%doc README.md +%{_bindir}/scaphandre +/lib/systemd/system/scaphandre.service +/etc/scaphandre/default + +#%license LICENSE + +%changelog \ No newline at end of file diff --git a/packaging/linux/redhat/scaphandre-prometheuspush.service b/packaging/linux/redhat/scaphandre-prometheuspush.service new file mode 100644 index 00000000..533fc91f --- /dev/null +++ b/packaging/linux/redhat/scaphandre-prometheuspush.service @@ -0,0 +1,55 @@ +[Unit] +Description=Scaphandre (prometheus-push exporter) +Wants=network.target +After=network.target + +[Service] +# systemctl edit and add these if you want to further limit access +#IPAddressAllow=localhost +#IPAddressDeny=any + +ExecStartPre=-+/usr/sbin/modprobe intel_rapl_common +ExecStartPre=+/usr/bin/find /sys/devices/virtual/powercap -name energy_uj -exec chmod g+r -R {} + -exec chown root:powercap {} + +ExecStart=/usr/bin/scaphandre-prometheuspush $SCAPHANDRE_ARGS +EnvironmentFile=/etc/scaphandre/prometheuspush + 
+CapabilityBoundingSet=CAP_NET_BIND_SERVICE +DevicePolicy=closed +DynamicUser=yes +Group=powercap +IPAccounting=yes +LockPersonality=yes +MemoryDenyWriteExecute=yes +MemoryMax=100M +NoNewPrivileges=yes +PrivateDevices=yes +PrivateTmp=yes +PrivateUsers=yes +#ProtectClock=yes +ProtectControlGroups=yes +ProtectHome=yes +#ProtectHostname=yes +#ProtectKernelLogs=yes +ProtectKernelModules=yes +ProtectKernelTunables=yes +ProtectSystem=strict +RestrictAddressFamilies=AF_INET AF_INET6 +RestrictNamespaces=yes +RestrictRealtime=yes +RestrictSUIDSGID=yes +SyslogIdentifier=scaphandre +SystemCallFilter=~@cpu-emulation +SystemCallFilter=~@debug +SystemCallFilter=~@keyring +SystemCallFilter=~@module +SystemCallFilter=~@mount +SystemCallFilter=~@obsolete +SystemCallFilter=~@privileged +SystemCallFilter=~@raw-io +SystemCallFilter=~@reboot +SystemCallFilter=~@resources +SystemCallFilter=~@swap +UMask=0777 + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/packaging/linux/redhat/scaphandre.service b/packaging/linux/redhat/scaphandre.service new file mode 100644 index 00000000..17f7e887 --- /dev/null +++ b/packaging/linux/redhat/scaphandre.service @@ -0,0 +1,55 @@ +[Unit] +Description=Scaphandre +Wants=network.target +After=network.target + +[Service] +# systemctl edit and add these if you want to further limit access +#IPAddressAllow=localhost +#IPAddressDeny=any + +ExecStartPre=-+/usr/sbin/modprobe intel_rapl_common +ExecStartPre=+/usr/bin/find /sys/devices/virtual/powercap -name energy_uj -exec chmod g+r -R {} + -exec chown root:powercap {} + +ExecStart=/usr/bin/scaphandre $SCAPHANDRE_ARGS +EnvironmentFile=/etc/scaphandre/default + +CapabilityBoundingSet=CAP_NET_BIND_SERVICE +DevicePolicy=closed +DynamicUser=yes +Group=powercap +IPAccounting=yes +LockPersonality=yes +MemoryDenyWriteExecute=yes +MemoryMax=100M +NoNewPrivileges=yes +PrivateDevices=yes +PrivateTmp=yes +PrivateUsers=yes +ProtectClock=yes +ProtectControlGroups=yes +ProtectHome=yes 
+ProtectHostname=yes +ProtectKernelLogs=yes +ProtectKernelModules=yes +ProtectKernelTunables=yes +ProtectSystem=strict +RestrictAddressFamilies=AF_INET AF_INET6 +RestrictNamespaces=yes +RestrictRealtime=yes +RestrictSUIDSGID=yes +SyslogIdentifier=scaphandre +SystemCallFilter=~@cpu-emulation +SystemCallFilter=~@debug +SystemCallFilter=~@keyring +SystemCallFilter=~@module +SystemCallFilter=~@mount +SystemCallFilter=~@obsolete +SystemCallFilter=~@privileged +SystemCallFilter=~@raw-io +SystemCallFilter=~@reboot +SystemCallFilter=~@resources +SystemCallFilter=~@swap +UMask=0777 + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/packaging/linux/ubuntu/scaphandre.service b/packaging/linux/ubuntu/scaphandre.service new file mode 100644 index 00000000..bd03faeb --- /dev/null +++ b/packaging/linux/ubuntu/scaphandre.service @@ -0,0 +1,55 @@ +[Unit] +Description=Scaphandre +Wants=network.target +After=network.target + +[Service] +# systemctl edit and add these if you want to further limit access +#IPAddressAllow=localhost +#IPAddressDeny=any + +ExecStartPre=-+/usr/sbin/modprobe intel_rapl_common +ExecStartPre=+/usr/bin/find /sys/devices/virtual/powercap -name energy_uj -exec chmod g+r -R {} + -exec chown root:powercap {} + +ExecStart=/usr/local/bin/scaphandre prometheus -p 8080 +EnvironmentFile=/etc/scaphandre + +CapabilityBoundingSet=CAP_NET_BIND_SERVICE +DevicePolicy=closed +DynamicUser=yes +Group=powercap +IPAccounting=yes +LockPersonality=yes +MemoryDenyWriteExecute=yes +MemoryMax=100M +NoNewPrivileges=yes +PrivateDevices=yes +PrivateTmp=yes +PrivateUsers=yes +ProtectClock=yes +ProtectControlGroups=yes +ProtectHome=yes +ProtectHostname=yes +ProtectKernelLogs=yes +ProtectKernelModules=yes +ProtectKernelTunables=yes +ProtectSystem=strict +RestrictAddressFamilies=AF_INET AF_INET6 +RestrictNamespaces=yes +RestrictRealtime=yes +RestrictSUIDSGID=yes +SyslogIdentifier=scaphandre +SystemCallFilter=~@cpu-emulation +SystemCallFilter=~@debug 
+SystemCallFilter=~@keyring +SystemCallFilter=~@module +SystemCallFilter=~@mount +SystemCallFilter=~@obsolete +SystemCallFilter=~@privileged +SystemCallFilter=~@raw-io +SystemCallFilter=~@reboot +SystemCallFilter=~@resources +SystemCallFilter=~@swap +UMask=0777 + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/packaging/windows/dev_installer.iss b/packaging/windows/dev_installer.iss new file mode 100644 index 00000000..710174d8 --- /dev/null +++ b/packaging/windows/dev_installer.iss @@ -0,0 +1,75 @@ + ; Script generated by the Inno Setup Script Wizard. +; SEE THE DOCUMENTATION FOR DETAILS ON CREATING INNO SETUP SCRIPT FILES! + +#define MyAppName "scaphandre" +#define MyAppVersion "0.5.0" +#define MyAppPublisher "Hubblo" +#define MyAppURL "https://hubblo-org.github.io/scaphandre-documentation" +#define MyAppExeName "scaphandre.exe" +#define MyAppSourceFolder "C:\Users\bpeti\Documents\GitHub\scaphandre" +#define RaplDriverSourceFolder "C:\Users\bpeti\Documents\GitHub\windows-rapl-driver" +#define SystemFolder "C:\Windows\System32" +#define System64Folder "C:\Windows\SysWOW64" + +[Setup] +; NOTE: The value of AppId uniquely identifies this application. Do not use the same AppId value in installers for other applications. +; (To generate a new GUID, click Tools | Generate GUID inside the IDE.) +AppId={{7DB7B851-1DD2-4FF5-BFC7-282FEBA3B28D} +AppName={#MyAppName} +AppVersion={#MyAppVersion} +;AppVerName={#MyAppName} {#MyAppVersion} +AppPublisher={#MyAppPublisher} +AppPublisherURL={#MyAppURL} +AppSupportURL={#MyAppURL} +AppUpdatesURL={#MyAppURL} +DefaultDirName={autopf}\{#MyAppName} +DefaultGroupName={#MyAppName} +LicenseFile=C:\Users\bpeti\Documents\GitHub\scaphandre\LICENSE +; Uncomment the following line to run in non administrative install mode (install for current user only.) 
+;PrivilegesRequired=lowest +OutputBaseFilename={#MyAppName}_{#MyAppVersion}_installer +Compression=lzma +SolidCompression=yes +WizardStyle=modern +Uninstallable=yes +SetupIconFile=C:\Users\bpeti\Documents\GitHub\scaphandre\docs_src\scaphandre.ico + +[Languages] +Name: "english"; MessagesFile: "compiler:Default.isl" + +[Files] +Source: "{#MyAppSourceFolder}\target\release\{#MyAppExeName}"; DestDir: "{app}"; Flags: ignoreversion +Source: "{#RaplDriverSourceFolder}\x64\Release\DriverLoader.exe"; DestDir: "{app}"; Flags: ignoreversion +Source: "{#RaplDriverSourceFolder}\x64\Release\ScaphandreDrv\ScaphandreDrv.inf"; DestDir: "{app}"; Flags: ignoreversion +; Source: "{#RaplDriverSourceFolder}\x64\Release\ScaphandreDrv\ScaphandreDrv.sys"; DestDir: "{#SystemFolder}"; +; Source: "{#RaplDriverSourceFolder}\x64\Release\ScaphandreDrv\ScaphandreDrv.sys"; DestDir: "{#System64Folder}"; +Source: "{#RaplDriverSourceFolder}\x64\Release\ScaphandreDrv\ScaphandreDrv.sys"; DestDir: "{app}"; +Source: "{#RaplDriverSourceFolder}\x64\Release\ScaphandreDrv\ScaphandreDrv.cat"; DestDir: "{app}"; +; Source: "{#RaplDriverSourceFolder}\x64\Release\ScaphandreDrv\ScaphandreDrv.cat"; DestDir: "{#SystemFolder}"; +; Source: "{#RaplDriverSourceFolder}\x64\Release\ScaphandreDrv\ScaphandreDrv.cat"; DestDir: "{#System64Folder}"; +Source: "C:\Program Files (x86)\Windows Kits\10\Tools\10.0.22621.0\x64\devcon.exe"; DestDir: "{app}"; Flags: ignoreversion +Source: "C:\Program Files (x86)\Windows Kits\10\bin\10.0.22621.0\x64\certmgr.exe"; DestDir: "{app}"; Flags: ignoreversion +Source: "{#MyAppSourceFolder}\README.md"; DestDir: "{app}"; Flags: ignoreversion +Source: "{#MyAppSourceFolder}\CHANGELOG.md"; DestDir: "{app}"; Flags: ignoreversion +Source: "{#RaplDriverSourceFolder}\ScaphandreDrvTest.cer"; DestDir: "{app}"; Flags: ignoreversion +; NOTE: Don't use "Flags: ignoreversion" on any shared system files + +[Icons] +Name: "{group}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}" + +[Run] +Filename: 
"C:\windows\System32\WindowsPowershell\v1.0\powershell.exe"; Parameters: "Import-Certificate -FilePath {app}\ScaphandreDrvTest.cer -CertStoreLocation Cert:\LocalMachine\Root"; Description: "Register test certificate"; Flags: waituntilidle shellexec +Filename: "{app}/devcon.exe"; Parameters: "install {app}\ScaphandreDrv.inf root\SCAPHANDREDRV"; Description: "Install Driver"; Flags: waituntilidle +Filename: "{app}/devcon.exe"; Parameters: "enable {app}\ScaphandreDrv.inf root\SCAPHANDREDRV"; Description: "Enable Driver"; Flags: waituntilidle +Filename: "{app}/DriverLoader.exe"; Parameters: "install"; WorkingDir: "{app}"; Description: "Install Driver Service"; +Filename: "{app}/DriverLoader.exe"; Parameters: "start"; WorkingDir: "{app}"; Description: "Start Driver Service"; +; Description: "{cm:LaunchProgram,{#StringChange(MyAppName, '&', '&&')}}"; +; Filename: "schtasks.exe"; Parameters: "/Create /SC ONSTART {app}\scaphandre.exe prometheus-push " + +[UninstallRun] +Filename: "{app}/DriverLoader.exe"; Parameters: "stop"; WorkingDir: "{app}"; RunOnceId: "StopService"; +Filename: "{app}/DriverLoader.exe"; Parameters: "remove"; WorkingDir: "{app}"; RunOnceId: "RemoveService"; +Filename: "{app}/devcon.exe"; Parameters: "disable ScaphandreDrv"; RunOnceId: "DisableDrier"; +Filename: "{app}/devcon.exe"; Parameters: "remove ScaphandreDrv"; RunOnceId: "RemoveService"; + + diff --git a/packaging/windows/installer.iss b/packaging/windows/installer.iss new file mode 100644 index 00000000..4817f2a9 --- /dev/null +++ b/packaging/windows/installer.iss @@ -0,0 +1,71 @@ + ; Script generated by the Inno Setup Script Wizard. +; SEE THE DOCUMENTATION FOR DETAILS ON CREATING INNO SETUP SCRIPT FILES! 
+ +#define MyAppName "scaphandre" +#define MyAppVersion "0.5.0" +#define MyAppPublisher "Hubblo" +#define MyAppURL "https://hubblo-org.github.io/scaphandre-documentation" +#define MyAppExeName "scaphandre.exe" +#define SystemFolder "C:\Windows\System32" +#define System64Folder "C:\Windows\SysWOW64" + +[Setup] +; NOTE: The value of AppId uniquely identifies this application. Do not use the same AppId value in installers for other applications. +; (To generate a new GUID, click Tools | Generate GUID inside the IDE.) +AppId={{7DB7B851-1DD2-4FF5-BFC7-282FEBA3B28D} +AppName={#MyAppName} +AppVersion={#MyAppVersion} +;AppVerName={#MyAppName} {#MyAppVersion} +AppPublisher={#MyAppPublisher} +AppPublisherURL={#MyAppURL} +AppSupportURL={#MyAppURL} +AppUpdatesURL={#MyAppURL} +DefaultDirName={autopf}\{#MyAppName} +DefaultGroupName={#MyAppName} +LicenseFile=../../LICENSE +; Uncomment the following line to run in non administrative install mode (install for current user only.) +;PrivilegesRequired=lowest +OutputBaseFilename={#MyAppName}_installer +Compression=lzma +SolidCompression=yes +WizardStyle=modern +Uninstallable=yes +SetupIconFile=../../docs_src/scaphandre.ico + +[Languages] +Name: "english"; MessagesFile: "compiler:Default.isl" + +[Files] +Source: "../../target/release/{#MyAppExeName}"; DestDir: "{app}"; Flags: ignoreversion +Source: "../../DriverLoader.exe"; DestDir: "{app}"; Flags: ignoreversion +Source: "../../ScaphandreDrv.inf"; DestDir: "{app}"; Flags: ignoreversion +; Source: "../../ScaphandreDrv.sys"; DestDir: "{#SystemFolder}"; +; Source: "../../ScaphandreDrv.sys"; DestDir: "{#System64Folder}"; +Source: "../../ScaphandreDrv.sys"; DestDir: "{app}"; +Source: "../../ScaphandreDrv.cat"; DestDir: "{app}"; +; Source: "../../ScaphandreDrv.cat"; DestDir: "{#SystemFolder}"; +; Source: "../../ScaphandreDrv.cat"; DestDir: "{#System64Folder}"; +Source: "C:\Program Files (x86)\Windows Kits\10\Tools\10.0.22621.0\x64\devcon.exe"; DestDir: "{app}"; Flags: ignoreversion +Source: 
"C:\Program Files (x86)\Windows Kits\10\bin\10.0.22621.0\x64\certmgr.exe"; DestDir: "{app}"; Flags: ignoreversion +Source: "../../README.md"; DestDir: "{app}"; Flags: ignoreversion +Source: "../../CHANGELOG.md"; DestDir: "{app}"; Flags: ignoreversion +; NOTE: Don't use "Flags: ignoreversion" on any shared system files + +[Icons] +Name: "{group}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}" + +[Run] +Filename: "C:\windows\System32\WindowsPowershell\v1.0\powershell.exe"; Parameters: "Import-Certificate -FilePath {app}\ScaphandreDrvTest.cer -CertStoreLocation Cert:\LocalMachine\Root"; Description: "Register test certificate"; Flags: waituntilidle shellexec +Filename: "{app}/devcon.exe"; Parameters: "install {app}\ScaphandreDrv.inf root\SCAPHANDREDRV"; Description: "Install Driver"; Flags: waituntilidle +Filename: "{app}/devcon.exe"; Parameters: "enable {app}\ScaphandreDrv.inf root\SCAPHANDREDRV"; Description: "Enable Driver"; Flags: waituntilidle +Filename: "{app}/DriverLoader.exe"; Parameters: "install"; WorkingDir: "{app}"; Description: "Install Driver Service"; +Filename: "{app}/DriverLoader.exe"; Parameters: "start"; WorkingDir: "{app}"; Description: "Start Driver Service"; +; Description: "{cm:LaunchProgram,{#StringChange(MyAppName, '&', '&&')}}"; + +[UninstallRun] +Filename: "{app}/DriverLoader.exe"; Parameters: "stop"; WorkingDir: "{app}"; RunOnceId: "StopService"; +Filename: "{app}/DriverLoader.exe"; Parameters: "remove"; WorkingDir: "{app}"; RunOnceId: "RemoveService"; +Filename: "{app}/devcon.exe"; Parameters: "disable ScaphandreDrv"; RunOnceId: "DisableDrier"; +Filename: "{app}/devcon.exe"; Parameters: "remove ScaphandreDrv"; RunOnceId: "RemoveService"; + + diff --git a/packaging/windows/register_log_source.ps1 b/packaging/windows/register_log_source.ps1 new file mode 100644 index 00000000..e6d7283b --- /dev/null +++ b/packaging/windows/register_log_source.ps1 @@ -0,0 +1,40 @@ +# https://github.com/dansmith +# +$source = "scaphandre" + + 
+$wid=[System.Security.Principal.WindowsIdentity]::GetCurrent() +$prp=new-object System.Security.Principal.WindowsPrincipal($wid) +$adm=[System.Security.Principal.WindowsBuiltInRole]::Administrator +$IsAdmin=$prp.IsInRole($adm) + +if($IsAdmin -eq $false) +{ + [System.Reflection.Assembly]::LoadWithPartialName(“System.Windows.Forms”) + [Windows.Forms.MessageBox]::Show(“Please run this as an Administrator”, + “Not Administrator”, + [Windows.Forms.MessageBoxButtons]::OK, + [Windows.Forms.MessageBoxIcon]::Information) + exit +} + + +if ([System.Diagnostics.EventLog]::SourceExists($source) -eq $false) +{ + [System.Diagnostics.EventLog]::CreateEventSource($source, "Application") + + [System.Reflection.Assembly]::LoadWithPartialName(“System.Windows.Forms”) + [Windows.Forms.MessageBox]::Show(“Event log created successfully”, + “Complete”, + [Windows.Forms.MessageBoxButtons]::OK, + [Windows.Forms.MessageBoxIcon]::Information) +} +else +{ + [System.Reflection.Assembly]::LoadWithPartialName(“System.Windows.Forms”) + [Windows.Forms.MessageBox]::Show(“Event log already exists”, + “Complete”, + [Windows.Forms.MessageBoxButtons]::OK, + [Windows.Forms.MessageBoxIcon]::Information) + +} \ No newline at end of file diff --git a/python/.gitignore b/python/.gitignore deleted file mode 100644 index f2914074..00000000 --- a/python/.gitignore +++ /dev/null @@ -1,18 +0,0 @@ -# venv -venv - -# Byte-compiled / optimized / DLL files -__pycache__/ -/target - -# Unit test / coverage reports -.coverage -.pytest_cache/ - -# mypy -.mypy_cache/ - -# sphinx build directory -docs/build - -*.so diff --git a/python/Cargo.lock b/python/Cargo.lock deleted file mode 100644 index 8a5247a0..00000000 --- a/python/Cargo.lock +++ /dev/null @@ -1,1938 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - -[[package]] -name = "aho-corasick" -version = "0.7.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" -dependencies = [ - "memchr", -] - -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - -[[package]] -name = "async-channel" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2114d64672151c0c5eaa5e131ec84a74f06e1e559830dabba01ca30605d66319" -dependencies = [ - "concurrent-queue", - "event-listener", - "futures-core", -] - -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi", -] - -[[package]] -name = "autocfg" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" - -[[package]] -name = "base-x" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc19a4937b4fbd3fe3379793130e42060d10627a360f2127802b10b87e7baf74" - -[[package]] -name = "base64" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bumpalo" -version = "3.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" - -[[package]] -name = "byteorder" -version = "1.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" - -[[package]] -name = "bytes" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" - -[[package]] -name = "cache-padded" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c" - -[[package]] -name = "castaway" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2698f953def977c68f935bb0dfa959375ad4638570e969e2f1e9f433cbf1af6" - -[[package]] -name = "cc" -version = "1.0.73" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "chrono" -version = "0.4.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" -dependencies = [ - "libc", - "num-integer", - "num-traits", - "serde", - "time 0.1.44", - "winapi", -] - -[[package]] -name = "clap" -version = "2.34.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" -dependencies = [ - "ansi_term", - "atty", - "bitflags", 
- "strsim 0.8.0", - "textwrap", - "unicode-width", - "vec_map", -] - -[[package]] -name = "colored" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3616f750b84d8f0de8a58bda93e08e2a81ad3f523089b05f1dffecab48c6cbd" -dependencies = [ - "atty", - "lazy_static", - "winapi", -] - -[[package]] -name = "concurrent-queue" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3" -dependencies = [ - "cache-padded", -] - -[[package]] -name = "const_fn" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbdcdcb6d86f71c5e97409ad45898af11cbc995b4ee8112d59095a28d376c935" - -[[package]] -name = "crc32fast" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" -dependencies = [ - "cfg-if", - "lazy_static", -] - -[[package]] -name = "curl" -version = "0.4.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37d855aeef205b43f65a5001e0997d81f8efca7badad4fad7d897aa7f0d0651f" -dependencies = [ - "curl-sys", - "libc", - "openssl-probe", - "openssl-sys", - "schannel", - "socket2", - "winapi", -] - -[[package]] -name = "curl-sys" -version = "0.4.55+curl-7.83.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23734ec77368ec583c2e61dd3f0b0e5c98b93abe6d2a004ca06b91dd7e3e2762" -dependencies = [ - "cc", - "libc", - "libnghttp2-sys", - "libz-sys", - "openssl-sys", - "pkg-config", - "vcpkg", - "winapi", -] - -[[package]] -name = "dirs" -version = "3.0.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "30baa043103c9d0c2a57cf537cc2f35623889dc0d405e6c3cccfadbc81c71309" -dependencies = [ - "dirs-sys", -] - -[[package]] -name = "dirs-sys" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" -dependencies = [ - "libc", - "redox_users", - "winapi", -] - -[[package]] -name = "discard" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" - -[[package]] -name = "docker-sync" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c989c4ad66535edd02443e7d7699d3ab530df4523f12f6aeb7888bdc5ab7c32" -dependencies = [ - "http", - "isahc", - "serde", - "serde_derive", - "serde_json", -] - -[[package]] -name = "docopt" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f3f119846c823f9eafcf953a8f6ffb6ed69bf6240883261a7f13b634579a51f" -dependencies = [ - "lazy_static", - "regex", - "serde", - "strsim 0.10.0", -] - -[[package]] -name = "encoding_rs" -version = "0.8.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "env_logger" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" -dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", -] - -[[package]] -name = "event-listener" -version = "2.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77f3309417938f28bf8228fcff79a4a37103981e3e186d2ccd19c74b38f4eb71" - -[[package]] -name = "fastrand" -version = "1.7.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" -dependencies = [ - "instant", -] - -[[package]] -name = "flate2" -version = "1.0.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" -dependencies = [ - "crc32fast", - "miniz_oxide", -] - -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - -[[package]] -name = "form_urlencoded" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" -dependencies = [ - "matches", - "percent-encoding", -] - -[[package]] -name = "futures-channel" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" -dependencies = [ - "futures-core", -] - -[[package]] -name = "futures-core" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" - -[[package]] -name = "futures-io" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" - -[[package]] 
-name = "futures-lite" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" -dependencies = [ - "fastrand", - "futures-core", - "futures-io", - "memchr", - "parking", - "pin-project-lite", - "waker-fn", -] - -[[package]] -name = "futures-sink" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" - -[[package]] -name = "futures-task" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" - -[[package]] -name = "futures-util" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" -dependencies = [ - "futures-core", - "futures-task", - "pin-project-lite", - "pin-utils", -] - -[[package]] -name = "getrandom" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" -dependencies = [ - "cfg-if", - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", -] - -[[package]] -name = "h2" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "slab", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "hashbrown" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" - -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - -[[package]] -name = "hostname" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" -dependencies = [ - "libc", - "match_cfg", - "winapi", -] - -[[package]] -name = "http" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff8670570af52249509a86f5e3e18a08c60b177071826898fde8997cf5f6bfbb" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - -[[package]] -name = "http-body" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" -dependencies = [ - "bytes", - "http", - "pin-project-lite", -] - -[[package]] -name = "httparse" -version = "1.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c" - -[[package]] -name = "httpdate" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" - -[[package]] -name = "humantime" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" - -[[package]] -name = "hyper" -version = "0.14.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "httparse", - "httpdate", - 
"itoa", - "pin-project-lite", - "socket2", - "tokio", - "tower-service", - "tracing", - "want", -] - -[[package]] -name = "idna" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" -dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "indexmap" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" -dependencies = [ - "autocfg", - "hashbrown", -] - -[[package]] -name = "indoc" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05a0bd019339e5d968b37855180087b7b9d512c5046fbd244cf8c95687927d6e" - -[[package]] -name = "instant" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "isahc" -version = "1.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "334e04b4d781f436dc315cb1e7515bd96826426345d498149e4bde36b67f8ee9" -dependencies = [ - "async-channel", - "castaway", - "crossbeam-utils", - "curl", - "curl-sys", - "encoding_rs", - "event-listener", - "futures-lite", - "http", - "log", - "mime", - "once_cell", - "polling", - "serde", - "serde_json", - "slab", - "sluice", - "tracing", - "tracing-futures", - "url", - "waker-fn", -] - -[[package]] -name = "itoa" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" - -[[package]] -name = "js-sys" -version = "0.3.57" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "671a26f820db17c2a2750743f1dd03bafd15b98c9f30c7c2628c024c05d73397" -dependencies = [ - "wasm-bindgen", -] - 
-[[package]] -name = "k8s-openapi" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f8de9873b904e74b3533f77493731ee26742418077503683db44e1b3c54aa5c" -dependencies = [ - "base64", - "bytes", - "chrono", - "http", - "percent-encoding", - "serde", - "serde-value", - "serde_json", - "url", -] - -[[package]] -name = "k8s-sync" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d3c2f8f5cb2611742f8ceb73f23451690ff0d930149eac45fcb63ca86fbd443" -dependencies = [ - "base64", - "chrono", - "dirs", - "http", - "isahc", - "k8s-openapi", - "openssl", - "serde", - "serde_yaml", - "tempfile", - "url", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "libc" -version = "0.2.126" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" - -[[package]] -name = "libnghttp2-sys" -version = "0.1.7+1.45.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57ed28aba195b38d5ff02b9170cbff627e336a20925e43b4945390401c5dc93f" -dependencies = [ - "cc", - "libc", -] - -[[package]] -name = "libz-sys" -version = "1.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92e7e15d7610cce1d9752e137625f14e61a28cd45929b6e12e47b50fe154ee2e" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "linked-hash-map" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" - -[[package]] -name = "lock_api" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" -dependencies = [ - "autocfg", - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "loggerv" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60d8de15ae71e760bce7f05447f85f73624fe0d3b1e4c5a63ba5d4cb0748d374" -dependencies = [ - "ansi_term", - "atty", - "log", -] - -[[package]] -name = "match_cfg" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" - -[[package]] -name = "matches" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" - -[[package]] -name = "memchr" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" - -[[package]] -name = "mime" -version = "0.3.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" - -[[package]] -name = "miniz_oxide" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f5c75688da582b8ffc1f1799e9db273f32133c49e048f614d22ec3256773ccc" -dependencies = [ - "adler", -] - -[[package]] -name = "mio" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713d550d9b44d89174e066b7a6217ae06234c10cb47819a88290d2b353c31799" -dependencies = [ - "libc", - "log", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys", -] - -[[package]] -name = "num-integer" -version = "0.1.45" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" -dependencies = [ - "autocfg", - "num-traits", -] - -[[package]] -name = "num-traits" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" -dependencies = [ - "autocfg", -] - -[[package]] -name = "num_cpus" -version = "1.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" -dependencies = [ - "hermit-abi", - "libc", -] - -[[package]] -name = "once_cell" -version = "1.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b10983b38c53aebdf33f542c6275b0f58a238129d00c4ae0e6fb59738d783ca" - -[[package]] -name = "openssl" -version = "0.10.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb81a6430ac911acb25fe5ac8f1d2af1b4ea8a4fdfda0f1ee4292af2e2d8eb0e" -dependencies = [ - "bitflags", - "cfg-if", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "openssl-probe" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" - -[[package]] -name = "openssl-sys" -version = "0.9.73" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5fd19fb3e0a8191c1e34935718976a3e70c112ab9a24af6d7cadccd9d90bc0" -dependencies = [ - "autocfg", - "cc", - "libc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "ordered-float" -version = "2.10.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7940cf2ca942593318d07fcf2596cdca60a85c9e7fab408a5e21a4f9dcd40d87" -dependencies = [ - "num-traits", -] - -[[package]] -name = "parking" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" - -[[package]] -name = "parking_lot" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" -dependencies = [ - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" -dependencies = [ - "cfg-if", - "libc", - "redox_syscall", - "smallvec", - "windows-sys", -] - -[[package]] -name = "percent-encoding" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" - -[[package]] -name = "pin-project" -version = "1.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" -dependencies = [ - "pin-project-internal", -] - -[[package]] -name = "pin-project-internal" -version = "1.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "pin-project-lite" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "pkg-config" -version = "0.3.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" - -[[package]] -name = "polling" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "685404d509889fade3e86fe3a5803bca2ec09b0c0778d5ada6ec8bf7a8de5259" -dependencies = [ - "cfg-if", - "libc", - "log", - "wepoll-ffi", - "winapi", -] - -[[package]] -name = "proc-macro-hack" -version = "0.5.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" - -[[package]] -name = "proc-macro2" -version = "1.0.39" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "procfs" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0941606b9934e2d98a3677759a971756eb821f75764d0e0d26946d08e74d9104" -dependencies = [ - "bitflags", - "byteorder", - "chrono", - "flate2", - "hex", - "lazy_static", - "libc", -] - -[[package]] -name = "protobuf" -version = "2.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96" - -[[package]] -name = "pyo3" -version = "0.16.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e6302e85060011447471887705bb7838f14aba43fcb06957d823739a496b3dc" -dependencies = [ - "cfg-if", - "indoc", - "libc", - "parking_lot", - "pyo3-build-config", - "pyo3-ffi", - "pyo3-macros", - "unindent", -] - -[[package]] -name = "pyo3-build-config" -version = "0.16.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b5b65b546c35d8a3b1b2f0ddbac7c6a569d759f357f2b9df884f5d6b719152c8" -dependencies = [ - "once_cell", - "target-lexicon", -] - -[[package]] -name = "pyo3-ffi" -version = "0.16.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c275a07127c1aca33031a563e384ffdd485aee34ef131116fcd58e3430d1742b" -dependencies = [ - "libc", - "pyo3-build-config", -] - -[[package]] -name = "pyo3-macros" -version = "0.16.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284fc4485bfbcc9850a6d661d627783f18d19c2ab55880b021671c4ba83e90f7" -dependencies = [ - "proc-macro2", - "pyo3-macros-backend", - "quote", - "syn", -] - -[[package]] -name = "pyo3-macros-backend" -version = "0.16.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53bda0f58f73f5c5429693c96ed57f7abdb38fdfc28ae06da4101a257adb7faf" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "quote" -version = "1.0.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "redox_syscall" -version = "0.2.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" -dependencies = [ - "bitflags", -] - -[[package]] -name = "redox_users" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" -dependencies = [ - "getrandom", - "redox_syscall", - "thiserror", -] - -[[package]] -name = "regex" -version = "1.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.6.25" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" - -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] - -[[package]] -name = "riemann_client" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1005d55a9a8cb53f6ab2380792031394fff549b020cde16cecbc0978df9e7242" -dependencies = [ - "docopt", - "libc", - "protobuf", - "rustls", - "rustls-pemfile", - "serde", - "webpki", - "webpki-roots", -] - -[[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin", - "untrusted", - "web-sys", - "winapi", -] - -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -dependencies = [ - "semver", -] - -[[package]] -name = "rustls" -version = "0.19.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" -dependencies = [ - "base64", - "log", - "ring", - "sct", - "webpki", -] - -[[package]] -name = "rustls-pemfile" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" -dependencies = [ - "base64", -] - -[[package]] -name = "ryu" -version = "1.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695" - -[[package]] -name = "scaphandre" -version = 
"0.4.1" -dependencies = [ - "chrono", - "clap", - "colored", - "docker-sync", - "hostname", - "hyper", - "k8s-sync", - "log", - "loggerv", - "procfs", - "protobuf", - "regex", - "riemann_client", - "serde", - "serde_json", - "time 0.2.27", - "tokio", - "warp10", -] - -[[package]] -name = "scaphandre-python" -version = "0.1.0" -dependencies = [ - "env_logger", - "pyo3", - "scaphandre", -] - -[[package]] -name = "schannel" -version = "0.1.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" -dependencies = [ - "lazy_static", - "windows-sys", -] - -[[package]] -name = "scopeguard" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" - -[[package]] -name = "sct" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - -[[package]] -name = "serde" -version = "1.0.137" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde-value" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" -dependencies = [ - "ordered-float", - 
"serde", -] - -[[package]] -name = "serde_derive" -version = "1.0.137" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_json" -version = "1.0.81" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" -dependencies = [ - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "serde_yaml" -version = "0.8.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707d15895415db6628332b737c838b88c598522e4dc70647e59b72312924aebc" -dependencies = [ - "indexmap", - "ryu", - "serde", - "yaml-rust", -] - -[[package]] -name = "sha1" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1da05c97445caa12d05e848c4a4fcbbea29e748ac28f7e80e9b010392063770" -dependencies = [ - "sha1_smol", -] - -[[package]] -name = "sha1_smol" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" - -[[package]] -name = "signal-hook-registry" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" -dependencies = [ - "libc", -] - -[[package]] -name = "slab" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" - -[[package]] -name = "sluice" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7400c0eff44aa2fcb5e31a5f24ba9716ed90138769e4977a2ba6014ae63eb5" -dependencies = [ - "async-channel", - "futures-core", - "futures-io", -] - -[[package]] -name = "smallvec" -version = "1.8.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" - -[[package]] -name = "socket2" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - -[[package]] -name = "standback" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e113fb6f3de07a243d434a56ec6f186dfd51cb08448239fe7bcae73f87ff28ff" -dependencies = [ - "version_check", -] - -[[package]] -name = "stdweb" -version = "0.4.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" -dependencies = [ - "discard", - "rustc_version", - "stdweb-derive", - "stdweb-internal-macros", - "stdweb-internal-runtime", - "wasm-bindgen", -] - -[[package]] -name = "stdweb-derive" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" -dependencies = [ - "proc-macro2", - "quote", - "serde", - "serde_derive", - "syn", -] - -[[package]] -name = "stdweb-internal-macros" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" -dependencies = [ - "base-x", - "proc-macro2", - "quote", - "serde", - "serde_derive", - "serde_json", - "sha1", - "syn", -] - -[[package]] -name = "stdweb-internal-runtime" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" - 
-[[package]] -name = "strsim" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" - -[[package]] -name = "strsim" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" - -[[package]] -name = "syn" -version = "1.0.95" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbaf6116ab8924f39d52792136fb74fd60a80194cf1b1c6ffa6453eef1c3f942" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "target-lexicon" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7fa7e55043acb85fca6b3c01485a2eeb6b69c5d21002e273c79e465f43b7ac1" - -[[package]] -name = "tempfile" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" -dependencies = [ - "cfg-if", - "fastrand", - "libc", - "redox_syscall", - "remove_dir_all", - "winapi", -] - -[[package]] -name = "termcolor" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", -] - -[[package]] -name = "thiserror" -version = "1.0.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.31" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "time" -version = "0.1.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" -dependencies = [ - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", - "winapi", -] - -[[package]] -name = "time" -version = "0.2.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4752a97f8eebd6854ff91f1c1824cd6160626ac4bd44287f7f4ea2035a02a242" -dependencies = [ - "const_fn", - "libc", - "standback", - "stdweb", - "time-macros", - "version_check", - "winapi", -] - -[[package]] -name = "time-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e9c6e26f12cb6d0dd7fc776bb67a706312e7299aed74c8dd5b17ebb27e2f1" -dependencies = [ - "proc-macro-hack", - "time-macros-impl", -] - -[[package]] -name = "time-macros-impl" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd3c141a1b43194f3f56a1411225df8646c55781d5f26db825b3d98507eb482f" -dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - "standback", - "syn", -] - -[[package]] -name = "tinyvec" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" -dependencies = [ - "tinyvec_macros", -] - -[[package]] -name = "tinyvec_macros" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" - -[[package]] -name = "tokio" -version = "1.18.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4903bf0427cf68dddd5aa6a93220756f8be0c34fcfa9f5e6191e103e15a31395" 
-dependencies = [ - "bytes", - "libc", - "memchr", - "mio", - "num_cpus", - "once_cell", - "parking_lot", - "pin-project-lite", - "signal-hook-registry", - "socket2", - "tokio-macros", - "winapi", -] - -[[package]] -name = "tokio-macros" -version = "1.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tokio-util" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f988a1a1adc2fb21f9c12aa96441da33a1728193ae0b95d2be22dbd17fcb4e5c" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "pin-project-lite", - "tokio", - "tracing", -] - -[[package]] -name = "tower-service" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" - -[[package]] -name = "tracing" -version = "0.1.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09" -dependencies = [ - "cfg-if", - "log", - "pin-project-lite", - "tracing-attributes", - "tracing-core", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tracing-core" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" 
-dependencies = [ - "pin-project", - "tracing", -] - -[[package]] -name = "try-lock" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" - -[[package]] -name = "unicode-bidi" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" - -[[package]] -name = "unicode-ident" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee" - -[[package]] -name = "unicode-normalization" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" -dependencies = [ - "tinyvec", -] - -[[package]] -name = "unicode-width" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" - -[[package]] -name = "unindent" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52fee519a3e570f7df377a06a1a7775cdbfb7aa460be7e08de2b1f0e69973a44" - -[[package]] -name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - -[[package]] -name = "url" -version = "2.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" -dependencies = [ - "form_urlencoded", - "idna", - "matches", - "percent-encoding", -] - -[[package]] -name = "vcpkg" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" - -[[package]] -name = 
"vec_map" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" - -[[package]] -name = "version_check" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" - -[[package]] -name = "waker-fn" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" - -[[package]] -name = "want" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" -dependencies = [ - "log", - "try-lock", -] - -[[package]] -name = "warp10" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "140e989c5e92da4e09581133f4de7df32d1ce7de6b7a077652bdfa3f9aef97bf" -dependencies = [ - "isahc", - "percent-encoding", - "serde", - "serde_json", - "time 0.2.27", - "url", -] - -[[package]] -name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "wasm-bindgen" -version = "0.2.80" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27370197c907c55e3f1a9fbe26f44e937fe6451368324e009cba39e139dc08ad" -dependencies = [ - "cfg-if", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.80" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"53e04185bfa3a779273da532f5025e33398409573f348985af9a1cbf3774d3f4" -dependencies = [ - "bumpalo", - "lazy_static", - "log", - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.80" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17cae7ff784d7e83a2fe7611cfe766ecf034111b49deb850a3dc7699c08251f5" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.80" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99ec0dc7a4756fffc231aab1b9f2f578d23cd391390ab27f952ae0c9b3ece20b" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.80" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d554b7f530dee5964d9a9468d95c1f8b8acae4f282807e7d27d4b03099a46744" - -[[package]] -name = "web-sys" -version = "0.3.57" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b17e741662c70c8bd24ac5c5b18de314a2c26c32bf8346ee1e6f53de919c283" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "webpki" -version = "0.21.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "webpki-roots" -version = "0.21.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aabe153544e473b775453675851ecc86863d2a81d786d741f6b76778f2a48940" -dependencies = [ - "webpki", -] - -[[package]] -name = "wepoll-ffi" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb" -dependencies = [ - "cc", -] - -[[package]] -name = "winapi" -version = 
"0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows-sys" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" -dependencies = [ - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" - -[[package]] -name = "windows_i686_gnu" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" - -[[package]] -name = "windows_i686_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.36.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" - -[[package]] -name = "yaml-rust" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", -] diff --git a/python/Cargo.toml b/python/Cargo.toml deleted file mode 100644 index 2fb3fd55..00000000 --- a/python/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "scaphandre-python" -version = "0.1.0" -authors = ["fvaleye@github.com"] -homepage = "https://hubblo-org.github.io/scaphandre-documentation" -license = "Apache-2.0" -description = "Electrical power consumption measurement agent." -readme = "README.md" -edition = "2021" -keywords = ["energy", "sustainability", "measure", "virtual-machine", "energy-monitor", "electricity", "virtual-machines", "energy-consumption", "electricity-consumption", "energy-efficiency", "carbon-footprint"] - -[lib] -name = "scaphandre" -crate-type = ["cdylib"] - -[dependencies] -env_logger = "0" - -[dependencies.pyo3] -version = "0.16" -features = ["extension-module", "abi3", "abi3-py37"] - -[dependencies.scaphandre] -path = "../" -version = "0" \ No newline at end of file diff --git a/python/LICENSE.txt b/python/LICENSE.txt deleted file mode 100644 index 261eeb9e..00000000 --- a/python/LICENSE.txt +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/python/Makefile b/python/Makefile deleted file mode 100644 index 3b805cb3..00000000 --- a/python/Makefile +++ /dev/null @@ -1,78 +0,0 @@ -.DEFAULT_GOAL := help - -VENV := venv -MATURIN_VERSION := $(shell awk -F '[ ="]+' '$$1 == "requires" { print $$4 }' pyproject.toml) -PACKAGE_VERSION := $(shell cargo pkgid | cut -d\# -f2 | cut -d: -f2) - -.PHONY: setup-venv -setup-venv: ## Setup the virtualenv - $(info --- Setup virtualenv ---) - python -m venv $(VENV) - -.PHONY: setup -setup: ## Setup the requirements - $(info --- Setup dependencies ---) - pip install maturin==$(MATURIN_VERSION) - -.PHONY: build -build: setup ## Build Python binding of scaphandre - $(info --- Build Python binding ---) - maturin build $(MATURIN_EXTRA_ARGS) - -.PHONY: develop -develop: setup ## Install Python binding of scaphandre - $(info --- Develop with Python binding ---) - maturin develop --extras=devel $(MATURIN_EXTRA_ARGS) - -.PHONY: install -install: build ## Install Python binding of scaphandre - $(info --- Uninstall Python binding ---) - pip uninstall -y scaphandre - $(info --- Install Python binding ---) - $(eval TARGET_WHEEL := $(shell ls ../target/wheels/scaphandre-${PACKAGE_VERSION}-*.whl)) - pip install $(TARGET_WHEEL)[devel] - -.PHONY: format -format: ## Format the code - $(info --- Rust format ---) - cargo fmt - $(info --- Python format ---) - black . - isort . - -.PHONY: check-rust -check-rust: ## Run check on Rust - $(info --- Check Rust clippy ---) - cargo clippy - $(info --- Check Rust format ---) - cargo fmt -- --check - -.PHONY: check-python -check-python: ## Run check on Python - $(info Check Python isort) - isort --diff --check-only . - $(info Check Python black) - black --check . 
- $(info Check Python mypy) - mypy - -.PHONY: unit-test -unit-test: ## Run unit test - $(info --- Run Python unit-test ---) - python -m pytest - -.PHONY: build-documentation -build-documentation: ## Build documentation with Sphinx - $(info --- Run build of the Sphinx documentation ---) - sphinx-build -Wn -b html -d ./docs/build/doctrees ./docs/source ./docs/build/html - -.PHONY: clean -clean: ## Run clean - $(warning --- Clean virtualenv and target directory ---) - cargo clean - rm -rf $(VENV) - find . -type f -name '*.pyc' -delete - -.PHONY: help -help: - @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' diff --git a/python/README.md b/python/README.md deleted file mode 100644 index e69de29b..00000000 diff --git a/python/docs/Makefile b/python/docs/Makefile deleted file mode 100644 index d0c3cbf1..00000000 --- a/python/docs/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line, and also -# from the environment for the first two. -SPHINXOPTS ?= -SPHINXBUILD ?= sphinx-build -SOURCEDIR = source -BUILDDIR = build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
-%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/python/docs/make.bat b/python/docs/make.bat deleted file mode 100644 index 747ffb7b..00000000 --- a/python/docs/make.bat +++ /dev/null @@ -1,35 +0,0 @@ -@ECHO OFF - -pushd %~dp0 - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set SOURCEDIR=source -set BUILDDIR=build - -%SPHINXBUILD% >NUL 2>NUL -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found. Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. - echo.If you don't have Sphinx installed, grab it from - echo.https://www.sphinx-doc.org/ - exit /b 1 -) - -if "%1" == "" goto help - -%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% -goto end - -:help -%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% - -:end -popd diff --git a/python/docs/source/_static/.gitignore b/python/docs/source/_static/.gitignore deleted file mode 100644 index 86d0cb27..00000000 --- a/python/docs/source/_static/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -# Ignore everything in this directory -* -# Except this file -!.gitignore \ No newline at end of file diff --git a/python/docs/source/api_reference.rst b/python/docs/source/api_reference.rst deleted file mode 100644 index 0b9b37a0..00000000 --- a/python/docs/source/api_reference.rst +++ /dev/null @@ -1,11 +0,0 @@ -API Reference -==================================== - -Scaphandre ----------- - -.. automodule:: scaphandre.Scaphandre - :members: - -.. 
automodule:: scaphandre.EnergyRecord - :members: \ No newline at end of file diff --git a/python/docs/source/conf.py b/python/docs/source/conf.py deleted file mode 100644 index c3d528a3..00000000 --- a/python/docs/source/conf.py +++ /dev/null @@ -1,80 +0,0 @@ -# Configuration file for the Sphinx documentation builder. -# -# This file only contains a selection of the most common options. For a full -# list see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - -# -- Path setup -------------------------------------------------------------- - -import os - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -# import os -# import sys -# sys.path.insert(0, os.path.abspath('.')) -import sys - -import toml - -sys.path.insert(0, os.path.abspath("../scaphandre/")) -sys.path.insert(0, os.path.abspath("./_ext")) - - -def get_release_version() -> str: - """ - Get the release version from the Cargo.toml file - - :return: - """ - cargo_content = toml.load("../../Cargo.toml") - return cargo_content["package"]["version"] - - -# -- Project information ----------------------------------------------------- - -project = "scaphandre" -copyright = "2022, hubblo" -author = "hubblo" - -# The full version, including alpha/beta/rc tags -release = get_release_version() - - -# -- General configuration --------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx_rtd_theme", - "sphinx.ext.autodoc", - "sphinx.ext.intersphinx", -] -autodoc_typehints = "description" -nitpicky = True - -# Add any paths that contain templates here, relative to this directory. 
-templates_path = ["_templates"] - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = [] - - -# -- Options for HTML output ------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = "sphinx_rtd_theme" - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -page_source_prefix = "python/docs/source" diff --git a/python/docs/source/index.rst b/python/docs/source/index.rst deleted file mode 100644 index 620f3fd9..00000000 --- a/python/docs/source/index.rst +++ /dev/null @@ -1,19 +0,0 @@ -.. scaphandre documentation master file, created by - sphinx-quickstart on Sun Jul 3 20:27:34 2022. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to scaphandre's documentation! -====================================== - -.. toctree:: - :maxdepth: 2 - - api_reference - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/python/pyproject.toml b/python/pyproject.toml deleted file mode 100644 index 6274c98c..00000000 --- a/python/pyproject.toml +++ /dev/null @@ -1,60 +0,0 @@ -[build-system] -requires = ["maturin==0.12.20"] -build-backend = "maturin" - -[project] -name = "scaphandre" -description = "Electrical power consumption measurement agent." 
-readme = "README.md" -license = {file = "LICENSE.txt"} -requires-python = ">=3.7" -keywords = ["energy", "sustainability", "measure", "virtual-machine", "energy-monitor", "electricity", "virtual-machines", "energy-consumption", "electricity-consumption", "energy-efficiency", "carbon-footprint"] -classifiers = [ - "Development Status :: 3 - Alpha", - "License :: OSI Approved :: Apache Software License", - "Programming Language :: Python :: 3 :: Only" -] -dependencies = [] - -[project.optional-dependencies] -devel = [ - "mypy", - "black", - "isort", - "pytest", - "pytest-mock", - "pytest-cov", - "sphinx", - "sphinx-rtd-theme", - "toml", -] - -[project.urls] -documentation = "https://hubblo-org.github.io/scaphandre-documentation" -repository = "https://github.com/hubblo-org/scaphandre" - -[tool.mypy] -files = "scaphandre/*.py" -exclude = "^tests" -mypy_path = "./stubs" -disallow_any_generics = true -disallow_subclassing_any = true -disallow_untyped_calls = true -disallow_untyped_defs = true -disallow_incomplete_defs = true -check_untyped_defs = true -disallow_untyped_decorators = true -no_implicit_optional = true -warn_redundant_casts = true -warn_unused_ignores = true -warn_return_any = false -implicit_reexport = false -strict_equality = true - -[tool.isort] -profile = "black" -src_paths = ["scaphandre", "tests"] - -[tool.black] -include = '\.pyi?$' -exclude = "venv" \ No newline at end of file diff --git a/python/scaphandre/__init__.py b/python/scaphandre/__init__.py deleted file mode 100644 index 5e36334c..00000000 --- a/python/scaphandre/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .sensors import * diff --git a/python/scaphandre/sensors.py b/python/scaphandre/sensors.py deleted file mode 100644 index 7bfa4400..00000000 --- a/python/scaphandre/sensors.py +++ /dev/null @@ -1,59 +0,0 @@ -from dataclasses import dataclass - -from .scaphandre import RawScaphandre - - -@dataclass -class EnergyRecord: - """ - Energy record measured by Scaphandre - """ - - timestamp: 
str - value: str - unit: str - - -@dataclass(init=False) -class Scaphandre: - """ - Scaphandre, a metrology agent dedicated to electrical power consumption metrics. - """ - - sensor_name: str - - def __init__( - self, - is_virtual_machine: bool = False, - buffer_per_socket_max_kbytes: int = 8, - buffer_per_domain_max_kbytes: int = 8, - ): - """ - Init Scaphandre - - :param is_virtual_machine: running on a virtual machine for powercap_rapl sensor - :param buffer_per_socket_max_kbytes: max buffer per socket in kbytes for powercap_rapl sensor - :param buffer_per_domain_max_kbytes: max buffer per domain in kbytes for powercap_rapl sensor - """ - self._scaphandre = RawScaphandre( - buffer_per_socket_max_kbytes, - buffer_per_domain_max_kbytes, - is_virtual_machine, - ) - self.name = self._scaphandre.sensor_name - - def is_compatible(self) -> bool: - """ - Check if Scaphandre has a sensor available and valid depending on the hardware context. - - :return: a sensor is available and valid - """ - return self._scaphandre.is_compatible() - - def get_energy_consumption_measures(self) -> EnergyRecord: - """ - Get the energy records from Scaphandre. 
- - :return: the energy record measured - """ - return self._scaphandre.get_energy_consumption_measures() diff --git a/python/src/lib.rs b/python/src/lib.rs deleted file mode 100644 index 795f3499..00000000 --- a/python/src/lib.rs +++ /dev/null @@ -1,91 +0,0 @@ -#![deny(warnings)] - -extern crate pyo3; - -use pyo3::create_exception; -use pyo3::exceptions::PyException; -use pyo3::prelude::*; -use scaphandre::sensors; -use scaphandre::sensors::powercap_rapl; -use scaphandre::sensors::units; -use sensors::{powercap_rapl::PowercapRAPLSensor, Sensor}; -use std::error::Error; -use std::time::Duration; - -create_exception!(scaphandre, PyScaphandreError, PyException); - -impl PyScaphandreError { - fn from_error(err: Box) -> pyo3::PyErr { - PyScaphandreError::new_err(err.to_string()) - } -} - -#[pyclass] -struct RawScaphandre { - _scaphandre: powercap_rapl::PowercapRAPLSensor, - #[pyo3(get)] - sensor_name: String, -} - -#[pymethods] -impl RawScaphandre { - #[new] - fn new( - buffer_per_socket_max_kbytes: u16, - buffer_per_domain_max_kbytes: u16, - is_virtual_machine: bool, - ) -> PyResult { - let sensor = PowercapRAPLSensor::new( - buffer_per_socket_max_kbytes, - buffer_per_domain_max_kbytes, - is_virtual_machine, - ); - Ok(RawScaphandre { - _scaphandre: sensor, - sensor_name: "PowercapRAPL".to_string(), - }) - } - - fn is_compatible(&self) -> bool { - matches!(PowercapRAPLSensor::check_module(), Ok(_)) - } - - fn get_energy_consumption_measures(&self) -> PyResult> { - Ok(self - ._scaphandre - .generate_topology() - .map_err(PyScaphandreError::from_error)? 
- .record_buffer - .iter() - .map(|record| RawEnergyRecord { - _timestamp: record.timestamp, - _value: record.value.clone(), - _unit: record.unit, - }) - .collect()) - } -} - -#[pyclass] -struct RawEnergyRecord { - _timestamp: Duration, - _value: String, - _unit: units::Unit, -} - -#[pyfunction] -fn rust_core_version() -> &'static str { - scaphandre::crate_version() -} - -#[pymodule] -// module name need to match project name -fn scaphandre(py: Python, m: &PyModule) -> PyResult<()> { - env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("warn")).init(); - - m.add_function(pyo3::wrap_pyfunction!(rust_core_version, m)?)?; - m.add_class::()?; - m.add_class::()?; - m.add("PyScaphandreError", py.get_type::())?; - Ok(()) -} diff --git a/python/stubs/scaphandre/__init__.pyi b/python/stubs/scaphandre/__init__.pyi deleted file mode 100644 index c35db584..00000000 --- a/python/stubs/scaphandre/__init__.pyi +++ /dev/null @@ -1,3 +0,0 @@ -from typing import Any - -RawScaphandre: Any diff --git a/python/stubs/scaphandre/scaphandre.pyi b/python/stubs/scaphandre/scaphandre.pyi deleted file mode 100644 index c35db584..00000000 --- a/python/stubs/scaphandre/scaphandre.pyi +++ /dev/null @@ -1,3 +0,0 @@ -from typing import Any - -RawScaphandre: Any diff --git a/python/tests/test_scaphandre.py b/python/tests/test_scaphandre.py deleted file mode 100644 index 16fcfc96..00000000 --- a/python/tests/test_scaphandre.py +++ /dev/null @@ -1,5 +0,0 @@ -from scaphandre import RawScaphandre, Scaphandre - - -def test_scaphandre_should_init_with_the_good_name(): - assert Scaphandre().name == "PowercapRAPL" diff --git a/src/exporters/json.rs b/src/exporters/json.rs index 3fbeef1f..c448cd4f 100644 --- a/src/exporters/json.rs +++ b/src/exporters/json.rs @@ -1,94 +1,86 @@ use crate::exporters::*; use crate::sensors::Sensor; -use clap::Arg; +use regex::Regex; use serde::{Deserialize, Serialize}; -use std::fs; -use std::fs::File; -use std::path::PathBuf; -use std::thread; -use 
std::time::{Duration, Instant}; - -/// An Exporter that displays power consumption data of the host -/// and its processes on the standard output of the terminal. -pub struct JSONExporter { - sensor: Box, - reports: Vec, +use std::{ + fs::File, + io::{BufWriter, Write}, + path::{Path, PathBuf}, + thread, + time::{Duration, Instant}, +}; + +/// An Exporter that writes power consumption data of the host +/// and its processes in the JSON format, either in a file or +/// to the standard output. +pub struct JsonExporter { + metric_generator: MetricGenerator, + time_step: Duration, + time_limit: Option, + max_top_consumers: u16, + out_writer: BufWriter>, + process_regex: Option, + container_regex: Option, + monitor_resources: bool, + watch_containers: bool, } -impl Exporter for JSONExporter { - /// Lanches runner() - fn run(&mut self, parameters: ArgMatches) { - self.runner(parameters); - } +// Note: clap::Args automatically generate Args for the fields of this struct, +// using the field's name as the argument's name, and the doc comment +// above the field as the argument's description. 
- /// Returns options needed for that exporter, as a HashMap - - fn get_options() -> Vec> { - let mut options = Vec::new(); - let arg = Arg::with_name("timeout") - .help("Maximum time spent measuring, in seconds.") - .long("timeout") - .short("t") - .required(false) - .takes_value(true); - options.push(arg); - - let arg = Arg::with_name("step_duration") - .default_value("2") - .help("Set measurement step duration in second.") - .long("step") - .short("s") - .required(false) - .takes_value(true); - options.push(arg); - - let arg = Arg::with_name("step_duration_nano") - .default_value("0") - .help("Set measurement step duration in nano second.") - .long("step_nano") - .short("n") - .required(false) - .takes_value(true); - options.push(arg); - - let arg = Arg::with_name("file_path") - .default_value("") - .help("Destination file for the report.") - .long("file") - .short("f") - .required(false) - .takes_value(true); - options.push(arg); - - let arg = Arg::with_name("max_top_consumers") - .default_value("10") - .help("Maximum number of processes to watch.") - .long("max-top-consumers") - .short("m") - .required(false) - .takes_value(true); - options.push(arg); - - let arg = Arg::with_name("containers") - .help("Monitor and apply labels for processes running as containers") - .long("containers") - .required(false) - .takes_value(false); - options.push(arg); - - // the resulting labels of this option are not yet used by this exporter, activate this option once we display something interesting about it - //let arg = Arg::with_name("qemu") - // .help("Apply labels to metrics of processes looking like a Qemu/KVM virtual machine") - // .long("qemu") - // .short("q") - // .required(false) - // .takes_value(false); - //options.push(arg); - - options - } +/// Holds the arguments for a JsonExporter. +/// +/// When using Scaphandre as a command-line application, such a struct will be +/// automatically populated by the clap library. 
If you're using Scaphandre as +/// a library, you should populate the arguments yourself. +#[derive(clap::Args, Debug)] +pub struct ExporterArgs { + /// Maximum time spent measuring, in seconds. + /// If unspecified, runs forever. + #[arg(short, long)] + pub timeout: Option, + + /// Interval between two measurements, in seconds + #[arg(short, long, value_name = "SECONDS", default_value_t = 2)] + pub step: u64, + + /// Additional step duration in _nano_ seconds. + /// This is added to `step` to get the final duration. + #[arg(long, value_name = "NANOSECS", default_value_t = 0)] + pub step_nano: u32, + + /// Maximum number of processes to watch + #[arg(long, default_value_t = 10)] + pub max_top_consumers: u16, + + /// Destination file for the report (if absent, print the report to stdout) + #[arg(short, long)] + pub file: Option, + + /// Monitor and apply labels for processes running as containers + #[arg(long)] + pub containers: bool, + + /// Filter processes based on regular expressions (example: 'scaph\\w\\w.e') + #[arg(long)] + pub process_regex: Option, + + /// Filter containers based on regular expressions + #[arg(long)] + pub container_regex: Option, + + /// Monitor and incude CPU, RAM and Disk usage per process + #[arg(long)] + pub resources: bool, + // TODO uncomment this option once we display something interesting about it + // /// Apply labels to metrics of processes looking like a Qemu/KVM virtual machine + // #[arg(short, long)] + // pub qemu: bool } +// Below are the structures that will store the reports. 
+ #[derive(Serialize, Deserialize)] struct Domain { name: String, @@ -106,21 +98,54 @@ struct Socket { #[derive(Serialize, Deserialize)] struct Consumer { exe: PathBuf, + cmdline: String, pid: i32, + resources_usage: Option, consumption: f32, timestamp: f64, container: Option, } + +#[derive(Serialize, Deserialize)] +struct ResourcesUsage { + cpu_usage: String, + cpu_usage_unit: String, + memory_usage: String, + memory_usage_unit: String, + memory_virtual_usage: String, + memory_virtual_usage_unit: String, + disk_usage_write: String, + disk_usage_write_unit: String, + disk_usage_read: String, + disk_usage_read_unit: String, +} + #[derive(Serialize, Deserialize)] struct Container { + name: String, id: String, runtime: String, scheduler: String, } #[derive(Serialize, Deserialize)] +struct Disk { + disk_type: String, + disk_mount_point: String, + disk_is_removable: bool, + disk_file_system: String, + disk_total_bytes: String, + disk_available_bytes: String, + disk_name: String, +} +#[derive(Serialize, Deserialize)] +struct Components { + disks: Option>, +} +#[derive(Serialize, Deserialize)] struct Host { consumption: f32, timestamp: f64, + components: Components, } #[derive(Serialize, Deserialize)] struct Report { @@ -129,109 +154,241 @@ struct Report { sockets: Vec, } -impl JSONExporter { - /// Instantiates and returns a new JSONExporter - pub fn new(sensor: Box) -> JSONExporter { - JSONExporter { - sensor, - reports: Vec::new(), +impl Exporter for JsonExporter { + /// Runs [iterate()] every `step` until `timeout` + fn run(&mut self) { + let step = self.time_step; + info!("Measurement step is: {step:?}"); + + if let Some(timeout) = self.time_limit { + let t0 = Instant::now(); + while t0.elapsed() <= timeout { + self.iterate(); + thread::sleep(self.time_step); + } + } else { + loop { + self.iterate(); + thread::sleep(self.time_step); + } } } - /// Runs iteration() every 'step', until 'timeout' - pub fn runner(&mut self, parameters: ArgMatches) { - let topology = 
self.sensor.get_topology().unwrap(); - let mut metric_generator = MetricGenerator::new( - topology, - utils::get_hostname(), - parameters.is_present("qemu"), - parameters.is_present("containers"), - ); + fn kind(&self) -> &str { + "json" + } +} - // We have a default value of 2s so it is safe to unwrap the option - // Panic if a non numerical value is passed - let step_duration: u64 = parameters - .value_of("step_duration") - .unwrap() - .parse() - .expect("Wrong step_duration value, should be a number of seconds"); - let step_duration_nano: u32 = parameters - .value_of("step_duration_nano") - .unwrap() - .parse() - .expect("Wrong step_duration_nano value, should be a number of nano seconds"); - - info!("Measurement step is: {}s", step_duration); - if let Some(timeout) = parameters.value_of("timeout") { - let now = Instant::now(); - - let timeout_secs: u64 = timeout.parse().unwrap(); - while now.elapsed().as_secs() <= timeout_secs { - self.iterate(¶meters, &mut metric_generator); - thread::sleep(Duration::new(step_duration, step_duration_nano)); - } +impl JsonExporter { + /// Instantiates and returns a new JsonExporter. 
+ pub fn new(sensor: &dyn Sensor, args: ExporterArgs) -> JsonExporter { + // Prepare the retrieval of the measurements + let topo = sensor + .get_topology() + .expect("sensor topology should be available"); + let metric_generator = + MetricGenerator::new(topo, utils::get_hostname(), false, args.containers); + + // Extract the parameters we need to run the exporter + let time_step = Duration::new(args.step, args.step_nano); + let time_limit; + if let Some(t) = args.timeout { + time_limit = Some(Duration::from_secs(t.unsigned_abs())) } else { - loop { - self.iterate(¶meters, &mut metric_generator); - thread::sleep(Duration::new(step_duration, step_duration_nano)); + time_limit = None + }; + let max_top_consumers = args.max_top_consumers; + let process_regex = args.process_regex; + let container_regex = args.container_regex; + let monitor_resources = args.resources; + + // Prepare the output (either stdout or a file) + let output: Box = match args.file { + Some(f) => { + let path = Path::new(&f); + Box::new(File::create(path).unwrap_or_else(|_| panic!("failed to open file {f}"))) } + None => Box::new(std::io::stdout()), + }; + let out_writer = BufWriter::new(output); + JsonExporter { + metric_generator, + time_step, + time_limit, + max_top_consumers, + out_writer, + process_regex, + container_regex, + monitor_resources, + watch_containers: args.containers, } } - fn iterate(&mut self, parameters: &ArgMatches, metric_generator: &mut MetricGenerator) { - metric_generator.topology.refresh(); - self.retrieve_metrics(parameters, metric_generator); + fn gen_disks_report(&self, metrics: &Vec<&Metric>) -> Vec { + let mut res: Vec = vec![]; + for m in metrics { + let metric_disk_name = m.attributes.get("disk_name").unwrap(); + if let Some(disk) = res.iter_mut().find(|x| metric_disk_name == &x.disk_name) { + info!("editing disk"); + disk.disk_name = metric_disk_name.clone(); + if m.name == "scaph_host_disk_available_bytes" { + disk.disk_available_bytes = 
m.metric_value.to_string(); + } else if m.name == "scaph_host_disk_total_bytes" { + disk.disk_total_bytes = m.metric_value.to_string(); + } + } else { + info!("adding disk"); + res.push(Disk { + disk_name: metric_disk_name.clone(), + disk_available_bytes: { + if m.name == "scaph_host_disk_available_bytes" { + m.metric_value.to_string() + } else { + String::from("") + } + }, + disk_file_system: { + if let Some(metric_disk_file_system) = m.attributes.get("disk_file_system") + { + metric_disk_file_system.clone() + } else { + String::from("") + } + }, + disk_is_removable: { + if let Some(metric_disk_is_removable) = + m.attributes.get("disk_is_removable") + { + metric_disk_is_removable.parse::().unwrap() + } else { + false + } + }, + disk_mount_point: { + if let Some(metric_disk_mount_point) = m.attributes.get("disk_mount_point") + { + metric_disk_mount_point.clone() + } else { + String::from("") + } + }, + disk_total_bytes: { + if m.name == "scaph_host_disk_total_bytes" { + m.metric_value.to_string() + } else { + String::from("") + } + }, + disk_type: { + if let Some(metric_disk_type) = m.attributes.get("disk_type") { + metric_disk_type.clone() + } else { + String::from("") + } + }, + }) + } + } + res } - fn retrieve_metrics( - &mut self, - parameters: &ArgMatches, - metric_generator: &mut MetricGenerator, - ) { - metric_generator.gen_all_metrics(); + fn iterate(&mut self) { + self.metric_generator.topology.refresh(); + self.retrieve_metrics(); + } - let metrics = metric_generator.pop_metrics(); + fn retrieve_metrics(&mut self) { + self.metric_generator.gen_all_metrics(); + + let metrics = self.metric_generator.pop_metrics(); let mut metrics_iter = metrics.iter(); + let socket_metrics_res = metrics_iter.find(|x| x.name == "scaph_socket_power_microwatts"); + //TODO: fix for multiple sockets let mut host_report: Option = None; - if let Some(host_metric) = metrics_iter.find(|x| x.name == "scaph_host_power_microwatts") { + let disks = self.gen_disks_report( + &metrics_iter 
+ .filter(|x| x.name.starts_with("scaph_host_disk_")) + .collect(), + ); + if let Some(host_metric) = &metrics + .iter() + .find(|x| x.name == "scaph_host_power_microwatts") + { let host_power_string = format!("{}", host_metric.metric_value); let host_power_f32 = host_power_string.parse::().unwrap(); if host_power_f32 > 0.0 { host_report = Some(Host { consumption: host_power_f32, timestamp: host_metric.timestamp.as_secs_f64(), + components: Components { disks: None }, }); } } else { info!("didn't find host metric"); + // TODO in that case, no report is written, thus I think we should return here (?) }; - let consumers = metric_generator.topology.proc_tracker.get_top_consumers( - parameters - .value_of("max_top_consumers") - .unwrap_or("10") - .parse::() - .unwrap(), - ); - let top_consumers = consumers + if let Some(host) = &mut host_report { + host.components.disks = Some(disks); + } + + let max_top = self.max_top_consumers; + let consumers: Vec<(IProcess, f64)> = if let Some(regex_filter) = &self.process_regex { + debug!("Processes filtered by '{}':", regex_filter.as_str()); + self.metric_generator + .topology + .proc_tracker + .get_filtered_processes(regex_filter) + } else if let Some(regex_filter) = &self.container_regex { + debug!("Processes filtered by '{}':", regex_filter.as_str()); + #[cfg(feature = "containers")] + { + self.metric_generator + .get_processes_filtered_by_container_name(regex_filter) + } + + #[cfg(not(feature = "containers"))] + { + self.metric_generator + .topology + .proc_tracker + .get_top_consumers(max_top) + } + } else { + self.metric_generator + .topology + .proc_tracker + .get_top_consumers(max_top) + }; + + let mut top_consumers = consumers .iter() .filter_map(|(process, _value)| { metrics .iter() .find(|x| { x.name == "scaph_process_power_consumption_microwatts" - && process.pid - == x.attributes.get("pid").unwrap().parse::().unwrap() + && &process.pid.to_string() == x.attributes.get("pid").unwrap() }) .map(|metric| Consumer { exe: 
PathBuf::from(metric.attributes.get("exe").unwrap()), - pid: process.pid, + cmdline: metric.attributes.get("cmdline").unwrap().clone(), + pid: process.pid.to_string().parse::().unwrap(), consumption: format!("{}", metric.metric_value).parse::().unwrap(), + resources_usage: None, timestamp: metric.timestamp.as_secs_f64(), - container: match parameters.is_present("containers") { - true => metric.attributes.get("container_id").map(|container_id| { - Container { + container: if self.watch_containers { + metric + .attributes + .get("container_id") + .map(|container_id| Container { id: String::from(container_id), + name: String::from( + metric + .attributes + .get("container_names") + .unwrap_or(&String::from("unknown")), + ), runtime: String::from( metric .attributes @@ -244,31 +401,74 @@ impl JSONExporter { .get("container_scheduler") .unwrap_or(&String::from("unknown")), ), - } - }), - false => None, + }) + } else { + None }, }) }) .collect::>(); - let all_sockets = metric_generator + if self.monitor_resources { + for c in top_consumers.iter_mut() { + let mut res = ResourcesUsage { + cpu_usage: String::from("0"), + cpu_usage_unit: String::from("%"), + disk_usage_read: String::from("0"), + disk_usage_read_unit: String::from("Bytes"), + disk_usage_write: String::from("0"), + disk_usage_write_unit: String::from("Bytes"), + memory_usage: String::from("0"), + memory_usage_unit: String::from("Bytes"), + memory_virtual_usage: String::from("0"), + memory_virtual_usage_unit: String::from("Bytes"), + }; + let mut metrics = metrics.iter().filter(|x| { + x.name.starts_with("scaph_process_") + && x.attributes.get("pid").unwrap() == &c.pid.to_string() + }); + if let Some(cpu_usage_metric) = + metrics.find(|y| y.name == "scaph_process_cpu_usage_percentage") + { + res.cpu_usage = cpu_usage_metric.metric_value.to_string(); + } + if let Some(mem_usage_metric) = + metrics.find(|y| y.name == "scaph_process_memory_bytes") + { + res.memory_usage = mem_usage_metric.metric_value.to_string(); 
+ } + if let Some(mem_virtual_usage_metric) = + metrics.find(|y| y.name == "scaph_process_memory_virtual_bytes") + { + res.memory_virtual_usage = mem_virtual_usage_metric.metric_value.to_string(); + } + if let Some(disk_write_metric) = + metrics.find(|y| y.name == "scaph_process_disk_write_bytes") + { + res.disk_usage_write = disk_write_metric.metric_value.to_string(); + } + if let Some(disk_read_metric) = + metrics.find(|y| y.name == "scaph_process_disk_read_bytes") + { + res.disk_usage_read = disk_read_metric.metric_value.to_string(); + } + c.resources_usage = Some(res); + } + } + + let all_sockets = self + .metric_generator .topology .get_sockets_passive() .iter() .filter_map(|socket| { - if let Some(metric) = metrics_iter.find(|x| { - if x.name == "scaph_socket_power_microwatts" { - socket.id - == x.attributes - .get("socket_id") - .unwrap() - .parse::() - .unwrap() - } else { - info!("socket not found ! "); - false - } + if let Some(metric) = socket_metrics_res.iter().find(|x| { + socket.id + == x.attributes + .get("socket_id") + .unwrap() + .parse::() + .unwrap() }) { let socket_power = format!("{}", metric.metric_value).parse::().unwrap(); @@ -284,7 +484,7 @@ impl JSONExporter { == socket.id }) .map(|d| Domain { - name: d.name.clone(), + name: d.attributes.get("domain_name").unwrap().clone(), consumption: format!("{}", d.metric_value).parse::().unwrap(), timestamp: d.timestamp.as_secs_f64(), }) @@ -310,20 +510,9 @@ impl JSONExporter { sockets: all_sockets, }; - let file_path = parameters.value_of("file_path").unwrap(); - // Print json - if file_path.is_empty() { - let json: String = - serde_json::to_string(&report).expect("Unable to parse report"); - println!("{}", &json); - } else { - self.reports.push(report); - // Serialize it to a JSON string. 
- let json: String = - serde_json::to_string(&self.reports).expect("Unable to parse report"); - let _ = File::create(file_path); - fs::write(file_path, json).expect("Unable to write file"); - } + // Serialize the report to json + serde_json::to_writer(&mut self.out_writer, &report) + .expect("report should be serializable to JSON"); } None => { info!("No data yet, didn't write report."); diff --git a/src/exporters/mod.rs b/src/exporters/mod.rs index 8241a645..5b3d750c 100644 --- a/src/exporters/mod.rs +++ b/src/exporters/mod.rs @@ -2,9 +2,12 @@ //! //! `Exporter` is the root for all exporters. It defines the [Exporter] trait //! needed to implement an exporter. +#[cfg(feature = "json")] pub mod json; #[cfg(feature = "prometheus")] pub mod prometheus; +#[cfg(feature = "prometheuspush")] +pub mod prometheuspush; #[cfg(target_os = "linux")] pub mod qemu; #[cfg(feature = "riemann")] @@ -14,11 +17,10 @@ pub mod utils; #[cfg(feature = "warpten")] pub mod warpten; use crate::sensors::{ - utils::{current_system_time_since_epoch, page_size, IProcess}, + utils::{current_system_time_since_epoch, IProcess}, RecordGenerator, Topology, }; use chrono::Utc; -use clap::ArgMatches; use std::collections::HashMap; use std::fmt; use std::time::Duration; @@ -28,23 +30,28 @@ use { docker_sync::{container::Container, Docker}, k8s_sync::kubernetes::Kubernetes, k8s_sync::Pod, + ordered_float::*, + regex::Regex, utils::{get_docker_client, get_kubernetes_client}, }; /// General metric definition. #[derive(Debug)] -struct Metric { +pub struct Metric { /// `name` is the metric name, it will be used as service field for Riemann. name: String, // Will be used as service for Riemann /// `metric_type` mostly used by Prometheus, define is it is a gauge, counter... metric_type: String, /// `ttl` time to live for this metric used by Riemann. + #[allow(dead_code)] ttl: f32, /// `hostname` host that provides the metric. 
hostname: String, /// `state` used by Riemann, define a state like Ok or Ko regarding this metric. + #[allow(dead_code)] state: String, /// `tags` used by Riemann, tags attached to the metric. + #[allow(dead_code)] tags: Vec, /// `attributes` used by exporters to better qualify the metric. In Prometheus context /// this is used as a metric tag (socket_id) : `scaph_self_socket_stats_nb{socket_id="0"} 2`. @@ -59,12 +66,11 @@ struct Metric { timestamp: Duration, } -#[derive(Clone)] enum MetricValueType { // IntSigned(i64), // Float(f32), Text(String), - FloatDouble(f64), + //FloatDouble(f64), IntUnsigned(u64), } @@ -74,7 +80,7 @@ impl fmt::Display for MetricValueType { // MetricValueType::IntSigned(value) => write!(f, "{}", value), // MetricValueType::Float(value) => write!(f, "{}", value), MetricValueType::Text(text) => write!(f, "{text}"), - MetricValueType::FloatDouble(value) => write!(f, "{value}"), + //MetricValueType::FloatDouble(value) => write!(f, "{value}"), MetricValueType::IntUnsigned(value) => write!(f, "{value}"), } } @@ -86,7 +92,7 @@ impl fmt::Debug for MetricValueType { // MetricValueType::IntSigned(value) => write!(f, "{}", value), // MetricValueType::Float(value) => write!(f, "{}", value), MetricValueType::Text(text) => write!(f, "{text}"), - MetricValueType::FloatDouble(value) => write!(f, "{value}"), + //MetricValueType::FloatDouble(value) => write!(f, "{value}"), MetricValueType::IntUnsigned(value) => write!(f, "{value}"), } } @@ -101,16 +107,17 @@ impl fmt::Debug for MetricValueType { /// the metrics are generated/refreshed by calling the refresh* methods available /// with the structs provided by the sensor. pub trait Exporter { - /// Entry point for all Exporters - fn run(&mut self, parameters: ArgMatches); - /// Get the options passed via the command line - fn get_options() -> Vec>; + /// Runs the exporter. + fn run(&mut self); + + /// The name of the kind of the exporter, for example "json". 
+ fn kind(&self) -> &str; } /// MetricGenerator is an exporter helper structure to collect Scaphandre metrics. /// The goal is to provide a standard Vec\ that can be used by exporters /// to avoid code duplication. -struct MetricGenerator { +pub struct MetricGenerator { /// `data` will be used to store the metrics retrieved. data: Vec, /// `topology` is the system physical layout retrieve via the sensors crate with @@ -159,7 +166,7 @@ struct MetricGenerator { impl MetricGenerator { /// Returns a MetricGenerator instance that will host metrics. - fn new( + pub fn new( topology: Topology, hostname: String, _qemu: bool, @@ -219,15 +226,56 @@ impl MetricGenerator { topology, hostname, #[cfg(target_os = "linux")] - qemu, + qemu: _qemu, } } + #[cfg(feature = "containers")] + pub fn get_processes_filtered_by_container_name( + &self, + container_regex: &Regex, + ) -> Vec<(IProcess, f64)> { + let mut consumers: Vec<(IProcess, OrderedFloat)> = vec![]; + for p in &self.topology.proc_tracker.procs { + if p.len() > 1 { + let diff = self.topology.proc_tracker.get_cpu_usage_percentage( + p.first().unwrap().process.pid as _, + self.topology.proc_tracker.nb_cores, + ); + let p_record = p.last().unwrap(); + let container_description = self + .topology + .proc_tracker + .get_process_container_description( + p_record.process.pid, + &self.containers, + self.docker_version.clone(), + &self.pods, + ); + if let Some(name) = container_description.get("container_names") { + if container_regex.is_match(name) { + consumers.push((p_record.process.clone(), OrderedFloat(diff as f64))); + consumers.sort_by(|x, y| y.1.cmp(&x.1)); + } + } + //if container_regex.is_match(process_exe.to_str().unwrap_or_default()) { + // consumers.push((p_record.process.clone(), OrderedFloat(diff as f64))); + // consumers.sort_by(|x, y| y.1.cmp(&x.1)); + //} else if container_regex.is_match(&process_cmdline.concat()) { + // consumers.push((p_record.process.clone(), OrderedFloat(diff as f64))); + // 
consumers.sort_by(|x, y| y.1.cmp(&x.1)); + //} + } + } + let mut result: Vec<(IProcess, f64)> = vec![]; + for (p, f) in consumers { + result.push((p, f.into_inner())); + } + result + } + /// Generate all scaphandre internal metrics. fn gen_self_metrics(&mut self) { - #[cfg(target_os = "linux")] - let myself = IProcess::myself().unwrap(); - #[cfg(target_os = "windows")] let myself = IProcess::myself(self.topology.get_proc_tracker()).unwrap(); let default_timestamp = current_system_time_since_epoch(); @@ -244,10 +292,7 @@ impl MetricGenerator { metric_value: MetricValueType::Text(get_scaphandre_version()), }); - if let Some(metric_value) = self - .topology - .get_process_cpu_consumption_percentage(myself.pid) - { + if let Some(metric_value) = self.topology.get_process_cpu_usage_percentage(myself.pid) { self.data.push(Metric { name: String::from("scaph_self_cpu_usage_percent"), metric_type: String::from("gauge"), @@ -257,17 +302,14 @@ impl MetricGenerator { state: String::from("ok"), tags: vec!["scaphandre".to_string()], attributes: HashMap::new(), - description: String::from("CPU % consumed by scaphandre."), - metric_value: MetricValueType::FloatDouble( - metric_value.value.parse::().unwrap(), - ), + description: format!("CPU time consumed by scaphandre, as {}", metric_value.unit), + metric_value: MetricValueType::Text(metric_value.value), }); } - if let Ok(metric_value) = myself.statm() { - let value = metric_value.size * page_size().unwrap() as u64; + if let Some(metric_value) = self.topology.get_process_memory_virtual_bytes(myself.pid) { self.data.push(Metric { - name: String::from("scaph_self_mem_total_program_size"), + name: String::from("scaph_self_memory_virtual_bytes"), metric_type: String::from("gauge"), ttl: 60.0, timestamp: default_timestamp, @@ -275,13 +317,16 @@ impl MetricGenerator { state: String::from("ok"), tags: vec!["scaphandre".to_string()], attributes: HashMap::new(), - description: String::from("Total program size, measured in bytes."), - 
metric_value: MetricValueType::IntUnsigned(value), + description: format!("Total program size, measured in {}.", metric_value.unit), + metric_value: MetricValueType::IntUnsigned( + metric_value.value.parse::().unwrap(), + ), }); + } - let value = metric_value.resident * page_size().unwrap() as u64; + if let Some(metric_value) = self.topology.get_process_memory_bytes(myself.pid) { self.data.push(Metric { - name: String::from("scaph_self_mem_resident_set_size"), + name: String::from("scaph_self_memory_bytes"), metric_type: String::from("gauge"), ttl: 60.0, hostname: self.hostname.clone(), @@ -290,23 +335,9 @@ impl MetricGenerator { tags: vec!["scaphandre".to_string()], attributes: HashMap::new(), description: String::from("Resident set size, measured in bytes."), - metric_value: MetricValueType::IntUnsigned(value), - }); - - let value = metric_value.shared * page_size().unwrap() as u64; - self.data.push(Metric { - name: String::from("scaph_self_mem_shared_resident_size"), - metric_type: String::from("gauge"), - ttl: 60.0, - timestamp: default_timestamp, - hostname: self.hostname.clone(), - state: String::from("ok"), - tags: vec!["scaphandre".to_string()], - attributes: HashMap::new(), - description: String::from( - "Number of resident shared bytes (i.e., backed by a file).", + metric_value: MetricValueType::IntUnsigned( + metric_value.value.parse::().unwrap(), ), - metric_value: MetricValueType::IntUnsigned(value), }); } @@ -414,6 +445,23 @@ impl MetricGenerator { if !records.is_empty() { let record = records.last().unwrap(); let host_energy_microjoules = record.value.clone(); + let mut attributes = HashMap::new(); + if self.topology._sensor_data.contains_key("psys") { + attributes.insert( + String::from("value_source"), + String::from("powercap_rapl_psys"), + ); + } else if self.topology._sensor_data.contains_key("source_file") { + attributes.insert( + String::from("value_source"), + String::from("powercap_rapl_pkg"), + ); + } else if 
self.topology._sensor_data.contains_key("DRIVER_NAME") { + attributes.insert( + String::from("value_source"), + String::from("scaphandredrv_rapl_pkg"), + ); + } self.data.push(Metric { name: String::from("scaph_host_energy_microjoules"), @@ -423,7 +471,7 @@ impl MetricGenerator { hostname: self.hostname.clone(), state: String::from("ok"), tags: vec!["scaphandre".to_string()], - attributes: HashMap::new(), + attributes: attributes.clone(), description: String::from( "Energy measurement for the whole host, as extracted from the sensor, in microjoules.", ), @@ -439,12 +487,149 @@ impl MetricGenerator { hostname: self.hostname.clone(), state: String::from("ok"), tags: vec!["scaphandre".to_string()], - attributes: HashMap::new(), + attributes, description: String::from("Power measurement on the whole host, in microwatts"), metric_value: MetricValueType::Text(power.value), }); } } + if let Some(metric_value) = self.topology.get_load_avg() { + self.data.push(Metric { + name: String::from("scaph_host_load_avg_one"), + metric_type: String::from("gauge"), + ttl: 60.0, + timestamp: metric_value[0].timestamp, + hostname: self.hostname.clone(), + state: String::from("ok"), + tags: vec!["scaphandre".to_string()], + attributes: HashMap::new(), + description: String::from("Load average on 1 minute."), + metric_value: MetricValueType::Text(metric_value[0].value.clone()), + }); + self.data.push(Metric { + name: String::from("scaph_host_load_avg_five"), + metric_type: String::from("gauge"), + ttl: 60.0, + timestamp: metric_value[1].timestamp, + hostname: self.hostname.clone(), + state: String::from("ok"), + tags: vec!["scaphandre".to_string()], + attributes: HashMap::new(), + description: String::from("Load average on 5 minutes."), + metric_value: MetricValueType::Text(metric_value[1].value.clone()), + }); + self.data.push(Metric { + name: String::from("scaph_host_load_avg_fifteen"), + metric_type: String::from("gauge"), + ttl: 60.0, + timestamp: metric_value[2].timestamp, + 
hostname: self.hostname.clone(), + state: String::from("ok"), + tags: vec!["scaphandre".to_string()], + attributes: HashMap::new(), + description: String::from("Load average on 15 minutes."), + metric_value: MetricValueType::Text(metric_value[2].value.clone()), + }); + } + let freq = self.topology.get_cpu_frequency(); + self.data.push(Metric { + name: String::from("scaph_host_cpu_frequency"), + metric_type: String::from("gauge"), + ttl: 60.0, + timestamp: freq.timestamp, + hostname: self.hostname.clone(), + state: String::from("ok"), + tags: vec!["scaphandre".to_string()], + attributes: HashMap::new(), + description: format!("Global frequency of all the cpus. In {}", freq.unit), + metric_value: MetricValueType::Text(freq.value), + }); + for (metric_name, metric) in self.topology.get_disks() { + info!("pushing disk metric to data : {}", metric_name); + self.data.push(Metric { + name: metric_name, + metric_type: String::from("gauge"), + ttl: 60.0, + timestamp: metric.2.timestamp, + hostname: self.hostname.clone(), + state: String::from("ok"), + tags: vec!["scaphandre".to_string()], + attributes: metric.1, + description: metric.0, + metric_value: MetricValueType::Text(metric.2.value), + }); + } + + let ram_attributes = HashMap::new(); + let metric_value = self.topology.get_total_memory_bytes(); + self.data.push(Metric { + name: String::from("scaph_host_memory_total_bytes"), + metric_type: String::from("gauge"), + ttl: 60.0, + timestamp: metric_value.timestamp, + hostname: self.hostname.clone(), + state: String::from("ok"), + tags: vec!["scaphandre".to_string()], + attributes: ram_attributes.clone(), + description: String::from("Random Access Memory installed on the host, in bytes."), + metric_value: MetricValueType::Text(metric_value.value), + }); + let metric_value = self.topology.get_available_memory_bytes(); + self.data.push(Metric { + name: String::from("scaph_host_memory_available_bytes"), + metric_type: String::from("gauge"), + ttl: 60.0, + timestamp: 
metric_value.timestamp, + hostname: self.hostname.clone(), + state: String::from("ok"), + tags: vec!["scaphandre".to_string()], + attributes: ram_attributes.clone(), + description: String::from( + "Random Access Memory available to be re-used on the host, in bytes.", + ), + metric_value: MetricValueType::Text(metric_value.value), + }); + let metric_value = self.topology.get_free_memory_bytes(); + self.data.push(Metric { + name: String::from("scaph_host_memory_free_bytes"), + metric_type: String::from("gauge"), + ttl: 60.0, + timestamp: metric_value.timestamp, + hostname: self.hostname.clone(), + state: String::from("ok"), + tags: vec!["scaphandre".to_string()], + attributes: ram_attributes.clone(), + description: String::from( + "Random Access Memory free to be used (not reused) on the host, in bytes.", + ), + metric_value: MetricValueType::Text(metric_value.value), + }); + let metric_value = self.topology.get_free_swap_bytes(); + self.data.push(Metric { + name: String::from("scaph_host_swap_free_bytes"), + metric_type: String::from("gauge"), + ttl: 60.0, + timestamp: metric_value.timestamp, + hostname: self.hostname.clone(), + state: String::from("ok"), + tags: vec!["scaphandre".to_string()], + attributes: ram_attributes.clone(), + description: String::from("Swap space free to be used on the host, in bytes."), + metric_value: MetricValueType::Text(metric_value.value), + }); + let metric_value = self.topology.get_total_swap_bytes(); + self.data.push(Metric { + name: String::from("scaph_host_swap_total_bytes"), + metric_type: String::from("gauge"), + ttl: 60.0, + timestamp: metric_value.timestamp, + hostname: self.hostname.clone(), + state: String::from("ok"), + tags: vec!["scaphandre".to_string()], + attributes: ram_attributes, + description: String::from("Total swap space on the host, in bytes."), + metric_value: MetricValueType::Text(metric_value.value), + }); } /// Generate socket metrics. 
@@ -452,14 +637,13 @@ impl MetricGenerator { let sockets = self.topology.get_sockets_passive(); for socket in sockets { let records = socket.get_records_passive(); + let mut attributes = HashMap::new(); + attributes.insert("socket_id".to_string(), socket.id.to_string()); if !records.is_empty() { let metric = records.last().unwrap(); let metric_value = metric.value.clone(); let metric_timestamp = metric.timestamp; - let mut attributes = HashMap::new(); - attributes.insert("socket_id".to_string(), socket.id.to_string()); - self.data.push(Metric { name: String::from("scaph_socket_energy_microjoules"), metric_type: String::from("counter"), @@ -492,6 +676,23 @@ impl MetricGenerator { }); } } + if let Some(mmio) = socket.get_rapl_mmio_energy_microjoules() { + self.data.push(Metric { + name: String::from("scaph_socket_rapl_mmio_energy_microjoules"), + metric_type: String::from("counter"), + ttl: 60.0, + timestamp: mmio.timestamp, + hostname: self.hostname.clone(), + state: String::from("ok"), + tags: vec!["scaphandre".to_string()], + attributes: attributes.clone(), + description: format!( + "Energy counter from RAPL mmio interface for Package-0 of CPU socket {}", + socket.id + ), + metric_value: MetricValueType::Text(mmio.value), + }); + } for domain in socket.get_domains_passive() { let records = domain.get_records_passive(); if !records.is_empty() { @@ -536,6 +737,27 @@ impl MetricGenerator { metric_value: MetricValueType::Text(domain_power_microwatts.clone()), }); } + let mut mmio_attributes = attributes.clone(); + mmio_attributes.insert( + String::from("value_source"), + String::from("powercap_rapl_mmio"), + ); + if let Some(mmio) = domain.get_rapl_mmio_energy_microjoules() { + self.data.push(Metric { + name: String::from("scaph_domain_rapl_mmio_energy_microjoules"), + metric_type: String::from("counter"), + ttl: 60.0, + timestamp: mmio.timestamp, + hostname: self.hostname.clone(), + state: String::from("ok"), + tags: vec!["scaphandre".to_string()], + attributes: 
mmio_attributes, + description: format!( + "Energy counter from RAPL mmio interface for the {} domain, socket {}.", domain.name, socket.id + ), + metric_value: MetricValueType::Text(mmio.value), + }); + } } } } @@ -620,7 +842,7 @@ impl MetricGenerator { current_system_time_since_epoch().as_secs().to_string(); } } else { - info!("Docker socket is None."); + debug!("Docker socket is None."); } } } @@ -637,7 +859,7 @@ impl MetricGenerator { self.pods = pods_result; debug!("Found {} pods", &self.pods.len()); } else { - info!("Failed getting pods list, despite client seems ok."); + debug!("Failed getting pods list, despite client seems ok."); } } else { debug!("Kubernetes socket is not some."); @@ -648,7 +870,7 @@ impl MetricGenerator { /// Generate process metrics. fn gen_process_metrics(&mut self) { - debug!("In gen_process_metrics."); + trace!("In gen_process_metrics."); #[cfg(feature = "containers")] if self.watch_containers { let now = current_system_time_since_epoch().as_secs().to_string(); @@ -675,7 +897,6 @@ impl MetricGenerator { Ok(events) => { if !events.is_empty() { self.gen_docker_containers_basic_metadata(); - } else { } } Err(err) => debug!("couldn't get docker events - {:?} - {}", err, err), @@ -687,11 +908,11 @@ impl MetricGenerator { if self.watch_kubernetes && self.kubernetes_client.is_some() { if self.pods_last_check.is_empty() { self.gen_kubernetes_pods_basic_metadata(); - info!("First check done on pods."); + debug!("First check done on pods."); } let last_check = self.pods_last_check.clone(); if (now.parse::().unwrap() - last_check.parse::().unwrap()) > 20 { - info!( + debug!( "Just refreshed pod list ! 
last: {} now: {}, diff: {}", last_check, now, @@ -745,20 +966,21 @@ impl MetricGenerator { } } - let metric_name = String::from("scaph_process_power_consumption_microwatts"); - if let Some(power) = self.topology.get_process_power_consumption_microwatts(pid) { - self.data.push(Metric { - name: metric_name, - metric_type: String::from("gauge"), - ttl: 60.0, - timestamp: power.timestamp, - hostname: self.hostname.clone(), - state: String::from("ok"), - tags: vec!["scaphandre".to_string()], - attributes, - description: String::from("Power consumption due to the process, measured on at the topology level, in microwatts"), - metric_value: MetricValueType::Text(power.value), - }); + if let Some(metrics) = self.topology.get_all_per_process(pid) { + for (k, v) in metrics { + self.data.push(Metric { + name: k, + metric_type: String::from("gauge"), + ttl: 60.0, + timestamp: v.1.timestamp, + hostname: self.hostname.clone(), + state: String::from("ok"), + tags: vec!["scaphandre".to_string()], + attributes: attributes.clone(), + description: v.0, + metric_value: MetricValueType::Text(v.1.value), + }) + } } } } @@ -790,7 +1012,7 @@ impl MetricGenerator { Utc::now().format("%Y-%m-%dT%H:%M:%S") ); self.gen_process_metrics(); - debug!("self_metrics: {:#?}", self.data); + trace!("self_metrics: {:#?}", self.data); } pub fn pop_metrics(&mut self) -> Vec { diff --git a/src/exporters/prometheus.rs b/src/exporters/prometheus.rs index 1cd2db12..29d7cd01 100644 --- a/src/exporters/prometheus.rs +++ b/src/exporters/prometheus.rs @@ -1,136 +1,99 @@ //! # PrometheusExporter //! -//! `PrometheusExporter` implementation, expose metrics to -//! a [Prometheus](https://prometheus.io/) server. -use super::utils::get_hostname; -use crate::current_system_time_since_epoch; +//! The Prometheus Exporter expose metrics to a [Prometheus](https://prometheus.io/) server. +//! This is achieved by exposing an HTTP endpoint, which the Prometheus will +//! 
[scrape](https://prometheus.io/docs/prometheus/latest/getting_started). + +use super::utils; use crate::exporters::{Exporter, MetricGenerator, MetricValueType}; +use crate::sensors::utils::current_system_time_since_epoch; use crate::sensors::{Sensor, Topology}; use chrono::Utc; -use clap::{Arg, ArgMatches}; use hyper::service::{make_service_fn, service_fn}; use hyper::{Body, Request, Response, Server}; use std::convert::Infallible; -use std::fmt::Write as _; use std::{ collections::HashMap, - net::{IpAddr, SocketAddr}, + fmt::Write, + net::{IpAddr, Ipv4Addr, SocketAddr}, sync::{Arc, Mutex}, time::Duration, }; /// Default ipv4/ipv6 address to expose the service is any -const DEFAULT_IP_ADDRESS: &str = "::"; +const DEFAULT_IP_ADDRESS: IpAddr = IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)); /// Exporter that exposes metrics to an HTTP endpoint /// matching the Prometheus.io metrics format. pub struct PrometheusExporter { - /// Sensor instance that is used to generate the Topology and - /// thus get power consumption metrics. - sensor: Box, + topo: Topology, + hostname: String, + args: ExporterArgs, +} + +/// Hold the arguments for a PrometheusExporter. +#[derive(clap::Args, Debug)] +pub struct ExporterArgs { + /// IP address (v4 or v6) of the metrics endpoint for Prometheus + #[arg(short, long, default_value_t = DEFAULT_IP_ADDRESS)] + pub address: IpAddr, + + /// TCP port of the metrics endpoint for Prometheus + #[arg(short, long, default_value_t = 8080)] + pub port: u16, + + #[arg(short, long, default_value_t = String::from("metrics"))] + pub suffix: String, + + /// Apply labels to metrics of processes that look like a Qemu/KVM virtual machine + #[arg(long)] + pub qemu: bool, + + /// Apply labels to metrics of processes running as containers + #[arg(long)] + pub containers: bool, } impl PrometheusExporter { /// Instantiates PrometheusExporter and returns the instance. 
- pub fn new(sensor: Box) -> PrometheusExporter { - PrometheusExporter { sensor } + pub fn new(sensor: &dyn Sensor, args: ExporterArgs) -> PrometheusExporter { + // Prepare the retrieval of the measurements, catch most of the errors early + let topo = sensor + .get_topology() + .expect("sensor topology should be available"); + let hostname = utils::get_hostname(); + PrometheusExporter { + topo, + hostname, + args, + } } } impl Exporter for PrometheusExporter { - /// Entry point ot the PrometheusExporter. - /// - /// Runs HTTP server and metrics exposure through the runner function. - fn run(&mut self, parameters: ArgMatches) { + /// Starts an HTTP server to expose the metrics in Prometheus format. + fn run(&mut self) { info!( "{}: Starting Prometheus exporter", Utc::now().format("%Y-%m-%dT%H:%M:%S") ); println!("Press CTRL-C to stop scaphandre"); - - runner( - (*self.sensor.get_topology()).unwrap(), - parameters.value_of("address").unwrap().to_string(), - parameters.value_of("port").unwrap().to_string(), - parameters.value_of("suffix").unwrap().to_string(), - parameters.is_present("qemu"), - parameters.is_present("containers"), - get_hostname(), + let socket_addr = SocketAddr::new(self.args.address, self.args.port); + let metric_generator = MetricGenerator::new( + self.topo.clone(), // improvement possible here: avoid cloning by adding a lifetime param to MetricGenerator + self.hostname.clone(), + self.args.qemu, + self.args.containers, ); + run_server(socket_addr, metric_generator, &self.args.suffix); } - /// Returns options understood by the exporter. 
- fn get_options() -> Vec> { - let mut options = Vec::new(); - let arg = Arg::with_name("address") - .default_value(DEFAULT_IP_ADDRESS) - .help("ipv6 or ipv4 address to expose the service to") - .long("address") - .short("a") - .required(false) - .takes_value(true); - options.push(arg); - let arg = Arg::with_name("port") - .default_value("8080") - .help("TCP port number to expose the service") - .long("port") - .short("p") - .required(false) - .takes_value(true); - options.push(arg); - - let arg = Arg::with_name("suffix") - .default_value("metrics") - .help("url suffix to access metrics") - .long("suffix") - .short("s") - .required(false) - .takes_value(true); - options.push(arg); - - let arg = Arg::with_name("qemu") - .help("Apply labels to metrics of processes looking like a Qemu/KVM virtual machine") - .long("qemu") - .short("q") - .required(false) - .takes_value(false); - options.push(arg); - - let arg = Arg::with_name("containers") - .help("Monitor and apply labels for processes running as containers") - .long("containers") - .required(false) - .takes_value(false); - options.push(arg); - - let arg = Arg::with_name("kubernetes_host") - .help("FQDN of the kubernetes API server") - .long("kubernetes-host") - .required(false) - .takes_value(true); - options.push(arg); - - let arg = Arg::with_name("kubernetes_scheme") - .help("Protocol used to access kubernetes API server") - .long("kubernetes-scheme") - .default_value("http") - .required(false) - .takes_value(true); - options.push(arg); - - let arg = Arg::with_name("kubernetes_port") - .help("Kubernetes API server port number") - .long("kubernetes-port") - .default_value("6443") - .required(false) - .takes_value(true); - options.push(arg); - - options + fn kind(&self) -> &str { + "prometheus" } } -/// Contains a mutex holding a Topology object. +/// Contains a mutex holding a MetricGenerator. /// Used to pass the topology data from one http worker to another. 
struct PowerMetrics { last_request: Mutex, @@ -138,70 +101,36 @@ struct PowerMetrics { } #[tokio::main] -async fn runner( - topology: Topology, - address: String, - port: String, - suffix: String, - qemu: bool, - watch_containers: bool, - hostname: String, +async fn run_server( + socket_addr: SocketAddr, + metric_generator: MetricGenerator, + endpoint_suffix: &str, ) { - if let Ok(addr) = address.parse::() { - if let Ok(port) = port.parse::() { - let socket_addr = SocketAddr::new(addr, port); - - let power_metrics = PowerMetrics { - last_request: Mutex::new(Duration::new(0, 0)), - metric_generator: Mutex::new(MetricGenerator::new( - topology, - hostname.clone(), - qemu, - watch_containers, - )), - }; - let context = Arc::new(power_metrics); - let make_svc = make_service_fn(move |_| { - let ctx = context.clone(); - let sfx = suffix.clone(); - async { - Ok::<_, Infallible>(service_fn(move |req| { - show_metrics(req, ctx.clone(), sfx.clone()) - })) - } - }); - let server = Server::bind(&socket_addr); - let res = server.serve(make_svc); - let (tx, rx) = tokio::sync::oneshot::channel::<()>(); - let graceful = res.with_graceful_shutdown(async { - rx.await.ok(); - }); - - if let Err(e) = graceful.await { - error!("server error: {}", e); - } - let _ = tx.send(()); - } else { - panic!("{} is not a valid TCP port number", port); - } - } else { - panic!("{} is not a valid ip address", address); - } -} - -/// Returns a well formatted Prometheus metric string. 
-fn format_metric(key: &str, value: &str, labels: Option<&HashMap>) -> String { - let mut result = key.to_string(); - if let Some(labels) = labels { - result.push('{'); - for (k, v) in labels.iter() { - let _ = write!(result, "{}=\"{}\",", k, v.replace('\"', "_")); + let power_metrics = PowerMetrics { + last_request: Mutex::new(Duration::new(0, 0)), + metric_generator: Mutex::new(metric_generator), + }; + let context = Arc::new(power_metrics); + let make_svc = make_service_fn(move |_| { + let ctx = context.clone(); + let sfx = endpoint_suffix.to_string(); + async { + Ok::<_, Infallible>(service_fn(move |req| { + show_metrics(req, ctx.clone(), sfx.clone()) + })) } - result.remove(result.len() - 1); - result.push('}'); + }); + let server = Server::bind(&socket_addr); + let res = server.serve(make_svc); + let (tx, rx) = tokio::sync::oneshot::channel::<()>(); + let graceful = res.with_graceful_shutdown(async { + rx.await.ok(); + }); + + if let Err(e) = graceful.await { + error!("server error: {}", e); } - let _ = writeln!(result, " {value}"); - result + let _ = tx.send(()); } /// Adds lines related to a metric in the body (String) of response. 
@@ -230,62 +159,75 @@ async fn show_metrics( trace!("{}", req.uri()); let mut body = String::new(); if req.uri().path() == format!("/{}", &suffix) { - trace!("in metrics !"); let now = current_system_time_since_epoch(); - let mut last_request = context.last_request.lock().unwrap(); - let mut metric_generator = context.metric_generator.lock().unwrap(); - if now - (*last_request) > Duration::from_secs(2) { - { - info!( - "{}: Refresh topology", - Utc::now().format("%Y-%m-%dT%H:%M:%S") - ); - metric_generator - .topology - .proc_tracker - .clean_terminated_process_records_vectors(); - metric_generator.topology.refresh(); - } - } - *last_request = now; - - info!("{}: Refresh data", Utc::now().format("%Y-%m-%dT%H:%M:%S")); - - metric_generator.gen_all_metrics(); - - let mut metrics_pushed: Vec = vec![]; - - // Send all data - for msg in metric_generator.pop_metrics() { - let mut attributes: Option<&HashMap> = None; - if !msg.attributes.is_empty() { - attributes = Some(&msg.attributes); + match context.last_request.lock() { + Ok(mut last_request) => { + match context.metric_generator.lock() { + Ok(mut metric_generator) => { + if now - (*last_request) > Duration::from_secs(2) { + { + info!( + "{}: Refresh topology", + Utc::now().format("%Y-%m-%dT%H:%M:%S") + ); + metric_generator + .topology + .proc_tracker + .clean_terminated_process_records_vectors(); + metric_generator.topology.refresh(); + } + } + *last_request = now; + + info!("{}: Refresh data", Utc::now().format("%Y-%m-%dT%H:%M:%S")); + + metric_generator.gen_all_metrics(); + + let mut metrics_pushed: Vec = vec![]; + + // Send all data + for msg in metric_generator.pop_metrics() { + let mut attributes: Option<&HashMap> = None; + if !msg.attributes.is_empty() { + attributes = Some(&msg.attributes); + } + + let value = match msg.metric_value { + // MetricValueType::IntSigned(value) => event.set_metric_sint64(value), + // MetricValueType::Float(value) => event.set_metric_f(value), + 
//MetricValueType::FloatDouble(value) => value.to_string(), + MetricValueType::IntUnsigned(value) => value.to_string(), + MetricValueType::Text(ref value) => value.to_string(), + }; + + let mut should_i_add_help = true; + + if metrics_pushed.contains(&msg.name) { + should_i_add_help = false; + } else { + metrics_pushed.insert(0, msg.name.clone()); + } + + body = push_metric( + body, + msg.description.clone(), + msg.metric_type.clone(), + msg.name.clone(), + utils::format_prometheus_metric(&msg.name, &value, attributes), + should_i_add_help, + ); + } + } + Err(e) => { + error!("Error while locking metric_generator: {e:?}"); + error!("Error while locking metric_generator: {}", e.to_string()); + } + } } - - let value = match msg.metric_value { - // MetricValueType::IntSigned(value) => event.set_metric_sint64(value), - // MetricValueType::Float(value) => event.set_metric_f(value), - MetricValueType::FloatDouble(value) => value.to_string(), - MetricValueType::IntUnsigned(value) => value.to_string(), - MetricValueType::Text(ref value) => value.to_string(), - }; - - let mut should_i_add_help = true; - - if metrics_pushed.contains(&msg.name) { - should_i_add_help = false; - } else { - metrics_pushed.insert(0, msg.name.clone()); + Err(e) => { + error!("Error in show_metrics : {e:?}"); + error!("Error details : {}", e.to_string()); } - - body = push_metric( - body, - msg.description.clone(), - msg.metric_type.clone(), - msg.name.clone(), - format_metric(&msg.name, &value, attributes), - should_i_add_help, - ); } } else { let _ = write!(body, "Scaphandre's prometheus exporter here. Metrics available on /{suffix}"); diff --git a/src/exporters/prometheuspush.rs b/src/exporters/prometheuspush.rs new file mode 100644 index 00000000..73981e4f --- /dev/null +++ b/src/exporters/prometheuspush.rs @@ -0,0 +1,164 @@ +//! # PrometheusPushExporter +//! +//! `PrometheusPushExporter` implementation, push/send metrics to +//! a [Prometheus](https://prometheus.io/) pushgateway. +//! 
+ +use super::utils::{format_prometheus_metric, get_hostname}; +use crate::exporters::{Exporter, MetricGenerator}; +use crate::sensors::{Sensor, Topology}; +use chrono::Utc; +use isahc::config::SslOption; +use isahc::{prelude::*, Request}; +use std::fmt::Write; +use std::thread; +use std::time::Duration; + +pub struct PrometheusPushExporter { + topo: Topology, + hostname: String, + args: ExporterArgs, +} + +/// Hold the arguments for a PrometheusExporter. +#[derive(clap::Args, Debug)] +pub struct ExporterArgs { + /// IP address (v4 or v6) of the metrics endpoint for Prometheus + #[arg(short = 'H', long = "host", default_value_t = String::from("localhost"))] + pub host: String, + + /// TCP port of the metrics endpoint for Prometheus + #[arg(short, long, default_value_t = 9091)] + pub port: u16, + + #[arg(long, default_value_t = String::from("metrics"))] + pub suffix: String, + + #[arg(short = 'S', long, default_value_t = String::from("http"))] + pub scheme: String, + + #[arg(short, long, default_value_t = 30)] + pub step: u64, + + /// Apply labels to metrics of processes that look like a Qemu/KVM virtual machine + #[arg(long)] + pub qemu: bool, + + /// Apply labels to metrics of processes running as containers + #[arg(long)] + pub containers: bool, + + /// Job name to apply as a label for pushed metrics + #[arg(short, long, default_value_t = String::from("scaphandre"))] + pub job: String, + + /// Don't verify remote TLS certificate (works with --scheme="https") + #[arg(long)] + pub no_tls_check: bool, +} + +impl PrometheusPushExporter { + pub fn new(sensor: &dyn Sensor, args: ExporterArgs) -> PrometheusPushExporter { + let topo = sensor + .get_topology() + .expect("sensor topology should be available"); + let hostname = get_hostname(); + PrometheusPushExporter { + topo, + hostname, + args, + } + } +} + +impl Exporter for PrometheusPushExporter { + fn run(&mut self) { + info!( + "{}: Starting Prometheus Push exporter", + Utc::now().format("%Y-%m-%dT%H:%M:%S") + ); + 
+ let uri = format!( + "{}://{}:{}/{}/job/{}/instance/{}", + self.args.scheme, + self.args.host, + self.args.port, + self.args.suffix, + self.args.job, + self.hostname.clone() + ); + + let mut metric_generator = MetricGenerator::new( + self.topo.clone(), + self.hostname.clone(), + self.args.qemu, + self.args.containers, + ); + + loop { + metric_generator.topology.refresh(); + metric_generator.gen_all_metrics(); + let mut body = String::from(""); + let mut metrics_pushed: Vec = vec![]; + //let mut counter = 0; + for mut m in metric_generator.pop_metrics() { + let mut should_i_add_help = true; + + if metrics_pushed.contains(&m.name) { + should_i_add_help = false; + } else { + metrics_pushed.insert(0, m.name.clone()); + } + + if should_i_add_help { + let _ = write!(body, "# HELP {} {}", m.name, m.description); + let _ = write!(body, "\n# TYPE {} {}\n", m.name, m.metric_type); + } + if !&m.attributes.contains_key("instance") { + m.attributes + .insert(String::from("instance"), m.hostname.clone()); + } + if !&m.attributes.contains_key("hostname") { + m.attributes + .insert(String::from("hostname"), m.hostname.clone()); + } + let attributes = Some(&m.attributes); + + let _ = write!( + body, + "{}", + format_prometheus_metric(&m.name, &m.metric_value.to_string(), attributes) + ); + } + + let pre_request = Request::post(uri.clone()) + .timeout(Duration::from_secs(5)) + .header("Content-Type", "text/plain"); + let final_request = match self.args.no_tls_check { + true => pre_request.ssl_options( + SslOption::DANGER_ACCEPT_INVALID_CERTS + | SslOption::DANGER_ACCEPT_REVOKED_CERTS + | SslOption::DANGER_ACCEPT_INVALID_HOSTS, + ), + false => pre_request, + }; + if let Ok(request) = final_request.body(body) { + match request.send() { + Ok(mut response) => { + debug!("Got {:?}", response); + debug!("Response Text {:?}", response.text()); + } + Err(err) => { + warn!("Got error : {:?}", err) + } + } + } + + thread::sleep(Duration::new(self.args.step, 0)); + } + } + + fn kind(&self) 
-> &str { + "prometheuspush" + } +} diff --git a/src/exporters/qemu.rs b/src/exporters/qemu.rs index f8e5e8a0..5ed27073 100644 --- a/src/exporters/qemu.rs +++ b/src/exporters/qemu.rs @@ -1,5 +1,6 @@ use crate::exporters::Exporter; -use crate::sensors::{utils::ProcessRecord, Sensor, Topology}; +use crate::sensors::Topology; +use crate::sensors::{utils::ProcessRecord, Sensor}; use std::{fs, io, thread, time}; /// An Exporter that extracts power consumption data of running @@ -9,18 +10,20 @@ use std::{fs, io, thread, time}; /// to collect and deal with their power consumption metrics, the same way /// they would do it if they managed bare metal machines. pub struct QemuExporter { + // We don't need a MetricGenerator for this exporter, because it "justs" + // puts the metrics in files in the same way as the powercap kernel module. topology: Topology, } impl Exporter for QemuExporter { - /// Runs iteration() in a loop. - fn run(&mut self, _parameters: clap::ArgMatches) { + /// Runs [iterate()] in a loop. 
+ fn run(&mut self) { info!("Starting qemu exporter"); let path = "/var/lib/libvirt/scaphandre"; let cleaner_step = 120; let mut timer = time::Duration::from_secs(cleaner_step); loop { - self.iteration(String::from(path)); + self.iterate(String::from(path)); let step = time::Duration::from_secs(5); thread::sleep(step); if timer - step > time::Duration::from_millis(0) { @@ -34,62 +37,60 @@ impl Exporter for QemuExporter { } } - fn get_options() -> Vec> { - Vec::new() + fn kind(&self) -> &str { + "qemu" } } impl QemuExporter { /// Instantiates and returns a new QemuExporter - pub fn new(mut sensor: Box) -> QemuExporter { - let some_topology = *sensor.get_topology(); - QemuExporter { - topology: some_topology.unwrap(), - } + pub fn new(sensor: &dyn Sensor) -> QemuExporter { + let topology = sensor + .get_topology() + .expect("sensor topology should be available"); + QemuExporter { topology } } - /// Performs processing of metrics, using self.topology - pub fn iteration(&mut self, path: String) { + /// Processes the metrics of `self.topology` and exposes them at the given `path`. 
+ pub fn iterate(&mut self, path: String) { trace!("path: {}", path); + self.topology.refresh(); - let topo_uj_diff = self.topology.get_records_diff(); - let topo_stat_diff = self.topology.get_stats_diff(); - if let Some(topo_rec_uj) = topo_uj_diff { - debug!("Got topo uj diff: {:?}", topo_rec_uj); - let proc_tracker = self.topology.get_proc_tracker(); - let processes = proc_tracker.get_alive_processes(); + if let Some(topo_energy) = self.topology.get_records_diff_power_microwatts() { + let processes = self.topology.proc_tracker.get_alive_processes(); let qemu_processes = QemuExporter::filter_qemu_vm_processes(&processes); - debug!( - "Number of filtered qemu processes: {}", - qemu_processes.len() - ); for qp in qemu_processes { - info!("Working on {:?}", qp); if qp.len() > 2 { let last = qp.first().unwrap(); - let previous = qp.get(1).unwrap(); let vm_name = QemuExporter::get_vm_name_from_cmdline( - &last.process.original.cmdline().unwrap(), + &last.process.cmdline(&self.topology.proc_tracker).unwrap(), ); - let time_pdiff = last.total_time_jiffies() - previous.total_time_jiffies(); - if let Some(time_tdiff) = &topo_stat_diff { - let first_domain_path = format!("{path}/{vm_name}/intel-rapl:0:0"); - if fs::read_dir(&first_domain_path).is_err() { - match fs::create_dir_all(&first_domain_path) { - Ok(_) => info!("Created {} folder.", &path), - Err(error) => panic!("Couldn't create {}. Got: {}", &path, error), - } + let first_domain_path = format!("{path}/{vm_name}/intel-rapl:0:0"); + if fs::read_dir(&first_domain_path).is_err() { + match fs::create_dir_all(&first_domain_path) { + Ok(_) => info!("Created {} folder.", &path), + Err(error) => panic!("Couldn't create {}. 
Got: {}", &path, error), } - let tdiff = time_tdiff.total_time_jiffies(); - trace!("Time_pdiff={} time_tdiff={}", time_pdiff.to_string(), tdiff); - let ratio = time_pdiff / tdiff; - trace!("Ratio is {}", ratio.to_string()); - let uj_to_add = ratio * topo_rec_uj.value.parse::().unwrap(); - trace!("Adding {} uJ", uj_to_add); + } + if let Some(ratio) = self + .topology + .get_process_cpu_usage_percentage(last.process.pid) + { + let uj_to_add = ratio.value.parse::().unwrap() + * topo_energy.value.parse::().unwrap() + / 100.0; let complete_path = format!("{path}/{vm_name}/intel-rapl:0"); - if let Ok(result) = QemuExporter::add_or_create(&complete_path, uj_to_add) { - trace!("{:?}", result); - debug!("Updated {}", complete_path); + match QemuExporter::add_or_create(&complete_path, uj_to_add as u64) { + Ok(result) => { + trace!("{:?}", result); + debug!("Updated {}", complete_path); + } + Err(err) => { + error!( + "Could'nt edit {}. Please check file permissions : {}", + complete_path, err + ); + } } } } @@ -107,7 +108,7 @@ impl QemuExporter { return String::from(splitted.next().unwrap().split(',').next().unwrap()); } } - String::from("") + String::from("") // TODO return Option None instead, and stop at line 76 (it won't work with {path}//intel-rapl) } /// Either creates an energy_uj file (as the ones managed by powercap kernel module) @@ -136,16 +137,19 @@ impl QemuExporter { trace!("Got {} processes to filter.", processes.len()); for vecp in processes.iter() { if !vecp.is_empty() { - if let Some(pr) = vecp.get(0) { - if let Ok(cmdline) = pr.process.original.cmdline() { - if let Some(res) = cmdline.iter().find(|x| x.contains("qemu-system")) { - debug!("Found a process with {}", res); - let mut tmp: Vec = vec![]; - for p in vecp.iter() { - tmp.push(p.clone()); - } - qemu_processes.push(tmp); + if let Some(pr) = vecp.first() { + if let Some(res) = pr + .process + .cmdline + .iter() + .find(|x| x.contains("qemu-system")) + { + debug!("Found a process with {}", res); + let 
mut tmp: Vec = vec![]; + for p in vecp.iter() { + tmp.push(p.clone()); } + qemu_processes.push(tmp); } } } diff --git a/src/exporters/riemann.rs b/src/exporters/riemann.rs index 5533ef14..7635db04 100644 --- a/src/exporters/riemann.rs +++ b/src/exporters/riemann.rs @@ -1,25 +1,22 @@ //! # RiemannExporter //! -//! `RiemannExporter` implementation, sends metrics to a [Riemann](https://riemann.io/) -//! server. +//! The Riemann exporter sends metrics to a [Riemann](https://riemann.io/) server. + use crate::exporters::utils::get_hostname; use crate::exporters::*; use crate::sensors::Sensor; use chrono::Utc; -use clap::Arg; -use riemann_client::proto::Attribute; -use riemann_client::proto::Event; +use riemann_client::proto::{Attribute, Event}; use riemann_client::Client; use std::collections::HashMap; use std::convert::TryFrom; -use std::thread; use std::time::{Duration, SystemTime, UNIX_EPOCH}; /// Riemann server default ipv4/ipv6 address const DEFAULT_IP_ADDRESS: &str = "localhost"; /// Riemann server default port -const DEFAULT_PORT: &str = "5555"; +const DEFAULT_PORT: u16 = 5555; /// RiemannClient is a simple client implementation on top of the /// [rust-riemann_client](https://github.com/borntyping/rust-riemann_client) library. @@ -30,27 +27,6 @@ struct RiemannClient { } impl RiemannClient { - /// Instanciate the Riemann client either with mTLS or using raw TCP. 
- fn new(parameters: &ArgMatches) -> RiemannClient { - let address = String::from(parameters.value_of("address").unwrap()); - let port = parameters - .value_of("port") - .unwrap() - .parse::() - .expect("Fail parsing port number"); - let client: Client = if parameters.is_present("mtls") { - let cafile = parameters.value_of("cafile").unwrap(); - let certfile = parameters.value_of("certfile").unwrap(); - let keyfile = parameters.value_of("keyfile").unwrap(); - Client::connect_tls(&address, port, cafile, certfile, keyfile) - .expect("Fail to connect to Riemann server using mTLS") - } else { - Client::connect(&(address, port)) - .expect("Fail to connect to Riemann server using raw TCP") - }; - RiemannClient { client } - } - /// Send metrics to the server. fn send_metric(&mut self, metric: &Metric) { let mut event = Event::new(); @@ -82,7 +58,7 @@ impl RiemannClient { match metric.metric_value { // MetricValueType::IntSigned(value) => event.set_metric_sint64(value), // MetricValueType::Float(value) => event.set_metric_f(value), - MetricValueType::FloatDouble(value) => event.set_metric_d(value), + //MetricValueType::FloatDouble(value) => event.set_metric_d(value), MetricValueType::IntUnsigned(value) => event.set_metric_sint64( i64::try_from(value).expect("Metric cannot be converted to signed integer."), ), @@ -104,47 +80,103 @@ impl RiemannClient { } } -/// Exporter sends metrics to a Riemann server. +/// An exporter that sends metrics to a Riemann server. pub struct RiemannExporter { - /// Sensor instance that is used to generate the Topology and - /// thus get power consumption metrics. - sensor: Box, + metric_generator: MetricGenerator, + riemann_client: RiemannClient, + args: ExporterArgs, +} + +/// Contains the options of the Riemann exporter. +#[derive(clap::Args, Debug)] +pub struct ExporterArgs { + /// Address of the Riemann server. If mTLS is used this must be the server's FQDN. 
+ #[arg(short, long, default_value = DEFAULT_IP_ADDRESS)] + pub address: String, + + /// TCP port number of the Riemann server + #[arg(short, long, default_value_t = DEFAULT_PORT)] + pub port: u16, + + /// Duration between each metric dispatch, in seconds + #[arg(short, long, default_value_t = 5)] + pub dispatch_interval: u64, + + /// Apply labels to metrics of processes looking like a Qemu/KVM virtual machine + #[arg(short, long)] + pub qemu: bool, + + /// Monitor and apply labels for processes running as containers + #[arg(long)] + pub containers: bool, + + /// Connect to Riemann using mTLS instead of plain TCP. + #[arg( + long, + requires = "address", + requires = "ca_file", + requires = "cert_file", + requires = "key_file" + )] + pub mtls: bool, + + /// CA certificate file (.pem format) + #[arg(long = "ca", requires = "mtls")] + pub ca_file: Option, + + /// Client certificate file (.pem format) + #[arg(long = "cert", requires = "mtls")] + pub cert_file: Option, + + /// Client RSA key file + #[arg(long = "key", requires = "mtls")] + pub key_file: Option, } impl RiemannExporter { /// Returns a RiemannExporter instance. 
- pub fn new(sensor: Box) -> RiemannExporter { - RiemannExporter { sensor } + pub fn new(sensor: &dyn Sensor, args: ExporterArgs) -> RiemannExporter { + // Prepare the retrieval of the measurements + let topo = sensor + .get_topology() + .expect("sensor topology should be available"); + let metric_generator = + MetricGenerator::new(topo, utils::get_hostname(), args.qemu, args.containers); + + // Initialize the connection to the Riemann server + let client = if args.mtls { + Client::connect_tls( + &args.address, + args.port, + &args.ca_file.clone().unwrap(), + &args.cert_file.clone().unwrap(), + &args.key_file.clone().unwrap(), + ) + .expect("failed to connect to Riemann using mTLS") + } else { + Client::connect(&(args.address.clone(), args.port)) + .expect("failed to connect to Riemann using raw TCP") + }; + let riemann_client = RiemannClient { client }; + RiemannExporter { + metric_generator, + riemann_client, + args, + } } } impl Exporter for RiemannExporter { /// Entry point of the RiemannExporter. 
- fn run(&mut self, parameters: ArgMatches) { - let dispatch_duration: u64 = parameters - .value_of("dispatch_duration") - .unwrap() - .parse() - .expect("Wrong dispatch_duration value, should be a number of seconds"); - - let hostname = get_hostname(); - - let mut rclient = RiemannClient::new(¶meters); - + fn run(&mut self) { info!( "{}: Starting Riemann exporter", Utc::now().format("%Y-%m-%dT%H:%M:%S") ); println!("Press CTRL-C to stop scaphandre"); - println!("Measurement step is: {dispatch_duration}s"); - - let topology = self.sensor.get_topology().unwrap(); - let mut metric_generator = MetricGenerator::new( - topology, - hostname, - parameters.is_present("qemu"), - parameters.is_present("containers"), - ); + + let dispatch_interval = Duration::from_secs(self.args.dispatch_interval); + println!("Dispatch interval is {dispatch_interval:?}"); loop { info!( @@ -152,7 +184,7 @@ impl Exporter for RiemannExporter { Utc::now().format("%Y-%m-%dT%H:%M:%S") ); - metric_generator + self.metric_generator .topology .proc_tracker .clean_terminated_process_records_vectors(); @@ -161,17 +193,17 @@ impl Exporter for RiemannExporter { "{}: Refresh topology", Utc::now().format("%Y-%m-%dT%H:%M:%S") ); - metric_generator.topology.refresh(); + self.metric_generator.topology.refresh(); info!("{}: Refresh data", Utc::now().format("%Y-%m-%dT%H:%M:%S")); // Here we need a specific behavior for process metrics, so we call each gen function // and then implement that specific behavior (we don't use gen_all_metrics). 
- metric_generator.gen_self_metrics(); - metric_generator.gen_host_metrics(); - metric_generator.gen_socket_metrics(); + self.metric_generator.gen_self_metrics(); + self.metric_generator.gen_host_metrics(); + self.metric_generator.gen_socket_metrics(); let mut data = vec![]; - let processes_tracker = &metric_generator.topology.proc_tracker; + let processes_tracker = &self.metric_generator.topology.proc_tracker; for pid in processes_tracker.get_alive_pids() { let exe = processes_tracker.get_process_name(pid); @@ -185,7 +217,7 @@ impl Exporter for RiemannExporter { if let Some(cmdline_str) = cmdline { attributes.insert("cmdline".to_string(), cmdline_str.replace('\"', "\\\"")); - if parameters.is_present("qemu") { + if self.args.qemu { if let Some(vmname) = utils::filter_qemu_cmdline(&cmdline_str) { attributes.insert("vmname".to_string(), vmname); } @@ -198,7 +230,8 @@ impl Exporter for RiemannExporter { "{}_{}_{}", "scaph_process_power_consumption_microwatts", pid, exe ); - if let Some(power) = metric_generator + if let Some(power) = self + .metric_generator .topology .get_process_power_consumption_microwatts(pid) { @@ -218,91 +251,20 @@ impl Exporter for RiemannExporter { } // Send all data info!("{}: Send data", Utc::now().format("%Y-%m-%dT%H:%M:%S")); - for metric in metric_generator.pop_metrics() { - rclient.send_metric(&metric); + for metric in self.metric_generator.pop_metrics() { + self.riemann_client.send_metric(&metric); } for metric in data { - rclient.send_metric(&metric); + self.riemann_client.send_metric(&metric); } - thread::sleep(Duration::new(dispatch_duration, 0)); + // Pause for some time + std::thread::sleep(dispatch_interval); } } - /// Returns options understood by the exporter. - fn get_options() -> Vec> { - let mut options = Vec::new(); - let arg = Arg::with_name("address") - .default_value(DEFAULT_IP_ADDRESS) - .help("Riemann ipv6 or ipv4 address. 
If mTLS is used then server fqdn must be provided") - .long("address") - .short("a") - .required(false) - .takes_value(true); - options.push(arg); - - let arg = Arg::with_name("port") - .default_value(DEFAULT_PORT) - .help("Riemann TCP port number") - .long("port") - .short("p") - .required(false) - .takes_value(true); - options.push(arg); - - let arg = Arg::with_name("dispatch_duration") - .default_value("5") - .help("Duration between metrics dispatch") - .long("dispatch") - .short("d") - .required(false) - .takes_value(true); - options.push(arg); - - let arg = Arg::with_name("qemu") - .help("Instruct that scaphandre is running on an hypervisor") - .long("qemu") - .short("q") - .required(false) - .takes_value(false); - options.push(arg); - - let arg = Arg::with_name("mtls") - .help("Connect to a Riemann server using mTLS. Parameters address, ca, cert and key must be defined.") - .long("mtls") - .required(false) - .takes_value(false) - .requires_all(&["address","cafile", "certfile", "keyfile"]); - options.push(arg); - - let arg = Arg::with_name("cafile") - .help("CA certificate file (.pem format)") - .long("ca") - .required(false) - .takes_value(true) - .display_order(1000) - .requires("mtls"); - options.push(arg); - - let arg = Arg::with_name("certfile") - .help("Client certificate file (.pem format)") - .long("cert") - .required(false) - .takes_value(true) - .display_order(1001) - .requires("mtls"); - options.push(arg); - - let arg = Arg::with_name("keyfile") - .help("Client RSA key") - .long("key") - .required(false) - .takes_value(true) - .display_order(1001) - .requires("mtls"); - options.push(arg); - - options + fn kind(&self) -> &str { + "riemann" } } diff --git a/src/exporters/stdout.rs b/src/exporters/stdout.rs index c466d6a3..ce50c727 100644 --- a/src/exporters/stdout.rs +++ b/src/exporters/stdout.rs @@ -1,185 +1,135 @@ -use clap::Arg; - use crate::exporters::*; -use crate::sensors::{utils::IProcess, Sensor}; -use colored::*; +use 
crate::sensors::{utils::current_system_time_since_epoch, utils::IProcess, Sensor}; use regex::Regex; -use std::fmt::Write as _; +use std::fmt::Write; use std::thread; use std::time::{Duration, Instant}; /// An Exporter that displays power consumption data of the host /// and its processes on the standard output of the terminal. pub struct StdoutExporter { - sensor: Box, + metric_generator: MetricGenerator, + args: ExporterArgs, } -impl Exporter for StdoutExporter { - /// Lanches runner() - fn run(&mut self, parameters: ArgMatches) { - self.runner(parameters); - } - - /// Returns options needed for that exporter, as a HashMap - fn get_options() -> Vec> { - let mut options = Vec::new(); - let arg = Arg::with_name("timeout") - .default_value("10") - .help("Maximum time spent measuring, in seconds. 0 means continuous measurement.") - .long("timeout") - .short("t") - .required(false) - .takes_value(true); - options.push(arg); - - let arg = Arg::with_name("step_duration") - .default_value("2") - .help("Set measurement step duration in second.") - .long("step") - .short("s") - .required(false) - .takes_value(true); - options.push(arg); - - let arg = Arg::with_name("process_number") - .default_value("5") - .help("Number of processes to display.") - .long("process") - .short("p") - .required(false) - .takes_value(true); - options.push(arg); - - let arg = Arg::with_name("regex_filter") - .help("Filter processes based on regular expressions (e.g: 'scaph\\w\\wd.e'). This option disable '-p' or '--process' one.") - .long("regex") - .short("r") - .required(false) - .takes_value(true); - options.push(arg); - - let arg = Arg::with_name("qemu") - .help("Apply labels to metrics of processes looking like a Qemu/KVM virtual machine") - .long("qemu") - .short("q") - .required(false) - .takes_value(false); - options.push(arg); - - options - } +/// Holds the arguments for a StdoutExporter. 
+/// +/// When using Scaphandre as a command-line application, such a struct will be +/// automatically populated by the clap library. If you're using Scaphandre as +/// a library, you should populate the arguments yourself. +#[derive(clap::Args, Debug)] +// The command group makes `processes` and `regex_filter` exclusive. +#[command(group(clap::ArgGroup::new("disp").args(["processes", "regex_filter"])))] +pub struct ExporterArgs { + /// Maximum time spent measuring, in seconds. + /// If negative, runs forever. + #[arg(short, long, default_value_t = 10)] + pub timeout: i64, + + /// Interval between two measurements, in seconds + #[arg(short, long, value_name = "SECONDS", default_value_t = 2)] + pub step: u64, + + /// Maximum number of processes to display + #[arg(short, long, default_value_t = 5)] + pub processes: u16, + + /// Filter processes based on regular expressions (example: 'scaph\\w\\w.e') + #[arg(short, long)] + pub regex_filter: Option, + + /// Monitor and apply labels for processes running as containers + #[arg(long)] + pub containers: bool, + + /// Apply labels to metrics of processes looking like a Qemu/KVM virtual machine + #[arg(short, long)] + pub qemu: bool, + + /// Display metrics with their names + #[arg(long)] + pub raw_metrics: bool, } -impl StdoutExporter { - /// Instantiates and returns a new StdoutExporter - pub fn new(sensor: Box) -> StdoutExporter { - StdoutExporter { sensor } - } - - /// Runs iteration() every 'step', during until 'timeout' - pub fn runner(&mut self, parameters: ArgMatches) { - // Parse parameters - // All parameters have a default values so it is safe to unwrap them. - // Panic if a non numerical value is passed except for regex_filter. 
- - let timeout_secs: u64 = parameters - .value_of("timeout") - .unwrap() - .parse() - .expect("Wrong timeout value, should be a number of seconds"); - - let step_duration: u64 = parameters - .value_of("step_duration") - .unwrap() - .parse() - .expect("Wrong step_duration value, should be a number of seconds"); - - let process_number: u16 = parameters - .value_of("process_number") - .unwrap() - .parse() - .expect("Wrong process_number value, should be a number"); - - let regex_filter: Option = if !parameters.is_present("regex_filter") - || parameters.value_of("regex_filter").unwrap().is_empty() - { +impl Exporter for StdoutExporter { + /// Runs [iterate()] every `step` until `timeout` + fn run(&mut self) { + let time_step = Duration::from_secs(self.args.step); + let time_limit = if self.args.timeout < 0 { None } else { - Some( - Regex::new(parameters.value_of("regex_filter").unwrap()) - .expect("Wrong regex_filter, regexp is invalid"), - ) + Some(Duration::from_secs(self.args.timeout.unsigned_abs())) }; - if parameters.occurrences_of("regex_filter") == 1 - && parameters.occurrences_of("process_number") == 1 - { - let warning = - String::from("Warning: (-p / --process) and (-r / --regex) used at the same time. 
(-p / --process) disabled"); - eprintln!("{}", warning.bright_yellow()); - } - - let topology = self.sensor.get_topology().unwrap(); - let mut metric_generator = MetricGenerator::new( - topology, - utils::get_hostname(), - parameters.is_present("qemu"), - parameters.is_present("containers"), - ); - - println!("Measurement step is: {step_duration}s"); - if timeout_secs == 0 { - loop { - self.iterate(®ex_filter, process_number, &mut metric_generator); - thread::sleep(Duration::new(step_duration, 0)); + println!("Measurement step is: {time_step:?}"); + if let Some(timeout) = time_limit { + let t0 = Instant::now(); + while t0.elapsed() <= timeout { + self.iterate(); + thread::sleep(time_step); } } else { - let now = Instant::now(); - - while now.elapsed().as_secs() <= timeout_secs { - self.iterate(®ex_filter, process_number, &mut metric_generator); - thread::sleep(Duration::new(step_duration, 0)); + loop { + self.iterate(); + thread::sleep(time_step); } } } - fn iterate( - &mut self, - regex_filter: &Option, - process_number: u16, - metric_generator: &mut MetricGenerator, - ) { - metric_generator + fn kind(&self) -> &str { + "stdout" + } +} + +impl StdoutExporter { + /// Instantiates and returns a new StdoutExporter + pub fn new(sensor: &dyn Sensor, args: ExporterArgs) -> StdoutExporter { + // Prepare the retrieval of the measurements + let topo = sensor + .get_topology() + .expect("sensor topology should be available"); + + let metric_generator = + MetricGenerator::new(topo, utils::get_hostname(), args.qemu, args.containers); + + StdoutExporter { + metric_generator, + args, + } + } + + fn iterate(&mut self) { + self.metric_generator .topology .proc_tracker .clean_terminated_process_records_vectors(); - metric_generator.topology.refresh(); - self.show_metrics(regex_filter, process_number, metric_generator); + self.metric_generator.topology.refresh(); + self.show_metrics(); } - fn show_metrics( - &self, - regex_filter: &Option, - process_number: u16, - metric_generator: 
&mut MetricGenerator, - ) { - metric_generator.gen_all_metrics(); - - let metrics = metric_generator.pop_metrics(); + fn summarized_view(&mut self, metrics: Vec) { let mut metrics_iter = metrics.iter(); + let none_value = MetricValueType::Text("0".to_string()); + let mut host_power_source = String::from(""); let host_power = match metrics_iter.find(|x| x.name == "scaph_host_power_microwatts") { - Some(m) => m.metric_value.clone(), - None => MetricValueType::Text("0".to_string()), + Some(m) => { + if let Some(src) = &m.attributes.get("value_source") { + host_power_source = src.to_string() + } + &m.metric_value + } + None => &none_value, }; - let domain_names = metric_generator.topology.domains_names.as_ref(); + let domain_names = self.metric_generator.topology.domains_names.as_ref(); if domain_names.is_some() { info!("domain_names: {:?}", domain_names.unwrap()); } println!( - "Host:\t{} W", - (format!("{host_power}").parse::().unwrap() / 1000000.0) + "Host:\t{} W from {}", + (format!("{host_power}").parse::().unwrap() / 1000000.0), + host_power_source ); if domain_names.is_some() { @@ -190,6 +140,7 @@ impl StdoutExporter { .iter() .filter(|x| x.name == "scaph_socket_power_microwatts") { + debug!("✅ Found socket power metric !"); let power = format!("{}", s.metric_value).parse::().unwrap() / 1000000.0; let mut power_str = String::from("----"); if power > 0.0 { @@ -233,22 +184,27 @@ impl StdoutExporter { } } println!("{to_print}\n"); + } else { + println!("{to_print} Could'nt get per-domain metrics.\n"); } } let consumers: Vec<(IProcess, f64)>; - if let Some(regex_filter) = regex_filter { - println!("Processes filtered by '{}':", regex_filter.as_str()); - consumers = metric_generator + if let Some(regex) = &self.args.regex_filter { + println!("Processes filtered by '{regex}':"); + consumers = self + .metric_generator .topology .proc_tracker - .get_filtered_processes(regex_filter); + .get_filtered_processes(regex); } else { - println!("Top {process_number} 
consumers:"); - consumers = metric_generator + let n = self.args.processes; + println!("Top {n} consumers:"); + consumers = self + .metric_generator .topology .proc_tracker - .get_top_consumers(process_number); + .get_top_consumers(n); } info!("consumers : {:?}", consumers); @@ -260,7 +216,7 @@ impl StdoutExporter { if let Some(process) = metrics.iter().find(|x| { if x.name == "scaph_process_power_consumption_microwatts" { let pid = x.attributes.get("pid").unwrap(); - pid.parse::().unwrap() == c.0.pid + pid == &c.0.pid.to_string() } else { false } @@ -276,6 +232,29 @@ impl StdoutExporter { } println!("------------------------------------------------------------\n"); } + + fn raw_metrics_view(&mut self, metrics: Vec) { + println!("## At {}", current_system_time_since_epoch().as_secs()); + for m in metrics { + let serialized_data = serde_json::to_string(&m.attributes).unwrap(); + println!( + "{} = {} {} # {}", + m.name, m.metric_value, serialized_data, m.description + ); + } + } + + fn show_metrics(&mut self) { + self.metric_generator.gen_all_metrics(); + + let metrics = self.metric_generator.pop_metrics(); + + if self.args.raw_metrics { + self.raw_metrics_view(metrics); + } else { + self.summarized_view(metrics); + } + } } #[cfg(test)] diff --git a/src/exporters/utils.rs b/src/exporters/utils.rs index a0be7ba0..2c0e4c8a 100644 --- a/src/exporters/utils.rs +++ b/src/exporters/utils.rs @@ -2,12 +2,17 @@ //! //! The utils module provides common functions used by the exporters. use clap::crate_version; +use std::collections::HashMap; +use std::fmt::Write; #[cfg(feature = "containers")] use { docker_sync::Docker, k8s_sync::{errors::KubernetesError, kubernetes::Kubernetes}, }; +/// Default ipv4/ipv6 address to expose the service is any +pub const DEFAULT_IP_ADDRESS: &str = "::"; + /// Returns a cmdline String filtered from potential characters that /// could break exporters output. 
/// @@ -18,6 +23,30 @@ pub fn filter_cmdline(cmdline: &str) -> String { cmdline.replace('\"', "\\\"").replace('\n', "") } +/// Returns a well formatted Prometheus metric string. +pub fn format_prometheus_metric( + key: &str, + value: &str, + labels: Option<&HashMap>, +) -> String { + let mut result = key.to_string(); + if let Some(labels) = labels { + result.push('{'); + for (k, v) in labels.iter() { + let _ = write!( + result, + "{}=\"{}\",", + k, + v.replace('\"', "_").replace('\\', "") + ); + } + result.remove(result.len() - 1); + result.push('}'); + } + let _ = writeln!(result, " {value}"); + result +} + /// Returns an Option containing the VM name of a qemu process. /// /// Then VM name is extracted from the command line. diff --git a/src/exporters/warpten.rs b/src/exporters/warpten.rs index 4ebf4ce2..5de93371 100644 --- a/src/exporters/warpten.rs +++ b/src/exporters/warpten.rs @@ -1,365 +1,132 @@ +use super::utils::get_hostname; use crate::exporters::*; -use crate::sensors::{RecordGenerator, Sensor, Topology}; -use clap::Arg; +use crate::sensors::Sensor; use std::time::Duration; -use std::{env, thread}; -use utils::get_scaphandre_version; -//use warp10::data::Format; /// An exporter that sends power consumption data of the host and its processes to /// a [Warp10](https://warp10.io) instance through **HTTP(s)** /// (contributions welcome to support websockets). 
pub struct Warp10Exporter { - topology: Topology, + metric_generator: MetricGenerator, + /// Warp10 client + client: warp10::Client, + /// Warp10 auth token + write_token: String, + + step: Duration, } -impl Exporter for Warp10Exporter { - /// Control loop for self.iteration() - fn run(&mut self, parameters: clap::ArgMatches) { - let host = parameters.value_of("host").unwrap(); - let scheme = parameters.value_of("scheme").unwrap(); - let port = parameters.value_of("port").unwrap(); - let write_token = if let Some(token) = parameters.value_of("write-token") { - token.to_owned() - } else { - match env::var("SCAPH_WARP10_WRITE_TOKEN") { - Ok(val) => val, - Err(_e) => panic!( - "SCAPH_WARP10_WRITE_TOKEN not found in env, nor write-token flag was used." - ), - } - }; - //let read_token = parameters.value_of("read-token"); - let step = parameters.value_of("step").unwrap(); - let qemu = parameters.is_present("qemu"); +/// Holds the arguments for a Warp10Exporter. +#[derive(clap::Args, Debug)] +pub struct ExporterArgs { + /// FQDN or IP address of the Warp10 instance + #[arg(short = 'H', long, default_value = "localhost")] + pub host: String, + + /// TCP port of the Warp10 instance + #[arg(short, long, default_value_t = 8080)] + pub port: u16, + + /// "http" or "https" + #[arg(short = 'S', long, default_value = "http")] + pub scheme: String, + + /// Auth token to write data to Warp10. 
+ /// If not specified, you must set the env variable SCAPH_WARP10_WRITE_TOKEN + #[arg(short = 't', long)] + pub write_token: Option, + + /// Interval between two measurements, in seconds + #[arg(short, long, value_name = "SECONDS", default_value_t = 2)] + pub step: u64, + + /// Apply labels to metrics of processes looking like a Qemu/KVM virtual machine + #[arg(short, long)] + pub qemu: bool, +} +const TOKEN_ENV_VAR: &str = "SCAPH_WARP10_WRITE_TOKEN"; + +impl Exporter for Warp10Exporter { + /// Control loop for self.iterate() + fn run(&mut self) { loop { - match self.iteration( - host, - scheme, - port.parse::().unwrap(), - &write_token, - //read_token, - qemu, - ) { + match self.iterate() { Ok(res) => debug!("Result: {:?}", res), Err(err) => error!("Failed ! {:?}", err), } - thread::sleep(Duration::new(step.parse::().unwrap(), 0)); + std::thread::sleep(self.step); } } - /// Options for configuring the exporter. - fn get_options() -> Vec> { - let mut options = Vec::new(); - let arg = Arg::with_name("host") - .default_value("localhost") - .help("Warp10 host's FQDN or IP address to send data to") - .long("host") - .short("H") - .required(false) - .takes_value(true); - options.push(arg); - - let arg = Arg::with_name("scheme") - .default_value("http") - .help("Either 'http' or 'https'") - .long("scheme") - .short("s") - .required(false) - .takes_value(true); - options.push(arg); - - let arg = Arg::with_name("port") - .default_value("8080") - .help("TCP port to join Warp10 on the host") - .long("port") - .short("p") - .required(false) - .takes_value(true); - options.push(arg); - - let arg = Arg::with_name("write-token") - .help("Auth. 
token to write on Warp10") - .long("write-token") - .short("t") - .required(false) - .takes_value(true); - options.push(arg); - - let arg = Arg::with_name("step") - .default_value("30") - .help("Time step between measurements, in seconds.") - .long("step") - .short("S") - .required(false) - .takes_value(true); - options.push(arg); - - let arg = Arg::with_name("qemu") - .help("Tells scaphandre it is running on a Qemu hypervisor.") - .long("qemu") - .short("q") - .required(false) - .takes_value(false); - options.push(arg); - - options + fn kind(&self) -> &str { + "warp10" } } impl Warp10Exporter { /// Instantiates and returns a new Warp10Exporter - pub fn new(mut sensor: Box) -> Warp10Exporter { - if let Some(topo) = *sensor.get_topology() { - Warp10Exporter { topology: topo } - } else { - error!("Could'nt generate the Topology."); - panic!("Could'nt generate the Topology."); + pub fn new(sensor: &dyn Sensor, args: ExporterArgs) -> Warp10Exporter { + // Prepare for measurement + let topology = sensor + .get_topology() + .expect("sensor topology should be available"); + let metric_generator = MetricGenerator::new(topology, get_hostname(), args.qemu, false); + + // Prepare for sending data to Warp10 + let scheme = args.scheme; + let host = args.host; + let port = args.port; + let client = warp10::Client::new(&format!("{scheme}://{host}:{port}")) + .expect("warp10 Client could not be created"); + let write_token = args.write_token.unwrap_or_else(|| { + std::env::var(TOKEN_ENV_VAR).unwrap_or_else(|_| panic!("No token found, you must provide either --write-token or the env var {TOKEN_ENV_VAR}")) + }); + + Warp10Exporter { + metric_generator, + client, + write_token, + step: Duration::from_secs(args.step), } } /// Collects data from the Topology, creates warp10::Data objects containing the /// metric itself and some labels attaches, stores them in a vector and sends it /// to Warp10 - pub fn iteration( - &mut self, - host: &str, - scheme: &str, - port: u16, - write_token: 
&str, - //read_token: Option<&str>, - qemu: bool, - ) -> Result, warp10::Error> { - let client = warp10::Client::new(&format!("{scheme}://{host}:{port}"))?; - let writer = client.get_writer(write_token.to_string()); - self.topology + pub fn iterate(&mut self) -> Result, warp10::Error> { + let writer = self.client.get_writer(self.write_token.clone()); + self.metric_generator + .topology .proc_tracker .clean_terminated_process_records_vectors(); debug!("Refreshing topology."); - self.topology.refresh(); - - let records = self.topology.get_records_passive(); - let scaphandre_version = get_scaphandre_version(); - - let labels = vec![]; - - let mut data = vec![warp10::Data::new( - time::OffsetDateTime::now_utc(), - None, - String::from("scaph_self_version"), - labels.clone(), - warp10::Value::Double(scaphandre_version.parse::().unwrap()), - )]; - - if let Some(metric_value) = self - .topology - .get_process_cpu_consumption_percentage(procfs::process::Process::myself().unwrap().pid) - { - data.push(warp10::Data::new( - time::OffsetDateTime::now_utc(), - None, - String::from("scaph_self_cpu_usage_percent"), - labels.clone(), - warp10::Value::Int(metric_value.value.parse::().unwrap()), - )); - } - - if let Some(metric_value) = self - .topology - .get_process_cpu_consumption_percentage(procfs::process::Process::myself().unwrap().pid) - { - data.push(warp10::Data::new( - time::OffsetDateTime::now_utc(), - None, - String::from("scaph_self_cpu_usage_percent"), - labels.clone(), - warp10::Value::Int(metric_value.value.parse::().unwrap()), - )); - } - - if let Ok(metric_value) = procfs::process::Process::myself().unwrap().statm() { - let value = metric_value.size * procfs::page_size().unwrap() as u64; - data.push(warp10::Data::new( - time::OffsetDateTime::now_utc(), - None, - String::from("scaph_self_mem_total_program_size"), - labels.clone(), - warp10::Value::Int(value as i32), - )); - let value = metric_value.resident * procfs::page_size().unwrap() as u64; - 
data.push(warp10::Data::new( - time::OffsetDateTime::now_utc(), - None, - String::from("scaph_self_mem_resident_set_size"), - labels.clone(), - warp10::Value::Int(value as i32), - )); - let value = metric_value.shared * procfs::page_size().unwrap() as u64; - data.push(warp10::Data::new( - time::OffsetDateTime::now_utc(), - None, - String::from("scaph_self_mem_shared_resident_size"), - labels.clone(), - warp10::Value::Int(value as i32), - )); - } - - let metric_value = self.topology.stat_buffer.len(); - data.push(warp10::Data::new( - time::OffsetDateTime::now_utc(), - None, - String::from("scaph_self_topo_stats_nb"), - labels.clone(), - warp10::Value::Int(metric_value as i32), - )); - - let metric_value = self.topology.record_buffer.len(); - data.push(warp10::Data::new( - time::OffsetDateTime::now_utc(), - None, - String::from("scaph_self_topo_records_nb"), - labels.clone(), - warp10::Value::Int(metric_value as i32), - )); - - let metric_value = self.topology.proc_tracker.procs.len(); - data.push(warp10::Data::new( - time::OffsetDateTime::now_utc(), - None, - String::from("scaph_self_topo_procs_nb"), - labels.clone(), - warp10::Value::Int(metric_value as i32), - )); + self.metric_generator.topology.refresh(); - for socket in &self.topology.sockets { - let mut metric_labels = labels.clone(); - metric_labels.push(warp10::Label::new("socket_id", &socket.id.to_string())); - let metric_value = socket.stat_buffer.len(); - data.push(warp10::Data::new( - time::OffsetDateTime::now_utc(), - None, - String::from("scaph_self_socket_stats_nb"), - metric_labels.clone(), - warp10::Value::Int(metric_value as i32), - )); - let metric_value = socket.record_buffer.len(); - data.push(warp10::Data::new( - time::OffsetDateTime::now_utc(), - None, - String::from("scaph_self_socket_records_nb"), - metric_labels.clone(), - warp10::Value::Int(metric_value as i32), - )); + self.metric_generator.gen_all_metrics(); - let socket_records = socket.get_records_passive(); - if 
!socket_records.is_empty() { - let socket_energy_microjoules = &socket_records.last().unwrap().value; - if let Ok(metric_value) = socket_energy_microjoules.parse::() { - data.push(warp10::Data::new( - time::OffsetDateTime::now_utc(), - None, - String::from("scaph_socket_energy_microjoules"), - metric_labels.clone(), - warp10::Value::Long(metric_value), - )); - } + let mut process_data: Vec = vec![]; - if let Some(metric_value) = socket.get_records_diff_power_microwatts() { - data.push(warp10::Data::new( - time::OffsetDateTime::now_utc(), - None, - String::from("scaph_socket_power_microwatts"), - metric_labels.clone(), - warp10::Value::Long(metric_value.value.parse::().unwrap()), - )); - } - } + for metric in self.metric_generator.pop_metrics() { + let mut labels = vec![]; - for domain in &socket.domains { - let mut metric_labels = labels.clone(); - metric_labels.push(warp10::Label::new("rapl_domain_name", &domain.name)); - let metric_value = domain.record_buffer.len(); - data.push(warp10::Data::new( - time::OffsetDateTime::now_utc(), - None, - String::from("scaph_self_domain_records_nb"), - metric_labels.clone(), - warp10::Value::Int(metric_value as i32), - )); + for (k, v) in &metric.attributes { + labels.push(warp10::Label::new(k, v)); } - } - - if !records.is_empty() { - let record = records.last().unwrap(); - let metric_value = record.value.clone(); - data.push(warp10::Data::new( + process_data.push(warp10::Data::new( time::OffsetDateTime::now_utc(), None, - String::from("scaph_host_energy_microjoules"), - labels.clone(), - warp10::Value::Long(metric_value.parse::().unwrap()), + metric.name, + labels, + warp10::Value::String(metric.metric_value.to_string().replace('`', "")), )); - - if let Some(metric_value) = self.topology.get_records_diff_power_microwatts() { - data.push(warp10::Data::new( - time::OffsetDateTime::now_utc(), - None, - String::from("scaph_host_power_microwatts"), - labels.clone(), - warp10::Value::Long(metric_value.value.parse::().unwrap()), - 
)); - } } - let res = writer.post_sync(data)?; + let res = writer.post_sync(process_data)?; - let mut results = vec![res]; - - let mut process_data = vec![warp10::Data::new( - time::OffsetDateTime::now_utc(), - None, - String::from("scaph_self_version"), - labels.clone(), - warp10::Value::Double(scaphandre_version.parse::().unwrap()), - )]; - - let processes_tracker = &self.topology.proc_tracker; - for pid in processes_tracker.get_alive_pids() { - let exe = processes_tracker.get_process_name(pid); - let cmdline = processes_tracker.get_process_cmdline(pid); - - let mut plabels = labels.clone(); - plabels.push(warp10::Label::new("pid", &pid.to_string())); - plabels.push(warp10::Label::new("exe", &exe)); - if let Some(cmdline_str) = cmdline { - if qemu { - if let Some(vmname) = utils::filter_qemu_cmdline(&cmdline_str) { - plabels.push(warp10::Label::new("vmname", &vmname)); - } - } - plabels.push(warp10::Label::new( - "cmdline", - &cmdline_str.replace('\"', "\\\""), - )); - } - let metric_name = format!( - "{}_{}_{}", - "scaph_process_power_consumption_microwats", pid, exe - ); - if let Some(power) = self.topology.get_process_power_consumption_microwatts(pid) { - process_data.push(warp10::Data::new( - time::OffsetDateTime::now_utc(), - None, - metric_name, - plabels, - warp10::Value::Long(power.value.parse::().unwrap()), - )); - } - } - let process_res = writer.post_sync(process_data)?; + let results = vec![res]; //if let Some(token) = read_token { //let reader = client.get_reader(token.to_owned()); @@ -377,8 +144,6 @@ impl Warp10Exporter { //} //} - results.push(process_res); - Ok(results) } } diff --git a/src/lib.rs b/src/lib.rs index b9631b19..af59dc34 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -7,189 +7,25 @@ extern crate log; pub mod exporters; pub mod sensors; -use clap::ArgMatches; -use colored::*; -#[cfg(feature = "json")] -use exporters::json::JSONExporter; -#[cfg(feature = "prometheus")] -use exporters::prometheus::PrometheusExporter; -#[cfg(all(target_os = 
"linux", not(feature = "warpten")))] -use exporters::qemu::QemuExporter; -#[cfg(feature = "riemann")] -use exporters::riemann::RiemannExporter; -#[cfg(feature = "warpten")] -use exporters::warpten::Warp10Exporter; -use exporters::{stdout::StdoutExporter, Exporter}; -#[cfg(target_os = "windows")] -use sensors::msr_rapl::MsrRAPLSensor; -#[cfg(target_os = "linux")] -use sensors::powercap_rapl::PowercapRAPLSensor; -use sensors::Sensor; -use std::collections::HashMap; -use std::time::{Duration, SystemTime}; - -/// Helper function to get an argument from ArgMatches -fn get_argument(matches: &ArgMatches, arg: &'static str) -> String { - if let Some(value) = matches.value_of(arg) { - return String::from(value); - } - panic!("Couldn't get argument {}", arg); -} - -/// Helper function to get a Sensor instance from ArgMatches -fn get_sensor(matches: &ArgMatches) -> Box { - let sensor = match &get_argument(matches, "sensor")[..] { - #[cfg(target_os = "linux")] - "powercap_rapl" => PowercapRAPLSensor::new( - get_argument(matches, "sensor-buffer-per-socket-max-kB") - .parse() - .unwrap(), - get_argument(matches, "sensor-buffer-per-domain-max-kB") - .parse() - .unwrap(), - matches.is_present("vm"), - ), - #[cfg(target_os = "linux")] - _ => PowercapRAPLSensor::new( - get_argument(matches, "sensor-buffer-per-socket-max-kB") - .parse() - .unwrap(), - get_argument(matches, "sensor-buffer-per-domain-max-kB") - .parse() - .unwrap(), - matches.is_present("vm"), - ), - #[cfg(not(target_os = "linux"))] - _ => MsrRAPLSensor::new(), - }; - Box::new(sensor) -} -/// Matches the sensor and exporter name and options requested from the command line and -/// creates the appropriate instances. Launchs the standardized entrypoint of -/// the choosen exporter: run() -/// This function should be updated to take new exporters into account. 
-pub fn run(matches: ArgMatches) { - loggerv::init_with_verbosity(matches.occurrences_of("v")).unwrap(); - - let sensor_boxed = get_sensor(&matches); - let exporter_parameters; - - let mut header = true; - if matches.is_present("no-header") { - header = false; - } +#[cfg(target_os = "windows")] +use sensors::msr_rapl; - if let Some(stdout_exporter_parameters) = matches.subcommand_matches("stdout") { - if header { - scaphandre_header("stdout"); - } - exporter_parameters = stdout_exporter_parameters.clone(); - let mut exporter = StdoutExporter::new(sensor_boxed); - exporter.run(exporter_parameters); - } else if let Some(json_exporter_parameters) = matches.subcommand_matches("json") { - if header { - scaphandre_header("json"); - } - exporter_parameters = json_exporter_parameters.clone(); - let mut exporter = JSONExporter::new(sensor_boxed); - exporter.run(exporter_parameters); - } else if let Some(riemann_exporter_parameters) = matches.subcommand_matches("riemann") { - if header { - scaphandre_header("riemann"); - } - exporter_parameters = riemann_exporter_parameters.clone(); - let mut exporter = RiemannExporter::new(sensor_boxed); - exporter.run(exporter_parameters); - } else if let Some(prometheus_exporter_parameters) = matches.subcommand_matches("prometheus") { - if header { - scaphandre_header("prometheus"); - } - exporter_parameters = prometheus_exporter_parameters.clone(); - let mut exporter = PrometheusExporter::new(sensor_boxed); - exporter.run(exporter_parameters); - } else { - #[cfg(target_os = "linux")] - { - #[cfg(feature = "warpten")] - { - if let Some(warp10_exporter_parameters) = matches.subcommand_matches("warp10") { - if header { - scaphandre_header("warp10"); - } - exporter_parameters = warp10_exporter_parameters.clone(); - let mut exporter = Warp10Exporter::new(sensor_boxed); - exporter.run(exporter_parameters); - } - } - #[cfg(not(feature = "warpten"))] - { - if let Some(qemu_exporter_parameters) = matches.subcommand_matches("qemu") { - if header { 
- scaphandre_header("qemu"); - } - exporter_parameters = qemu_exporter_parameters.clone(); - let mut exporter = QemuExporter::new(sensor_boxed); - exporter.run(exporter_parameters); - } - error!("Warp10 exporter feature was not included in this build."); - } - } - error!("Couldn't determine which exporter to run."); - } -} +#[cfg(target_os = "linux")] +use sensors::powercap_rapl; -/// Returns options needed for each exporter as a HashMap. -/// This function has to be updated to enable a new exporter. -pub fn get_exporters_options() -> HashMap>> { - let mut options = HashMap::new(); - options.insert( - String::from("stdout"), - exporters::stdout::StdoutExporter::get_options(), - ); - #[cfg(feature = "json")] - options.insert( - String::from("json"), - exporters::json::JSONExporter::get_options(), - ); - #[cfg(feature = "prometheus")] - options.insert( - String::from("prometheus"), - exporters::prometheus::PrometheusExporter::get_options(), - ); - #[cfg(feature = "riemann")] - options.insert( - String::from("riemann"), - exporters::riemann::RiemannExporter::get_options(), - ); +/// Create a new [`Sensor`] instance with the default sensor available, +/// with its default options. 
+pub fn get_default_sensor() -> impl sensors::Sensor { #[cfg(target_os = "linux")] - options.insert( - String::from("qemu"), - exporters::qemu::QemuExporter::get_options(), - ); - #[cfg(feature = "warp10")] - options.insert( - String::from("warp10"), - exporters::warpten::Warp10Exporter::get_options(), + return powercap_rapl::PowercapRAPLSensor::new( + powercap_rapl::DEFAULT_BUFFER_PER_SOCKET_MAX_KBYTES, + powercap_rapl::DEFAULT_BUFFER_PER_DOMAIN_MAX_KBYTES, + false, ); - options -} - -fn current_system_time_since_epoch() -> Duration { - SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() -} - -pub fn scaphandre_header(exporter_name: &str) { - let title = format!("Scaphandre {exporter_name} exporter"); - println!("{}", title.red().bold()); - println!("Sending ⚡ metrics"); -} -/// Returns rust crate version, can be use used in language bindings to expose Rust core version -pub fn crate_version() -> &'static str { - env!("CARGO_PKG_VERSION") + #[cfg(target_os = "windows")] + return msr_rapl::MsrRAPLSensor::new(); } // Copyright 2020 The scaphandre authors. diff --git a/src/main.rs b/src/main.rs index bd2ad3c4..f1d68881 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,92 +1,373 @@ //! Generic sensor and transmission agent for energy consumption related metrics. 
-use clap::{crate_authors, crate_version, App, AppSettings, Arg, SubCommand}; -use scaphandre::{get_exporters_options, run}; -fn main() { + +use clap::{command, ArgAction, Parser, Subcommand}; +use colored::Colorize; +use scaphandre::{exporters, sensors::Sensor}; + +#[cfg(target_os = "linux")] +use scaphandre::sensors::powercap_rapl; + +#[cfg(target_os = "windows")] +use scaphandre::sensors::msr_rapl; + +#[cfg(target_os = "windows")] +use windows_service::{ + service::ServiceControl, + service::ServiceControlAccept, + service::ServiceExitCode, + service::ServiceState, + service::ServiceStatus, + service::ServiceType, + service_control_handler::{self, ServiceControlHandlerResult}, + service_dispatcher, +}; + +#[cfg(target_os = "windows")] +define_windows_service!(ffi_service_main, my_service_main); + +#[cfg(target_os = "windows")] +#[macro_use] +extern crate windows_service; + +#[cfg(target_os = "windows")] +use std::time::Duration; + +#[cfg(target_os = "windows")] +use std::ffi::OsString; + +// the struct below defines the main Scaphandre command-line interface +/// Extensible metrology agent for electricity consumption related metrics. +#[derive(Parser)] +#[command(author, version)] +struct Cli { + /// The exporter module to use to output the energy consumption metrics + #[command(subcommand)] + exporter: ExporterChoice, + + /// Increase the verbosity level + #[arg(short, action = ArgAction::Count, default_value_t = 0)] + verbose: u8, + + /// Don't print the header to the standard output + #[arg(long, default_value_t = false)] + no_header: bool, + + /// Tell Scaphandre that it's running in a virtual machine. + /// You should have another instance of Scaphandre running on the hypervisor (see docs). + #[arg(long, default_value_t = false)] + vm: bool, + + /// The sensor module to use to gather the energy consumption metrics + #[arg(short, long)] + sensor: Option, + + /// Maximum memory size allowed, in KiloBytes, for storing energy consumption of each **domain**. 
+ /// Only available for the RAPL sensor (on Linux). #[cfg(target_os = "linux")] - let sensors = ["powercap_rapl"]; + #[arg(long, default_value_t = powercap_rapl::DEFAULT_BUFFER_PER_DOMAIN_MAX_KBYTES)] + sensor_buffer_per_domain_max_kb: u16, + + /// Maximum memory size allowed, in KiloBytes, for storing energy consumption of each **socket**. + /// Only available for the RAPL sensor (on Linux). + #[cfg(target_os = "linux")] + #[arg(long, default_value_t = powercap_rapl::DEFAULT_BUFFER_PER_SOCKET_MAX_KBYTES)] + sensor_buffer_per_socket_max_kb: u16, +} + +/// Defines the possible subcommands, one per exporter. +/// +/// ### Description style +/// Per the clap documentation, the description of commands and arguments should be written in the style applied here, +/// *not* in the third-person. That is, use "Do xyz" instead of "Does xyz". +#[derive(Subcommand)] +enum ExporterChoice { + /// Write the metrics to the terminal + Stdout(exporters::stdout::ExporterArgs), + + /// Write the metrics in the JSON format to a file or to stdout + #[cfg(feature = "json")] + Json(exporters::json::ExporterArgs), + + /// Expose the metrics to a Prometheus HTTP endpoint + #[cfg(feature = "prometheus")] + Prometheus(exporters::prometheus::ExporterArgs), + + /// Watch all Qemu-KVM virtual machines running on the host and expose the metrics + /// of each of them in a dedicated folder + #[cfg(feature = "qemu")] + Qemu, + + /// Expose the metrics to a Riemann server + #[cfg(feature = "riemann")] + Riemann(exporters::riemann::ExporterArgs), + + /// Expose the metrics to a Warp10 host, through HTTP + #[cfg(feature = "warpten")] + Warpten(exporters::warpten::ExporterArgs), + + /// Push metrics to Prometheus Push Gateway + #[cfg(feature = "prometheuspush")] + PrometheusPush(exporters::prometheuspush::ExporterArgs), +} + +#[cfg(target_os = "windows")] +fn my_service_main(_arguments: Vec) { + use std::thread::JoinHandle; + let graceful_period = 3; + + let start_status = ServiceStatus { + 
service_type: ServiceType::OWN_PROCESS, // Should match the one from system service registry + current_state: ServiceState::Running, // The new state + controls_accepted: ServiceControlAccept::STOP, // Accept stop events when running + exit_code: ServiceExitCode::Win32(0), // Used to report an error when starting or stopping only, otherwise must be zero + checkpoint: 0, // Only used for pending states, otherwise must be zero + wait_hint: Duration::default(), // Only used for pending states, otherwise must be zero + process_id: None, // Unused for setting status + }; + let stop_status = ServiceStatus { + service_type: ServiceType::OWN_PROCESS, + current_state: ServiceState::Stopped, + controls_accepted: ServiceControlAccept::STOP, + exit_code: ServiceExitCode::Win32(0), + checkpoint: 0, + wait_hint: Duration::default(), + process_id: None, + }; + let stoppending_status = ServiceStatus { + service_type: ServiceType::OWN_PROCESS, + current_state: ServiceState::StopPending, + controls_accepted: ServiceControlAccept::STOP, + exit_code: ServiceExitCode::Win32(0), + checkpoint: 0, + wait_hint: Duration::from_secs(graceful_period), + process_id: None, + }; + + let thread_handle: Option>; + let mut _stop = false; + let event_handler = move |control_event| -> ServiceControlHandlerResult { + println!("Got service control event: {:?}", control_event); + match control_event { + ServiceControl::Stop => { + // Handle stop event and return control back to the system. + _stop = true; + ServiceControlHandlerResult::NoError + } + // All services must accept Interrogate even if it's a no-op. 
+ ServiceControl::Interrogate => ServiceControlHandlerResult::NoError, + _ => ServiceControlHandlerResult::NotImplemented, + } + }; + + if let Ok(system_handler) = service_control_handler::register("scaphandre", event_handler) { + // Tell the system that the service is running now and run it + match system_handler.set_service_status(start_status.clone()) { + Ok(status_set) => { + println!( + "Starting main thread, service status has been set: {:?}", + status_set + ); + thread_handle = Some(std::thread::spawn(move || { + parse_cli_and_run_exporter(); + })); + } + Err(e) => { + panic!("Couldn't set Windows service status. Error: {:?}", e); + } + } + loop { + if _stop { + // Wait for the thread to finish, then end the current function + match system_handler.set_service_status(stoppending_status.clone()) { + Ok(status_set) => { + println!("Stop status has been set for service: {:?}", status_set); + if let Some(thr) = thread_handle { + if thr.join().is_ok() { + match system_handler.set_service_status(stop_status.clone()) { + Ok(laststatus_set) => { + println!( + "Scaphandre gracefully stopped: {:?}", + laststatus_set + ); + } + Err(e) => { + panic!( + "Could'nt set Stop status on scaphandre service: {:?}", + e + ); + } + } + } else { + panic!("Joining the thread failed."); + } + break; + } else { + panic!("Thread handle was not initialized."); + } + } + Err(e) => { + panic!("Couldn't set Windows service status. Error: {:?}", e); + } + } + } + } + } else { + panic!("Failed getting system_handle."); + } +} + +fn main() { #[cfg(target_os = "windows")] - let sensors = ["msr_rapl"]; - let exporters_options = get_exporters_options(); - let exporters = exporters_options.keys(); - let exporters: Vec<&str> = exporters.into_iter().map(|x| x.as_str()).collect(); + match service_dispatcher::start("Scaphandre", ffi_service_main) { + Ok(_) => {} + Err(e) => { + println!("Couldn't start Windows service dispatcher. 
Got : {}", e); + } + } + + parse_cli_and_run_exporter(); +} + +fn parse_cli_and_run_exporter() { + let cli = Cli::parse(); + loggerv::init_with_verbosity(cli.verbose.into()).expect("unable to initialize the logger"); + + let sensor = build_sensor(&cli); + let mut exporter = build_exporter(cli.exporter, &sensor); + if !cli.no_header { + print_scaphandre_header(exporter.kind()); + } + + exporter.run(); +} + +fn build_exporter(choice: ExporterChoice, sensor: &dyn Sensor) -> Box { + match choice { + ExporterChoice::Stdout(args) => { + Box::new(exporters::stdout::StdoutExporter::new(sensor, args)) + } + #[cfg(feature = "json")] + ExporterChoice::Json(args) => { + Box::new(exporters::json::JsonExporter::new(sensor, args)) // keep this in braces + } + #[cfg(feature = "prometheus")] + ExporterChoice::Prometheus(args) => { + Box::new(exporters::prometheus::PrometheusExporter::new(sensor, args)) + } + #[cfg(feature = "qemu")] + ExporterChoice::Qemu => { + Box::new(exporters::qemu::QemuExporter::new(sensor)) // keep this in braces + } + #[cfg(feature = "riemann")] + ExporterChoice::Riemann(args) => { + Box::new(exporters::riemann::RiemannExporter::new(sensor, args)) + } + #[cfg(feature = "warpten")] + ExporterChoice::Warpten(args) => { + Box::new(exporters::warpten::Warp10Exporter::new(sensor, args)) + } + #[cfg(feature = "prometheuspush")] + ExporterChoice::PrometheusPush(args) => Box::new( + exporters::prometheuspush::PrometheusPushExporter::new(sensor, args), + ), + } + // Note that invalid choices are automatically turned into errors by `parse()` before the Cli is populated, + // that's why they don't appear in this function. +} +/// Returns the sensor to use, given the command-line arguments. +/// Unless sensor-specific options are provided, this should return +/// the same thing as [`scaphandre::get_default_sensor`]. 
+fn build_sensor(cli: &Cli) -> impl Sensor { #[cfg(target_os = "linux")] - let sensor_default_value = String::from("powercap_rapl"); - #[cfg(not(target_os = "linux"))] - let sensor_default_value = String::from("msr_rapl"); - - let mut matches = App::new("scaphandre") - .author(crate_authors!()) - .version(crate_version!()) - .about("Extensible metrology agent for energy/electricity consumption related metrics") - .setting(AppSettings::SubcommandRequiredElseHelp) - .arg( - Arg::with_name("v") - .short("v") - .multiple(true) - .help("Sets the level of verbosity.") - ) - .arg( - Arg::with_name("no-header") - .value_name("no-header") - .help("Prevents the header to be displayed in the terminal output.") - .required(false) - .takes_value(false) - .long("no-header") + let rapl_sensor = || { + powercap_rapl::PowercapRAPLSensor::new( + cli.sensor_buffer_per_socket_max_kb, + cli.sensor_buffer_per_domain_max_kb, + cli.vm, ) - .arg( - Arg::with_name("sensor") - .value_name("sensor") - .help("Sensor module to apply on the host to get energy consumption metrics.") - .required(false) - .takes_value(true) - .default_value(&sensor_default_value) - .possible_values(&sensors) - .short("s") - .long("sensor") - ).arg( - Arg::with_name("sensor-buffer-per-domain-max-kB") - .value_name("sensor-buffer-per-domain-max-kB") - .help("Maximum memory size allowed, in KiloBytes, for storing energy consumption of each domain.") - .required(false) - .takes_value(true) - .default_value("1") - ).arg( - Arg::with_name("sensor-buffer-per-socket-max-kB") - .value_name("sensor-buffer-per-socket-max-kB") - .help("Maximum memory size allowed, in KiloBytes, for storing energy consumption of each socket.") - .required(false) - .takes_value(true) - .default_value("1") - ).arg( - Arg::with_name("vm") - .value_name("vm") - .help("Tell scaphandre if he is running in a virtual machine.") - .long("vm") - .required(false) - .takes_value(false) - ); - - for exporter in exporters { - let mut subcmd = 
SubCommand::with_name(exporter).about( - match exporter { - "stdout" => "Stdout exporter allows you to output the power consumption data in the terminal", - "json" => "JSON exporter allows you to output the power consumption data in a json file", - "prometheus" => "Prometheus exporter exposes power consumption metrics on an http endpoint (/metrics is default) in prometheus accepted format", - "riemann" => "Riemann exporter sends power consumption metrics to a Riemann server", - "qemu" => "Qemu exporter watches all Qemu/KVM virtual machines running on the host and exposes metrics of each of them in a dedicated folder", - "warp10" => "Warp10 exporter sends data to a Warp10 host, through HTTP", - _ => "Unknown exporter", + }; + + #[cfg(target_os = "windows")] + let msr_sensor_win = msr_rapl::MsrRAPLSensor::new; + + match cli.sensor.as_deref() { + Some("powercap_rapl") => { + #[cfg(target_os = "linux")] + { + rapl_sensor() + } + #[cfg(not(target_os = "linux"))] + panic!("Invalid sensor: Scaphandre's powercap_rapl only works on Linux") + } + Some("msr") => { + #[cfg(target_os = "windows")] + { + msr_sensor_win() } - ); + #[cfg(not(target_os = "windows"))] + panic!("Invalid sensor: Scaphandre's msr only works on Windows") + } + Some(s) => panic!("Unknown sensor type {}", s), + None => { + #[cfg(target_os = "linux")] + return rapl_sensor(); + + #[cfg(target_os = "windows")] + return msr_sensor_win(); - let myopts = exporters_options.get(exporter).unwrap(); - for opt in myopts { - subcmd = subcmd.arg(opt); + #[cfg(not(any(target_os = "linux", target_os = "windows")))] + compile_error!("Unsupported target OS") + } + } +} + +fn print_scaphandre_header(exporter_name: &str) { + let title = format!("Scaphandre {exporter_name} exporter"); + println!("{}", title.red().bold()); + println!("Sending ⚡ metrics"); +} + +#[cfg(test)] +mod test { + use super::*; + + const SUBCOMMANDS: &[&str] = &[ + "stdout", + #[cfg(feature = "prometheus")] + "prometheus", + #[cfg(feature = "riemann")] 
+ "riemann", + #[cfg(feature = "json")] + "json", + #[cfg(feature = "warpten")] + "warpten", + #[cfg(feature = "qemu")] + "qemu", + ]; + + /// Test that `--help` works for Scaphandre _and_ for each subcommand. + /// This also ensures that all the subcommands are properly defined, as Clap will check some constraints + /// when trying to parse a subcommand (for instance, it will check that no two short options have the same name). + #[test] + fn test_help() { + fn assert_shows_help(args: &[&str]) { + match Cli::try_parse_from(args) { + Ok(_) => panic!( + "The CLI didn't generate a help message for {args:?}, are the inputs correct?" + ), + Err(e) => assert_eq!( + e.kind(), + clap::error::ErrorKind::DisplayHelp, + "The CLI emitted an error for {args:?}:\n{e}" + ), + }; + } + assert_shows_help(&["scaphandre", "--help"]); + for cmd in SUBCOMMANDS { + assert_shows_help(&["scaphandre", cmd, "--help"]); } - matches = matches.subcommand(subcmd); } - run(matches.get_matches()); } // Copyright 2020 The scaphandre authors. diff --git a/src/sensors/mod.rs b/src/sensors/mod.rs index 00e370e7..1085f9ea 100644 --- a/src/sensors/mod.rs +++ b/src/sensors/mod.rs @@ -3,27 +3,26 @@ //! `Sensor` is the root for all sensors. It defines the [Sensor] trait //! needed to implement a sensor. 
-#[cfg(not(target_os = "linux"))] +#[cfg(target_os = "windows")] pub mod msr_rapl; +#[cfg(target_os = "windows")] +use msr_rapl::get_msr_value; #[cfg(target_os = "linux")] pub mod powercap_rapl; pub mod units; pub mod utils; #[cfg(target_os = "linux")] -use procfs::{process, CpuInfo, CpuTime, KernelStats}; -use std::collections::HashMap; -use std::error::Error; -use std::fmt; -use std::mem::size_of_val; -use std::time::Duration; -#[cfg(not(target_os = "linux"))] -use sysinfo::{ProcessorExt, System, SystemExt}; +use procfs::{CpuInfo, CpuTime, KernelStats}; +use std::{collections::HashMap, error::Error, fmt, fs, mem::size_of_val, str, time::Duration}; +#[allow(unused_imports)] +use sysinfo::{CpuExt, Pid, System, SystemExt}; +use sysinfo::{DiskExt, DiskType}; use utils::{current_system_time_since_epoch, IProcess, ProcessTracker}; // !!!!!!!!!!!!!!!!! Sensor !!!!!!!!!!!!!!!!!!!!!!! /// Sensor trait, the Sensor API. pub trait Sensor { - fn get_topology(&mut self) -> Box>; + fn get_topology(&self) -> Box>; fn generate_topology(&self) -> Result>; } @@ -58,10 +57,8 @@ pub struct Topology { pub buffer_max_kbytes: u16, /// Sorted list of all domains names pub domains_names: Option>, - /// - #[cfg(target_os = "windows")] - #[allow(dead_code)] - sensor_data: HashMap, + /// Sensor-specific data needed in the topology + pub _sensor_data: HashMap, } impl RecordGenerator for Topology { @@ -69,26 +66,20 @@ impl RecordGenerator for Topology { /// and returns a clone of this record. 
/// fn refresh_record(&mut self) { - let mut value: u64 = 0; - let mut last_timestamp = current_system_time_since_epoch(); - for s in self.get_sockets() { - let records = s.get_records_passive(); - if !records.is_empty() { - let last = records.last(); - let last_record = last.unwrap(); - last_timestamp = last_record.timestamp; - let res = last_record.value.trim(); - if let Ok(val) = res.parse::() { - value += val; - } else { - trace!("couldn't parse value : {}", res); - } + match self.read_record() { + Ok(record) => { + self.record_buffer.push(record); + } + Err(e) => { + warn!( + "Could'nt read record from {}, error was : {:?}", + self._sensor_data + .get("source_file") + .unwrap_or(&String::from("SRCFILENOTKNOWN")), + e + ); } } - debug!("Record value from topo (addition of sockets) : {}", value); - let record = Record::new(last_timestamp, value.to_string(), units::Unit::MicroJoule); - - self.record_buffer.push(record); if !self.record_buffer.is_empty() { self.clean_old_records(); @@ -141,19 +132,14 @@ impl RecordGenerator for Topology { impl Default for Topology { fn default() -> Self { - #[cfg(target_os = "windows")] { Self::new(HashMap::new()) } - - #[cfg(target_os = "linux")] - Self::new() } } impl Topology { /// Instanciates Topology and returns the instance - #[cfg(target_os = "windows")] pub fn new(sensor_data: HashMap) -> Topology { Topology { sockets: vec![], @@ -162,19 +148,7 @@ impl Topology { record_buffer: vec![], buffer_max_kbytes: 1, domains_names: None, - sensor_data, - } - } - /// Instanciates Topology and returns the instance - #[cfg(target_os = "linux")] - pub fn new() -> Topology { - Topology { - sockets: vec![], - proc_tracker: ProcessTracker::new(5), - stat_buffer: vec![], - record_buffer: vec![], - buffer_max_kbytes: 1, - domains_names: None, + _sensor_data: sensor_data, } } @@ -188,37 +162,31 @@ impl Topology { /// if let Some(cores) = Topology::generate_cpu_cores() { /// println!("There are {} cores on this host.", cores.len()); /// for c 
in &cores { - /// println!("Here is CPU Core number {}", c.attributes.get("processor").unwrap()); + /// println!("CPU info {:?}", c.attributes); /// } /// } /// ``` pub fn generate_cpu_cores() -> Option> { let mut cores = vec![]; + let sysinfo_system = System::new_all(); + let sysinfo_cores = sysinfo_system.cpus(); + warn!("Sysinfo sees {}", sysinfo_cores.len()); #[cfg(target_os = "linux")] - { - let cpuinfo = CpuInfo::new().unwrap(); - for id in 0..(cpuinfo.num_cores() - 1) { - let mut info = HashMap::new(); - for (k, v) in cpuinfo.get_info(id).unwrap().iter() { + let cpuinfo = CpuInfo::new().unwrap(); + for (id, c) in (0_u16..).zip(sysinfo_cores.iter()) { + let mut info = HashMap::::new(); + #[cfg(target_os = "linux")] + { + for (k, v) in cpuinfo.get_info(id as usize).unwrap().iter() { info.insert(String::from(*k), String::from(*v)); } - cores.push(CPUCore::new(id as u16, info)); - } - } - #[cfg(target_os = "windows")] - { - warn!("generate_cpu_info is not implemented yet on this OS."); - let sysinfo_system = System::new_all(); - let sysinfo_cores = sysinfo_system.processors(); - for (id, c) in (0_u16..).zip(sysinfo_cores.iter()) { - let mut info = HashMap::new(); - info.insert(String::from("frequency"), c.frequency().to_string()); - info.insert(String::from("name"), c.name().to_string()); - info.insert(String::from("vendor_id"), c.vendor_id().to_string()); - info.insert(String::from("brand"), c.brand().to_string()); - cores.push(CPUCore::new(id, info)); } + info.insert(String::from("frequency"), c.frequency().to_string()); + info.insert(String::from("name"), c.name().to_string()); + info.insert(String::from("vendor_id"), c.vendor_id().to_string()); + info.insert(String::from("brand"), c.brand().to_string()); + cores.push(CPUCore::new(id, info)); } Some(cores) } @@ -233,7 +201,7 @@ impl Topology { counter_uj_path: String, buffer_max_kbytes: u16, sensor_data: HashMap, - ) { + ) -> Option { if !self.sockets.iter().any(|s| s.id == socket_id) { let socket = 
CPUSocket::new( socket_id, @@ -243,6 +211,16 @@ impl Topology { buffer_max_kbytes, sensor_data, ); + let res = socket.clone(); + self.sockets.push(socket); + Some(res) + } else { + None + } + } + + pub fn safe_insert_socket(&mut self, socket: CPUSocket) { + if !self.sockets.iter().any(|s| s.id == socket.id) { self.sockets.push(socket); } } @@ -275,6 +253,10 @@ impl Topology { self.domains_names = Some(domain_names); } + pub fn set_domains_names(&mut self, names: Vec) { + self.domains_names = Some(names); + } + /// Adds a Domain instance to a given socket, if and only if the domain /// id doesn't exist already for the socket. pub fn safe_add_domain_to_socket( @@ -303,27 +285,56 @@ impl Topology { /// Generates CPUCore instances for the host and adds them /// to appropriate CPUSocket instance from self.sockets + #[cfg(target_os = "linux")] pub fn add_cpu_cores(&mut self) { if let Some(mut cores) = Topology::generate_cpu_cores() { - while !cores.is_empty() { - let c = cores.pop().unwrap(); + while let Some(c) = cores.pop() { let socket_id = &c .attributes .get("physical id") .unwrap() .parse::() .unwrap(); - let socket = self - .sockets - .iter_mut() - .find(|x| &x.id == socket_id) - .expect("Trick: if you are running on a vm, do not forget to use --vm parameter invoking scaphandre at the command line"); + let socket_match = self.sockets.iter_mut().find(|x| &x.id == socket_id); + + //In VMs there might be a mismatch between Sockets and Cores - see Issue#133 as a first fix we just map all cores that can't be mapped to the first + let socket = match socket_match { + Some(x) => x, + None =>self.sockets.first_mut().expect("Trick: if you are running on a vm, do not forget to use --vm parameter invoking scaphandre at the command line") + }; + if socket_id == &socket.id { socket.add_cpu_core(c); + } else { + socket.add_cpu_core(c); + warn!("coud't not match core to socket - mapping to first socket instead - if you are not using --vm there is something wrong") } } + + 
//#[cfg(target_os = "windows")] + //{ + //TODO: fix + //let nb_sockets = &self.sockets.len(); + //let mut socket_counter = 0; + //let nb_cores_per_socket = &cores.len() / nb_sockets; + //warn!("nb_cores_per_socket: {} cores_len: {} sockets_len: {}", nb_cores_per_socket, &cores.len(), &self.sockets.len()); + //for s in self.sockets.iter_mut() { + // for c in (socket_counter * nb_cores_per_socket)..((socket_counter+1) * nb_cores_per_socket) { + // match cores.pop() { + // Some(core) => { + // warn!("adding core {} to socket {}", core.id, s.id); + // s.add_cpu_core(core); + // }, + // None => { + // error!("Uneven number of CPU cores !"); + // } + // } + // } + // socket_counter = socket_counter + 1; + //} + //} } else { - warn!("Couldn't retrieve any CPU Core from the topology. (generate_cpu_cores)"); + panic!("Couldn't retrieve any CPU Core from the topology. (generate_cpu_cores)"); } } @@ -345,6 +356,7 @@ impl Topology { // //} } + self.proc_tracker.refresh(); self.refresh_procs(); self.refresh_record(); self.refresh_stats(); @@ -353,36 +365,14 @@ impl Topology { /// Gets currently running processes (as procfs::Process instances) and stores /// them in self.proc_tracker fn refresh_procs(&mut self) { - #[cfg(target_os = "linux")] - { - //current_procs is the up to date list of processus running on the host - if let Ok(procs) = process::all_processes() { - info!("Before refresh procs init."); - procs - .iter() - .map(IProcess::from_linux_process) - .for_each(|p| { - let pid = p.pid; - let res = self.proc_tracker.add_process_record(p); - match res { - Ok(_) => {} - Err(msg) => { - panic!("Failed to track process with pid {} !\nGot: {}", pid, msg) - } - } - }); - } - } - #[cfg(target_os = "windows")] { let pt = &mut self.proc_tracker; pt.sysinfo.refresh_processes(); - pt.sysinfo.refresh_cpu(); let current_procs = pt .sysinfo .processes() .values() - .map(IProcess::from_windows_process) + .map(IProcess::new) .collect::>(); for p in current_procs { match 
pt.add_process_record(p) { @@ -460,20 +450,37 @@ impl Topology { .record_buffer .get(self.record_buffer.len() - 2) .unwrap(); - let last_microjoules = last_record.value.parse::().unwrap(); - let previous_microjoules = previous_record.value.parse::().unwrap(); - if previous_microjoules > last_microjoules { - return None; + match previous_record.value.trim().parse::() { + Ok(previous_microjoules) => match last_record.value.trim().parse::() { + Ok(last_microjoules) => { + if previous_microjoules > last_microjoules { + return None; + } + let microjoules = last_microjoules - previous_microjoules; + let time_diff = last_record.timestamp.as_secs_f64() + - previous_record.timestamp.as_secs_f64(); + let microwatts = microjoules as f64 / time_diff; + return Some(Record::new( + last_record.timestamp, + (microwatts as u64).to_string(), + units::Unit::MicroWatt, + )); + } + Err(e) => { + warn!( + "Could'nt get previous_microjoules - value : '{}' - error : {:?}", + previous_record.value, e + ); + } + }, + Err(e) => { + warn!( + "Couldn't parse previous_microjoules - value : '{}' - error : {:?}", + previous_record.value.trim(), + e + ); + } } - let microjoules = last_microjoules - previous_microjoules; - let time_diff = - last_record.timestamp.as_secs_f64() - previous_record.timestamp.as_secs_f64(); - let microwatts = microjoules as f64 / time_diff; - return Some(Record::new( - last_record.timestamp, - (microwatts as u64).to_string(), - units::Unit::MicroWatt, - )); } None } @@ -593,79 +600,356 @@ impl Topology { None } - /// Returns the power consumed between last and previous measurement for a given process ID, in microwatts - pub fn get_process_power_consumption_microwatts(&self, pid: i32) -> Option { - let tracker = self.get_proc_tracker(); - if let Some(recs) = tracker.find_records(pid) { - if recs.len() > 1 { - #[cfg(target_os = "linux")] - { - let last = recs.first().unwrap(); - let previous = recs.get(1).unwrap(); - if let Some(topo_stats_diff) = self.get_stats_diff() { - 
//trace!("Topology stats measured diff: {:?}", topo_stats_diff); - let process_total_time = - last.total_time_jiffies() - previous.total_time_jiffies(); - let topo_total_time = topo_stats_diff.total_time_jiffies(); - let usage_percent = process_total_time as f64 / topo_total_time as f64; - let topo_conso = self.get_records_diff_power_microwatts(); - if let Some(val) = &topo_conso { - //trace!("topo conso: {}", val); - let val_f64 = val.value.parse::().unwrap(); - //trace!("val f64: {}", val_f64); - let result = (val_f64 * usage_percent) as u64; - //trace!("result: {}", result); - return Some(Record::new( - last.timestamp, - result.to_string(), - units::Unit::MicroWatt, - )); - } - } + pub fn get_cpu_frequency(&self) -> Record { + Record::new( + current_system_time_since_epoch(), + self.proc_tracker.get_cpu_frequency().to_string(), + units::Unit::MegaHertz, + ) + } + + pub fn get_load_avg(&self) -> Option> { + let load = self.get_proc_tracker().sysinfo.load_average(); + let timestamp = current_system_time_since_epoch(); + Some(vec![ + Record::new(timestamp, load.one.to_string(), units::Unit::Numeric), + Record::new(timestamp, load.five.to_string(), units::Unit::Numeric), + Record::new(timestamp, load.five.to_string(), units::Unit::Numeric), + ]) + } + + pub fn get_disks(&self) -> HashMap, Record)> { + let timestamp = current_system_time_since_epoch(); + let mut res = HashMap::new(); + for d in self.proc_tracker.sysinfo.disks() { + let mut attributes = HashMap::new(); + if let Ok(file_system) = str::from_utf8(d.file_system()) { + attributes.insert(String::from("disk_file_system"), String::from(file_system)); + } + if let Some(mount_point) = d.mount_point().to_str() { + attributes.insert(String::from("disk_mount_point"), String::from(mount_point)); + } + match d.type_() { + DiskType::SSD => { + attributes.insert(String::from("disk_type"), String::from("SSD")); } - #[cfg(target_os = "windows")] - { - let last = recs.first().unwrap(); - let process_cpu_percentage = - 
tracker.get_cpu_usage_percentage(pid as usize, tracker.nb_cores); - let topo_conso = self.get_records_diff_power_microwatts(); - if let Some(conso) = &topo_conso { - let conso_f64 = conso.value.parse::().unwrap(); - let result = (conso_f64 * process_cpu_percentage as f64) / 100.0_f64; - return Some(Record::new( - last.timestamp, - result.to_string(), - units::Unit::MicroWatt, - )); - } + DiskType::HDD => { + attributes.insert(String::from("disk_type"), String::from("HDD")); + } + DiskType::Unknown(_) => { + attributes.insert(String::from("disk_type"), String::from("Unknown")); } } + attributes.insert( + String::from("disk_is_removable"), + d.is_removable().to_string(), + ); + if let Some(disk_name) = d.name().to_str() { + attributes.insert(String::from("disk_name"), String::from(disk_name)); + } + res.insert( + String::from("scaph_host_disk_total_bytes"), + ( + String::from("Total disk size, in bytes."), + attributes.clone(), + Record::new(timestamp, d.total_space().to_string(), units::Unit::Bytes), + ), + ); + res.insert( + String::from("scaph_host_disk_available_bytes"), + ( + String::from("Available disk space, in bytes."), + attributes.clone(), + Record::new( + timestamp, + d.available_space().to_string(), + units::Unit::Bytes, + ), + ), + ); + } + res + } + + pub fn get_total_memory_bytes(&self) -> Record { + Record { + timestamp: current_system_time_since_epoch(), + value: self.proc_tracker.sysinfo.total_memory().to_string(), + unit: units::Unit::Bytes, + } + } + + pub fn get_available_memory_bytes(&self) -> Record { + Record { + timestamp: current_system_time_since_epoch(), + value: self.proc_tracker.sysinfo.available_memory().to_string(), + unit: units::Unit::Bytes, + } + } + + pub fn get_free_memory_bytes(&self) -> Record { + Record { + timestamp: current_system_time_since_epoch(), + value: self.proc_tracker.sysinfo.free_memory().to_string(), + unit: units::Unit::Bytes, + } + } + + pub fn get_total_swap_bytes(&self) -> Record { + Record { + timestamp: 
current_system_time_since_epoch(), + value: self.proc_tracker.sysinfo.total_swap().to_string(), + unit: units::Unit::Bytes, + } + } + + pub fn get_free_swap_bytes(&self) -> Record { + Record { + timestamp: current_system_time_since_epoch(), + value: self.proc_tracker.sysinfo.free_swap().to_string(), + unit: units::Unit::Bytes, + } + } + + /// Returns the power consumed between last and previous measurement for a given process ID, in microwatts + pub fn get_process_power_consumption_microwatts(&self, pid: Pid) -> Option { + if let Some(record) = self.get_proc_tracker().get_process_last_record(pid) { + let process_cpu_percentage = self.get_process_cpu_usage_percentage(pid).unwrap(); + let topo_conso = self.get_records_diff_power_microwatts(); + if let Some(conso) = &topo_conso { + let conso_f64 = conso.value.parse::().unwrap(); + let result = + (conso_f64 * process_cpu_percentage.value.parse::().unwrap()) / 100.0_f64; + return Some(Record::new( + record.timestamp, + result.to_string(), + units::Unit::MicroWatt, + )); + } } else { trace!("Couldn't find records for PID: {}", pid); } None } - pub fn get_process_cpu_consumption_percentage(&self, pid: i32) -> Option { - let tracker = self.get_proc_tracker(); - if let Some(recs) = tracker.find_records(pid) { - if recs.len() > 1 { - let last = recs.first().unwrap(); - let previous = recs.get(1).unwrap(); - if let Some(topo_stats_diff) = self.get_stats_diff() { - let process_total_time = - last.total_time_jiffies() - previous.total_time_jiffies(); + pub fn get_all_per_process(&self, pid: Pid) -> Option> { + let mut res = HashMap::new(); + if let Some(record) = self.get_proc_tracker().get_process_last_record(pid) { + let process_cpu_percentage = + record.process.cpu_usage_percentage / self.proc_tracker.nb_cores as f32; + res.insert( + String::from("scaph_process_cpu_usage_percentage"), + (String::from("CPU time consumed by the process, as a percentage of the capacity of all the CPU Cores"), + Record::new( + record.timestamp, 
+ process_cpu_percentage.to_string(), + units::Unit::Percentage, + ) + ) + ); + res.insert( + String::from("scaph_process_memory_virtual_bytes"), + ( + String::from("Virtual RAM usage by the process, in bytes"), + Record::new( + record.timestamp, + record.process.virtual_memory.to_string(), + units::Unit::Percentage, + ), + ), + ); + res.insert( + String::from("scaph_process_memory_bytes"), + ( + String::from("Physical RAM usage by the process, in bytes"), + Record::new( + record.timestamp, + record.process.memory.to_string(), + units::Unit::Bytes, + ), + ), + ); + res.insert( + String::from("scaph_process_disk_write_bytes"), + ( + String::from("Data written on disk by the process, in bytes"), + Record::new( + record.timestamp, + record.process.disk_written.to_string(), + units::Unit::Bytes, + ), + ), + ); + res.insert( + String::from("scaph_process_disk_read_bytes"), + ( + String::from("Data read on disk by the process, in bytes"), + Record::new( + record.timestamp, + record.process.disk_read.to_string(), + units::Unit::Bytes, + ), + ), + ); + res.insert( + String::from("scaph_process_disk_total_write_bytes"), + ( + String::from("Total data written on disk by the process, in bytes"), + Record::new( + record.timestamp, + record.process.total_disk_written.to_string(), + units::Unit::Bytes, + ), + ), + ); + res.insert( + String::from("scaph_process_disk_total_read_bytes"), + ( + String::from("Total data read on disk by the process, in bytes"), + Record::new( + record.timestamp, + record.process.total_disk_read.to_string(), + units::Unit::Bytes, + ), + ), + ); + let topo_conso = self.get_records_diff_power_microwatts(); + if let Some(conso) = &topo_conso { + let conso_f64 = conso.value.parse::().unwrap(); + let result = (conso_f64 * process_cpu_percentage as f64) / 100.0_f64; + res.insert( + String::from("scaph_process_power_consumption_microwatts"), + ( + String::from("Total data read on disk by the process, in bytes"), + Record::new(record.timestamp, 
result.to_string(), units::Unit::MicroWatt), + ), + ); + } + } + Some(res) + } - let topo_total_time = topo_stats_diff.total_time_jiffies(); + // Per process metrics, from ProcessRecord during last refresh, returned in Record structs - let usage = process_total_time as f64 / topo_total_time as f64; + pub fn get_process_cpu_usage_percentage(&self, pid: Pid) -> Option { + if let Some(record) = self.get_proc_tracker().get_process_last_record(pid) { + return Some(Record::new( + record.timestamp, + (record.process.cpu_usage_percentage / self.proc_tracker.nb_cores as f32) + .to_string(), + units::Unit::Percentage, + )); + } + None + } + pub fn get_process_memory_virtual_bytes(&self, pid: Pid) -> Option { + if let Some(record) = self.get_proc_tracker().get_process_last_record(pid) { + return Some(Record::new( + record.timestamp, + record.process.virtual_memory.to_string(), + units::Unit::Bytes, + )); + } + None + } + + pub fn get_process_memory_bytes(&self, pid: Pid) -> Option { + if let Some(record) = self.get_proc_tracker().get_process_last_record(pid) { + return Some(Record::new( + record.timestamp, + record.process.memory.to_string(), + units::Unit::Bytes, + )); + } + None + } + + pub fn get_process_disk_written_bytes(&self, pid: Pid) -> Option { + if let Some(record) = self.get_proc_tracker().get_process_last_record(pid) { + return Some(Record::new( + record.timestamp, + record.process.disk_written.to_string(), + units::Unit::Bytes, + )); + } + None + } + + pub fn get_process_disk_read_bytes(&self, pid: Pid) -> Option { + if let Some(record) = self.get_proc_tracker().get_process_last_record(pid) { + return Some(Record::new( + record.timestamp, + record.process.disk_read.to_string(), + units::Unit::Bytes, + )); + } + None + } + pub fn get_process_disk_total_read_bytes(&self, pid: Pid) -> Option { + if let Some(record) = self.get_proc_tracker().get_process_last_record(pid) { + return Some(Record::new( + record.timestamp, + record.process.total_disk_read.to_string(), + 
units::Unit::Bytes, + )); + } + None + } + + pub fn get_process_disk_total_write_bytes(&self, pid: Pid) -> Option { + if let Some(record) = self.get_proc_tracker().get_process_last_record(pid) { + return Some(Record::new( + record.timestamp, + record.process.total_disk_written.to_string(), + units::Unit::Bytes, + )); + } + None + } + + #[cfg(target_os = "linux")] + pub fn get_rapl_psys_energy_microjoules(&self) -> Option { + if let Some(psys) = self._sensor_data.get("psys") { + match &fs::read_to_string(format!("{psys}/energy_uj")) { + Ok(val) => { + debug!("Read PSYS from {psys}/energy_uj: {}", val.to_string()); return Some(Record::new( current_system_time_since_epoch(), - usage.to_string(), - units::Unit::Percentage, + val.to_string(), + units::Unit::MicroJoule, )); } + Err(e) => { + warn!("PSYS Error: {:?}", e); + } + } + } else { + debug!("Asked for PSYS but there is no psys entry in sensor_data."); + } + None + } + + /// # Safety + /// + /// This function is unsafe rust as it calls get_msr_value function from msr_rapl sensor module. + /// It calls the msr_RAPL::MSR_PLATFORM_ENERGY_STATUS MSR address, which has been tested on several Intel x86 processors + /// but might fail on AMD (needs testing). That being said, it returns None if the msr query fails (which means if the Windows + /// driver fails.) and should not prevent from using a value coming from elsewhere, which means from another get_msr_value calls + /// targeting another msr address. 
+ #[cfg(target_os = "windows")] + pub unsafe fn get_rapl_psys_energy_microjoules(&self) -> Option { + let msr_addr = msr_rapl::MSR_PLATFORM_ENERGY_STATUS; + match get_msr_value(0, msr_addr.into(), &self._sensor_data) { + Ok(res) => { + return Some(Record::new( + current_system_time_since_epoch(), + res.value.to_string(), + units::Unit::MicroJoule, + )) + } + Err(e) => { + debug!("get_msr_value returned error : {}", e); } } None @@ -695,16 +979,26 @@ pub struct CPUSocket { pub stat_buffer: Vec, /// #[allow(dead_code)] - sensor_data: HashMap, + pub sensor_data: HashMap, } impl RecordGenerator for CPUSocket { /// Generates a new record of the socket energy consumption and stores it in the record_buffer. /// Returns a clone of this Record instance. fn refresh_record(&mut self) { - //if let Ok(record) = self.read_record_uj() { - if let Ok(record) = self.read_record() { - self.record_buffer.push(record); + match self.read_record() { + Ok(record) => { + self.record_buffer.push(record); + } + Err(e) => { + warn!( + "Could'nt read record from {}, error was: {:?}", + self.sensor_data + .get("source_file") + .unwrap_or(&String::from("SRCFILENOTKNOWN")), + e + ); + } } if !self.record_buffer.is_empty() { @@ -782,6 +1076,10 @@ impl CPUSocket { } } + pub fn set_id(&mut self, id: u16) { + self.id = id + } + /// Adds a new Domain instance to the domains vector if and only if it doesn't exist in the vector already. 
fn safe_add_domain(&mut self, domain: Domain) { if !self.domains.iter().any(|d| d.id == domain.id) { @@ -881,16 +1179,17 @@ impl CPUSocket { steal: Some(0), }; for c in &self.cpu_cores { - let c_stats = c.read_stats().unwrap(); - stats.user += c_stats.user; - stats.nice += c_stats.nice; - stats.system += c_stats.system; - stats.idle += c_stats.idle; - stats.iowait = - Some(stats.iowait.unwrap_or_default() + c_stats.iowait.unwrap_or_default()); - stats.irq = Some(stats.irq.unwrap_or_default() + c_stats.irq.unwrap_or_default()); - stats.softirq = - Some(stats.softirq.unwrap_or_default() + c_stats.softirq.unwrap_or_default()); + if let Some(c_stats) = c.read_stats() { + stats.user += c_stats.user; + stats.nice += c_stats.nice; + stats.system += c_stats.system; + stats.idle += c_stats.idle; + stats.iowait = + Some(stats.iowait.unwrap_or_default() + c_stats.iowait.unwrap_or_default()); + stats.irq = Some(stats.irq.unwrap_or_default() + c_stats.irq.unwrap_or_default()); + stats.softirq = + Some(stats.softirq.unwrap_or_default() + c_stats.softirq.unwrap_or_default()); + } } Some(stats) } @@ -952,13 +1251,13 @@ impl CPUSocket { .get(self.record_buffer.len() - 2) .unwrap(); debug!( - "last_record value: {} previous_record value: {}", + "socket : last_record value: {} previous_record value: {}", &last_record.value, &previous_record.value ); let last_rec_val = last_record.value.trim(); - debug!("l851 : trying to parse {} as u64", last_rec_val); + debug!("socket : l1187 : trying to parse {} as u64", last_rec_val); let prev_rec_val = previous_record.value.trim(); - debug!("l853 : trying to parse {} as u64", prev_rec_val); + debug!("socket : l1189 : trying to parse {} as u64", prev_rec_val); if let (Ok(last_microjoules), Ok(previous_microjoules)) = (last_rec_val.parse::(), prev_rec_val.parse::()) { @@ -967,14 +1266,14 @@ impl CPUSocket { microjoules = last_microjoules - previous_microjoules; } else { debug!( - "previous_microjoules ({}) > last_microjoules ({})", + "socket: 
previous_microjoules ({}) > last_microjoules ({})", previous_microjoules, last_microjoules ); } let time_diff = last_record.timestamp.as_secs_f64() - previous_record.timestamp.as_secs_f64(); let microwatts = microjoules as f64 / time_diff; - debug!("l866: microwatts: {}", microwatts); + debug!("socket : l1067: microwatts: {}", microwatts); return Some(Record::new( last_record.timestamp, (microwatts as u64).to_string(), @@ -982,7 +1281,25 @@ impl CPUSocket { )); } } else { - debug!("Not enough records for socket"); + warn!("Not enough records for socket"); + } + None + } + + pub fn get_rapl_mmio_energy_microjoules(&self) -> Option { + if let Some(mmio) = self.sensor_data.get("mmio") { + match &fs::read_to_string(mmio) { + Ok(val) => { + return Some(Record::new( + current_system_time_since_epoch(), + val.to_string(), + units::Unit::MicroJoule, + )); + } + Err(e) => { + debug!("MMIO Error: {:?}", e) + } + } } None } @@ -1042,9 +1359,19 @@ impl RecordGenerator for Domain { /// Computes a measurement of energy comsumption for this CPU domain, /// stores a copy in self.record_buffer and returns it. fn refresh_record(&mut self) { - //if let Ok(record) = self.read_record_uj() { - if let Ok(record) = self.read_record() { - self.record_buffer.push(record); + match self.read_record() { + Ok(record) => { + self.record_buffer.push(record); + } + Err(e) => { + warn!( + "Could'nt read record from {}. 
Error was : {:?}.", + self.sensor_data + .get("source_file") + .unwrap_or(&String::from("SRCFILENOTKNOWN")), + e + ); + } } if !self.record_buffer.is_empty() { @@ -1132,6 +1459,24 @@ impl Domain { } None } + + pub fn get_rapl_mmio_energy_microjoules(&self) -> Option { + if let Some(mmio) = self.sensor_data.get("mmio") { + match &fs::read_to_string(mmio) { + Ok(val) => { + return Some(Record::new( + current_system_time_since_epoch(), + val.to_string(), + units::Unit::MicroJoule, + )); + } + Err(e) => { + debug!("MMIO Error in get microjoules: {:?}", e); + } + } + } + None + } } impl fmt::Display for Domain { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -1255,20 +1600,20 @@ mod tests { cores[0].attributes.len() ); for c in &cores { - println!("{:?}", c.attributes.get("processor")); + println!("{:?}", c.attributes); } assert_eq!(!cores.is_empty(), true); for c in &cores { - assert_eq!(c.attributes.len() > 5, true); + assert_eq!(c.attributes.len() > 3, true); } } #[test] fn read_topology_stats() { #[cfg(target_os = "linux")] - let mut sensor = powercap_rapl::PowercapRAPLSensor::new(8, 8, false); + let sensor = powercap_rapl::PowercapRAPLSensor::new(8, 8, false); #[cfg(not(target_os = "linux"))] - let mut sensor = msr_rapl::MsrRAPLSensor::new(); + let sensor = msr_rapl::MsrRAPLSensor::new(); let topo = (*sensor.get_topology()).unwrap(); println!("{:?}", topo.read_stats()); } @@ -1276,9 +1621,9 @@ mod tests { #[test] fn read_core_stats() { #[cfg(target_os = "linux")] - let mut sensor = powercap_rapl::PowercapRAPLSensor::new(8, 8, false); + let sensor = powercap_rapl::PowercapRAPLSensor::new(8, 8, false); #[cfg(not(target_os = "linux"))] - let mut sensor = msr_rapl::MsrRAPLSensor::new(); + let sensor = msr_rapl::MsrRAPLSensor::new(); let mut topo = (*sensor.get_topology()).unwrap(); for s in topo.get_sockets() { for c in s.get_cores() { @@ -1290,9 +1635,9 @@ mod tests { #[test] fn read_socket_stats() { #[cfg(target_os = "linux")] - let mut sensor = 
powercap_rapl::PowercapRAPLSensor::new(8, 8, false); + let sensor = powercap_rapl::PowercapRAPLSensor::new(8, 8, false); #[cfg(not(target_os = "linux"))] - let mut sensor = msr_rapl::MsrRAPLSensor::new(); + let sensor = msr_rapl::MsrRAPLSensor::new(); let mut topo = (*sensor.get_topology()).unwrap(); for s in topo.get_sockets() { println!("{:?}", s.read_stats()); diff --git a/src/sensors/msr_rapl.rs b/src/sensors/msr_rapl.rs index 0ccf1d50..36dd1395 100644 --- a/src/sensors/msr_rapl.rs +++ b/src/sensors/msr_rapl.rs @@ -1,29 +1,39 @@ use crate::sensors::utils::current_system_time_since_epoch; -use crate::sensors::{CPUSocket, Domain, Record, RecordReader, Sensor, Topology}; +use crate::sensors::{CPUCore, CPUSocket, Domain, Record, RecordReader, Sensor, Topology}; +use raw_cpuid::{CpuId, TopologyType}; use std::collections::HashMap; use std::error::Error; use std::mem::size_of; -use sysinfo::{System, SystemExt}; +use sysinfo::{CpuExt, System, SystemExt}; use windows::Win32::Foundation::{CloseHandle, GetLastError, HANDLE, INVALID_HANDLE_VALUE}; use windows::Win32::Storage::FileSystem::{ CreateFileW, FILE_FLAG_OVERLAPPED, FILE_GENERIC_READ, FILE_GENERIC_WRITE, FILE_READ_DATA, FILE_SHARE_READ, FILE_SHARE_WRITE, FILE_WRITE_DATA, OPEN_EXISTING, }; use windows::Win32::System::Ioctl::{FILE_DEVICE_UNKNOWN, METHOD_BUFFERED}; +use windows::Win32::System::SystemInformation::GROUP_AFFINITY; +use windows::Win32::System::Threading::{ + GetActiveProcessorGroupCount, GetCurrentProcess, GetCurrentThread, GetProcessGroupAffinity, + GetThreadGroupAffinity, SetThreadGroupAffinity, +}; use windows::Win32::System::IO::DeviceIoControl; -const MSR_RAPL_POWER_UNIT: u16 = 0x606; // - //const MSR_PKG_POWER_LIMIT: u16 = 0x610; // PKG RAPL Power Limit Control (R/W) See Section 14.7.3, Package RAPL Domain. 
-const MSR_PKG_ENERGY_STATUS: u16 = 0x611; -//const MSR_PKG_POWER_INFO: u16 = 0x614; -//const MSR_DRAM_ENERGY_STATUS: u16 = 0x619; -//const MSR_PP0_ENERGY_STATUS: u16 = 0x639; //PP0 Energy Status (R/O) See Section 14.7.4, PP0/PP1 RAPL Domains. -//const MSR_PP0_PERF_STATUS: u16 = 0x63b; // PP0 Performance Throttling Status (R/O) See Section 14.7.4, PP0/PP1 RAPL Domains. -//const MSR_PP0_POLICY: u16 = 0x63a; //PP0 Balance Policy (R/W) See Section 14.7.4, PP0/PP1 RAPL Domains. -//const MSR_PP0_POWER_LIMIT: u16 = 0x638; // PP0 RAPL Power Limit Control (R/W) See Section 14.7.4, PP0/PP1 RAPL Domains. -//const MSR_PP1_ENERGY_STATUS: u16 = 0x641; // PP1 Energy Status (R/O) See Section 14.7.4, PP0/PP1 RAPL Domains. -//const MSR_PP1_POLICY: u16 = 0x642; // PP1 Balance Policy (R/W) See Section 14.7.4, PP0/PP1 RAPL Domains. -//const MSR_PP1_POWER_LIMIT: u16 = 0x640; // PP1 RAPL Power Limit Control (R/W) See Section 14.7.4, PP0/PP1 RAPL Domains. +use core_affinity::{self, CoreId}; + +pub use x86::cpuid; +// Intel RAPL MSRs +pub use x86::msr::{ + MSR_DRAM_ENERGY_STATUS, MSR_DRAM_PERF_STATUS, MSR_PKG_ENERGY_STATUS, MSR_PKG_POWER_INFO, + MSR_PKG_POWER_LIMIT, MSR_PP0_ENERGY_STATUS, MSR_PP0_PERF_STATUS, MSR_PP1_ENERGY_STATUS, + MSR_RAPL_POWER_UNIT, +}; +pub const MSR_PLATFORM_ENERGY_STATUS: u32 = 0x0000064d; +pub const MSR_PLATFORM_POWER_LIMIT: u32 = 0x0000065c; + +// AMD RAPL MSRs +pub const MSR_AMD_RAPL_POWER_UNIT: u32 = 0xc0010299; +pub const MSR_AMD_CORE_ENERGY_STATUS: u32 = 0xc001029a; +pub const MSR_AMD_PKG_ENERGY_STATUS: u32 = 0xc001029b; unsafe fn ctl_code(device_type: u32, request_code: u32, method: u32, access: u32) -> u32 { ((device_type) << 16) | ((access) << 14) | ((request_code) << 2) | (method) @@ -159,18 +169,48 @@ impl MsrRAPLSensor { impl RecordReader for Topology { fn read_record(&self) -> Result> { - let randval: i32 = rand::random(); - Ok(Record { - timestamp: current_system_time_since_epoch(), - unit: super::units::Unit::MicroJoule, - value: format!("{}", 
randval), - }) + let record: Option; + unsafe { + record = self.get_rapl_psys_energy_microjoules(); + } + if let Some(psys_record) = record { + Ok(psys_record) + } else { + let mut res: u128 = 0; + debug!("Topology: I have {} sockets", self.sockets.len()); + for s in &self.sockets { + match s.read_record() { + Ok(rec) => { + debug!("rec: {:?}", rec); + res += rec.value.trim().parse::()?; + } + Err(e) => { + warn!("Failed to get socket record : {:?}", e); + } + } + let dram_filter: Vec<&Domain> = s + .get_domains_passive() + .iter() + .filter(|d| d.name == "dram") + .collect(); + if let Some(dram) = dram_filter.first() { + if let Ok(val) = dram.read_record() { + res += val.value.trim().parse::()?; + } + } + } + Ok(Record { + timestamp: current_system_time_since_epoch(), + unit: super::units::Unit::MicroJoule, + value: res.to_string(), + }) + } } } unsafe fn send_request( device: HANDLE, - request_code: u16, + request_code: u32, request: *const u64, request_length: usize, reply: *mut u64, @@ -180,7 +220,7 @@ unsafe fn send_request( let len_ptr: *mut u32 = &mut len; if DeviceIoControl( - device, // envoi 8 octet et je recoi 8 octet + device, // send 8 bytes, receive 8 bytes crate::sensors::msr_rapl::ctl_code( FILE_DEVICE_UNKNOWN, request_code as _, @@ -205,80 +245,114 @@ unsafe fn send_request( info!("Device answered"); Ok(String::from("Device answered !")) } else { - error!("DeviceIoControl failed"); + info!("DeviceIoControl failed"); Err(String::from("DeviceIoControl failed")) } } impl RecordReader for CPUSocket { fn read_record(&self) -> Result> { unsafe { - let driver_name = self.sensor_data.get("DRIVER_NAME").unwrap(); - if let Ok(device) = get_handle(driver_name) { - let mut msr_result: u64 = 0; - let ptr_result = &mut msr_result as *mut u64; - let mut src = MSR_RAPL_POWER_UNIT as u64; - let ptr = &src as *const u64; - - src = MSR_PKG_ENERGY_STATUS as u64; - trace!("src: {:x}", src); - trace!("src: {:b}", src); - - trace!("*ptr: {}", *ptr); - trace!("&request: 
{:?} ptr (as *const u8): {:?}", &src, ptr); - - if let Ok(res) = send_request( - device, - MSR_PKG_ENERGY_STATUS, - // nouvelle version à integrer : request_code est ignoré et request doit contenir - // request_code sous forme d'un char * - ptr, - 8, - ptr_result, - size_of::(), - ) { - debug!("{}", res); - - close_handle(device); - - let energy_unit = self - .sensor_data - .get("ENERGY_UNIT") - .unwrap() - .parse::() - .unwrap(); - - Ok(Record { - timestamp: current_system_time_since_epoch(), - unit: super::units::Unit::MicroJoule, - value: MsrRAPLSensor::extract_rapl_current_power(msr_result, energy_unit), - }) + let current_thread = GetCurrentThread(); + let processorgroup_id = self + .sensor_data + .get("PROCESSORGROUP_ID") + .unwrap() + .parse::() + .unwrap(); + let mut thread_group_affinity: GROUP_AFFINITY = GROUP_AFFINITY { + Mask: 255, + Group: processorgroup_id, + Reserved: [0, 0, 0], + }; + let thread_affinity = + GetThreadGroupAffinity(current_thread, &mut thread_group_affinity); + if thread_affinity.as_bool() { + debug!("got thead_affinity : {:?}", thread_group_affinity); + let core_id = self.cpu_cores.last().unwrap().id; //(self.cpu_cores.last().unwrap().id + self.id * self.cpu_cores.len() as u16) as usize + let newaffinity = GROUP_AFFINITY { + Mask: self.cpu_cores.len() + self.id as usize * self.cpu_cores.len() - 1, + Group: processorgroup_id, + Reserved: [0, 0, 0], + }; + let res = SetThreadGroupAffinity( + current_thread, + &newaffinity, + &mut thread_group_affinity, + ); + if res.as_bool() { + debug!( + "Asking get_msr_value, from socket, with core_id={}", + core_id + ); + match get_msr_value( + core_id as usize, + MSR_PKG_ENERGY_STATUS as u64, + &self.sensor_data, + ) { + Ok(rec) => Ok(Record { + timestamp: current_system_time_since_epoch(), + value: rec.value, + unit: super::units::Unit::MicroJoule, + }), + Err(e) => { + error!( + "Could'nt get MSR value for {}: {}", + MSR_PKG_ENERGY_STATUS, e + ); + Ok(Record { + timestamp: 
current_system_time_since_epoch(), + value: String::from("0"), + unit: super::units::Unit::MicroJoule, + }) + } + } } else { - error!("Failed to get data from send_request."); - close_handle(device); - Ok(Record { - timestamp: current_system_time_since_epoch(), - unit: super::units::Unit::MicroJoule, - value: String::from("0"), - }) + panic!("Couldn't set Thread affinity !"); } + //TODO add DRAM domain to result when available } else { - error!("Couldn't get handle."); - Ok(Record { - timestamp: current_system_time_since_epoch(), - unit: super::units::Unit::MicroJoule, - value: String::from("0"), - }) + panic!("Coudld'nt get Thread affinity !"); } } } } impl RecordReader for Domain { fn read_record(&self) -> Result> { - Ok(Record { - timestamp: current_system_time_since_epoch(), - unit: super::units::Unit::MicroJoule, - value: String::from("10"), - }) + if let Some(core_id) = self.sensor_data.get("CORE_ID") { + let usize_coreid = core_id.parse::().unwrap(); + debug!("Reading Domain {} on Core {}", self.name, usize_coreid); + if let Some(msr_addr) = self.sensor_data.get("MSR_ADDR") { + unsafe { + debug!( + "Asking, from Domain, get_msr_value with core_id={}", + usize_coreid + ); + match get_msr_value( + usize_coreid, + msr_addr.parse::().unwrap(), + &self.sensor_data, + ) { + Ok(rec) => Ok(Record { + timestamp: current_system_time_since_epoch(), + unit: super::units::Unit::MicroJoule, + value: rec.value, + }), + Err(e) => { + error!("Could'nt get MSR value for {}: {}", msr_addr, e); + Ok(Record { + timestamp: current_system_time_since_epoch(), + value: String::from("0"), + unit: super::units::Unit::MicroJoule, + }) + } + } + } + } else { + panic!("Couldn't get msr_addr to target for domain {}", self.name); + } + } else { + panic!("Couldn't get core_id to target for domain {}", self.name); + } } } @@ -293,14 +367,348 @@ impl Sensor for MsrRAPLSensor { let mut topology = Topology::new(sensor_data.clone()); let mut sys = System::new_all(); sys.refresh_all(); - let i = 
0; - //TODO fix that to actually count the number of sockets - topology.safe_add_socket(i, vec![], vec![], String::from(""), 4, sensor_data.clone()); + unsafe { + let current_thread = GetCurrentThread(); + + let group_count = GetActiveProcessorGroupCount(); + debug!("GROUP COUNT : {}", group_count); + + for group_id in 0..group_count { + //TODO fix that to actually count the number of sockets + let logical_cpus = sys.cpus(); + let cpuid = CpuId::new(); + let mut logical_cpus_from_cpuid = 1; + match cpuid.get_extended_topology_info() { + Some(info) => { + for t in info { + if t.level_type() == TopologyType::Core { + logical_cpus_from_cpuid = t.processors(); + } + } + } + None => { + panic!("Could'nt get cpuid data."); + } + } + let mut i: u16 = 0; + let mut no_more_sockets = false; + debug!("Entering ProcessorGroup {}", group_id); + let newaffinity = GROUP_AFFINITY { + Mask: 255, + Group: group_id, + Reserved: [0, 0, 0], + }; + let mut thread_group_affinity: GROUP_AFFINITY = GROUP_AFFINITY { + Mask: 255, + Group: 0, + Reserved: [0, 0, 0], + }; + let thread_affinity = + GetThreadGroupAffinity(current_thread, &mut thread_group_affinity); + debug!("Thread group affinity result : {:?}", thread_affinity); + if thread_affinity.as_bool() { + debug!("got thead_affinity : {:?}", thread_group_affinity); + let res = SetThreadGroupAffinity( + current_thread, + &newaffinity, + &mut thread_group_affinity, + ); + if res.as_bool() { + debug!("Have set thread affinity: {:?}", newaffinity); + match core_affinity::get_core_ids() { + Some(core_ids) => { + debug!( + "CPU SETUP - Cores from core_affinity, len={} : {:?}", + core_ids.len(), + core_ids + ); + debug!( + "CPU SETUP - Logical CPUs from sysinfo: {}", + logical_cpus.len() + ); + while !no_more_sockets { + let start = i * logical_cpus_from_cpuid; + let stop = (i + 1) * logical_cpus_from_cpuid; + debug!("Looping over {} .. 
{}", start, stop); + sensor_data.insert( + String::from("PROCESSORGROUP_ID"), + group_id.to_string(), + ); + let mut current_socket = CPUSocket::new( + i, + vec![], + vec![], + String::from(""), + 1, + sensor_data.clone(), + ); + for c in start..stop { + //core_ids { + if core_affinity::set_for_current(CoreId { id: c.into() }) { + match cpuid.get_vendor_info() { + Some(info) => { + debug!("Got CPU {:?}", info); + } + None => { + warn!("Couldn't get cpuinfo"); + } + } + debug!("Set core_affinity to {}", c); + match cpuid.get_extended_topology_info() { + Some(info) => { + debug!("Got CPU topo info {:?}", info); + for t in info { + if t.level_type() == TopologyType::Core { + //logical_cpus_from_cpuid = t.processors() + let x2apic_id = t.x2apic_id(); + let socket_id = (x2apic_id & 240) >> 4; // upper bits of x2apic_id are socket_id, mask them, then bit shift to get socket_id + current_socket.set_id(socket_id as u16); + let core_id = x2apic_id & 15; // 4 last bits of x2apic_id are the core_id (per-socket) + debug!( + "Found socketid={} and coreid={}", + socket_id, core_id + ); + let mut attributes = + HashMap::::new(); + let ref_core = + logical_cpus.first().unwrap(); + attributes.insert( + String::from("frequency"), + ref_core.frequency().to_string(), + ); + attributes.insert( + String::from("name"), + ref_core.name().to_string(), + ); + attributes.insert( + String::from("vendor_id"), + ref_core.vendor_id().to_string(), + ); + attributes.insert( + String::from("brand"), + ref_core.brand().to_string(), + ); + debug!( + "Adding core id {} to socket_id {}", + ((i * (logical_cpus_from_cpuid + - 1)) + + core_id as u16), + current_socket.id + ); + current_socket.add_cpu_core( + CPUCore::new( + (i * (logical_cpus_from_cpuid + - 1)) + + core_id as u16, + attributes, + ), + ); + debug!( + "Reviewing sockets : {:?}", + topology.get_sockets_passive() + ); + } + } + } + None => { + warn!("Couldn't get cpu topo info"); + } + } + } else { + no_more_sockets = true; + debug!( + 
"There's likely to be no more socket to explore." + ); + break; + } + } + if !no_more_sockets { + debug!("inserting socket {:?}", current_socket); + topology.safe_insert_socket(current_socket); + i += 1; + } + } + } + None => { + panic!("Could'nt get core ids from core_affinity."); + } + } + if let Some(info) = CpuId::new().get_extended_topology_info() { + for c in info { + if c.level_type() == TopologyType::Core { + debug!("CPUID : {:?}", c); + } + } + } + } else { + error!("Could'nt set thread affinity !"); + let last_error = GetLastError(); + panic!("Error was : {:?}", last_error); + } + } else { + error!("Getting thread group affinity failed !"); + let last_error = GetLastError(); + panic!("Error was: {:?}", last_error); // win32 error 122 is insufficient buffer + } + } + //let process_information = GetProcessInformation(current_process, , , ); + } + //let mut core_id_counter = logical_cpus.len(); + + //match cpuid.get_advanced_power_mgmt_info() { + // Some(info) => { + // warn!("Got CPU power mgmt info {:?}", info); + // }, + // None => { + // warn!("Couldn't get cpu power info"); + // } + //} + //match cpuid.get_extended_feature_info() { + // Some(info) => { + // warn!("Got CPU feature info {:?}", info); + // }, + // None => { + // warn!("Couldn't get cpu feature info"); + // } + //} + //match cpuid.get_performance_monitoring_info() { + // Some(info) => { + // warn!("Got CPU perfmonitoring info {:?}", info); + // }, + // None => { + // warn!("Couldn't get cpu perfmonitoring info"); + // } + //} + //match cpuid.get_thermal_power_info() { + // Some(info) => { + // warn!("Got CPU thermal info {:?}", info); + // }, + // None => { + // warn!("Couldn't get cpu thermal info"); + // } + //} + //match cpuid.get_extended_state_info() { + // Some(info) => { + // warn!("Got CPU state info {:?}", info); + // }, + // None => { + // warn!("Couldn't get cpu state info"); + // } + //} + //match cpuid.get_processor_capacity_feature_info() { + // Some(info) => { + // warn!("Got 
CPU capacity info {:?}", info); + // }, + // None => { + // warn!("Couldn't get cpu capacity info"); + // } + //} + + //topology.add_cpu_cores(); + let mut domains = vec![]; + for s in topology.get_sockets() { + debug!("Inspecting CPUSocket: {:?}", s); + unsafe { + let core_id = + s.get_cores_passive().last().unwrap().id + s.id * s.cpu_cores.len() as u16; + debug!( + "Asking get_msr_value, from generate_tpopo, with core_id={}", + core_id + ); + match get_msr_value( + core_id as usize, + MSR_DRAM_ENERGY_STATUS as u64, + &sensor_data, + ) { + Ok(_rec) => { + debug!("Adding domain Dram !"); + let mut domain_sensor_data = sensor_data.clone(); + domain_sensor_data + .insert(String::from("MSR_ADDR"), MSR_DRAM_ENERGY_STATUS.to_string()); + domain_sensor_data.insert(String::from("CORE_ID"), core_id.to_string()); // nb of cores in a socket * socket_id + local_core_id + domains.push(String::from("dram")); + s.safe_add_domain(Domain::new( + 2, + String::from("dram"), + String::from(""), + 5, + domain_sensor_data, + )) + } + Err(e) => { + warn!("Could'nt add Dram domain: {}", e); + } + } + match get_msr_value(core_id as usize, MSR_PP0_ENERGY_STATUS as u64, &sensor_data) { + Ok(_rec) => { + debug!("Adding domain Core !"); + let mut domain_sensor_data = sensor_data.clone(); + domain_sensor_data + .insert(String::from("MSR_ADDR"), MSR_PP0_ENERGY_STATUS.to_string()); + domain_sensor_data.insert(String::from("CORE_ID"), core_id.to_string()); + domains.push(String::from("core")); + s.safe_add_domain(Domain::new( + 2, + String::from("core"), + String::from(""), + 5, + domain_sensor_data, + )) + } + Err(e) => { + warn!("Could'nt add Core domain: {}", e); + } + } + match get_msr_value(core_id as usize, MSR_PP1_ENERGY_STATUS as u64, &sensor_data) { + Ok(_rec) => { + debug!("Adding domain Uncore !"); + let mut domain_sensor_data = sensor_data.clone(); + domain_sensor_data + .insert(String::from("MSR_ADDR"), MSR_PP1_ENERGY_STATUS.to_string()); + 
domain_sensor_data.insert(String::from("CORE_ID"), core_id.to_string()); + domains.push(String::from("uncore")); + s.safe_add_domain(Domain::new( + 2, + String::from("uncore"), + String::from(""), + 5, + domain_sensor_data, + )) + } + Err(e) => { + warn!("Could'nt add Uncore domain: {}", e); + } + } + //match get_msr_value(core_id as usize, MSR_PLATFORM_ENERGY_STATUS as u64, &sensor_data) { + // Ok(rec) => { + // }, + // Err(e) => { + // error!("Could'nt find Platform/PSYS domain."); + // } + //} + } + } + + unsafe { + match get_msr_value(0, MSR_PLATFORM_ENERGY_STATUS as u64, &sensor_data) { + Ok(_rec) => { + debug!("Adding domain Platform / PSYS !"); + topology + ._sensor_data + .insert(String::from("psys"), String::from("")); + } + Err(e) => { + warn!("Could'nt add Uncore domain: {}", e); + } + } + } + + topology.set_domains_names(domains); Ok(topology) } - fn get_topology(&mut self) -> Box> { + fn get_topology(&self) -> Box> { let topology = self.generate_topology().ok(); if topology.is_none() { panic!("Couldn't generate the topology !"); @@ -308,3 +716,105 @@ impl Sensor for MsrRAPLSensor { Box::new(topology) } } + +/// # Safety +/// +/// This function should is unsafe rust as it uses send_request, hence calls a DeviceIO Windows driver. +/// The safety burden actuallr resides in the DeviceIO driver that is called. Please refer to the documentation to +/// get the relationship between Scaphandre and its driver for Windows. The driver should exit smoothly if a wrong +/// MSR address is called, then this function should throw an Error. Any improper issue with the operating system would mean +/// there is an issue in the driver used behind the scene, or the way it is configured. 
+pub unsafe fn get_msr_value( + core_id: usize, + msr_addr: u64, + sensor_data: &HashMap, +) -> Result { + let current_process = GetCurrentProcess(); + let current_thread = GetCurrentThread(); + let mut thread_group_affinity = GROUP_AFFINITY { + Mask: 255, + Group: 9, + Reserved: [0, 0, 0], + }; + let thread_affinity_res = GetThreadGroupAffinity(current_thread, &mut thread_group_affinity); + if thread_affinity_res.as_bool() { + debug!("Thread affinity found : {:?}", thread_group_affinity); + } else { + error!("Could'nt get thread group affinity"); + } + let mut process_group_array: [u16; 8] = [0, 0, 0, 0, 0, 0, 0, 0]; + let mut process_group_array_len = 8; + let process_affinity_res = GetProcessGroupAffinity( + current_process, + &mut process_group_array_len, + process_group_array.as_mut_ptr(), + ); + if process_affinity_res.as_bool() { + debug!("Process affinity found: {:?}", process_group_array); + } else { + error!("Could'nt get process group affinity"); + error!("Error was : {:?}", GetLastError()); + } + debug!("Core ID requested to the driver : {}", core_id); + match sensor_data.get("DRIVER_NAME") { + Some(driver) => { + match get_handle(driver) { + Ok(device) => { + let mut msr_result: u64 = 0; + let ptr_result = &mut msr_result as *mut u64; + debug!("msr_addr: {:b}", msr_addr); + debug!("core_id: {:x} {:b}", (core_id as u64), (core_id as u64)); + debug!("core_id: {:b}", ((core_id as u64) << 32)); + let src = ((core_id as u64) << 32) | msr_addr; //let src = ((core_id as u64) << 32) | msr_addr; + let ptr = &src as *const u64; + + debug!("src: {:x}", src); + debug!("src: {:b}", src); + debug!("*ptr: {:b}", *ptr); + //warn!("*ptr: {}", *ptr); + //warn!("*ptr: {:b}", *ptr); + + match send_request( + device, + MSR_PKG_ENERGY_STATUS, + ptr, + 8, + ptr_result, + size_of::(), + ) { + Ok(_res) => { + close_handle(device); + + let energy_unit = sensor_data + .get("ENERGY_UNIT") + .unwrap() + .parse::() + .unwrap(); + let current_value = + 
MsrRAPLSensor::extract_rapl_current_power(msr_result, energy_unit); + debug!("current_value: {}", current_value); + + Ok(Record { + timestamp: current_system_time_since_epoch(), + unit: super::units::Unit::MicroJoule, + value: current_value, + }) + } + Err(e) => { + info!("Failed to get data from send_request: {:?}", e); + close_handle(device); + Err(format!("Failed to get data from send_request: {:?}", e)) + } + } + } + Err(e) => { + error!("Couldn't get driver handle : {:?}", e); + Err(format!("Couldn't get driver handle : {:?}", e)) + } + } + } + None => { + panic!("DRIVER_NAME not set."); + } + } +} diff --git a/src/sensors/powercap_rapl.rs b/src/sensors/powercap_rapl.rs index 4cab2836..2d56700a 100644 --- a/src/sensors/powercap_rapl.rs +++ b/src/sensors/powercap_rapl.rs @@ -7,6 +7,11 @@ use std::collections::HashMap; use std::error::Error; use std::{env, fs}; +use super::units::Unit; + +pub const DEFAULT_BUFFER_PER_SOCKET_MAX_KBYTES: u16 = 1; +pub const DEFAULT_BUFFER_PER_DOMAIN_MAX_KBYTES: u16 = 1; + /// This is a Sensor type that relies on powercap and rapl linux modules /// to collect energy consumption from CPU sockets and RAPL domains pub struct PowercapRAPLSensor { @@ -67,11 +72,47 @@ impl PowercapRAPLSensor { impl RecordReader for Topology { fn read_record(&self) -> Result> { - Ok(Record { - timestamp: current_system_time_since_epoch(), - value: String::from("5"), - unit: MicroJoule, - }) + // if psys is available, return psys + // else return pkg + dram + F(disks) + + if let Some(psys_record) = self.get_rapl_psys_energy_microjoules() { + debug!("Using PSYS metric"); + Ok(psys_record) + } else { + let mut total: i128 = 0; + debug!("Suming socket PKG and DRAM metrics to get host metric"); + for s in &self.sockets { + if let Ok(r) = s.read_record() { + match r.value.trim().parse::() { + Ok(val) => { + total += val; + } + Err(e) => { + warn!("could'nt convert {} to i128: {}", r.value.trim(), e); + } + } + } + for d in &s.domains { + if d.name == "dram" { + 
if let Ok(dr) = d.read_record() { + match dr.value.trim().parse::() { + Ok(val) => { + total += val; + } + Err(e) => { + warn!("could'nt convert {} to i128: {}", dr.value.trim(), e); + } + } + } + } + } + } + Ok(Record::new( + current_system_time_since_epoch(), + total.to_string(), + Unit::MicroJoule, + )) + } } } impl RecordReader for CPUSocket { @@ -108,12 +149,15 @@ impl Sensor for PowercapRAPLSensor { if modules_state.is_err() && !self.virtual_machine { warn!("Couldn't find intel_rapl modules."); } - let mut topo = Topology::new(); + let mut topo = Topology::new(HashMap::new()); let re_socket = Regex::new(r"^.*/intel-rapl:\d+$").unwrap(); let re_domain = Regex::new(r"^.*/intel-rapl:\d+:\d+$").unwrap(); + let re_socket_mmio = Regex::new(r"^.*/intel-rapl-mmio:\d+$").unwrap(); + let re_domain_mmio = Regex::new(r"^.*/intel-rapl-mmio:\d+:\d+$").unwrap(); let mut re_domain_matched = false; for folder in fs::read_dir(&self.base_path).unwrap() { let folder_name = String::from(folder.unwrap().path().to_str().unwrap()); + info!("working on {folder_name}"); // let's catch domain folders if re_domain.is_match(&folder_name) { re_domain_matched = true; @@ -156,30 +200,86 @@ impl Sensor for PowercapRAPLSensor { sensor_data_for_domain, ); } + } else if re_socket_mmio.is_match(&folder_name) { + info!("matched {folder_name}"); + let mut splitted = folder_name.split(':'); + let _ = splitted.next(); + let socket_id: u16 = String::from(splitted.next().unwrap()).parse().unwrap(); + for s in topo.get_sockets() { + if socket_id == s.id { + s.sensor_data.insert( + String::from("mmio"), + format!("{}/intel-rapl-mmio:{}/energy_uj", self.base_path, socket_id), + ); + } + } + } else if re_domain_mmio.is_match(&folder_name) { + debug!("matched {folder_name}"); + let mut splitted = folder_name.split(':'); + let _ = splitted.next(); + let socket_id: u16 = String::from(splitted.next().unwrap()).parse().unwrap(); + for s in topo.get_sockets() { + if socket_id == s.id { + let mmio_file = 
format!("{}/energy_uj", folder_name); + for d in s.get_domains() { + let name_in_folder = + fs::read_to_string(format!("{folder_name}/name")).unwrap(); + // domain id doesn't match between regular and mmio folders, the name is coherent however (dram) + if d.name.trim() == name_in_folder.trim() { + d.sensor_data + .insert(String::from("mmio"), mmio_file.clone()); + } + } + } + } } } if !re_domain_matched { warn!("Couldn't find domain folders from powercap. Fallback on socket folders."); warn!("Scaphandre will not be able to provide per-domain data."); + let mut found = false; for folder in fs::read_dir(&self.base_path).unwrap() { let folder_name = String::from(folder.unwrap().path().to_str().unwrap()); - if re_socket.is_match(&folder_name) { - let mut splitted = folder_name.split(':'); - let _ = splitted.next(); - let socket_id = String::from(splitted.next().unwrap()).parse().unwrap(); - let mut sensor_data_for_socket = HashMap::new(); - sensor_data_for_socket.insert( - String::from("source_file"), - format!("{}/intel-rapl:{}/energy_uj", self.base_path, socket_id), - ); - topo.safe_add_socket( - socket_id, - vec![], - vec![], - format!("{}/intel-rapl:{}/energy_uj", self.base_path, socket_id), - self.buffer_per_socket_max_kbytes, - sensor_data_for_socket, - ) + if let Ok(domain_name) = &fs::read_to_string(format!("{folder_name}/name")) { + if domain_name != "psys" && re_socket.is_match(&folder_name) { + let mut splitted = folder_name.split(':'); + let _ = splitted.next(); + let socket_id = String::from(splitted.next().unwrap()).parse().unwrap(); + let mut sensor_data_for_socket = HashMap::new(); + sensor_data_for_socket.insert( + String::from("source_file"), + format!("{}/intel-rapl:{}/energy_uj", self.base_path, socket_id), + ); + topo.safe_add_socket( + socket_id, + vec![], + vec![], + format!("{}/intel-rapl:{}/energy_uj", self.base_path, socket_id), + self.buffer_per_socket_max_kbytes, + sensor_data_for_socket, + ); + found = true; + } + } else { + warn!("Couldn't 
read RAPL folder name : {folder_name}"); + } + } + if !found { + warn!("Could'nt find any RAPL PKG domain (nor psys)."); + } + } + for folder in fs::read_dir(&self.base_path).unwrap() { + let folder_name = String::from(folder.unwrap().path().to_str().unwrap()); + match &fs::read_to_string(format!("{folder_name}/name")) { + Ok(domain_name) => { + let domain_name_trimed = domain_name.trim(); + if domain_name_trimed == "psys" { + debug!("Found PSYS domain RAPL folder."); + topo._sensor_data.insert(String::from("psys"), folder_name); + } + } + Err(e) => { + debug!("Got error while reading {folder_name}: {e}"); } } } @@ -188,7 +288,7 @@ impl Sensor for PowercapRAPLSensor { } /// Instanciates Topology object if not existing and returns it - fn get_topology(&mut self) -> Box> { + fn get_topology(&self) -> Box> { let topology = self.generate_topology().ok(); if topology.is_none() { panic!("Couldn't generate the topology !"); @@ -207,7 +307,7 @@ mod tests { } #[test] fn get_topology_returns_topology_type() { - let mut sensor = PowercapRAPLSensor::new(1, 1, false); + let sensor = PowercapRAPLSensor::new(1, 1, false); let topology = sensor.get_topology(); assert_eq!( "alloc::boxed::Box>", diff --git a/src/sensors/units.rs b/src/sensors/units.rs index e0274f35..a2eff801 100644 --- a/src/sensors/units.rs +++ b/src/sensors/units.rs @@ -3,6 +3,7 @@ use std::{cmp::Ordering, fmt}; // !!!!!!!!!!!!!!!!! Unit !!!!!!!!!!!!!!!!!!!!!!! 
#[derive(Debug)] pub enum Unit { + Numeric, Joule, MilliJoule, MicroJoule, @@ -12,6 +13,11 @@ pub enum Unit { MilliWatt, MicroWatt, Percentage, + Bytes, + KiloBytes, + MegaBytes, + GigaBytes, + MegaHertz, } impl Unit { @@ -35,7 +41,7 @@ impl Unit { } else if let (Some(pos_source), Some(pos_dest)) = (pos_source_power, pos_dest_power) { Ok(measure * Unit::get_mult(pos_source, pos_dest)) } else { - panic!("Impossible conversion asked from energy value to power value (without time dimension)."); + panic!("Unimplemented or impossible conversion (if asked from energy value to power value without time dimension)."); } } @@ -63,6 +69,12 @@ impl fmt::Display for Unit { Unit::KiloWatt => write!(f, "KiloWatts"), Unit::MegaWatt => write!(f, "MegaWatts"), Unit::Percentage => write!(f, "Percentage"), + Unit::Bytes => write!(f, "Bytes"), + Unit::KiloBytes => write!(f, "KiloBytes"), + Unit::MegaBytes => write!(f, "MegaBytes"), + Unit::GigaBytes => write!(f, "GigaBytes"), + Unit::MegaHertz => write!(f, "MegaHertz"), + Unit::Numeric => write!(f, ""), } } } diff --git a/src/sensors/utils.rs b/src/sensors/utils.rs index 6748c3ba..2ba070ab 100644 --- a/src/sensors/utils.rs +++ b/src/sensors/utils.rs @@ -1,14 +1,16 @@ +use ordered_float::*; #[cfg(target_os = "linux")] -use procfs::{self, process::Process}; +use procfs; use regex::Regex; -#[cfg(feature = "containers")] +#[allow(unused_imports)] use std::collections::HashMap; -#[cfg(target_os = "windows")] -use sysinfo::{get_current_pid, Process, ProcessExt, ProcessorExt, System, SystemExt}; -//use std::error::Error; -use ordered_float::*; +use std::io::{Error, ErrorKind}; use std::path::PathBuf; use std::time::{Duration, SystemTime}; +use sysinfo::{ + get_current_pid, CpuExt, CpuRefreshKind, Pid, Process, ProcessExt, ProcessStatus, System, + SystemExt, +}; #[cfg(all(target_os = "linux", feature = "containers"))] use {docker_sync::container::Container, k8s_sync::Pod}; @@ -33,336 +35,162 @@ pub struct IStat { pub tty_nr: i32, pub tpgid: 
i32, pub flags: u32, - //pub minflt: u64, - //pub cminflt: u64, - //pub majflt: u64, - //pub cmajflt: u64, pub utime: u64, pub stime: u64, pub cutime: i64, pub cstime: i64, - //pub priority: i64, pub nice: i64, pub num_threads: i64, pub itrealvalue: i64, pub starttime: u64, pub vsize: u64, - //pub rss: i64, - //pub rsslim: u64, - //pub startcode: u64, - //pub endcode: u64, - //pub startstack: u64, - //pub kstkesp: u64, - //pub kstkeip: u64, pub signal: u64, pub blocked: u64, - //pub sigignore: u64, - //pub sigcatch: u64, - //pub wchan: u64, - //pub nswap: u64, - //pub cnswap: u64, pub exit_signal: Option, pub processor: Option, - //pub rt_priority: Option, - //pub policy: Option, pub delayacct_blkio_ticks: Option, pub guest_time: Option, pub cguest_time: Option, pub start_data: Option, pub end_data: Option, - //pub start_brk: Option, - //pub arg_start: Option, - //pub arg_end: Option, - //pub env_start: Option, - //pub env_end: Option, pub exit_code: Option, } -impl IStat { - #[cfg(target_os = "linux")] - fn from_procfs_stat(stat: &procfs::process::Stat) -> IStat { - IStat { - blocked: stat.blocked, - cguest_time: stat.cguest_time, - comm: stat.comm.clone(), - cstime: stat.cstime, - cutime: stat.cutime, - delayacct_blkio_ticks: stat.delayacct_blkio_ticks, - end_data: stat.end_data, - exit_code: stat.exit_code, - exit_signal: stat.exit_signal, - flags: stat.flags, - guest_time: stat.guest_time, - itrealvalue: stat.itrealvalue, - nice: stat.nice, - num_threads: stat.num_threads, - pgrp: stat.pgrp, - pid: stat.pid, - ppid: stat.ppid, - processor: stat.processor, - session: stat.session, - signal: stat.signal, - start_data: stat.start_data, - starttime: stat.starttime, - state: stat.state, - stime: stat.stime, - tpgid: stat.tpgid, - tty_nr: stat.tty_nr, - utime: stat.utime, - vsize: stat.vsize, - } - } - - #[cfg(target_os = "windows")] - fn from_windows_process_stat(_process: &Process) -> IStat { - IStat { - blocked: 0, - cguest_time: Some(0), - comm: String::from("Not 
implemented yet !"), - cstime: 0, - cutime: 0, - delayacct_blkio_ticks: Some(0), - end_data: Some(0), - exit_code: Some(0), - exit_signal: Some(0), - flags: 0, - guest_time: Some(0), - itrealvalue: 0, - nice: 0, - num_threads: 0, - pgrp: 0, - pid: 0, - ppid: 0, - processor: Some(0), - session: 0, - signal: 0, - start_data: Some(0), - starttime: 0, - state: 'X', - stime: 0, - tpgid: 0, - tty_nr: 0, - utime: 0, - vsize: 0, - } - } -} - #[derive(Clone)] pub struct IStatus { pub name: String, pub umask: Option, pub state: String, - //pub tgid: i32, - //pub ngid: Option, pub pid: i32, pub ppid: i32, - //pub tracerpid: i32, - //pub ruid: u32, - //pub euid: u32, - //pub suid: u32, - //pub fuid: u32, - //pub rgid: u32, - //pub egid: u32, - //pub sgid: u32, - //pub fgid: u32, - //pub fdsize: u32, - //pub groups: Vec, - //pub nstgid: Option>, - //pub nspid: Option>, - //pub nspgid: Option>, - //pub nssid: Option>, - //pub vmpeak: Option, - //pub vmsize: Option, - //pub vmlck: Option, - //pub vmpin: Option, - //pub vmhwm: Option, - //pub vmrss: Option, - //pub rssanon: Option, - //pub rssfile: Option, - //pub rssshmem: Option, - //pub vmdata: Option, - //pub vmstk: Option, - //pub vmexe: Option, - //pub vmlib: Option, - //pub vmpte: Option, - //pub vmswap: Option, - //pub hugetlbpages: Option, - //pub threads: u64, - //pub sigq: (u64, u64), - //pub sigpnd: u64, - //pub shdpnd: u64, - //pub sigblk: u64, - //pub sigign: u64, - //pub sigcgt: u64, - //pub capinh: u64, - //pub capprm: u64, - //pub capeff: u64, - //pub capbnd: Option, - //pub capamb: Option, - //pub nonewprivs: Option, - //pub seccomp: Option, - //pub speculation_store_bypass: Option, - //pub cpus_allowed: Option>, - //pub cpus_allowed_list: Option>, - //pub mems_allowed: Option>, - //pub mems_allowed_list: Option>, - //pub voluntary_ctxt_switches: Option, - //pub nonvoluntary_ctxt_switches: Option, - //pub core_dumping: Option, - //pub thp_enabled: Option, } #[derive(Debug, Clone)] pub struct IProcess { - pub pid: 
i32, + pub pid: Pid, pub owner: u32, pub comm: String, pub cmdline: Vec, - pub stat: Option, - //pub root: Option, + //CPU (all of them) time usage, as a percentage + pub cpu_usage_percentage: f32, + // Virtual memory used by the process (at the time the struct is created), in bytes + pub virtual_memory: u64, + // Memory consumed by the process (at the time the struct is created), in bytes + pub memory: u64, + // Disk bytes read by the process + pub disk_read: u64, + // Disk bytes written by the process + pub disk_written: u64, + // Total disk bytes read by the process + pub total_disk_read: u64, + // Total disk bytes written by the process + pub total_disk_written: u64, + #[cfg(target_os = "linux")] + pub stime: u64, #[cfg(target_os = "linux")] - pub original: Process, + pub utime: u64, } impl IProcess { - #[cfg(target_os = "linux")] - pub fn from_linux_process(process: &Process) -> IProcess { - //let root = process.root(); - let mut cmdline = vec![String::from("")]; - if let Ok(raw_cmdline) = process.cmdline() { - cmdline = raw_cmdline; - } - IProcess { - pid: process.pid, - owner: process.owner, - original: process.clone(), - comm: process.stat.comm.clone(), - cmdline, - stat: Some(IStat::from_procfs_stat(&process.stat)), + pub fn new(process: &Process) -> IProcess { + let disk_usage = process.disk_usage(); + #[cfg(target_os = "linux")] + { + let mut stime = 0; + let mut utime = 0; + if let Ok(procfs_process) = + procfs::process::Process::new(process.pid().to_string().parse::().unwrap()) + { + if let Ok(stat) = procfs_process.stat() { + stime += stat.stime; + utime += stat.utime; + } + } + IProcess { + pid: process.pid(), + owner: 0, + comm: String::from(process.exe().to_str().unwrap()), + cmdline: process.cmd().to_vec(), + cpu_usage_percentage: process.cpu_usage(), + memory: process.memory(), + virtual_memory: process.virtual_memory(), + disk_read: disk_usage.read_bytes, + disk_written: disk_usage.written_bytes, + total_disk_read: disk_usage.total_read_bytes, + 
total_disk_written: disk_usage.total_written_bytes, + stime, + utime, + } } - } - - #[cfg(target_os = "windows")] - pub fn from_windows_process(process: &Process) -> IProcess { - IProcess { - pid: process.pid() as i32, - owner: 0, - comm: String::from(process.exe().to_str().unwrap()), - cmdline: process.cmd().to_vec(), - stat: Some(IStat::from_windows_process_stat(process)), + #[cfg(not(target_os = "linux"))] + { + IProcess { + pid: process.pid(), + owner: 0, + comm: String::from(process.exe().to_str().unwrap()), + cmdline: process.cmd().to_vec(), + cpu_usage_percentage: process.cpu_usage(), + memory: process.memory(), + virtual_memory: process.virtual_memory(), + disk_read: disk_usage.read_bytes, + disk_written: disk_usage.written_bytes, + total_disk_read: disk_usage.total_read_bytes, + total_disk_written: disk_usage.total_written_bytes, + } } } - #[cfg(target_os = "linux")] - pub fn cmdline(&self) -> Result, String> { - if let Ok(cmdline) = self.original.cmdline() { - Ok(cmdline) - } else { - Err(String::from("cmdline() was none")) - } - } - #[cfg(target_os = "windows")] - pub fn cmdline(&self, proc_tracker: &ProcessTracker) -> Result, String> { - if let Some(p) = proc_tracker.sysinfo.process(self.pid as usize) { + /// Returns the command line of related to the process, as found by sysinfo. 
+ pub fn cmdline(&self, proc_tracker: &ProcessTracker) -> Result, Error> { + if let Some(p) = proc_tracker.sysinfo.process(self.pid) { Ok(p.cmd().to_vec()) } else { - Err(String::from("Failed to get original process.")) - } - } - - pub fn statm(&self) -> Result { - #[cfg(target_os = "linux")] - { - let mystatm = self.original.statm().unwrap(); - Ok(IStatM { - size: mystatm.size, - data: mystatm.data, - dt: mystatm.dt, - lib: mystatm.lib, - resident: mystatm.resident, - shared: mystatm.shared, - text: mystatm.text, - }) + Err(Error::new( + ErrorKind::Other, + "Failed to get original process.", + )) } - #[cfg(target_os = "windows")] - Ok(IStatM { - size: 42, - data: 42, - dt: 42, - lib: 42, - resident: 42, - shared: 42, - text: 42, - }) } - #[cfg(target_os = "linux")] - pub fn exe(&self) -> Result { - let original_exe = self.original.exe().unwrap(); - Ok(original_exe) - } - #[cfg(target_os = "windows")] + /// Returns the executable string related to the process pub fn exe(&self, proc_tracker: &ProcessTracker) -> Result { - if let Some(p) = proc_tracker.sysinfo.process(self.pid as usize) { + if let Some(p) = proc_tracker.sysinfo.process(self.pid) { Ok(PathBuf::from(p.exe().to_str().unwrap())) } else { Err(String::from("Couldn't get process.")) } } - pub fn status(&self) -> Result { - #[cfg(target_os = "linux")] - { - if let Ok(original_status) = self.original.status() { - let status = IStatus { - name: original_status.name, - pid: original_status.pid, - ppid: original_status.ppid, - state: original_status.state, - umask: original_status.umask, - }; - Ok(status) - } else { - Err(format!("Couldn't get status for {}", self.pid)) - } - } - #[cfg(target_os = "windows")] - { - Ok(IStatus { - name: String::from("Not implemented yet !"), - pid: 42, - ppid: 42, - state: String::from("X"), - umask: None, - }) + #[cfg(target_os = "linux")] + pub fn total_time_jiffies(&self, proc_tracker: &ProcessTracker) -> u64 { + if let Some(rec) = 
proc_tracker.get_process_last_record(self.pid) { + return rec.process.stime + rec.process.utime; } + 0 } - #[cfg(target_os = "linux")] - pub fn myself() -> Result { - Ok(IProcess::from_linux_process(&Process::myself().unwrap())) - } - #[cfg(target_os = "windows")] pub fn myself(proc_tracker: &ProcessTracker) -> Result { - Ok(IProcess::from_windows_process( + Ok(IProcess::new( proc_tracker .sysinfo - .process(get_current_pid().unwrap() as usize) + .process(get_current_pid().unwrap()) .unwrap(), )) } + + #[cfg(target_os = "linux")] + pub fn cgroups() {} } -pub fn page_size() -> Result { +pub fn page_size() -> Result { let res; #[cfg(target_os = "linux")] { - res = Ok(procfs::page_size().unwrap()) + res = Ok(procfs::page_size()) } #[cfg(target_os = "windows")] { - res = Ok(4096) + res = Ok(4096u64) } res } @@ -377,7 +205,7 @@ pub struct ProcessTracker { /// Maximum number of ProcessRecord instances that scaphandre is allowed to /// store, per PID (thus, for each subvector). pub max_records_per_process: u16, - #[cfg(target_os = "windows")] + /// Sysinfo system for resources monitoring pub sysinfo: System, #[cfg(feature = "containers")] pub regex_cgroup_docker: Regex, @@ -392,7 +220,6 @@ impl Clone for ProcessTracker { ProcessTracker { procs: self.procs.clone(), max_records_per_process: self.max_records_per_process, - #[cfg(target_os = "windows")] sysinfo: System::new_all(), #[cfg(feature = "containers")] regex_cgroup_docker: self.regex_cgroup_docker.clone(), @@ -423,22 +250,39 @@ impl ProcessTracker { #[cfg(feature = "containers")] let regex_cgroup_containerd = Regex::new("/system.slice/containerd.service/.*$").unwrap(); + let mut system = System::new_all(); + system.refresh_cpu_specifics(CpuRefreshKind::everything()); + let nb_cores = system.cpus().len(); + ProcessTracker { procs: vec![], max_records_per_process, - #[cfg(target_os = "windows")] - sysinfo: System::new_all(), + sysinfo: system, #[cfg(feature = "containers")] regex_cgroup_docker, #[cfg(feature = 
"containers")] regex_cgroup_kubernetes, #[cfg(feature = "containers")] regex_cgroup_containerd, - #[cfg(target_os = "windows")] - nb_cores: System::new_all().processors().len(), - #[cfg(target_os = "linux")] - nb_cores: 0, // TODO implement + nb_cores, + } + } + + pub fn refresh(&mut self) { + self.sysinfo.refresh_components(); + self.sysinfo.refresh_memory(); + self.sysinfo.refresh_disks(); + self.sysinfo.refresh_disks_list(); + self.sysinfo + .refresh_cpu_specifics(CpuRefreshKind::everything()); + } + + pub fn components(&mut self) -> Vec { + let mut res = vec![]; + for c in self.sysinfo.components() { + res.push(format!("{c:?}")); } + res } /// Properly creates and adds a ProcessRecord to 'procs', the vector of vectors or ProcessRecords @@ -446,14 +290,26 @@ impl ProcessTracker { /// states during all the lifecycle of the exporter. /// # Linux Example: /// ``` - /// use procfs::process::Process; /// use scaphandre::sensors::utils::{ProcessTracker, IProcess}; - /// let mut tracker = ProcessTracker::new(5); - /// let pid = 1; - /// if let Ok(result) = tracker.add_process_record( - /// IProcess::from_linux_process(&Process::new(pid).unwrap()) - /// ){ - /// println!("ProcessRecord stored successfully: {}", result); + /// use scaphandre::sensors::Topology; + /// use std::collections::HashMap; + /// use sysinfo::SystemExt; + /// let mut pt = ProcessTracker::new(5); + /// pt.sysinfo.refresh_processes(); + /// pt.sysinfo.refresh_cpu(); + /// let current_procs = pt + /// .sysinfo + /// .processes() + /// .values() + /// .map(IProcess::new) + /// .collect::>(); + /// for p in current_procs { + /// match pt.add_process_record(p) { + /// Ok(result) => { println!("ProcessRecord stored successfully: {}", result); } + /// Err(msg) => { + /// panic!("Failed to track process !\nGot: {}", msg) + /// } + /// } /// } /// ``` pub fn add_process_record(&mut self, process: IProcess) -> Result { @@ -468,7 +324,7 @@ impl ProcessTracker { // check if the previous records in the vector 
are from the same process // (if the process with that pid is not a new one) and if so, drop it for a new one if !vector.is_empty() - && process_record.process.comm != vector.get(0).unwrap().process.comm + && process_record.process.comm != vector.first().unwrap().process.comm { *vector = vec![]; } @@ -483,6 +339,15 @@ impl ProcessTracker { Ok(String::from("Successfully added record to process.")) } + pub fn get_process_last_record(&self, pid: Pid) -> Option<&ProcessRecord> { + if let Some(records) = self.find_records(pid) { + if let Some(last) = records.first() { + return Some(last); + } + } + None + } + /// Removes as many ProcessRecords as needed from the vector (passed as a mutable ref in parameters) /// in order for the vector length to match self.max_records_per_process. fn clean_old_process_records(records: &mut Vec, max_records_per_process: u16) { @@ -502,7 +367,7 @@ impl ProcessTracker { /// Returns a Some(ref to vector of ProcessRecords) if the pid is found /// in self.procs. Returns None otherwise. - pub fn find_records(&self, pid: i32) -> Option<&Vec> { + pub fn find_records(&self, pid: Pid) -> Option<&Vec> { let mut refer = None; for v in &self.procs { if !v.is_empty() && v[0].process.pid == pid { @@ -515,62 +380,39 @@ impl ProcessTracker { refer } - /// Returns the result of the substraction of utime between last and - /// previous ProcessRecord for a given pid. - pub fn get_diff_utime(&self, pid: i32) -> Option { - let records = self.find_records(pid).unwrap(); - if records.len() > 1 { - if let Some(previous) = &records[0].process.stat { - if let Some(current) = &records[1].process.stat { - return Some(previous.utime - current.utime); - } - } - } - None - } - /// Returns the result of the substraction of stime between last and - /// previous ProcessRecord for a given pid. 
- pub fn get_diff_stime(&self, pid: i32) -> Option { - let records = self.find_records(pid).unwrap(); - if records.len() > 1 { - if let Some(previous) = &records[0].process.stat { - if let Some(current) = &records[1].process.stat { - return Some(previous.stime - current.stime); - } - } - } - None + pub fn get_cpu_frequency(&self) -> u64 { + self.sysinfo.global_cpu_info().frequency() } /// Returns all vectors of process records linked to a running, sleeping, waiting or zombie process. /// (Not terminated) pub fn get_alive_processes(&self) -> Vec<&Vec> { - debug!("In get alive processes."); + trace!("In get alive processes."); let mut res = vec![]; for p in self.procs.iter() { - #[cfg(target_os = "linux")] - if !p.is_empty() { - let status = p[0].process.status(); - if let Ok(status_val) = status { - if !&status_val.state.contains('T') { - // !&status_val.state.contains("Z") && - res.push(p); - } - } - } - #[cfg(target_os = "windows")] + //#[cfg(target_os = "linux")] + //if !p.is_empty() { + // let status = p[0].process.status(); + // if let Ok(status_val) = status { + // if !&status_val.state.contains('T') { + // // !&status_val.state.contains("Z") && + // res.push(p); + // } + // } + //} if !p.is_empty() { //TODO implement // clippy will ask you to remove mut from res, but you just need to implement to fix that - if let Some(_sysinfo_p) = self.sysinfo.process(p[0].process.pid as usize) { - //let status = sysinfo_p.status(); - //if status != ProcessStatus::Dead {//&& status != ProcessStatus::Stop { - res.push(p); - //} + if let Some(sysinfo_p) = self.sysinfo.process(p[0].process.pid) { + let status = sysinfo_p.status(); + if status != ProcessStatus::Dead { + //&& status != ProcessStatus::Stop { + res.push(p); + } } } } - debug!("End of get alive processes."); + trace!("End of get alive processes."); res } @@ -599,7 +441,7 @@ impl ProcessTracker { #[cfg(feature = "containers")] pub fn get_process_container_description( &self, - pid: i32, // the PID of the process to 
look for + pid: Pid, // the PID of the process to look for containers: &[Container], docker_version: String, pods: &[Pod], @@ -607,147 +449,163 @@ impl ProcessTracker { ) -> HashMap { let mut result = self.procs.iter().filter( // get all processes that have process records - |x| !x.is_empty() && x.get(0).unwrap().process.pid == pid, + |x| !x.is_empty() && x.first().unwrap().process.pid == pid, ); let process = result.next().unwrap(); let mut description = HashMap::new(); let regex_clean_container_id = Regex::new("[[:alnum:]]{12,}").unwrap(); - if let Some(p) = process.get(0) { + if let Some(_p) = process.first() { // if we have the cgroups data from the original process struct - if let Ok(cgroups) = p.process.original.cgroups() { - let mut found = false; - for cg in &cgroups { - if found { - break; - } - // docker - if self.regex_cgroup_docker.is_match(&cg.pathname) { - debug!("regex docker matched : {}", &cg.pathname); //coucou - description - .insert(String::from("container_scheduler"), String::from("docker")); - // extract container_id - //let container_id = cg.pathname.split('/').last().unwrap(); - if let Some(container_id_capture) = - regex_clean_container_id.captures(&cg.pathname) - { - let container_id = &container_id_capture[0]; - debug!("container_id = {}", container_id); - description - .insert(String::from("container_id"), String::from(container_id)); - if let Some(container) = - containers.iter().find(|x| x.Id == container_id) + if let Ok(procfs_process) = + procfs::process::Process::new(pid.to_string().parse::().unwrap()) + { + if let Ok(cgroups) = procfs_process.cgroups() { + let mut found = false; + for cg in &cgroups { + if found { + break; + } + // docker + if self.regex_cgroup_docker.is_match(&cg.pathname) { + debug!("regex docker matched : {}", &cg.pathname); //coucou + description.insert( + String::from("container_scheduler"), + String::from("docker"), + ); + // extract container_id + //let container_id = cg.pathname.split('/').last().unwrap(); 
+ if let Some(container_id_capture) = + regex_clean_container_id.captures(&cg.pathname) { - debug!("found container with id: {}", &container_id); - let mut names = String::from(""); - for n in &container.Names { - debug!("adding container name: {}", &n.trim().replace('/', "")); - names.push_str(&n.trim().replace('/', "")); - } - description.insert(String::from("container_names"), names); + let container_id = &container_id_capture[0]; + debug!("container_id = {}", container_id); description.insert( - String::from("container_docker_version"), - docker_version.clone(), + String::from("container_id"), + String::from(container_id), ); - if let Some(labels) = &container.Labels { - for (k, v) in labels { - let escape_list = ["-", ".", ":", " "]; - let mut key = k.clone(); - for e in escape_list.iter() { - key = key.replace(e, "_"); - } - description.insert( - format!("container_label_{key}"), - v.to_string(), + if let Some(container) = + containers.iter().find(|x| x.Id == container_id) + { + debug!("found container with id: {}", &container_id); + let mut names = String::from(""); + for n in &container.Names { + debug!( + "adding container name: {}", + &n.trim().replace('/', "") ); + names.push_str(&n.trim().replace('/', "")); + } + description.insert(String::from("container_names"), names); + description.insert( + String::from("container_docker_version"), + docker_version.clone(), + ); + if let Some(labels) = &container.Labels { + for (k, v) in labels { + let escape_list = ["-", ".", ":", " "]; + let mut key = k.clone(); + for e in escape_list.iter() { + key = key.replace(e, "_"); + } + description.insert( + format!("container_label_{key}"), + v.to_string(), + ); + } } } + found = true; } - found = true; - } - } else { - // containerd - if self.regex_cgroup_containerd.is_match(&cg.pathname) { - debug!("regex containerd matched : {}", &cg.pathname); - description.insert( - String::from("container_runtime"), - String::from("containerd"), - ); - } else if 
self.regex_cgroup_kubernetes.is_match(&cg.pathname) { - debug!("regex kubernetes matched : {}", &cg.pathname); - // kubernetes not using containerd but we can get the container id } else { - // cgroup not related to a container technology - continue; - } + // containerd + if self.regex_cgroup_containerd.is_match(&cg.pathname) { + debug!("regex containerd matched : {}", &cg.pathname); + description.insert( + String::from("container_runtime"), + String::from("containerd"), + ); + } else if self.regex_cgroup_kubernetes.is_match(&cg.pathname) { + debug!("regex kubernetes matched : {}", &cg.pathname); + // kubernetes not using containerd but we can get the container id + } else { + // cgroup not related to a container technology + continue; + } - let container_id = - match self.extract_pod_id_from_cgroup_path(cg.pathname.clone()) { - Ok(id) => id, - Err(err) => { - info!("Couldn't get container id : {}", err); - "ERROR Couldn't get container id".to_string() - } - }; - description.insert(String::from("container_id"), container_id.clone()); - // find pod in pods that has pod_status > container_status.container - if let Some(pod) = pods.iter().find(|x| match &x.status { - Some(status) => { - if let Some(container_statuses) = &status.container_statuses { - container_statuses.iter().any(|y| match &y.container_id { - Some(id) => { - if let Some(final_id) = id.strip_prefix("docker://") { - final_id == container_id - } else if let Some(final_id) = - id.strip_prefix("containerd://") - { - final_id == container_id - } else { - false + let container_id = + match self.extract_pod_id_from_cgroup_path(cg.pathname.clone()) { + Ok(id) => id, + Err(err) => { + info!("Couldn't get container id : {}", err); + "ERROR Couldn't get container id".to_string() + } + }; + description.insert(String::from("container_id"), container_id.clone()); + // find pod in pods that has pod_status > container_status.container + if let Some(pod) = pods.iter().find(|x| match &x.status { + Some(status) => { + if 
let Some(container_statuses) = &status.container_statuses { + container_statuses.iter().any(|y| match &y.container_id { + Some(id) => { + if let Some(final_id) = id.strip_prefix("docker://") + { + final_id == container_id + } else if let Some(final_id) = + id.strip_prefix("containerd://") + { + final_id == container_id + } else { + false + } } - } - None => false, - }) - } else { - false + None => false, + }) + } else { + false + } } - } - None => false, - }) { - description.insert( - String::from("container_scheduler"), - String::from("kubernetes"), - ); - if let Some(pod_name) = &pod.metadata.name { - description - .insert(String::from("kubernetes_pod_name"), pod_name.clone()); - } - if let Some(pod_namespace) = &pod.metadata.namespace { + None => false, + }) { description.insert( - String::from("kubernetes_pod_namespace"), - pod_namespace.clone(), + String::from("container_scheduler"), + String::from("kubernetes"), ); - } - if let Some(pod_spec) = &pod.spec { - if let Some(node_name) = &pod_spec.node_name { + if let Some(pod_name) = &pod.metadata.name { + description.insert( + String::from("kubernetes_pod_name"), + pod_name.clone(), + ); + } + if let Some(pod_namespace) = &pod.metadata.namespace { description.insert( - String::from("kubernetes_node_name"), - node_name.clone(), + String::from("kubernetes_pod_namespace"), + pod_namespace.clone(), ); } + if let Some(pod_spec) = &pod.spec { + if let Some(node_name) = &pod_spec.node_name { + description.insert( + String::from("kubernetes_node_name"), + node_name.clone(), + ); + } + } } - } - found = true; - } //else { - // debug!("Cgroup not identified as related to a container technology : {}", &cg.pathname); - //} + found = true; + } //else { + // debug!("Cgroup not identified as related to a container technology : {}", &cg.pathname); + //} + } } + } else { + debug!("Could'nt find {} in procfs.", pid.to_string()); } } description } /// Returns a vector containing pids of all running, sleeping or waiting current 
processes. - pub fn get_alive_pids(&self) -> Vec { + pub fn get_alive_pids(&self) -> Vec { self.get_alive_processes() .iter() .filter(|x| !x.is_empty()) @@ -756,7 +614,7 @@ impl ProcessTracker { } /// Returns a vector containing pids of all processes being tracked. - pub fn get_all_pids(&self) -> Vec { + pub fn get_all_pids(&self) -> Vec { self.procs .iter() .filter(|x| !x.is_empty()) @@ -765,31 +623,29 @@ impl ProcessTracker { } /// Returns the process name associated to a PID - pub fn get_process_name(&self, pid: i32) -> String { + pub fn get_process_name(&self, pid: Pid) -> String { let mut result = self .procs .iter() - .filter(|x| !x.is_empty() && x.get(0).unwrap().process.pid == pid); + .filter(|x| !x.is_empty() && x.first().unwrap().process.pid == pid); let process = result.next().unwrap(); if result.next().is_some() { panic!("Found two vectors of processes with the same id, maintainers should fix this."); } + debug!("End of get process name."); - process.get(0).unwrap().process.comm.clone() + process.first().unwrap().process.comm.clone() } /// Returns the cmdline string associated to a PID - pub fn get_process_cmdline(&self, pid: i32) -> Option { + pub fn get_process_cmdline(&self, pid: Pid) -> Option { let mut result = self .procs .iter() - .filter(|x| !x.is_empty() && x.get(0).unwrap().process.pid == pid); + .filter(|x| !x.is_empty() && x.first().unwrap().process.pid == pid); let process = result.next().unwrap(); - if let Some(p) = process.get(0) { - #[cfg(target_os = "windows")] + if let Some(p) = process.first() { let cmdline_request = p.process.cmdline(self); - #[cfg(target_os = "linux")] - let cmdline_request = p.process.cmdline(); if let Ok(mut cmdline_vec) = cmdline_request { let mut cmdline = String::from(""); while !cmdline_vec.is_empty() { @@ -797,35 +653,16 @@ impl ProcessTracker { cmdline.push_str(&cmdline_vec.remove(0)); } } - debug!("End of get process cmdline."); return Some(cmdline); } } - debug!("End of get process cmdline."); None } - 
#[cfg(target_os = "linux")] - /// Returns the CPU time consumed between two measure iteration - fn get_cpu_time_consumed(&self, p: &[ProcessRecord]) -> u64 { - let last_time = p.first().unwrap().total_time_jiffies(); - let previous_time = p.get(1).unwrap().total_time_jiffies(); - let mut diff = 0; - if previous_time <= last_time { - diff = last_time - previous_time; - } - diff - } - - #[cfg(target_os = "windows")] - pub fn get_cpu_usage_percentage(&self, pid: usize, nb_cores: usize) -> f32 { - let mut cpu_current_usage = 0.0; - for c in self.sysinfo.processors() { - cpu_current_usage += c.cpu_usage(); - } + pub fn get_cpu_usage_percentage(&self, pid: Pid, nb_cores: usize) -> f32 { + let cpu_current_usage = self.sysinfo.global_cpu_info().cpu_usage(); if let Some(p) = self.sysinfo.process(pid) { - (p.cpu_usage() + (100.0 - cpu_current_usage / nb_cores as f32) * p.cpu_usage() / 100.0) - / nb_cores as f32 + (cpu_current_usage * p.cpu_usage() / 100.0) / nb_cores as f32 } else { 0.0 } @@ -836,51 +673,29 @@ impl ProcessTracker { let mut consumers: Vec<(IProcess, OrderedFloat)> = vec![]; for p in &self.procs { if p.len() > 1 { - #[cfg(target_os = "linux")] + let diff = self + .get_cpu_usage_percentage(p.first().unwrap().process.pid as _, self.nb_cores); + if consumers + .iter() + .filter(|x| { + if let Some(p) = self.sysinfo.process(x.0.pid as _) { + return p.cpu_usage() > diff; + } + false + }) + .count() + < top as usize { - let diff = self.get_cpu_time_consumed(p); - if consumers - .iter() - .filter(|x| ProcessRecord::new(x.0.to_owned()).total_time_jiffies() > diff) - .count() - < top as usize - { - consumers - .push((p.last().unwrap().process.clone(), OrderedFloat(diff as f64))); + let pid = p.first().unwrap().process.pid; + if let Some(sysinfo_process) = self.sysinfo.process(pid as _) { + let new_consumer = IProcess::new(sysinfo_process); + consumers.push((new_consumer, OrderedFloat(diff as f64))); consumers.sort_by(|x, y| y.1.cmp(&x.1)); if consumers.len() > top as 
usize { consumers.pop(); } - } - } - #[cfg(target_os = "windows")] - { - let diff = self.get_cpu_usage_percentage( - p.first().unwrap().process.pid as _, - self.nb_cores, - ); - if consumers - .iter() - .filter(|x| { - if let Some(p) = self.sysinfo.process(x.0.pid as _) { - return p.cpu_usage() > diff; - } - false - }) - .count() - < top as usize - { - let pid = p.first().unwrap().process.pid; - if let Some(sysinfo_process) = self.sysinfo.process(pid as _) { - let new_consumer = IProcess::from_windows_process(sysinfo_process); - consumers.push((new_consumer, OrderedFloat(diff as f64))); - consumers.sort_by(|x, y| y.1.cmp(&x.1)); - if consumers.len() > top as usize { - consumers.pop(); - } - } else { - warn!("Couldn't get process info for {}", pid); - } + } else { + debug!("Couldn't get process info for {}", pid); } } } @@ -897,28 +712,17 @@ impl ProcessTracker { let mut consumers: Vec<(IProcess, OrderedFloat)> = vec![]; for p in &self.procs { if p.len() > 1 { - #[cfg(target_os = "linux")] - { - let diff = self.get_cpu_time_consumed(p); - let process_exe = p.last().unwrap().process.exe().unwrap_or_default(); - if regex_filter.is_match(process_exe.to_str().unwrap_or_default()) { - consumers - .push((p.last().unwrap().process.clone(), OrderedFloat(diff as f64))); - consumers.sort_by(|x, y| y.1.cmp(&x.1)); - } - } - #[cfg(target_os = "windows")] - { - let diff = self.get_cpu_usage_percentage( - p.first().unwrap().process.pid as _, - self.nb_cores, - ); - let process_exe = p.last().unwrap().process.exe(self).unwrap_or_default(); - if regex_filter.is_match(process_exe.to_str().unwrap_or_default()) { - consumers - .push((p.last().unwrap().process.clone(), OrderedFloat(diff as f64))); - consumers.sort_by(|x, y| y.1.cmp(&x.1)); - } + let diff = self + .get_cpu_usage_percentage(p.first().unwrap().process.pid as _, self.nb_cores); + let p_record = p.last().unwrap(); + let process_exe = p_record.process.exe(self).unwrap_or_default(); + let process_cmdline = 
p_record.process.cmdline(self).unwrap_or_default(); + if regex_filter.is_match(process_exe.to_str().unwrap_or_default()) { + consumers.push((p_record.process.clone(), OrderedFloat(diff as f64))); + consumers.sort_by(|x, y| y.1.cmp(&x.1)); + } else if regex_filter.is_match(&process_cmdline.concat()) { + consumers.push((p_record.process.clone(), OrderedFloat(diff as f64))); + consumers.sort_by(|x, y| y.1.cmp(&x.1)); } } } @@ -934,44 +738,28 @@ impl ProcessTracker { /// (if the process is not running anymore) pub fn clean_terminated_process_records_vectors(&mut self) { //TODO get stats from processes to know what is hapening ! - let mut d_unint_sleep = 0; - let mut r_running = 0; - let mut s_int_sleep = 0; - let mut t_stopped = 0; - let mut z_defunct_zombie = 0; - let mut w_no_resident_high_prio = 0; - let mut n_low_prio = 0; - let mut l_pages_locked = 0; - let mut i_idle = 0; - let mut unknown = 0; for v in &mut self.procs { if !v.is_empty() { if let Some(first) = v.first() { - if let Ok(status) = first.process.status() { - if status.state.contains('T') { - while !v.is_empty() { - v.pop(); + if let Some(p) = self.sysinfo.process(first.process.pid) { + match p.status() { + ProcessStatus::Idle => {} + ProcessStatus::Dead => {} + ProcessStatus::Stop => { + while !v.is_empty() { + v.pop(); + } } - t_stopped += 1; - } else if status.state.contains('D') { - d_unint_sleep += 1; - } else if status.state.contains('R') { - r_running += 1; - } else if status.state.contains('S') { - s_int_sleep += 1; - } else if status.state.contains('Z') { - z_defunct_zombie += 1; - } else if status.state.contains('W') { - w_no_resident_high_prio += 1; - } else if status.state.contains('N') { - n_low_prio += 1; - } else if status.state.contains('L') { - l_pages_locked += 1; - } else if status.state.contains('I') { - i_idle += 1; - } else { - unknown += 1; - debug!("unkown state: {} name: {}", status.state, status.name); + ProcessStatus::Run => {} + ProcessStatus::LockBlocked => {} + 
ProcessStatus::Waking => {} + ProcessStatus::Wakekill => {} + ProcessStatus::Tracing => {} + ProcessStatus::Zombie => {} + ProcessStatus::Sleep => {} + ProcessStatus::Parked => {} + ProcessStatus::UninterruptibleDiskSleep => {} + ProcessStatus::Unknown(_code) => {} } } else { while !v.is_empty() { @@ -981,19 +769,6 @@ impl ProcessTracker { } } } - debug!( - "d:{} r:{} s:{} t:{} z:{} w:{} n:{} l:{} i:{} u:{}", - d_unint_sleep, - r_running, - s_int_sleep, - t_stopped, - z_defunct_zombie, - w_no_resident_high_prio, - n_low_prio, - l_pages_locked, - i_idle, - unknown - ); self.drop_empty_process_records_vectors(); } @@ -1029,35 +804,6 @@ impl ProcessRecord { timestamp: current_system_time_since_epoch(), } } - - // Returns the total CPU time consumed by this process since its creation - pub fn total_time_jiffies(&self) -> u64 { - #[cfg(target_os = "linux")] - if let Some(stat) = &self.process.stat { - trace!( - "ProcessRecord: stime {} utime {}", //cutime {} cstime {} guest_time {} cguest_time {} delayacct_blkio_ticks {} itrealvalue {}", - stat.stime, - stat.utime //, cutime, cstime, guest_time, cguest_time, delayacct_blkio_ticks, itrealvalue - ); - return stat.stime + stat.utime; - } else { - warn!("No IStat !"); - } - - //#[cfg(target_os="windows")] - //let usage = &self.sysinfo. 
- //let cutime = self.process.stat.cutime as u64; - //let cstime = self.process.stat.cstime as u64; - //let guest_time = self.process.stat.guest_time.unwrap_or_default(); - //let cguest_time = self.process.stat.cguest_time.unwrap_or_default() as u64; - //let delayacct_blkio_ticks = self.process.stat.delayacct_blkio_ticks.unwrap_or_default(); - //let itrealvalue = self.process.stat.itrealvalue as u64; - - // not including cstime and cutime in total as they are reported only when child dies - // child metrics as already reported as the child processes are in the global process - // list, found as /proc/PID/stat - 0 //+ guest_time + cguest_time + delayacct_blkio_ticks + itrealvalue - } } /// Returns a Duration instance with the current timestamp @@ -1067,46 +813,61 @@ pub fn current_system_time_since_epoch() -> Duration { .unwrap() } -#[cfg(all(test, target_os = "linux"))] mod tests { - use super::*; + + #[test] + fn process_cmdline() { + use super::*; + use crate::sensors::Topology; + // find the cmdline of current proc thanks to sysinfo + // do the same with processtracker + // assert + let mut system = System::new(); + system.refresh_all(); + let self_pid_by_sysinfo = get_current_pid(); + let self_process_by_sysinfo = system.process(self_pid_by_sysinfo.unwrap()).unwrap(); + + let mut topo = Topology::new(HashMap::new()); + topo.refresh(); + let self_process_by_scaph = IProcess::myself(&topo.proc_tracker).unwrap(); + + assert_eq!( + self_process_by_sysinfo.cmd().concat(), + topo.proc_tracker + .get_process_cmdline(self_process_by_scaph.pid) + .unwrap() + ); + } + + #[cfg(all(test, target_os = "linux"))] #[test] fn process_records_added() { - let proc = Process::myself().unwrap(); + use super::*; + use crate::sensors::Topology; + let mut topo = Topology::new(HashMap::new()); + topo.refresh(); + let proc = IProcess::myself(&topo.proc_tracker).unwrap(); let mut tracker = ProcessTracker::new(3); for _ in 0..3 { - assert_eq!( - tracker - 
.add_process_record(IProcess::from_linux_process(&proc)) - .is_ok(), - true - ); + assert_eq!(tracker.add_process_record(proc.clone()).is_ok(), true); } assert_eq!(tracker.procs.len(), 1); assert_eq!(tracker.procs[0].len(), 3); } + #[cfg(all(test, target_os = "linux"))] #[test] fn process_records_cleaned() { - let proc = Process::myself().unwrap(); + use super::*; let mut tracker = ProcessTracker::new(3); + let proc = IProcess::myself(&tracker).unwrap(); for _ in 0..5 { - assert_eq!( - tracker - .add_process_record(IProcess::from_linux_process(&proc)) - .is_ok(), - true - ); + assert_eq!(tracker.add_process_record(proc.clone()).is_ok(), true); } assert_eq!(tracker.procs.len(), 1); assert_eq!(tracker.procs[0].len(), 3); for _ in 0..15 { - assert_eq!( - tracker - .add_process_record(IProcess::from_linux_process(&proc)) - .is_ok(), - true - ); + assert_eq!(tracker.add_process_record(proc.clone()).is_ok(), true); } assert_eq!(tracker.procs.len(), 1); assert_eq!(tracker.procs[0].len(), 3); diff --git a/tests/integration.rs b/tests/integration.rs index a3dd4c2e..81a93c6e 100644 --- a/tests/integration.rs +++ b/tests/integration.rs @@ -1,12 +1,13 @@ -use scaphandre::exporters::qemu::QemuExporter; -use scaphandre::sensors::powercap_rapl::PowercapRAPLSensor; -use std::env::current_dir; -use std::fs::{create_dir, read_dir}; - +#[cfg(all(feature = "qemu", target_os = "linux"))] #[test] fn exporter_qemu() { + use scaphandre::exporters::qemu::QemuExporter; + use scaphandre::sensors::powercap_rapl::PowercapRAPLSensor; + use std::env::current_dir; + use std::fs::{create_dir, read_dir}; + let sensor = PowercapRAPLSensor::new(1, 1, false); - let mut exporter = QemuExporter::new(Box::new(sensor)); + let mut exporter = QemuExporter::new(&sensor); // Create integration_tests directory if it does not exist let curdir = current_dir().unwrap(); let path = curdir.join("integration_tests"); @@ -15,7 +16,7 @@ fn exporter_qemu() { } // Convert to std::string::String let path = 
path.into_os_string().to_str().unwrap().to_string(); - exporter.iteration(path.clone()); + exporter.iterate(path.clone()); let content = read_dir(path); assert_eq!(content.is_ok(), true); }