diff --git a/.github/actions/configure-git-auth/README.md b/.github/actions/configure-git-auth/README.md new file mode 100644 index 0000000..4b6faeb --- /dev/null +++ b/.github/actions/configure-git-auth/README.md @@ -0,0 +1,80 @@ +# Configure Git Auth for Private Packages + +This composite action configures git to use token authentication for private GitHub packages. + +## Usage + +Add this step before installing dependencies that include private GitHub packages: + +```yaml +- name: Configure git auth for private packages + uses: ./.github/actions/configure-git-auth + with: + token: ${{ secrets.GH_PAT }} +``` + +The `GH_PAT` secret should be a Personal Access Token with `repo` scope. + +## What It Does + +This action runs: + +```bash +git config --global url."https://${GH_TOKEN}@github.com/".insteadOf "https://github.com/" +``` + +This tells git to automatically inject the token into all HTTPS GitHub URLs, enabling access to private repositories. + +## When to Use + +Use this action when your project has dependencies defined in `pyproject.toml` like: + +```toml +[tool.uv.sources] +private-package = { git = "https://github.com/your-org/private-package.git", rev = "v1.0.0" } +``` + +## Token Requirements + +By default, this action will use the workflow’s built-in `GITHUB_TOKEN` (`github.token`) if no `token` input is provided or if the provided value is empty (it uses `inputs.token || github.token` internally). + +The `GITHUB_TOKEN` is usually sufficient when: + +- installing dependencies hosted in the **same repository** as the workflow, or +- accessing **public** repositories. + +The default `GITHUB_TOKEN` typically does **not** have permission to read other private repositories, even within the same organization. For that scenario, you should create a Personal Access Token (PAT) with `repo` scope and store it as `secrets.GH_PAT`, then pass it to the action via the `token` input. 
+ +If you configure the step as in the example (`token: ${{ secrets.GH_PAT }}`) and `secrets.GH_PAT` is not defined, GitHub Actions passes an empty string to the action. The composite action then falls back to `github.token`, so the configuration step itself still succeeds. However, any subsequent step that tries to access private repositories that are not covered by the permissions of `GITHUB_TOKEN` will fail with an authentication error. +## Example Workflow + +```yaml +name: CI + +on: [push, pull_request] + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + + - name: Install uv + uses: astral-sh/setup-uv@v7 + + - name: Configure git auth for private packages + uses: ./.github/actions/configure-git-auth + with: + token: ${{ secrets.GH_PAT }} + + - name: Install dependencies + run: uv sync --frozen + + - name: Run tests + run: uv run pytest +``` + +## See Also + +- [PRIVATE_PACKAGES.md](../../../.rhiza/docs/PRIVATE_PACKAGES.md) - Complete guide to using private packages +- [TOKEN_SETUP.md](../../../.rhiza/docs/TOKEN_SETUP.md) - Setting up Personal Access Tokens diff --git a/.github/actions/configure-git-auth/action.yml b/.github/actions/configure-git-auth/action.yml new file mode 100644 index 0000000..d4d898f --- /dev/null +++ b/.github/actions/configure-git-auth/action.yml @@ -0,0 +1,21 @@ +name: 'Configure Git Auth for Private Packages' +description: 'Configure git to use token authentication for private GitHub packages' + +inputs: + token: + description: 'GitHub token to use for authentication' + required: false + +runs: + using: composite + steps: + - name: Configure git authentication + shell: bash + env: + GH_TOKEN: ${{ inputs.token || github.token }} + run: | + # Configure git to use token authentication for GitHub URLs + # This allows uv/pip to install private packages from GitHub + git config --global url."https://${GH_TOKEN}@github.com/".insteadOf "https://github.com/" + + echo "✓ Git configured to use token authentication 
for GitHub" diff --git a/.github/workflows/rhiza_benchmarks.yml b/.github/workflows/rhiza_benchmarks.yml index ea92073..a9e7d6a 100644 --- a/.github/workflows/rhiza_benchmarks.yml +++ b/.github/workflows/rhiza_benchmarks.yml @@ -37,10 +37,14 @@ jobs: lfs: true - name: Install uv - uses: astral-sh/setup-uv@v7.2.1 + uses: astral-sh/setup-uv@v7.3.0 with: - version: "0.9.28" - python-version: "3.12" + version: "0.10.0" + + - name: Configure git auth for private packages + uses: ./.github/actions/configure-git-auth + with: + token: ${{ secrets.GH_PAT }} - name: Run benchmarks env: diff --git a/.github/workflows/rhiza_book.yml b/.github/workflows/rhiza_book.yml index 4e41e48..fbf7b8a 100644 --- a/.github/workflows/rhiza_book.yml +++ b/.github/workflows/rhiza_book.yml @@ -41,9 +41,14 @@ jobs: lfs: true - name: Install uv - uses: astral-sh/setup-uv@v7.2.1 + uses: astral-sh/setup-uv@v7.3.0 with: - version: "0.9.28" + version: "0.10.0" + + - name: Configure git auth for private packages + uses: ./.github/actions/configure-git-auth + with: + token: ${{ secrets.GH_PAT }} - name: "Sync the virtual environment for ${{ github.repository }}" shell: bash diff --git a/.github/workflows/rhiza_ci.yml b/.github/workflows/rhiza_ci.yml index dd019eb..8abf37f 100644 --- a/.github/workflows/rhiza_ci.yml +++ b/.github/workflows/rhiza_ci.yml @@ -29,10 +29,15 @@ jobs: lfs: true - name: Install uv - uses: astral-sh/setup-uv@v7.2.1 + uses: astral-sh/setup-uv@v7.3.0 with: - version: "0.9.28" + version: "0.10.0" + - name: Configure git auth for private packages + uses: ./.github/actions/configure-git-auth + with: + token: ${{ secrets.GH_PAT }} + - id: versions env: UV_EXTRA_INDEX_URL: ${{ secrets.UV_EXTRA_INDEX_URL }} @@ -60,11 +65,16 @@ jobs: lfs: true - name: Install uv - uses: astral-sh/setup-uv@v7.2.1 + uses: astral-sh/setup-uv@v7.3.0 with: - version: "0.9.28" + version: "0.10.0" python-version: ${{ matrix.python-version }} + - name: Configure git auth for private packages + uses: 
./.github/actions/configure-git-auth + with: + token: ${{ secrets.GH_PAT }} + - name: Run tests env: UV_EXTRA_INDEX_URL: ${{ secrets.UV_EXTRA_INDEX_URL }} @@ -79,9 +89,14 @@ jobs: uses: actions/checkout@v6.0.2 - name: Install uv - uses: astral-sh/setup-uv@v7.2.1 + uses: astral-sh/setup-uv@v7.3.0 + with: + version: "0.10.0" + + - name: Configure git auth for private packages + uses: ./.github/actions/configure-git-auth with: - version: "0.9.28" + token: ${{ secrets.GH_PAT }} - name: Check docs coverage env: diff --git a/.github/workflows/rhiza_codeql.yml b/.github/workflows/rhiza_codeql.yml index f073171..86cbe12 100644 --- a/.github/workflows/rhiza_codeql.yml +++ b/.github/workflows/rhiza_codeql.yml @@ -83,6 +83,10 @@ jobs: - name: Checkout repository uses: actions/checkout@v6.0.2 + - name: Configure git auth for private packages + uses: ./.github/actions/configure-git-auth + with: + token: ${{ secrets.GH_PAT }} # Add any setup steps before running the `github/codeql-action/init` action. # This includes steps like installing compilers or runtimes (`actions/setup-node` # or others). This is typically only required for manual builds. @@ -91,7 +95,7 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@v4.32.1 + uses: github/codeql-action/init@v4.32.2 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} @@ -120,6 +124,6 @@ jobs: exit 1 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v4.32.1 + uses: github/codeql-action/analyze@v4.32.2 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/rhiza_deptry.yml b/.github/workflows/rhiza_deptry.yml index f4e6f0c..30220da 100644 --- a/.github/workflows/rhiza_deptry.yml +++ b/.github/workflows/rhiza_deptry.yml @@ -27,11 +27,16 @@ jobs: name: Check dependencies with deptry runs-on: ubuntu-latest container: - image: ghcr.io/astral-sh/uv:0.9.28-bookworm + image: ghcr.io/astral-sh/uv:0.9.30-bookworm steps: - uses: actions/checkout@v6.0.2 + - name: Configure git auth for private packages + uses: ./.github/actions/configure-git-auth + with: + token: ${{ secrets.GH_PAT }} + - name: Run deptry run: make deptry # NOTE: make deptry is good style because it encapsulates the folders to check diff --git a/.github/workflows/rhiza_marimo.yml b/.github/workflows/rhiza_marimo.yml index 0b09c23..94657b5 100644 --- a/.github/workflows/rhiza_marimo.yml +++ b/.github/workflows/rhiza_marimo.yml @@ -81,9 +81,14 @@ jobs: # Install uv/uvx - name: Install uv - uses: astral-sh/setup-uv@v7.2.1 + uses: astral-sh/setup-uv@v7.3.0 with: - version: "0.9.28" + version: "0.10.0" + + - name: Configure git auth for private packages + uses: ./.github/actions/configure-git-auth + with: + token: ${{ secrets.GH_PAT }} # Execute the notebook with the appropriate runner based on its content - name: Run notebook diff --git a/.github/workflows/rhiza_mypy.yml b/.github/workflows/rhiza_mypy.yml index cf8ea24..bf56810 100644 --- a/.github/workflows/rhiza_mypy.yml +++ b/.github/workflows/rhiza_mypy.yml @@ -24,11 +24,16 @@ jobs: name: Static type checking with mypy runs-on: ubuntu-latest container: - image: ghcr.io/astral-sh/uv:0.9.28-bookworm 
+ image: ghcr.io/astral-sh/uv:0.9.30-bookworm steps: - uses: actions/checkout@v6 + - name: Configure git auth for private packages + uses: ./.github/actions/configure-git-auth + with: + token: ${{ secrets.GH_PAT }} + # to brutal for now # - name: Run mypy # run: make -f .rhiza/rhiza.mk mypy diff --git a/.github/workflows/rhiza_pre-commit.yml b/.github/workflows/rhiza_pre-commit.yml index 2ee1877..9e81937 100644 --- a/.github/workflows/rhiza_pre-commit.yml +++ b/.github/workflows/rhiza_pre-commit.yml @@ -31,6 +31,11 @@ jobs: steps: - uses: actions/checkout@v6.0.2 + - name: Configure git auth for private packages + uses: ./.github/actions/configure-git-auth + with: + token: ${{ secrets.GH_PAT }} + # Run pre-commit - name: Run pre-commit run: | diff --git a/.github/workflows/rhiza_release.yml b/.github/workflows/rhiza_release.yml index aa10bca..b73d579 100644 --- a/.github/workflows/rhiza_release.yml +++ b/.github/workflows/rhiza_release.yml @@ -111,9 +111,14 @@ jobs: fetch-depth: 0 - name: Install uv - uses: astral-sh/setup-uv@v7.2.1 + uses: astral-sh/setup-uv@v7.3.0 with: - version: "0.9.28" + version: "0.10.0" + + - name: Configure git auth for private packages + uses: ./.github/actions/configure-git-auth + with: + token: ${{ secrets.GH_PAT }} - name: Verify version matches tag if: hashFiles('pyproject.toml') != '' @@ -320,9 +325,9 @@ jobs: fetch-depth: 0 - name: Install uv - uses: astral-sh/setup-uv@v7.2.1 + uses: astral-sh/setup-uv@v7.3.0 with: - version: "0.9.28" + version: "0.10.0" - name: "Sync the virtual environment for ${{ github.repository }}" shell: bash diff --git a/.github/workflows/rhiza_security.yml b/.github/workflows/rhiza_security.yml index 26b56e8..7ded477 100644 --- a/.github/workflows/rhiza_security.yml +++ b/.github/workflows/rhiza_security.yml @@ -27,11 +27,16 @@ jobs: name: Security scanning runs-on: ubuntu-latest container: - image: ghcr.io/astral-sh/uv:0.9.28-bookworm + image: ghcr.io/astral-sh/uv:0.9.30-bookworm steps: - uses: 
actions/checkout@v6.0.2 + - name: Configure git auth for private packages + uses: ./.github/actions/configure-git-auth + with: + token: ${{ secrets.GH_PAT }} + - name: Run security scans env: UV_EXTRA_INDEX_URL: ${{ secrets.UV_EXTRA_INDEX_URL }} diff --git a/.github/workflows/rhiza_sync.yml b/.github/workflows/rhiza_sync.yml index cf1f664..37f3712 100644 --- a/.github/workflows/rhiza_sync.yml +++ b/.github/workflows/rhiza_sync.yml @@ -50,7 +50,7 @@ jobs: fi - name: Install uv - uses: astral-sh/setup-uv@v7.2.1 + uses: astral-sh/setup-uv@v7.3.0 - name: Get Rhiza version id: rhiza-version diff --git a/.github/workflows/rhiza_validate.yml b/.github/workflows/rhiza_validate.yml index c494357..42f0a54 100644 --- a/.github/workflows/rhiza_validate.yml +++ b/.github/workflows/rhiza_validate.yml @@ -12,16 +12,26 @@ on: jobs: validation: runs-on: ubuntu-latest - # don't run this in rhiza itself. Rhiza has no template.yml file. - if: ${{ github.repository != 'jebel-quant/rhiza' }} container: - image: ghcr.io/astral-sh/uv:0.9.28-bookworm + image: ghcr.io/astral-sh/uv:0.9.30-bookworm steps: - name: Checkout repository uses: actions/checkout@v6.0.2 + - name: Configure git auth for private packages + uses: ./.github/actions/configure-git-auth + with: + token: ${{ secrets.GH_PAT }} + - name: Validate Rhiza config + # don't run this in rhiza itself. Rhiza has no template.yml file. + if: ${{ github.repository != 'jebel-quant/rhiza' }} shell: bash run: | uvx "rhiza>=0.8.0" validate . 
+ + - name: Run Rhiza Tests + shell: bash + run: | + make rhiza-test diff --git a/.gitignore b/.gitignore index c0c30e2..b2a1fd6 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,7 @@ _tests _book _pdoc _marimushka +_mkdocs _benchmarks _jupyter diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1d9a8fd..5eaecf2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -9,7 +9,7 @@ repos: - id: check-yaml - repo: https://github.com/astral-sh/ruff-pre-commit - rev: 'v0.14.14' + rev: 'v0.15.0' hooks: - id: ruff args: [ --fix, --exit-non-zero-on-fix, --unsafe-fixes ] @@ -46,10 +46,15 @@ repos: rev: 1.9.3 hooks: - id: bandit - args: ["--skip", "B101", "--exclude", ".venv,tests,.git,.pytest_cache", "-c", "pyproject.toml"] + args: ["--skip", "B101", "--exclude", ".venv,tests,.rhiza/tests,.git,.pytest_cache", "-c", "pyproject.toml"] + + - repo: https://github.com/astral-sh/uv-pre-commit + rev: 0.10.0 + hooks: + - id: uv-lock - repo: https://github.com/Jebel-Quant/rhiza-hooks - rev: v0.1.3 # Use the latest release + rev: v0.1.6 # Use the latest release hooks: # Migrated from rhiza - id: check-rhiza-workflow-names @@ -57,4 +62,5 @@ repos: # Additional utility hooks - id: check-rhiza-config - id: check-makefile-targets - - id: check-python-version-consistency \ No newline at end of file + - id: check-python-version-consistency + - id: check-template-bundles diff --git a/.rhiza/.cfg.toml b/.rhiza/.cfg.toml index 65660d1..7c6e393 100644 --- a/.rhiza/.cfg.toml +++ b/.rhiza/.cfg.toml @@ -14,6 +14,7 @@ allow_dirty = false commit = true message = "Chore: bump version {current_version} → {new_version}" commit_args = "" +pre_commit_hooks = ["uv sync", "git add uv.lock"] # Ensure uv.lock is updated [tool.bumpversion.parts.release] optional_value = "prod" @@ -30,7 +31,7 @@ filename = "pyproject.toml" search = 'version = "{current_version}"' replace = 'version = "{new_version}"' -[[tool.bumpversion.files]] -filename = "uv.lock" -search = 'version = 
"{current_version}"' -replace = 'version = "{new_version}"' \ No newline at end of file +# [[tool.bumpversion.files]] +# filename = ".rhiza/template-bundles.yml" +# search = 'version: "{current_version}"' +# replace = 'version: "{new_version}"' diff --git a/.rhiza/.env b/.rhiza/.env index 1b97df1..2c7edd5 100644 --- a/.rhiza/.env +++ b/.rhiza/.env @@ -6,4 +6,4 @@ SCRIPTS_FOLDER=.rhiza/scripts BOOK_TITLE=Project Documentation BOOK_SUBTITLE=Generated by minibook BOOK_TEMPLATE=.rhiza/templates/minibook/custom.html.jinja2 -PDOC_TEMPLATE_DIR=.rhiza/templates/pdocs +# PDOC_TEMPLATE_DIR is now defined in .rhiza/make.d/08-docs.mk with a default value diff --git a/.rhiza/.rhiza-version b/.rhiza/.rhiza-version index 899f24f..42624f3 100644 --- a/.rhiza/.rhiza-version +++ b/.rhiza/.rhiza-version @@ -1 +1 @@ -0.9.0 \ No newline at end of file +0.10.2 \ No newline at end of file diff --git a/.rhiza/docs/CONFIG.md b/.rhiza/docs/CONFIG.md index c3943b4..6e65a9e 100644 --- a/.rhiza/docs/CONFIG.md +++ b/.rhiza/docs/CONFIG.md @@ -5,6 +5,7 @@ This directory contains platform-agnostic scripts and utilities for the reposito ## Important Documentation - **[TOKEN_SETUP.md](TOKEN_SETUP.md)** - Instructions for setting up the `PAT_TOKEN` secret required for the SYNC workflow +- **[PRIVATE_PACKAGES.md](PRIVATE_PACKAGES.md)** - Guide for using private GitHub packages as dependencies ## Structure diff --git a/.rhiza/docs/PRIVATE_PACKAGES.md b/.rhiza/docs/PRIVATE_PACKAGES.md new file mode 100644 index 0000000..f7a98da --- /dev/null +++ b/.rhiza/docs/PRIVATE_PACKAGES.md @@ -0,0 +1,233 @@ +# Using Private GitHub Packages + +This document explains how to configure your project to use private GitHub packages from the same organization as dependencies. + +## Quick Start + +If you're using Rhiza's template workflows, git authentication for private packages is **already configured**! 
All Rhiza workflows automatically include the necessary git configuration to access private repositories in the same organization. + +Simply add your private package to `pyproject.toml`: + +```toml +[tool.uv.sources] +my-package = { git = "https://github.com/jebel-quant/my-package.git", rev = "v1.0.0" } +``` + +The workflows will handle authentication automatically using `GITHUB_TOKEN`. + +## Detailed Guide + +### Problem + +When your project depends on private GitHub repositories, you need to authenticate to access them. SSH keys work locally but are complex to set up in CI/CD environments. HTTPS with tokens is simpler and more secure for automated workflows. + +## Solution + +Use HTTPS URLs with token authentication instead of SSH for git dependencies. + +### 1. Configure Dependencies in pyproject.toml + +Instead of using SSH URLs like `git@github.com:org/repo.git`, use HTTPS URLs: + +```toml +[tool.uv.sources] +my-package = { git = "https://github.com/jebel-quant/my-package.git", rev = "v1.0.0" } +another-package = { git = "https://github.com/jebel-quant/another-package.git", tag = "v2.0.0" } +``` + +**Key points:** +- Use `https://github.com/` instead of `git@github.com:` +- Specify version using `rev`, `tag`, or `branch` parameter +- No token is included in the URL itself (git config handles authentication) + +### 2. Git Authentication in CI (Already Configured!) + +**If you're using Rhiza's template workflows, this is already set up for you.** All Rhiza workflows (CI, book, release, etc.) automatically include git authentication steps. 
+ +You can verify this by checking any Rhiza workflow file (e.g., `.github/workflows/rhiza_ci.yml`): + +```yaml +- name: Configure git auth for private packages + uses: ./.github/actions/configure-git-auth +``` + +Or for container-based workflows: + +```yaml +- name: Configure git auth for private packages + run: | + git config --global url."https://${{ github.token }}@github.com/".insteadOf "https://github.com/" +``` + +**For custom workflows** (not synced from Rhiza), add the git authentication step yourself: + +```yaml +- name: Configure git auth for private packages + run: | + git config --global url."https://${{ github.token }}@github.com/".insteadOf "https://github.com/" +``` + +This configuration tells git to automatically inject the `GITHUB_TOKEN` into all HTTPS GitHub URLs. + +### 3. Using the Composite Action (Custom Workflows) + +For custom workflows, you can use Rhiza's composite action instead of inline commands: + +```yaml +- name: Configure git auth for private packages + uses: ./.github/actions/configure-git-auth +``` + +This is cleaner and more maintainable than inline git config commands. + +### 4. 
Complete Workflow Example + +Here's a complete example of a GitHub Actions workflow that uses private packages: + +```yaml +name: CI with Private Packages + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +jobs: + test: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v6 + + - name: Install uv + uses: astral-sh/setup-uv@v7 + with: + version: "0.9.28" + + - name: Configure git auth for private packages + run: | + git config --global url."https://${{ github.token }}@github.com/".insteadOf "https://github.com/" + + - name: Install dependencies + run: | + uv sync --frozen + + - name: Run tests + run: | + uv run pytest +``` + +## Token Scopes + +### Same Repository + +The default `GITHUB_TOKEN` automatically has access to the **same repository** where the workflow runs: +- ✅ Is automatically provided by GitHub Actions +- ✅ Is scoped to the workflow run (secure) +- ✅ No manual token management required + +This is sufficient if your private packages are defined within the same repository. + +### Same Organization (Requires PAT) + +**Important:** The default `GITHUB_TOKEN` typically does **not** have permission to read other private repositories, even within the same organization. This is GitHub's default security behavior. + +To access private packages in other repositories within your organization, you need a Personal Access Token (PAT): + +1. Create a PAT with `repo` scope (see [TOKEN_SETUP.md](TOKEN_SETUP.md) for instructions) +2. Add it as a repository secret (e.g., `PRIVATE_PACKAGES_TOKEN`) +3. Use it in the git config + +**Note:** Some organizations configure settings to allow `GITHUB_TOKEN` cross-repository access, but this is not the default and should not be assumed. Using a PAT is the recommended approach for reliability. + +### Different Organization + +If your private packages are in a **different organization**, you need a Personal Access Token (PAT): + +1. 
Create a PAT with `repo` scope (see [TOKEN_SETUP.md](TOKEN_SETUP.md) for instructions) +2. Add it as a repository secret (e.g., `PRIVATE_PACKAGES_TOKEN`) +3. Use it in the git config: + +```yaml +- name: Configure git auth for private packages + run: | + git config --global url."https://${{ secrets.PRIVATE_PACKAGES_TOKEN }}@github.com/".insteadOf "https://github.com/" +``` + +## Local Development + +For local development, you have several options: + +### Option 1: Use GitHub CLI (Recommended) + +```bash +# Install gh CLI +brew install gh # macOS +# or: apt install gh # Ubuntu/Debian + +# Authenticate +gh auth login + +# Configure git +gh auth setup-git +``` + +The GitHub CLI automatically handles git authentication for private repositories. + +### Option 2: Use Personal Access Token + +```bash +# Create a PAT with 'repo' scope at: +# https://github.com/settings/tokens + +# Configure git +git config --global url."https://YOUR_TOKEN@github.com/".insteadOf "https://github.com/" +``` + +**Security Note:** Be careful not to commit this configuration. It's better to use `gh` CLI or SSH keys for local development. + +### Option 3: Use SSH (Local Only) + +For local development, you can continue using SSH: + +```toml +[tool.uv.sources] +my-package = { git = "ssh://git@github.com/jebel-quant/my-package.git", rev = "v1.0.0" } +``` + +However, this won't work in CI without additional SSH key setup. + +## Troubleshooting + +### Error: "fatal: could not read Username" + +This means git cannot find authentication credentials. Ensure: +1. The git config step runs **before** `uv sync` +2. The token has proper permissions +3. The repository URL uses HTTPS format + +### Error: "Repository not found" or "403 Forbidden" + +This means the token doesn't have access to the repository. Check: +1. The repository is in the same organization (for `GITHUB_TOKEN`) +2. Or use a PAT with `repo` scope (for different organizations) +3. 
The token hasn't expired + +### Error: "Couldn't resolve host 'github.com'" + +This is a network issue, not authentication. Check your network connection. + +## Best Practices + +1. **Use HTTPS URLs** in `pyproject.toml` for better CI/CD compatibility +2. **Rely on `GITHUB_TOKEN`** for same-org packages (automatic and secure) +3. **Pin versions** using `rev`, `tag`, or specific commit SHA for reproducibility +4. **Use `gh` CLI** for local development (easier than managing tokens) +5. **Keep tokens secure** - never commit them to the repository + +## Related Documentation + +- [TOKEN_SETUP.md](TOKEN_SETUP.md) - Setting up Personal Access Tokens +- [GitHub Actions: Automatic token authentication](https://docs.github.com/en/actions/security-guides/automatic-token-authentication) +- [uv: Git dependencies](https://docs.astral.sh/uv/concepts/dependencies/#git-dependencies) diff --git a/.rhiza/history b/.rhiza/history index 8b047b4..7a0f8f0 100644 --- a/.rhiza/history +++ b/.rhiza/history @@ -5,6 +5,8 @@ # # Files under template control: .editorconfig +.github/actions/configure-git-auth/README.md +.github/actions/configure-git-auth/action.yml .github/agents/analyser.md .github/agents/summarise.md .github/copilot-instructions.md @@ -28,17 +30,22 @@ .rhiza/.rhiza-version .rhiza/assets/rhiza-logo.svg .rhiza/docs/CONFIG.md +.rhiza/docs/PRIVATE_PACKAGES.md .rhiza/docs/TOKEN_SETUP.md -.rhiza/make.d/00-custom-env.mk -.rhiza/make.d/01-test.mk -.rhiza/make.d/02-book.mk -.rhiza/make.d/03-marimo.mk -.rhiza/make.d/04-presentation.mk -.rhiza/make.d/05-github.mk -.rhiza/make.d/06-agentic.mk -.rhiza/make.d/07-docker.mk -.rhiza/make.d/10-custom-task.mk .rhiza/make.d/README.md +.rhiza/make.d/agentic.mk +.rhiza/make.d/book.mk +.rhiza/make.d/bootstrap.mk +.rhiza/make.d/custom-env.mk +.rhiza/make.d/custom-task.mk +.rhiza/make.d/docker.mk +.rhiza/make.d/docs.mk +.rhiza/make.d/github.mk +.rhiza/make.d/marimo.mk +.rhiza/make.d/presentation.mk +.rhiza/make.d/quality.mk 
+.rhiza/make.d/releasing.mk +.rhiza/make.d/test.mk .rhiza/requirements/README.md .rhiza/requirements/docs.txt .rhiza/requirements/marimo.txt @@ -46,7 +53,29 @@ .rhiza/requirements/tools.txt .rhiza/rhiza.mk .rhiza/scripts/release.sh +.rhiza/template-bundles.yml .rhiza/templates/minibook/custom.html.jinja2 +.rhiza/tests/README.md +.rhiza/tests/api/conftest.py +.rhiza/tests/api/test_github_targets.py +.rhiza/tests/api/test_makefile_api.py +.rhiza/tests/api/test_makefile_targets.py +.rhiza/tests/conftest.py +.rhiza/tests/deps/test_dependency_health.py +.rhiza/tests/integration/test_book_targets.py +.rhiza/tests/integration/test_marimushka.py +.rhiza/tests/integration/test_notebook_execution.py +.rhiza/tests/integration/test_release.py +.rhiza/tests/structure/test_project_layout.py +.rhiza/tests/structure/test_requirements.py +.rhiza/tests/sync/conftest.py +.rhiza/tests/sync/test_docstrings.py +.rhiza/tests/sync/test_readme_validation.py +.rhiza/tests/sync/test_rhiza_version.py +.rhiza/tests/test_utils.py +.rhiza/tests/utils/conftest.py +.rhiza/tests/utils/test_git_repo_fixture.py +.rhiza/tests/utils/test_version_matrix.py .rhiza/utils/version_matrix.py CODE_OF_CONDUCT.md CONTRIBUTING.md @@ -55,23 +84,8 @@ book/marimo/notebooks/rhiza.py pytest.ini renovate.json ruff.toml -tests/test_rhiza/README.md -tests/test_rhiza/__init__.py +tests/conftest.py tests/test_rhiza/benchmarks/.gitignore tests/test_rhiza/benchmarks/README.md tests/test_rhiza/benchmarks/analyze_benchmarks.py -tests/test_rhiza/conftest.py -tests/test_rhiza/test_book.py -tests/test_rhiza/test_docstrings.py -tests/test_rhiza/test_git_repo_fixture.py -tests/test_rhiza/test_makefile.py -tests/test_rhiza/test_makefile_api.py -tests/test_rhiza/test_makefile_gh.py -tests/test_rhiza/test_marimushka_target.py -tests/test_rhiza/test_notebooks.py -tests/test_rhiza/test_readme.py -tests/test_rhiza/test_release_script.py -tests/test_rhiza/test_requirements_folder.py -tests/test_rhiza/test_rhiza_workflows.py 
-tests/test_rhiza/test_structure.py -tests/test_rhiza/test_version_matrix.py +tests/test_rhiza/test_bundles.py diff --git a/.rhiza/make.d/README.md b/.rhiza/make.d/README.md index 6f80875..a7adce1 100644 --- a/.rhiza/make.d/README.md +++ b/.rhiza/make.d/README.md @@ -1,6 +1,8 @@ # Makefile Cookbook -This directory (`.rhiza/make.d/`) is the designated place for **repository-specific build logic**. Any `.mk` file added here is automatically absorbed by the main Makefile. +This directory (`.rhiza/make.d/`) contains **template-managed build logic**. Files here are synced from the Rhiza template and should not be modified directly. + +**For project-specific customizations, use your root `Makefile`** (before the `include .rhiza/rhiza.mk` line). Use this cookbook to find copy-paste patterns for common development needs. @@ -9,37 +11,43 @@ Use this cookbook to find copy-paste patterns for common development needs. ### 1. Add a Simple Task **Goal**: Run a script with `make train-model`. -Create `.rhiza/make.d/50-model.mk`: +Add to your root `Makefile`: ```makefile ##@ Machine Learning train: ## Train the model using local data @echo "Training model..." @uv run python scripts/train.py + +# Include the Rhiza API (template-managed) +include .rhiza/rhiza.mk ``` ### 2. Inject Code into Standard Workflows (Hooks) **Goal**: Apply task after `make sync`. -Create `.rhiza/make.d/90-hooks.mk`: +Add to your root `Makefile`: ```makefile post-sync:: @echo "Applying something..." ``` -*Note: Use double-colons (`::`) for hooks to avoid conflicts.* +*Note: Use double-colons (`::`) for hooks to allow accumulation.* ### 3. Define Global Variables **Goal**: Set a default timeout for all test runs. -Create `.rhiza/make.d/01-config.mk`: +Add to your root `Makefile` (before the include line): ```makefile # Override default timeout (defaults to 60s) export TEST_TIMEOUT := 120 + +# Include the Rhiza API (template-managed) +include .rhiza/rhiza.mk ``` ### 4. 
Create a Private Shortcut **Goal**: Create a command that only exists on my machine (not committed). -Do not use `.rhiza/make.d/`. Instead, create a `local.mk` in the project root: +Create a `local.mk` in the project root: ```makefile deploy-dev: @./scripts/deploy-to-my-sandbox.sh @@ -48,7 +56,7 @@ deploy-dev: ### 5. Install System Dependencies **Goal**: Ensure `graphviz` is installed for Marimo notebooks using a hook. -Create `.rhiza/make.d/20-dependencies.mk`: +Add to your root `Makefile`: ```makefile pre-install:: @if ! command -v dot >/dev/null 2>&1; then \ @@ -68,13 +76,13 @@ pre-install:: ## ℹ️ Reference -### Execution Order -Files are loaded alphabetically. We use numeric prefixes to ensure dependencies resolve correctly: -- `00-19`: Configuration & Variables -- `20-79`: Custom Tasks & Rules -- `80-99`: Hooks & Lifecycle logic +### File Organization +- **`.rhiza/make.d/`**: Template-managed files (do not edit) +- **Root `Makefile`**: Project-specific customizations (variables, hooks, custom targets) +- **`local.mk`**: Developer-local shortcuts (not committed) ### Available Hooks +Add these to your root `Makefile` using double-colon syntax (`::`): - `pre-install` / `post-install`: Runs around `make install`. - `pre-sync` / `post-sync`: Runs around repository synchronization. - `pre-validate` / `post-validate`: Runs around validation checks. 
diff --git a/.rhiza/make.d/06-agentic.mk b/.rhiza/make.d/agentic.mk similarity index 84% rename from .rhiza/make.d/06-agentic.mk rename to .rhiza/make.d/agentic.mk index f3105d0..f8374b8 100644 --- a/.rhiza/make.d/06-agentic.mk +++ b/.rhiza/make.d/agentic.mk @@ -5,7 +5,7 @@ .PHONY: install-copilot install-claude analyse-repo summarise-changes COPILOT_BIN ?= $(shell command -v copilot 2>/dev/null || echo "$(INSTALL_DIR)/copilot") -CLAUDE_BIN ?= $(shell command -v claude 2>/dev/null || echo "$(INSTALL_DIR)/claude") +CLAUDE_BIN ?= $(shell command -v claude 2>/dev/null || echo "$(HOME)/.local/bin/claude") DEFAULT_AI_MODEL ?= gpt-4.1 ##@ Agentic Workflows @@ -52,16 +52,13 @@ install-copilot: ## checks for copilot and prompts to install install-claude: ## checks for claude and prompts to install @if command -v claude >/dev/null 2>&1; then \ printf "${GREEN}[INFO] claude already installed in PATH, skipping install.${RESET}\n"; \ - elif [ -x "${INSTALL_DIR}/claude" ]; then \ - printf "${SUCCESS}[INFO] claude already installed in ${INSTALL_DIR}, skipping install.${RESET}\n"; \ else \ - printf "${YELLOW}[WARN] Claude Code CLI not found in ${INSTALL_DIR}.${RESET}\n"; \ + printf "${YELLOW}[WARN] Claude Code CLI not found in PATH.${RESET}\n"; \ printf "${BLUE}Do you want to install Claude Code CLI? 
[y/N] ${RESET}"; \ read -r response; \ if [ "$$response" = "y" ] || [ "$$response" = "Y" ]; then \ - printf "${BLUE}[INFO] Installing Claude Code CLI to ${INSTALL_DIR}...${RESET}\n"; \ - mkdir -p "${INSTALL_DIR}"; \ - if curl -fsSL https://claude.ai/install.sh | CLAUDE_INSTALL_DIR="${INSTALL_DIR}" bash; then \ + printf "${BLUE}[INFO] Installing Claude Code CLI to default location (~/.local/bin/claude)...${RESET}\n"; \ + if curl -fsSL https://claude.ai/install.sh | bash; then \ printf "${GREEN}[INFO] Claude Code CLI installed successfully.${RESET}\n"; \ else \ printf "${RED}[ERROR] Failed to install Claude Code CLI.${RESET}\n"; \ diff --git a/.rhiza/make.d/02-book.mk b/.rhiza/make.d/book.mk similarity index 60% rename from .rhiza/make.d/02-book.mk rename to .rhiza/make.d/book.mk index 270eafb..e2692e3 100644 --- a/.rhiza/make.d/02-book.mk +++ b/.rhiza/make.d/book.mk @@ -1,11 +1,10 @@ -## book.mk - Documentation and book-building targets +## book.mk - Book-building targets # This file is included by the main Makefile. -# It provides targets for generating API documentation (pdoc), -# exporting Marimo notebooks to HTML (marimushka), and compiling -# a companion book (minibook). +# It provides targets for exporting Marimo notebooks to HTML (marimushka) +# and compiling a companion book (minibook). # Declare phony targets (they don't produce files) -.PHONY: docs marimushka book +.PHONY: marimushka mkdocs-build book # Define a default no-op marimushka target that will be used # when book/marimo/marimo.mk doesn't exist or doesn't define marimushka @@ -18,9 +17,19 @@ marimushka:: install-uv > "${MARIMUSHKA_OUTPUT}/index.html"; \ fi +# Define a default no-op mkdocs-build target that will be used +# when .rhiza/make.d/08-docs.mk doesn't exist or doesn't define mkdocs-build +mkdocs-build:: install-uv + @if [ ! 
-f "docs/mkdocs.yml" ]; then \ + printf "${BLUE}[INFO] No mkdocs.yml found, skipping MkDocs${RESET}\n"; \ + fi + # Default output directory for Marimushka (HTML exports of notebooks) MARIMUSHKA_OUTPUT ?= _marimushka +# Default output directory for MkDocs +MKDOCS_OUTPUT ?= _mkdocs + # ---------------------------- # Book sections (declarative) # ---------------------------- @@ -31,64 +40,16 @@ BOOK_SECTIONS := \ "API|_pdoc/index.html|pdoc/index.html|_pdoc|pdoc" \ "Coverage|_tests/html-coverage/index.html|tests/html-coverage/index.html|_tests/html-coverage|tests/html-coverage" \ "Test Report|_tests/html-report/report.html|tests/html-report/report.html|_tests/html-report|tests/html-report" \ - "Notebooks|_marimushka/index.html|marimushka/index.html|_marimushka|marimushka" - + "Notebooks|_marimushka/index.html|marimushka/index.html|_marimushka|marimushka" \ + "Official Documentation|_mkdocs/index.html|docs/index.html|_mkdocs|docs" -##@ Documentation - -# The 'docs' target generates API documentation using pdoc. -# 1. Identifies Python packages within the source folder. -# 2. Detects the docformat (google, numpy, or sphinx) from ruff.toml or defaults to google. -# 3. Installs pdoc and generates HTML documentation in _pdoc. 
-docs:: install ## create documentation with pdoc - # Clean up previous docs - rm -rf _pdoc; - - @if [ -d "${SOURCE_FOLDER}" ]; then \ - PKGS=""; for d in "${SOURCE_FOLDER}"/*; do [ -d "$$d" ] && PKGS="$$PKGS $$(basename "$$d")"; done; \ - if [ -z "$$PKGS" ]; then \ - printf "${YELLOW}[WARN] No packages found under ${SOURCE_FOLDER}, skipping docs${RESET}\n"; \ - else \ - TEMPLATE_ARG=""; \ - if [ -d "$(PDOC_TEMPLATE_DIR)" ]; then \ - TEMPLATE_ARG="-t $(PDOC_TEMPLATE_DIR)"; \ - printf "$(BLUE)[INFO] Using pdoc templates from $(PDOC_TEMPLATE_DIR)$(RESET)\n"; \ - fi; \ - DOCFORMAT="$(DOCFORMAT)"; \ - if [ -z "$$DOCFORMAT" ]; then \ - if [ -f "ruff.toml" ]; then \ - DOCFORMAT=$$(${UV_BIN} run python -c "import tomllib; print(tomllib.load(open('ruff.toml', 'rb')).get('lint', {}).get('pydocstyle', {}).get('convention', ''))"); \ - fi; \ - if [ -z "$$DOCFORMAT" ]; then \ - DOCFORMAT="google"; \ - fi; \ - printf "${BLUE}[INFO] Detected docformat: $$DOCFORMAT${RESET}\n"; \ - else \ - printf "${BLUE}[INFO] Using provided docformat: $$DOCFORMAT${RESET}\n"; \ - fi; \ - LOGO_ARG=""; \ - if [ -n "$(LOGO_FILE)" ]; then \ - if [ -f "$(LOGO_FILE)" ]; then \ - MIME=$$(file --mime-type -b "$(LOGO_FILE)"); \ - DATA=$$(base64 < "$(LOGO_FILE)" | tr -d '\n'); \ - LOGO_ARG="--logo data:$$MIME;base64,$$DATA"; \ - printf "${BLUE}[INFO] Embedding logo: $(LOGO_FILE)${RESET}\n"; \ - else \ - printf "${YELLOW}[WARN] Logo file $(LOGO_FILE) not found, skipping${RESET}\n"; \ - fi; \ - fi; \ - ${UV_BIN} pip install pdoc && \ - PYTHONPATH="${SOURCE_FOLDER}" ${UV_BIN} run pdoc --docformat $$DOCFORMAT --output-dir _pdoc $$TEMPLATE_ARG $$LOGO_ARG $$PKGS; \ - fi; \ - else \ - printf "${YELLOW}[WARN] Source folder ${SOURCE_FOLDER} not found, skipping docs${RESET}\n"; \ - fi +##@ Book # The 'book' target assembles the final documentation book. -# 1. Aggregates API docs, coverage, test reports, and notebooks into _book. +# 1. 
Aggregates API docs, coverage, test reports, notebooks, and MkDocs site into _book. # 2. Generates links.json to define the book structure. # 3. Uses 'minibook' to compile the final HTML site. -book:: test docs marimushka ## compile the companion book +book:: test docs marimushka mkdocs-build ## compile the companion book @printf "${BLUE}[INFO] Building combined documentation...${RESET}\n" @rm -rf _book && mkdir -p _book diff --git a/.rhiza/make.d/bootstrap.mk b/.rhiza/make.d/bootstrap.mk new file mode 100644 index 0000000..0cc882a --- /dev/null +++ b/.rhiza/make.d/bootstrap.mk @@ -0,0 +1,95 @@ +## .rhiza/make.d/bootstrap.mk - Bootstrap and Installation +# This file provides targets for setting up the development environment, +# installing dependencies, and cleaning project artifacts. + +# Declare phony targets (they don't produce files) +.PHONY: install-uv install clean pre-install post-install + +# Hook targets (double-colon rules allow multiple definitions) +pre-install:: ; @: +post-install:: ; @: + +##@ Bootstrap +install-uv: ## ensure uv/uvx is installed + # Ensure the ${INSTALL_DIR} folder exists + @mkdir -p ${INSTALL_DIR} + + # Install uv/uvx only if they are not already present in PATH or in the install dir + @if command -v uv >/dev/null 2>&1 && command -v uvx >/dev/null 2>&1; then \ + :; \ + elif [ -x "${INSTALL_DIR}/uv" ] && [ -x "${INSTALL_DIR}/uvx" ]; then \ + printf "${BLUE}[INFO] uv and uvx already installed in ${INSTALL_DIR}, skipping.${RESET}\n"; \ + else \ + printf "${BLUE}[INFO] Installing uv and uvx into ${INSTALL_DIR}...${RESET}\n"; \ + if ! curl -LsSf https://astral.sh/uv/install.sh | UV_INSTALL_DIR="${INSTALL_DIR}" sh >/dev/null 2>&1; then \ + printf "${RED}[ERROR] Failed to install uv${RESET}\n"; \ + exit 1; \ + fi; \ + fi + +install: pre-install install-uv ## install + # Create the virtual environment only if it doesn't exist + @if [ ! 
-d "${VENV}" ]; then \ + ${UV_BIN} venv $(if $(PYTHON_VERSION),--python $(PYTHON_VERSION)) ${VENV} || { printf "${RED}[ERROR] Failed to create virtual environment${RESET}\n"; exit 1; }; \ + else \ + printf "${BLUE}[INFO] Using existing virtual environment at ${VENV}, skipping creation${RESET}\n"; \ + fi + + # Install the dependencies from pyproject.toml (if it exists) + @if [ -f "pyproject.toml" ]; then \ + if [ -f "uv.lock" ]; then \ + if ! ${UV_BIN} lock --check >/dev/null 2>&1; then \ + printf "${YELLOW}[WARN] uv.lock is out of sync with pyproject.toml${RESET}\n"; \ + printf "${YELLOW} Run 'uv sync' to update your lock file and environment${RESET}\n"; \ + printf "${YELLOW} Or run 'uv lock' to update only the lock file${RESET}\n"; \ + exit 1; \ + fi; \ + printf "${BLUE}[INFO] Installing dependencies from lock file${RESET}\n"; \ + ${UV_BIN} sync --all-extras --all-groups --frozen || { printf "${RED}[ERROR] Failed to install dependencies${RESET}\n"; exit 1; }; \ + else \ + printf "${YELLOW}[WARN] uv.lock not found. 
Generating lock file and installing dependencies...${RESET}\n"; \ + ${UV_BIN} sync --all-extras || { printf "${RED}[ERROR] Failed to install dependencies${RESET}\n"; exit 1; }; \ + fi; \ + else \ + printf "${YELLOW}[WARN] No pyproject.toml found, skipping install${RESET}\n"; \ + fi + + # Install dev dependencies from .rhiza/requirements/*.txt files + @if [ -d ".rhiza/requirements" ] && ls .rhiza/requirements/*.txt >/dev/null 2>&1; then \ + for req_file in .rhiza/requirements/*.txt; do \ + if [ -f "$$req_file" ]; then \ + printf "${BLUE}[INFO] Installing requirements from $$req_file${RESET}\n"; \ + ${UV_BIN} pip install -r "$$req_file" || { printf "${RED}[ERROR] Failed to install requirements from $$req_file${RESET}\n"; exit 1; }; \ + fi; \ + done; \ + fi + + # Check if there is requirements.txt file in the tests folder (legacy support) + @if [ -f "tests/requirements.txt" ]; then \ + printf "${BLUE}[INFO] Installing requirements from tests/requirements.txt${RESET}\n"; \ + ${UV_BIN} pip install -r tests/requirements.txt || { printf "${RED}[ERROR] Failed to install test requirements${RESET}\n"; exit 1; }; \ + fi + @$(MAKE) post-install + +clean: ## Clean project artifacts and stale local branches + @printf "%bCleaning project...%b\n" "$(BLUE)" "$(RESET)" + + # Remove ignored files/directories, but keep .env files, tested with futures project + @git clean -d -X -f \ + -e '!.env' \ + -e '!.env.*' + + # Remove build & test artifacts + @rm -rf \ + dist \ + build \ + *.egg-info \ + .coverage \ + .pytest_cache \ + .benchmarks + + @printf "%bRemoving local branches with no remote counterpart...%b\n" "$(BLUE)" "$(RESET)" + + @git fetch --prune + + @git branch -vv | awk '/: gone]/{print $$1}' | xargs -r git branch -D diff --git a/.rhiza/make.d/00-custom-env.mk b/.rhiza/make.d/custom-env.mk similarity index 100% rename from .rhiza/make.d/00-custom-env.mk rename to .rhiza/make.d/custom-env.mk diff --git a/.rhiza/make.d/10-custom-task.mk b/.rhiza/make.d/custom-task.mk similarity 
index 100% rename from .rhiza/make.d/10-custom-task.mk rename to .rhiza/make.d/custom-task.mk diff --git a/.rhiza/make.d/07-docker.mk b/.rhiza/make.d/docker.mk similarity index 100% rename from .rhiza/make.d/07-docker.mk rename to .rhiza/make.d/docker.mk diff --git a/.rhiza/make.d/docs.mk b/.rhiza/make.d/docs.mk new file mode 100644 index 0000000..ad44503 --- /dev/null +++ b/.rhiza/make.d/docs.mk @@ -0,0 +1,96 @@ +## docs.mk - Documentation generation targets +# This file is included by the main Makefile. +# It provides targets for generating API documentation using pdoc +# and building/serving MkDocs documentation sites. + +# Declare phony targets (they don't produce files) +.PHONY: docs mkdocs mkdocs-serve mkdocs-build + +# Default output directory for MkDocs (HTML site) +MKDOCS_OUTPUT ?= _mkdocs + +# MkDocs config file location +MKDOCS_CONFIG ?= docs/mkdocs.yml + +# Default pdoc template directory (can be overridden) +PDOC_TEMPLATE_DIR ?= book/pdoc-templates + +##@ Documentation + +# The 'docs' target generates API documentation using pdoc. +# 1. Identifies Python packages within the source folder. +# 2. Detects the docformat (google, numpy, or sphinx) from ruff.toml or defaults to google. +# 3. Installs pdoc and generates HTML documentation in _pdoc. 
+docs:: install ## create documentation with pdoc + # Clean up previous docs + rm -rf _pdoc; + + @if [ -d "${SOURCE_FOLDER}" ]; then \ + PKGS=""; for d in "${SOURCE_FOLDER}"/*; do [ -d "$$d" ] && PKGS="$$PKGS $$(basename "$$d")"; done; \ + if [ -z "$$PKGS" ]; then \ + printf "${YELLOW}[WARN] No packages found under ${SOURCE_FOLDER}, skipping docs${RESET}\n"; \ + else \ + TEMPLATE_ARG=""; \ + if [ -d "$(PDOC_TEMPLATE_DIR)" ]; then \ + TEMPLATE_ARG="-t $(PDOC_TEMPLATE_DIR)"; \ + printf "$(BLUE)[INFO] Using pdoc templates from $(PDOC_TEMPLATE_DIR)$(RESET)\n"; \ + fi; \ + DOCFORMAT="$(DOCFORMAT)"; \ + if [ -z "$$DOCFORMAT" ]; then \ + if [ -f "ruff.toml" ]; then \ + DOCFORMAT=$$(${UV_BIN} run python -c "import tomllib; print(tomllib.load(open('ruff.toml', 'rb')).get('lint', {}).get('pydocstyle', {}).get('convention', ''))"); \ + fi; \ + if [ -z "$$DOCFORMAT" ]; then \ + DOCFORMAT="google"; \ + fi; \ + printf "${BLUE}[INFO] Detected docformat: $$DOCFORMAT${RESET}\n"; \ + else \ + printf "${BLUE}[INFO] Using provided docformat: $$DOCFORMAT${RESET}\n"; \ + fi; \ + LOGO_ARG=""; \ + if [ -n "$(LOGO_FILE)" ]; then \ + if [ -f "$(LOGO_FILE)" ]; then \ + MIME=$$(file --mime-type -b "$(LOGO_FILE)"); \ + DATA=$$(base64 < "$(LOGO_FILE)" | tr -d '\n'); \ + LOGO_ARG="--logo data:$$MIME;base64,$$DATA"; \ + printf "${BLUE}[INFO] Embedding logo: $(LOGO_FILE)${RESET}\n"; \ + else \ + printf "${YELLOW}[WARN] Logo file $(LOGO_FILE) not found, skipping${RESET}\n"; \ + fi; \ + fi; \ + ${UV_BIN} pip install pdoc && \ + PYTHONPATH="${SOURCE_FOLDER}" ${UV_BIN} run pdoc --docformat $$DOCFORMAT --output-dir _pdoc $$TEMPLATE_ARG $$LOGO_ARG $$PKGS; \ + fi; \ + else \ + printf "${YELLOW}[WARN] Source folder ${SOURCE_FOLDER} not found, skipping docs${RESET}\n"; \ + fi + +# The 'mkdocs-build' target builds the MkDocs documentation site. +# 1. Checks if the mkdocs.yml config file exists. +# 2. Cleans up any previous output. +# 3. Builds the static site using mkdocs with material theme. 
+mkdocs-build:: install-uv ## build MkDocs documentation site + @printf "${BLUE}[INFO] Building MkDocs site...${RESET}\n" + @if [ -f "$(MKDOCS_CONFIG)" ]; then \ + rm -rf "$(MKDOCS_OUTPUT)"; \ + MKDOCS_OUTPUT_ABS="$$(pwd)/$(MKDOCS_OUTPUT)"; \ + ${UVX_BIN} --with mkdocs-material --with "pymdown-extensions>=10.0" mkdocs build \ + -f "$(MKDOCS_CONFIG)" \ + -d "$$MKDOCS_OUTPUT_ABS"; \ + else \ + printf "${YELLOW}[WARN] $(MKDOCS_CONFIG) not found, skipping MkDocs build${RESET}\n"; \ + fi + +# The 'mkdocs-serve' target serves the documentation with live reload. +# Useful for local development and previewing changes. +mkdocs-serve: install-uv ## serve MkDocs site with live reload + @if [ -f "$(MKDOCS_CONFIG)" ]; then \ + ${UVX_BIN} --with mkdocs-material --with "pymdown-extensions>=10.0" mkdocs serve \ + -f "$(MKDOCS_CONFIG)"; \ + else \ + printf "${RED}[ERROR] $(MKDOCS_CONFIG) not found${RESET}\n"; \ + exit 1; \ + fi + +# Convenience alias +mkdocs: mkdocs-serve ## alias for mkdocs-serve diff --git a/.rhiza/make.d/05-github.mk b/.rhiza/make.d/github.mk similarity index 100% rename from .rhiza/make.d/05-github.mk rename to .rhiza/make.d/github.mk diff --git a/.rhiza/make.d/03-marimo.mk b/.rhiza/make.d/marimo.mk similarity index 100% rename from .rhiza/make.d/03-marimo.mk rename to .rhiza/make.d/marimo.mk diff --git a/.rhiza/make.d/04-presentation.mk b/.rhiza/make.d/presentation.mk similarity index 100% rename from .rhiza/make.d/04-presentation.mk rename to .rhiza/make.d/presentation.mk diff --git a/.rhiza/make.d/quality.mk b/.rhiza/make.d/quality.mk new file mode 100644 index 0000000..52b2e6b --- /dev/null +++ b/.rhiza/make.d/quality.mk @@ -0,0 +1,27 @@ +## .rhiza/make.d/quality.mk - Quality and Formatting +# This file provides targets for code quality checks, linting, and formatting. 
+ +# Declare phony targets (they don't produce files) +.PHONY: deptry fmt mypy + +##@ Quality and Formatting +deptry: install-uv ## Run deptry + @if [ -d ${SOURCE_FOLDER} ]; then \ + $(UVX_BIN) -p ${PYTHON_VERSION} deptry ${SOURCE_FOLDER}; \ + fi + + @if [ -d ${MARIMO_FOLDER} ]; then \ + if [ -d ${SOURCE_FOLDER} ]; then \ + $(UVX_BIN) -p ${PYTHON_VERSION} deptry ${MARIMO_FOLDER} ${SOURCE_FOLDER} --ignore DEP004; \ + else \ + $(UVX_BIN) -p ${PYTHON_VERSION} deptry ${MARIMO_FOLDER} --ignore DEP004; \ + fi; \ + fi + +fmt: install-uv ## check the pre-commit hooks and the linting + @${UVX_BIN} -p ${PYTHON_VERSION} pre-commit run --all-files + +mypy: install ## run mypy analysis + @if [ -d ${SOURCE_FOLDER} ]; then \ + ${UV_BIN} run mypy ${SOURCE_FOLDER} --strict --config-file=pyproject.toml; \ + fi diff --git a/.rhiza/make.d/releasing.mk b/.rhiza/make.d/releasing.mk new file mode 100644 index 0000000..086bb1a --- /dev/null +++ b/.rhiza/make.d/releasing.mk @@ -0,0 +1,27 @@ +## .rhiza/make.d/releasing.mk - Releasing and Versioning +# This file provides targets for version bumping and release management. 
+ +# Declare phony targets (they don't produce files) +.PHONY: bump release pre-bump post-bump pre-release post-release + +# Hook targets (double-colon rules allow multiple definitions) +pre-bump:: ; @: +post-bump:: ; @: +pre-release:: ; @: +post-release:: ; @: + +##@ Releasing and Versioning +bump: pre-bump ## bump version + @if [ -f "pyproject.toml" ]; then \ + $(MAKE) install; \ + ${UVX_BIN} "rhiza[tools]>=0.8.6" tools bump; \ + printf "${BLUE}[INFO] Updating uv.lock file...${RESET}\n"; \ + ${UV_BIN} lock; \ + else \ + printf "${YELLOW}[WARN] No pyproject.toml found, skipping bump${RESET}\n"; \ + fi + @$(MAKE) post-bump + +release: pre-release install-uv ## create tag and push to remote with prompts + @UV_BIN="${UV_BIN}" /bin/sh ".rhiza/scripts/release.sh" + @$(MAKE) post-release diff --git a/.rhiza/make.d/01-test.mk b/.rhiza/make.d/test.mk similarity index 70% rename from .rhiza/make.d/01-test.mk rename to .rhiza/make.d/test.mk index 7719139..8a47952 100644 --- a/.rhiza/make.d/01-test.mk +++ b/.rhiza/make.d/test.mk @@ -4,7 +4,7 @@ # executing performance benchmarks. 
# Declare phony targets (they don't produce files) -.PHONY: test benchmark typecheck security mutate docs-coverage +.PHONY: test benchmark typecheck security docs-coverage # Default directory for tests TESTS_FOLDER := tests @@ -23,25 +23,21 @@ COVERAGE_FAIL_UNDER ?= 90 test: install ## run all tests @rm -rf _tests; - @if [ -d ${TESTS_FOLDER} ]; then \ - mkdir -p _tests/html-coverage _tests/html-report; \ - if [ -d ${SOURCE_FOLDER} ]; then \ - ${VENV}/bin/python -m pytest ${TESTS_FOLDER} \ - --ignore=${TESTS_FOLDER}/benchmarks \ - --cov=${SOURCE_FOLDER} \ - --cov-report=term \ - --cov-report=html:_tests/html-coverage \ - --cov-fail-under=$(COVERAGE_FAIL_UNDER) \ - --cov-report=json:_tests/coverage.json \ - --html=_tests/html-report/report.html; \ - else \ - printf "${YELLOW}[WARN] Source folder ${SOURCE_FOLDER} not found, running tests without coverage${RESET}\n"; \ - ${VENV}/bin/python -m pytest ${TESTS_FOLDER} \ - --ignore=${TESTS_FOLDER}/benchmarks \ - --html=_tests/html-report/report.html; \ - fi \ + @mkdir -p _tests/html-coverage _tests/html-report; \ + if [ -d ${SOURCE_FOLDER} ]; then \ + ${VENV}/bin/python -m pytest \ + --ignore=${TESTS_FOLDER}/benchmarks \ + --cov=${SOURCE_FOLDER} \ + --cov-report=term \ + --cov-report=html:_tests/html-coverage \ + --cov-fail-under=$(COVERAGE_FAIL_UNDER) \ + --cov-report=json:_tests/coverage.json \ + --html=_tests/html-report/report.html; \ else \ - printf "${YELLOW}[WARN] Test folder ${TESTS_FOLDER} not found, skipping tests${RESET}\n"; \ + printf "${YELLOW}[WARN] Source folder ${SOURCE_FOLDER} not found, running tests without coverage${RESET}\n"; \ + ${VENV}/bin/python -m pytest \ + --ignore=${TESTS_FOLDER}/benchmarks \ + --html=_tests/html-report/report.html; \ fi # The 'typecheck' target runs static type analysis using mypy. 
@@ -64,15 +60,6 @@ security: install ## run security scans (pip-audit and bandit) @printf "${BLUE}[INFO] Running bandit security scan...${RESET}\n" @${UVX_BIN} bandit -r ${SOURCE_FOLDER} -ll -q -# The 'mutate' target performs mutation testing using mutmut. -# 1. Runs mutmut to apply mutations to the source code and check if tests fail. -# 2. Displays the results of the mutation testing. -mutate: install ## run mutation testing with mutmut (slow, for CI or thorough testing) - @printf "${BLUE}[INFO] Running mutation testing with mutmut...${RESET}\n" - @printf "${YELLOW}[WARN] This may take a while...${RESET}\n" - @${UVX_BIN} mutmut run --paths-to-mutate=${SOURCE_FOLDER} - @${UVX_BIN} mutmut results - # The 'benchmark' target runs performance benchmarks using pytest-benchmark. # 1. Installs benchmarking dependencies (pytest-benchmark, pygal). # 2. Executes benchmarks found in the benchmarks/ subfolder. diff --git a/.rhiza/requirements/tests.txt b/.rhiza/requirements/tests.txt index fe03710..01e4385 100644 --- a/.rhiza/requirements/tests.txt +++ b/.rhiza/requirements/tests.txt @@ -1,8 +1,10 @@ # Test dependencies for rhiza pytest>=8.0 +python-dotenv>=1.0 pytest-cov>=6.0 pytest-html>=4.0 pytest-mock>=3.0 +PyYAML>=6.0 # For property-based testing hypothesis>=6.150.0 diff --git a/.rhiza/requirements/tools.txt b/.rhiza/requirements/tools.txt index cb101c4..eed4bbb 100644 --- a/.rhiza/requirements/tools.txt +++ b/.rhiza/requirements/tools.txt @@ -1,6 +1,7 @@ # Development tool dependencies for rhiza pre-commit==4.5.1 python-dotenv==1.2.1 + # for now needed until rhiza-tools is finished typer==0.21.1 mypy==1.19.1 diff --git a/.rhiza/rhiza.mk b/.rhiza/rhiza.mk index f3ae0a0..792ddf5 100644 --- a/.rhiza/rhiza.mk +++ b/.rhiza/rhiza.mk @@ -18,14 +18,7 @@ RESET := \033[0m # Declare phony targets (they don't produce files) .PHONY: \ - bump \ - clean \ - deptry \ - fmt \ - mypy \ help \ - install \ - install-uv \ post-bump \ post-install \ post-release \ @@ -36,10 +29,10 @@ RESET 
:= \033[0m pre-release \ pre-sync \ pre-validate \ - release \ - sync \ + print-logo \ + readme \ summarise-sync \ - update-readme \ + sync \ validate \ version-matrix @@ -54,7 +47,7 @@ PYTHON_VERSION ?= $(shell cat .python-version 2>/dev/null || echo "3.13") export PYTHON_VERSION # Read Rhiza version from .rhiza/.rhiza-version (single source of truth for rhiza-tools) -RHIZA_VERSION ?= $(shell cat .rhiza/.rhiza-version 2>/dev/null || echo "0.9.0") +RHIZA_VERSION ?= $(shell cat .rhiza/.rhiza-version 2>/dev/null || echo "0.10.2") export RHIZA_VERSION export UV_NO_MODIFY_PATH := 1 @@ -63,20 +56,8 @@ export UV_VENV_CLEAR := 1 # Load .rhiza/.env (if present) and export its variables so recipes see them. -include .rhiza/.env -# # Include split Makefiles -# -include tests/tests.mk -# -include book/book.mk -# -include book/marimo/marimo.mk -# -include presentation/presentation.mk -# -include docker/docker.mk -# -include .github/agents/agentic.mk -# # .rhiza/rhiza.mk is INLINED below -# -include .github/github.mk - - - # ============================================================================== -# Rhiza Core Actions (formerly .rhiza/rhiza.mk) +# Rhiza Core # ============================================================================== # RHIZA_LOGO definition @@ -94,16 +75,12 @@ export RHIZA_LOGO .PHONY: print-logo sync validate readme pre-sync post-sync pre-validate post-validate # Hook targets (double-colon rules allow multiple definitions) +# Note: pre-install/post-install are defined in bootstrap.mk +# Note: pre-bump/post-bump/pre-release/post-release are defined in releasing.mk pre-sync:: ; @: post-sync:: ; @: pre-validate:: ; @: post-validate:: ; @: -pre-install:: ; @: -post-install:: ; @: -pre-release:: ; @: -post-release:: ; @: -pre-bump:: ; @: -post-bump:: ; @: ##@ Rhiza Workflows @@ -128,7 +105,14 @@ summarise-sync: install-uv ## summarise differences created by sync with templat ${UVX_BIN} "rhiza>=$(RHIZA_VERSION)" summarise .; \ fi -validate: pre-validate ## 
validate project structure against template repository as defined in .rhiza/template.yml +rhiza-test: install ## run rhiza's own tests (if any) + @if [ -d ".rhiza/tests" ]; then \ + ${UV_BIN} run pytest .rhiza/tests; \ + else \ + printf "${YELLOW}[WARN] No .rhiza/tests directory found, skipping rhiza-tests${RESET}\n"; \ + fi + +validate: pre-validate rhiza-test ## validate project structure against template repository as defined in .rhiza/template.yml @if git remote get-url origin 2>/dev/null | grep -iqE 'jebel-quant/rhiza(\.git)?$$'; then \ printf "${BLUE}[INFO] Skipping validate in rhiza repository (no template.yml by design)${RESET}\n"; \ else \ @@ -140,128 +124,6 @@ validate: pre-validate ## validate project structure against template repository readme: install-uv ## update README.md with current Makefile help output @${UVX_BIN} "rhiza-tools>=0.2.0" update-readme -# ============================================================================== -# End Rhiza Core Actions -# ============================================================================== - -##@ Bootstrap -install-uv: ## ensure uv/uvx is installed - # Ensure the ${INSTALL_DIR} folder exists - @mkdir -p ${INSTALL_DIR} - - # Install uv/uvx only if they are not already present in PATH or in the install dir - @if command -v uv >/dev/null 2>&1 && command -v uvx >/dev/null 2>&1; then \ - :; \ - elif [ -x "${INSTALL_DIR}/uv" ] && [ -x "${INSTALL_DIR}/uvx" ]; then \ - printf "${BLUE}[INFO] uv and uvx already installed in ${INSTALL_DIR}, skipping.${RESET}\n"; \ - else \ - printf "${BLUE}[INFO] Installing uv and uvx into ${INSTALL_DIR}...${RESET}\n"; \ - if ! curl -LsSf https://astral.sh/uv/install.sh | UV_INSTALL_DIR="${INSTALL_DIR}" sh >/dev/null 2>&1; then \ - printf "${RED}[ERROR] Failed to install uv${RESET}\n"; \ - exit 1; \ - fi; \ - fi - -install: pre-install install-uv ## install - # Create the virtual environment only if it doesn't exist - @if [ ! 
-d "${VENV}" ]; then \ - ${UV_BIN} venv $(if $(PYTHON_VERSION),--python $(PYTHON_VERSION)) ${VENV} || { printf "${RED}[ERROR] Failed to create virtual environment${RESET}\n"; exit 1; }; \ - else \ - printf "${BLUE}[INFO] Using existing virtual environment at ${VENV}, skipping creation${RESET}\n"; \ - fi - - # Install the dependencies from pyproject.toml (if it exists) - @if [ -f "pyproject.toml" ]; then \ - if [ -f "uv.lock" ]; then \ - printf "${BLUE}[INFO] Installing dependencies from lock file${RESET}\n"; \ - ${UV_BIN} sync --all-extras --all-groups --frozen || { printf "${RED}[ERROR] Failed to install dependencies${RESET}\n"; exit 1; }; \ - else \ - printf "${YELLOW}[WARN] uv.lock not found. Generating lock file and installing dependencies...${RESET}\n"; \ - ${UV_BIN} sync --all-extras || { printf "${RED}[ERROR] Failed to install dependencies${RESET}\n"; exit 1; }; \ - fi; \ - else \ - printf "${YELLOW}[WARN] No pyproject.toml found, skipping install${RESET}\n"; \ - fi - - # Install dev dependencies from .rhiza/requirements/*.txt files - @if [ -d ".rhiza/requirements" ] && ls .rhiza/requirements/*.txt >/dev/null 2>&1; then \ - for req_file in .rhiza/requirements/*.txt; do \ - if [ -f "$$req_file" ]; then \ - printf "${BLUE}[INFO] Installing requirements from $$req_file${RESET}\n"; \ - ${UV_BIN} pip install -r "$$req_file" || { printf "${RED}[ERROR] Failed to install requirements from $$req_file${RESET}\n"; exit 1; }; \ - fi; \ - done; \ - fi - - # Check if there is requirements.txt file in the tests folder (legacy support) - @if [ -f "tests/requirements.txt" ]; then \ - printf "${BLUE}[INFO] Installing requirements from tests/requirements.txt${RESET}\n"; \ - ${UV_BIN} pip install -r tests/requirements.txt || { printf "${RED}[ERROR] Failed to install test requirements${RESET}\n"; exit 1; }; \ - fi - @$(MAKE) post-install - -clean: ## Clean project artifacts and stale local branches - @printf "%bCleaning project...%b\n" "$(BLUE)" "$(RESET)" - - # Remove ignored 
files/directories, but keep .env files, tested with futures project - @git clean -d -X -f \ - -e '!.env' \ - -e '!.env.*' - - # Remove build & test artifacts - @rm -rf \ - dist \ - build \ - *.egg-info \ - .coverage \ - .pytest_cache \ - .benchmarks - - @printf "%bRemoving local branches with no remote counterpart...%b\n" "$(BLUE)" "$(RESET)" - - @git fetch --prune - - @git branch -vv | awk '/: gone]/{print $$1}' | xargs -r git branch -D - -##@ Quality and Formatting -deptry: install-uv ## Run deptry - @if [ -d ${SOURCE_FOLDER} ]; then \ - $(UVX_BIN) -p ${PYTHON_VERSION} deptry ${SOURCE_FOLDER}; \ - fi - - @if [ -d ${MARIMO_FOLDER} ]; then \ - if [ -d ${SOURCE_FOLDER} ]; then \ - $(UVX_BIN) -p ${PYTHON_VERSION} deptry ${MARIMO_FOLDER} ${SOURCE_FOLDER} --ignore DEP004; \ - else \ - $(UVX_BIN) -p ${PYTHON_VERSION} deptry ${MARIMO_FOLDER} --ignore DEP004; \ - fi \ - fi - -fmt: install-uv ## check the pre-commit hooks and the linting - @${UVX_BIN} -p ${PYTHON_VERSION} pre-commit run --all-files - -mypy: install ## run mypy analysis - @if [ -d ${SOURCE_FOLDER} ]; then \ - ${UV_BIN} run mypy ${SOURCE_FOLDER} --strict --config-file=pyproject.toml; \ - fi - -##@ Releasing and Versioning -bump: pre-bump ## bump version - @if [ -f "pyproject.toml" ]; then \ - $(MAKE) install; \ - ${UVX_BIN} "rhiza[tools]>=0.8.6" tools bump; \ - printf "${BLUE}[INFO] Updating uv.lock file...${RESET}\n"; \ - ${UV_BIN} lock; \ - else \ - printf "${YELLOW}[WARN] No pyproject.toml found, skipping bump${RESET}\n"; \ - fi - @$(MAKE) post-bump - -release: pre-release install-uv ## create tag and push to remote with prompts - @UV_BIN="${UV_BIN}" /bin/sh ".rhiza/scripts/release.sh" - @$(MAKE) post-release - - ##@ Meta help: print-logo ## Display this help message diff --git a/.rhiza/template-bundles.yml b/.rhiza/template-bundles.yml new file mode 100644 index 0000000..e3b09a7 --- /dev/null +++ b/.rhiza/template-bundles.yml @@ -0,0 +1,291 @@ +# Rhiza Template Bundle Definitions +# +# This file defines 
template bundles - pre-configured sets of files that can be +# included in downstream projects by selecting templates instead of listing +# individual file paths. +# +# Usage in downstream projects (.rhiza/template.yml): +# +# templates: +# - tests +# - docker +# - marimo +# +# Instead of manually listing: +# +# include: | +# .rhiza/make.d/01-test.mk +# pytest.ini +# tests/** +# docker/Dockerfile +# ... + +# Schema version for this bundles file format +version: "0.7.1" + +# Bundle Definitions +bundles: + # ============================================================================ + # CORE - Required infrastructure + # ============================================================================ + core: + description: "Core Rhiza infrastructure" + required: true + standalone: true + files: + # Core Rhiza files + - .rhiza/rhiza.mk + - .rhiza/.cfg.toml + - .rhiza/.env + - .rhiza/.gitignore + - .rhiza/.rhiza-version + - .rhiza/make.d/00-custom-env.mk + - .rhiza/make.d/08-docs.mk + - .rhiza/make.d/10-custom-task.mk + - .rhiza/make.d/README.md + - .rhiza/scripts + - .rhiza/docs + - .rhiza/assets + - .rhiza/requirements/README.md + - .rhiza/requirements/docs.txt + - .rhiza/requirements/tools.txt + + # Root configuration files + - Makefile + - .pre-commit-config.yaml + - .editorconfig + - .gitignore + - .python-version + - ruff.toml + - renovate.json + + # Documentation files (project-specific docs) + - docs/SECURITY.md + - docs/ARCHITECTURE.md + - docs/CUSTOMIZATION.md + - docs/GLOSSARY.md + - docs/QUICK_REFERENCE.md + - docs/RELEASING.md + - docs/WORKFLOWS.md + - docs/ASSETS.md + - docs/DEMO.md + + github: + description: "GitHub Actions workflows for CI/CD" + standalone: true + requires: [core] + files: + - .rhiza/make.d/05-github.mk + - .rhiza/make.d/06-agentic.mk + # Core GitHub Actions workflows + - .github/workflows/rhiza_validate.yml + - .github/workflows/rhiza_sync.yml + - .github/workflows/rhiza_pre-commit.yml + - .github/workflows/rhiza_deptry.yml + - 
.github/workflows/rhiza_release.yml + - .github/actions/configure-git-auth + - .github/dependabot.yml + - .github/copilot-instructions.md + - .github/agents + + # ============================================================================ + # LEGAL - Legal and community files + # ============================================================================ + legal: + description: "Legal and community documentation files" + standalone: true + requires: [] + files: + # Legal files + - LICENSE + + # Community files + - CONTRIBUTING.md + - CODE_OF_CONDUCT.md + + # ============================================================================ + # DEVCONTAINER - VS Code DevContainer configuration + # ============================================================================ + devcontainer: + description: "VS Code DevContainer configuration for consistent development environments" + standalone: true + requires: [] + files: + # DevContainer configuration + - .devcontainer/devcontainer.json + - .devcontainer/bootstrap.sh + + # Documentation + - docs/DEVCONTAINER.md + + # GitHub Actions workflows + - .github/workflows/rhiza_devcontainer.yml + + # ============================================================================ + # BENCHMARKS - Performance benchmarking with pytest-benchmark + # ============================================================================ + benchmarks: + description: "Performance benchmarking infrastructure with pytest-benchmark" + standalone: true + requires: [] + recommends: + - tests # Benchmarks work best alongside tests + files: + # Benchmark make target (part of test.mk) + # Note: The benchmark target is in 01-test.mk + + # Benchmark directory + - tests/test_rhiza/benchmarks + + # GitHub Actions workflows + - .github/workflows/rhiza_benchmarks.yml + + # ============================================================================ + # DOCKER - Docker containerization support + # 
============================================================================ + docker: + description: "Docker containerization support for building and running containers" + standalone: true + requires: [] + files: + # Docker configuration and files + - docker/Dockerfile + - docker/Dockerfile.dockerignore + + # Make targets + - .rhiza/make.d/07-docker.mk + + # Documentation + - docs/DOCKER.md + + # GitHub Actions workflows + - .github/workflows/rhiza_docker.yml + + # ============================================================================ + # PRESENTATION - Presentation building with reveal.js + # ============================================================================ + presentation: + description: "Presentation building using reveal.js and Marimo" + standalone: true + requires: [] + recommends: + - marimo # Presentations often use Marimo for interactive slides + files: + - .rhiza/make.d/04-presentation.mk + - docs/PRESENTATION.md + + # ============================================================================ + # GITLAB - GitLab CI/CD pipeline configuration + # ============================================================================ + gitlab: + description: "GitLab CI/CD pipeline configuration and workflows" + standalone: true + requires: [core] + notes: | + GitLab workflows provide similar functionality to GitHub Actions. + Some workflows (like book, ci) may benefit from having their + corresponding feature templates (book, tests) also enabled. 
+ files: + # Main GitLab CI configuration + - .gitlab-ci.yml + + # GitLab workflow files + - .gitlab/workflows/rhiza_book.yml + - .gitlab/workflows/rhiza_ci.yml + - .gitlab/workflows/rhiza_deptry.yml + - .gitlab/workflows/rhiza_pre-commit.yml + - .gitlab/workflows/rhiza_release.yml + - .gitlab/workflows/rhiza_renovate.yml + - .gitlab/workflows/rhiza_sync.yml + - .gitlab/workflows/rhiza_validate.yml + + # GitLab templates + - .gitlab/template + + # GitLab documentation + - .gitlab/COMPARISON.md + - .gitlab/README.md + - .gitlab/SUMMARY.md + - .gitlab/TESTING.md + + # ============================================================================ + # TESTS - Testing infrastructure with pytest, coverage, and type checking + # ============================================================================ + tests: + description: "Testing infrastructure with pytest, coverage, and type checking" + standalone: true + requires: [] + files: + # Make targets and configuration + - .rhiza/utils/version_matrix.py + - .rhiza/make.d/01-test.mk + - .rhiza/requirements/tests.txt + - pytest.ini + + # Core test infrastructure + #- tests/test_rhiza/__init__.py + #- tests/test_rhiza/conftest.py + #- tests/test_rhiza/README.md + + # Core/generic test files + - .rhiza/tests + + # GitHub Actions workflows + - .github/workflows/rhiza_ci.yml + - .github/workflows/rhiza_mypy.yml + - .github/workflows/rhiza_security.yml + - .github/workflows/rhiza_codeql.yml + + # ============================================================================ + # MARIMO - Interactive Marimo notebooks + # ============================================================================ + marimo: + description: "Interactive Marimo notebooks for data exploration and documentation" + standalone: true + requires: [] + files: + # Marimo configuration + - .rhiza/make.d/03-marimo.mk + - .rhiza/requirements/marimo.txt + + # Marimo notebooks directory + - book/marimo + + # Marimo-specific tests + #- 
tests/test_rhiza/test_marimushka_target.py + #- tests/test_rhiza/test_notebooks.py + + # Documentation + - docs/MARIMO.md + + # GitHub Actions workflows + - .github/workflows/rhiza_marimo.yml + + # ============================================================================ + # BOOK - Documentation book generation + # ============================================================================ + book: + description: | + Comprehensive documentation book generation combining: + - API documentation (pdoc) + - Test coverage reports + - Test results + - Interactive notebooks (if marimo is enabled) + standalone: false + requires: + - tests # Required: book needs test coverage and reports + recommends: + - marimo # Optional: book works better with notebook exports + files: + # Book building configuration + - .rhiza/make.d/02-book.mk + - .rhiza/templates/minibook + + # Book-specific tests + #- tests/test_rhiza/test_book.py + + # Documentation + - docs/BOOK.md + + # GitHub Actions workflows + - .github/workflows/rhiza_book.yml diff --git a/.rhiza/tests/README.md b/.rhiza/tests/README.md new file mode 100644 index 0000000..b9e74cc --- /dev/null +++ b/.rhiza/tests/README.md @@ -0,0 +1,128 @@ +# Rhiza Test Suite + +This directory contains the comprehensive test suite for the Rhiza project. + +## Test Organization + +Tests are organized into purpose-driven subdirectories: + +### `structure/` +Static assertions about file and directory presence. These tests verify that the repository contains the expected files, directories, and configuration structure without executing any subprocesses. + +- `test_project_layout.py` — Validates root-level files and directories +- `test_requirements.py` — Validates `.rhiza/requirements/` structure + +### `api/` +Makefile target validation via dry-runs. These tests verify that Makefile targets are properly defined and would execute the expected commands. + +- `test_makefile_targets.py` — Core Makefile targets (install, test, fmt, etc.) 
+- `test_makefile_api.py` — Makefile API (delegation, extension, hooks, overrides) +- `test_github_targets.py` — GitHub-specific Makefile targets + +### `integration/` +Tests requiring sandboxed git repositories or subprocess execution. These tests verify end-to-end workflows. + +- `test_release.py` — Release script functionality +- `test_book_targets.py` — Documentation book build targets +- `test_marimushka.py` — Marimushka target execution +- `test_notebook_execution.py` — Marimo notebook execution validation + +### `sync/` +Template sync, workflows, versioning, and content validation tests. These tests ensure that template synchronization and content validation work correctly. + +- `test_rhiza_version.py` — Version reading and workflow validation +- `test_readme_validation.py` — README code block execution and validation +- `test_docstrings.py` — Doctest validation across source modules + +### `utils/` +Tests for utility code and test infrastructure. These tests validate the testing framework itself and utility scripts. + +- `test_git_repo_fixture.py` — Validates the `git_repo` fixture +- `test_version_matrix.py` — Version matrix utility validation + +### `deps/` +Dependency validation tests. These tests ensure that project dependencies are correctly specified and healthy. 
+ +- `test_dependency_health.py` — Validates pyproject.toml and requirements files + +## Running Tests + +### Run all tests +```bash +uv run pytest .rhiza/tests/ +# or +make test +``` + +### Run tests from a specific category +```bash +uv run pytest .rhiza/tests/structure/ +uv run pytest .rhiza/tests/api/ +uv run pytest .rhiza/tests/integration/ +uv run pytest .rhiza/tests/sync/ +uv run pytest .rhiza/tests/utils/ +uv run pytest .rhiza/tests/deps/ +``` + +### Run a specific test file +```bash +uv run pytest .rhiza/tests/structure/test_project_layout.py +``` + +### Run with verbose output +```bash +uv run pytest .rhiza/tests/ -v +``` + +### Run with coverage +```bash +uv run pytest .rhiza/tests/ --cov +``` + +## Fixtures + +### Root-level fixtures (`conftest.py`) +- `root` — Repository root path (session-scoped) +- `logger` — Configured logger instance (session-scoped) +- `git_repo` — Sandboxed git repository (function-scoped) + +### Category-specific fixtures +- `api/conftest.py` — `setup_tmp_makefile`, `run_make`, `setup_rhiza_git_repo` +- `sync/conftest.py` — `setup_sync_env` +- `utils/conftest.py` — sys.path setup for version_matrix imports + +## Writing Tests + +### Conventions +- Use descriptive test names that explain what is being tested +- Group related tests in classes when appropriate +- Use appropriate fixtures for setup/teardown +- Add docstrings to test modules and complex test functions +- Use `pytest.mark.skip` for tests that depend on optional features + +### Import Patterns +```python +# Import shared helpers from test_utils +from test_utils import strip_ansi, run_make, setup_rhiza_git_repo + +# Import from local category conftest (for fixtures and category-specific helpers) +from api.conftest import SPLIT_MAKEFILES, setup_tmp_makefile + +# Note: Fixtures defined in conftest.py are automatically available in tests +# and don't need to be explicitly imported +``` + +## Test Coverage + +The test suite aims for high coverage across: +- Configuration 
validation (structure, dependencies) +- Makefile target correctness (api) +- End-to-end workflows (integration) +- Template synchronization (sync) +- Utility code (utils) + +## Notes + +- Benchmarks are located in `tests/test_rhiza/benchmarks/` and run via `make benchmark` +- Integration tests use sandboxed git repositories to avoid affecting the working tree +- All Makefile tests use dry-run mode (`make -n`) to avoid side effects diff --git a/.rhiza/tests/api/conftest.py b/.rhiza/tests/api/conftest.py new file mode 100644 index 0000000..4578d59 --- /dev/null +++ b/.rhiza/tests/api/conftest.py @@ -0,0 +1,135 @@ +"""Shared fixtures for Makefile API tests. + +This conftest provides: +- setup_tmp_makefile: Copies Makefile and split files to temp dir for isolated testing +- run_make: Helper to execute make commands with dry-run support (imported from test_utils) +- setup_rhiza_git_repo: Initialize a git repo configured as rhiza origin (imported from test_utils) +- SPLIT_MAKEFILES: List of split Makefile paths +""" + +from __future__ import annotations + +import os +import shutil +import subprocess # nosec +from pathlib import Path + +import pytest + +# Import shared utilities (no __init__.py needed with new structure) +# Note: we define our own run_make and setup_rhiza_git_repo here with enhanced functionality +from test_utils import MAKE + +# Split Makefile paths that are included in the main Makefile +# These are now located in .rhiza/make.d/ directory +SPLIT_MAKEFILES = [ + ".rhiza/rhiza.mk", + ".rhiza/make.d/bootstrap.mk", + ".rhiza/make.d/quality.mk", + ".rhiza/make.d/releasing.mk", + ".rhiza/make.d/test.mk", + ".rhiza/make.d/book.mk", + ".rhiza/make.d/marimo.mk", + ".rhiza/make.d/presentation.mk", + ".rhiza/make.d/github.mk", + ".rhiza/make.d/agentic.mk", + ".rhiza/make.d/docker.mk", + ".rhiza/make.d/docs.mk", +] + + +@pytest.fixture(autouse=True) +def setup_tmp_makefile(logger, root, tmp_path: Path): + """Copy the Makefile and split Makefiles into a temp 
directory and chdir there. + + We rely on `make -n` so that no real commands are executed. + This fixture consolidates setup for both basic Makefile tests and GitHub targets. + """ + logger.debug("Setting up temporary Makefile test dir: %s", tmp_path) + + # Copy the main Makefile into the temporary working directory + shutil.copy(root / "Makefile", tmp_path / "Makefile") + + # Copy core Rhiza Makefiles + (tmp_path / ".rhiza").mkdir(exist_ok=True) + shutil.copy(root / ".rhiza" / "rhiza.mk", tmp_path / ".rhiza" / "rhiza.mk") + + # Copy .python-version file for PYTHON_VERSION variable + if (root / ".python-version").exists(): + shutil.copy(root / ".python-version", tmp_path / ".python-version") + + # Copy .rhiza/.env if it exists (needed for GitHub targets and other configuration) + if (root / ".rhiza" / ".env").exists(): + shutil.copy(root / ".rhiza" / ".env", tmp_path / ".rhiza" / ".env") + else: + # Create a minimal, deterministic .rhiza/.env for tests so they don't + # depend on the developer's local configuration which may vary. 
+ env_content = "SCRIPTS_FOLDER=.rhiza/scripts\nCUSTOM_SCRIPTS_FOLDER=.rhiza/customisations/scripts\n" + (tmp_path / ".rhiza" / ".env").write_text(env_content) + + logger.debug("Copied Makefile from %s to %s", root / "Makefile", tmp_path / "Makefile") + + # Copy split Makefiles if they exist (maintaining directory structure) + for split_file in SPLIT_MAKEFILES: + source_path = root / split_file + if source_path.exists(): + dest_path = tmp_path / split_file + dest_path.parent.mkdir(parents=True, exist_ok=True) + shutil.copy(source_path, dest_path) + logger.debug("Copied %s to %s", source_path, dest_path) + + # Move into tmp directory for isolation + old_cwd = Path.cwd() + os.chdir(tmp_path) + logger.debug("Changed working directory to %s", tmp_path) + try: + yield + finally: + os.chdir(old_cwd) + logger.debug("Restored working directory to %s", old_cwd) + + +def run_make( + logger, + args: list[str] | None = None, + check: bool = True, + dry_run: bool = True, + env: dict[str, str] | None = None, +) -> subprocess.CompletedProcess: + """Run `make` with optional arguments and return the completed process. 
+
+    Args:
+        logger: Logger used to emit diagnostic messages during the run
+        args: Additional arguments for make
+        check: If True, raise on non-zero return code
+        dry_run: If True, use -n to avoid executing commands
+        env: Optional environment variables to pass to the subprocess
+    """
+    cmd = [MAKE]
+    if args:
+        cmd.extend(args)
+    # Use -s to reduce noise, -n to avoid executing commands
+    flags = "-sn" if dry_run else "-s"
+    cmd.insert(1, flags)
+    logger.info("Running command: %s", " ".join(cmd))
+    result = subprocess.run(cmd, capture_output=True, text=True, env=env)  # nosec
+    logger.debug("make exited with code %d", result.returncode)
+    if result.stdout:
+        logger.debug("make stdout (truncated to 500 chars):\n%s", result.stdout[:500])
+    if result.stderr:
+        logger.debug("make stderr (truncated to 500 chars):\n%s", result.stderr[:500])
+    if check and result.returncode != 0:
+        msg = f"make failed with code {result.returncode}:\nSTDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}"
+        raise AssertionError(msg)
+    return result
+
+
+def setup_rhiza_git_repo():
+    """Initialize a git repository and set remote to rhiza."""
+    git = shutil.which("git") or "/usr/bin/git"
+    subprocess.run([git, "init"], check=True, capture_output=True)  # nosec
+    subprocess.run(
+        [git, "remote", "add", "origin", "https://github.com/jebel-quant/rhiza"],
+        check=True,
+        capture_output=True,
+    )  # nosec
diff --git a/.rhiza/tests/api/test_github_targets.py b/.rhiza/tests/api/test_github_targets.py
new file mode 100644
index 0000000..c3b1941
--- /dev/null
+++ b/.rhiza/tests/api/test_github_targets.py
@@ -0,0 +1,55 @@
+"""Tests for the GitHub Makefile targets using safe dry-runs.
+
+These tests validate that the GitHub targets in .rhiza/make.d/ are correctly exposed
+and emit the expected commands without actually executing them.
+""" + +from __future__ import annotations + +# Import run_make from local conftest (setup_tmp_makefile is autouse) +from api.conftest import run_make + + +def test_gh_targets_exist(logger): + """Verify that GitHub targets are listed in help.""" + result = run_make(logger, ["help"], dry_run=False) + output = result.stdout + + expected_targets = ["gh-install", "view-prs", "view-issues", "failed-workflows", "whoami"] + + for target in expected_targets: + assert target in output, f"Target {target} not found in help output" + + +def test_gh_install_dry_run(logger): + """Verify gh-install target dry-run.""" + result = run_make(logger, ["gh-install"]) + # In dry-run, we expect to see the shell commands that would be executed. + # Since the recipe uses @if, make -n might verify the syntax or show the command if not silenced. + # However, with -s (silent), make -n might not show much for @ commands unless they are echoed. + # But we mainly want to ensure it runs without error. + assert result.returncode == 0 + + +def test_view_prs_dry_run(logger): + """Verify view-prs target dry-run.""" + result = run_make(logger, ["view-prs"]) + assert result.returncode == 0 + + +def test_view_issues_dry_run(logger): + """Verify view-issues target dry-run.""" + result = run_make(logger, ["view-issues"]) + assert result.returncode == 0 + + +def test_failed_workflows_dry_run(logger): + """Verify failed-workflows target dry-run.""" + result = run_make(logger, ["failed-workflows"]) + assert result.returncode == 0 + + +def test_whoami_dry_run(logger): + """Verify whoami target dry-run.""" + result = run_make(logger, ["whoami"]) + assert result.returncode == 0 diff --git a/tests/test_rhiza/test_makefile_api.py b/.rhiza/tests/api/test_makefile_api.py similarity index 50% rename from tests/test_rhiza/test_makefile_api.py rename to .rhiza/tests/api/test_makefile_api.py index cf19ebb..8096006 100644 --- a/tests/test_rhiza/test_makefile_api.py +++ b/.rhiza/tests/api/test_makefile_api.py @@ -2,14 
+2,13 @@ import os import shutil -import subprocess +import subprocess # nosec from pathlib import Path import pytest # Get absolute paths for executables to avoid S607 warnings from CodeFactor/Bandit GIT = shutil.which("git") or "/usr/bin/git" -MAKE = shutil.which("make") or "/usr/bin/make" # Files required for the API test environment REQUIRED_FILES = [ @@ -70,17 +69,17 @@ def setup_api_env(logger, root, tmp_path: Path): (tmp_path / "local.mk").unlink() # Initialize git repo for rhiza tools (required for sync/validate) - subprocess.run([GIT, "init"], cwd=tmp_path, check=True, capture_output=True) + subprocess.run([GIT, "init"], cwd=tmp_path, check=True, capture_output=True) # nosec # Configure git user for commits if needed (some rhiza checks might need commits) - subprocess.run([GIT, "config", "user.email", "you@example.com"], cwd=tmp_path, check=True, capture_output=True) - subprocess.run([GIT, "config", "user.name", "Rhiza Test"], cwd=tmp_path, check=True, capture_output=True) + subprocess.run([GIT, "config", "user.email", "you@example.com"], cwd=tmp_path, check=True, capture_output=True) # nosec + subprocess.run([GIT, "config", "user.name", "Rhiza Test"], cwd=tmp_path, check=True, capture_output=True) # nosec # Add origin remote to simulate being in the rhiza repo (triggers the skip logic in rhiza.mk) subprocess.run( [GIT, "remote", "add", "origin", "https://github.com/jebel-quant/rhiza.git"], cwd=tmp_path, check=True, capture_output=True, - ) + ) # nosec # Move to tmp dir old_cwd = Path.cwd() @@ -91,27 +90,13 @@ def setup_api_env(logger, root, tmp_path: Path): os.chdir(old_cwd) -def run_make(args: list[str] | None = None, dry_run: bool = True) -> subprocess.CompletedProcess: - """Run make in the current directory.""" - cmd = [MAKE] - if dry_run: - cmd.append("-n") - if args: - cmd.extend(args) - - # We use -s (silent) to minimize noise, but sometimes we want to see output - if dry_run: - # For dry-run, we often want to see the commands - pass - else: - 
cmd[:1] = [MAKE, "-s"] +# Import run_make from local conftest +from api.conftest import run_make # noqa: E402 - return subprocess.run(cmd, capture_output=True, text=True) - -def test_api_delegation(setup_api_env): +def test_api_delegation(logger, setup_api_env): """Test that 'make help' works and delegates to .rhiza/rhiza.mk.""" - result = run_make(["help"], dry_run=False) + result = run_make(logger, ["help"], dry_run=False) assert result.returncode == 0 # "Rhiza Workflows" is a section in .rhiza/rhiza.mk assert "Rhiza Workflows" in result.stdout @@ -120,7 +105,7 @@ def test_api_delegation(setup_api_env): assert "test" in result.stdout or "install" in result.stdout -def test_minimal_setup_works(setup_api_env): +def test_minimal_setup_works(logger, setup_api_env): """Test that make works even if optional folders (tests, docker, etc.) are missing.""" # Remove optional folders for folder in OPTIONAL_FOLDERS: @@ -132,7 +117,7 @@ def test_minimal_setup_works(setup_api_env): # Just mainly folders. # Run make help - result = run_make(["help"], dry_run=False) + result = run_make(logger, ["help"], dry_run=False) assert result.returncode == 0 # Check that core rhiza targets exist @@ -144,25 +129,28 @@ def test_minimal_setup_works(setup_api_env): # This is by design - targets are always available but handle missing resources. 
-def test_extension_mechanism(setup_api_env): - """Test that .rhiza/make.d/*.mk files are included.""" - ext_file = setup_api_env / ".rhiza" / "make.d" / "50-custom.mk" - ext_file.write_text(""" -.PHONY: custom-target +def test_extension_mechanism(logger, setup_api_env): + """Test that custom targets can be added in the root Makefile.""" + # Add a custom target to the root Makefile (before include line) + makefile = setup_api_env / "Makefile" + original = makefile.read_text() + # Insert custom target before the include line + new_content = ( + """.PHONY: custom-target custom-target: @echo "Running custom target" -""") - # Verify the target is listed in help (if we were parsing help, but running it is better) - # Note: make -n might not show @echo commands if they are silent, - # but here we just want to see if make accepts the target. +""" + + original + ) + makefile.write_text(new_content) - result = run_make(["custom-target"], dry_run=False) + result = run_make(logger, ["custom-target"], dry_run=False) assert result.returncode == 0 assert "Running custom target" in result.stdout -def test_local_override(setup_api_env): +def test_local_override(logger, setup_api_env): """Test that local.mk is included and can match targets.""" local_file = setup_api_env / "local.mk" local_file.write_text(""" @@ -171,12 +159,12 @@ def test_local_override(setup_api_env): @echo "Running local target" """) - result = run_make(["local-target"], dry_run=False) + result = run_make(logger, ["local-target"], dry_run=False) assert result.returncode == 0 assert "Running local target" in result.stdout -def test_local_override_pre_hook(setup_api_env): +def test_local_override_pre_hook(logger, setup_api_env): """Test using local.mk to override a pre-hook.""" local_file = setup_api_env / "local.mk" # We override pre-sync to print a marker (using double-colon to match rhiza.mk) @@ -193,37 +181,31 @@ def test_local_override_pre_hook(setup_api_env): # Wait, I defined it as `pre-sync: ; @:` (single 
colon). # So redefining it in local.mk (which is included AFTER) might trigger a warning but should work. - result = run_make(["sync"], dry_run=False) + result = run_make(logger, ["sync"], dry_run=False) # We might expect a warning about overriding commands for target `pre-sync` # checking stdout/stderr for the marker assert "[[LOCAL_PRE_SYNC]]" in result.stdout -def test_hooks_flow(setup_api_env): - """Verify that sync runs pre-sync, the sync logic, and post-sync.""" - # We can't easily see execution order in dry run if commands are hidden. - # Let's inspect the output of make -n sync - - result = run_make(["sync"], dry_run=True) - assert result.returncode == 0 - - # The output should contain the command sequences. - # Since pre-sync is currently empty (@:) it might not show up in -n output unless we override it. - - -def test_hook_execution_order(setup_api_env): - """Define hooks and verify execution order.""" - # Create an extension that defines visible hooks (using double-colon) - (setup_api_env / ".rhiza" / "make.d" / "hooks.mk").write_text(""" -pre-sync:: +def test_hook_execution_order(logger, setup_api_env): + """Define hooks in root Makefile and verify execution order.""" + # Add hooks to root Makefile (before include line) + makefile = setup_api_env / "Makefile" + original = makefile.read_text() + new_content = ( + """pre-sync:: @echo "STARTING_SYNC" post-sync:: @echo "FINISHED_SYNC" -""") - result = run_make(["sync"], dry_run=False) +""" + + original + ) + makefile.write_text(new_content) + + result = run_make(logger, ["sync"], dry_run=False) assert result.returncode == 0 output = result.stdout @@ -237,22 +219,151 @@ def test_hook_execution_order(setup_api_env): assert start_index < finish_index -def test_override_core_target(setup_api_env): - """Verify that a repo extension can override a core target (with warning).""" - # Override 'fmt' which is defined in Makefile.rhiza - (setup_api_env / ".rhiza" / "make.d" / "override.mk").write_text(""" +def 
test_override_core_target(logger, setup_api_env): + """Verify that the root Makefile can override a core target (with warning).""" + # Override 'fmt' which is defined in quality.mk + # Add override AFTER the include line so it takes precedence + makefile = setup_api_env / "Makefile" + original = makefile.read_text() + new_content = ( + original + + """ fmt: @echo "CUSTOM_FMT" -""") +""" + ) + makefile.write_text(new_content) - result = run_make(["fmt"], dry_run=False) + result = run_make(logger, ["fmt"], dry_run=False) assert result.returncode == 0 - # It should run the custom one because .rhiza/make.d is included later + # It should run the custom one because it's defined after the include assert "CUSTOM_FMT" in result.stdout - # It should NOT run the original one (which runs pre-commit) - # The original one has "@${UV_BIN} run pre-commit..." - # We can check that the output doesn't look like pre-commit output or just check presence of CUSTOM_FMT # We expect a warning on stderr about overriding assert "warning: overriding" in result.stderr.lower() assert "fmt" in result.stderr.lower() + + +def test_global_variable_override(logger, setup_api_env): + """Test that global variables can be overridden in the root Makefile. + + This tests the pattern documented in CUSTOMIZATION.md: + Set variables before the include line to override defaults. + """ + # Add variable override to root Makefile (before include line) + makefile = setup_api_env / "Makefile" + original = makefile.read_text() + new_content = ( + """# Override default coverage threshold (defaults to 90) +COVERAGE_FAIL_UNDER := 42 +export COVERAGE_FAIL_UNDER + +""" + + original + ) + makefile.write_text(new_content) + + result = run_make(logger, ["print-COVERAGE_FAIL_UNDER"], dry_run=False) + assert result.returncode == 0 + assert "42" in result.stdout + + +def test_pre_install_hook(logger, setup_api_env): + """Test that pre-install hooks are executed before install. 
+ + This tests the hook pattern documented in CUSTOMIZATION.md. + """ + makefile = setup_api_env / "Makefile" + original = makefile.read_text() + new_content = ( + """pre-install:: + @echo "[[PRE_INSTALL_HOOK]]" + +""" + + original + ) + makefile.write_text(new_content) + + # Run install in dry-run mode to avoid actual installation + result = run_make(logger, ["install"], dry_run=True) + assert result.returncode == 0 + # In dry-run mode, the echo command is printed (not executed) + assert "PRE_INSTALL_HOOK" in result.stdout + + +def test_post_install_hook(logger, setup_api_env): + """Test that post-install hooks are executed after install. + + This tests the hook pattern documented in CUSTOMIZATION.md. + """ + makefile = setup_api_env / "Makefile" + original = makefile.read_text() + new_content = ( + """post-install:: + @echo "[[POST_INSTALL_HOOK]]" + +""" + + original + ) + makefile.write_text(new_content) + + # Run install in dry-run mode + result = run_make(logger, ["install"], dry_run=True) + assert result.returncode == 0 + assert "POST_INSTALL_HOOK" in result.stdout + + +def test_multiple_hooks_accumulate(logger, setup_api_env): + """Test that multiple hook definitions accumulate rather than override. + + This is a key feature of double-colon rules: the root Makefile and + local.mk can both add to the same hook without conflicts. 
+ """ + # Add hook in root Makefile + makefile = setup_api_env / "Makefile" + original = makefile.read_text() + new_content = ( + """pre-sync:: + @echo "[[HOOK_A]]" + +""" + + original + ) + makefile.write_text(new_content) + + # Add another hook in local.mk + (setup_api_env / "local.mk").write_text("""pre-sync:: + @echo "[[HOOK_B]]" +""") + + result = run_make(logger, ["sync"], dry_run=False) + assert result.returncode == 0 + # Both hooks should be present + assert "[[HOOK_A]]" in result.stdout + assert "[[HOOK_B]]" in result.stdout + + +def test_variable_override_before_include(logger, setup_api_env): + """Test that variables set before include take precedence. + + Variables defined in the root Makefile before the include line + should be available throughout the build. + """ + # Set a variable and use it in a target (before include) + makefile = setup_api_env / "Makefile" + original = makefile.read_text() + new_content = ( + """MY_CUSTOM_VAR := hello + +.PHONY: show-var +show-var: + @echo "MY_VAR=$(MY_CUSTOM_VAR)" + +""" + + original + ) + makefile.write_text(new_content) + + result = run_make(logger, ["show-var"], dry_run=False) + assert result.returncode == 0 + assert "MY_VAR=hello" in result.stdout diff --git a/tests/test_rhiza/test_makefile.py b/.rhiza/tests/api/test_makefile_targets.py similarity index 70% rename from tests/test_rhiza/test_makefile.py rename to .rhiza/tests/api/test_makefile_targets.py index e782ca4..0b5d3ca 100644 --- a/tests/test_rhiza/test_makefile.py +++ b/.rhiza/tests/api/test_makefile_targets.py @@ -1,4 +1,4 @@ -"""Tests for the Makefile targets and help output using safe dry‑runs. +"""Tests for the Makefile targets and help output using safe dry-runs. This file and its associated tests flow down via a SYNC action from the jebel-quant/rhiza repository (https://github.com/jebel-quant/rhiza). 
@@ -13,124 +13,12 @@ from __future__ import annotations import os -import re -import shutil -import subprocess -from pathlib import Path import pytest +from api.conftest import SPLIT_MAKEFILES, run_make, setup_rhiza_git_repo -# Get absolute paths for executables to avoid S607 warnings from CodeFactor/Bandit -MAKE = shutil.which("make") or "/usr/bin/make" - -# Split Makefile paths that are included in the main Makefile -# These are now located in .rhiza/make.d/ directory -SPLIT_MAKEFILES = [ - ".rhiza/rhiza.mk", - ".rhiza/make.d/01-test.mk", - ".rhiza/make.d/02-book.mk", - ".rhiza/make.d/03-marimo.mk", - ".rhiza/make.d/04-presentation.mk", - ".rhiza/make.d/05-github.mk", -] - - -def strip_ansi(text: str) -> str: - """Strip ANSI escape sequences from text.""" - ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") - return ansi_escape.sub("", text) - - -@pytest.fixture(autouse=True) -def setup_tmp_makefile(logger, root, tmp_path: Path): - """Copy the Makefile and split Makefiles into a temp directory and chdir there. - - We rely on `make -n` so that no real commands are executed. - """ - logger.debug("Setting up temporary Makefile test dir: %s", tmp_path) - - # Copy the main Makefile into the temporary working directory - shutil.copy(root / "Makefile", tmp_path / "Makefile") - - # Copy core Rhiza Makefiles - (tmp_path / ".rhiza").mkdir(exist_ok=True) - shutil.copy(root / ".rhiza" / "rhiza.mk", tmp_path / ".rhiza" / "rhiza.mk") - - # Copy .python-version file for PYTHON_VERSION variable - if (root / ".python-version").exists(): - shutil.copy(root / ".python-version", tmp_path / ".python-version") - - # Create a minimal, deterministic .rhiza/.env for tests so they don't - # depend on the developer's local configuration which may vary. 
- env_content = "SCRIPTS_FOLDER=.rhiza/scripts\nCUSTOM_SCRIPTS_FOLDER=.rhiza/customisations/scripts\n" - (tmp_path / ".rhiza" / ".env").write_text(env_content) - - logger.debug("Copied Makefile from %s to %s", root / "Makefile", tmp_path / "Makefile") - - # Copy split Makefiles if they exist (maintaining directory structure) - for split_file in SPLIT_MAKEFILES: - source_path = root / split_file - if source_path.exists(): - dest_path = tmp_path / split_file - dest_path.parent.mkdir(parents=True, exist_ok=True) - shutil.copy(source_path, dest_path) - logger.debug("Copied %s to %s", source_path, dest_path) - - # Move into tmp directory for isolation - old_cwd = Path.cwd() - os.chdir(tmp_path) - logger.debug("Changed working directory to %s", tmp_path) - try: - yield - finally: - os.chdir(old_cwd) - logger.debug("Restored working directory to %s", old_cwd) - - -def run_make( - logger, - args: list[str] | None = None, - check: bool = True, - dry_run: bool = True, - env: dict[str, str] | None = None, -) -> subprocess.CompletedProcess: - """Run `make` with optional arguments and return the completed process. 
- - Args: - logger: Logger used to emit diagnostic messages during the run - args: Additional arguments for make - check: If True, raise on non-zero return code - dry_run: If True, use -n to avoid executing commands - env: Optional environment variables to pass to the subprocess - """ - cmd = [MAKE] - if args: - cmd.extend(args) - # Use -s to reduce noise, -n to avoid executing commands - flags = "-sn" if dry_run else "-s" - cmd.insert(1, flags) - logger.info("Running command: %s", " ".join(cmd)) - result = subprocess.run(cmd, capture_output=True, text=True, env=env) - logger.debug("make exited with code %d", result.returncode) - if result.stdout: - logger.debug("make stdout (truncated to 500 chars):\n%s", result.stdout[:500]) - if result.stderr: - logger.debug("make stderr (truncated to 500 chars):\n%s", result.stderr[:500]) - if check and result.returncode != 0: - msg = f"make failed with code {result.returncode}:\nSTDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}" - raise AssertionError(msg) - return result - - -def setup_rhiza_git_repo(): - """Initialize a git repository and set remote to rhiza.""" - git = shutil.which("git") or "/usr/bin/git" - subprocess.run([git, "init"], check=True, capture_output=True) - subprocess.run( - [git, "remote", "add", "origin", "https://github.com/jebel-quant/rhiza"], - check=True, - capture_output=True, - ) +# Import shared helpers from test_utils and local conftest +from test_utils import strip_ansi class TestMakefile: @@ -225,7 +113,8 @@ def test_test_target_dry_run(self, logger): out = proc.stdout # Expect key steps assert "mkdir -p _tests/html-coverage _tests/html-report" in out - # Check for uv command with the configured path + # Check for uv command running pytest + assert ".venv/bin/python -m pytest" in out def test_test_target_without_source_folder(self, logger, tmp_path): """Test target should run without coverage when SOURCE_FOLDER doesn't exist.""" @@ -244,7 +133,7 @@ def test_test_target_without_source_folder(self, 
logger, tmp_path): # Should see warning about missing source folder assert "if [ -d nonexistent_src ]" in out # Should still run pytest but without coverage flags - assert "pytest tests" in out + assert ".venv/bin/python -m pytest" in out assert "--html=_tests/html-report/report.html" in out def test_python_version_defaults_to_3_13_if_missing(self, logger, tmp_path): @@ -294,12 +183,6 @@ def test_makefile_exists_at_root(self, root): assert makefile.exists() assert makefile.is_file() - def test_makefile_is_readable(self, root): - """Makefile should be readable.""" - makefile = root / "Makefile" - content = makefile.read_text() - assert len(content) > 0 - def test_makefile_contains_targets(self, root): """Makefile should contain expected targets (including split files).""" makefile = root / "Makefile" @@ -315,19 +198,6 @@ def test_makefile_contains_targets(self, root): for target in expected_targets: assert f"{target}:" in content or f".PHONY: {target}" in content - def test_makefile_has_uv_variables(self, root): - """Makefile should define UV-related variables.""" - makefile = root / "Makefile" - content = makefile.read_text() - - # Read split Makefiles as well - for split_file in SPLIT_MAKEFILES: - split_path = root / split_file - if split_path.exists(): - content += "\n" + split_path.read_text() - - assert "UV_BIN" in content or "uv" in content.lower() - def test_validate_target_skips_in_rhiza_repo(self, logger): """Validate target should skip execution in rhiza repository.""" setup_rhiza_git_repo() diff --git a/tests/test_rhiza/conftest.py b/.rhiza/tests/conftest.py similarity index 75% rename from tests/test_rhiza/conftest.py rename to .rhiza/tests/conftest.py index ed5521f..e459514 100644 --- a/tests/test_rhiza/conftest.py +++ b/.rhiza/tests/conftest.py @@ -9,62 +9,13 @@ import logging import os import pathlib -import re import shutil -import subprocess +import subprocess # nosec B404 import pytest -# Get absolute paths for executables to avoid S607 warnings 
-GIT = shutil.which("git") or "/usr/bin/git" -MAKE = shutil.which("make") or "/usr/bin/make" - - -def strip_ansi(text: str) -> str: - """Strip ANSI escape sequences from text.""" - ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") - return ansi_escape.sub("", text) - - -def run_make( - logger, args: list[str] | None = None, check: bool = True, dry_run: bool = True -) -> subprocess.CompletedProcess: - """Run `make` with optional arguments and return the completed process. - - Args: - logger: Logger used to emit diagnostic messages during the run - args: Additional arguments for make - check: If True, raise on non-zero return code - dry_run: If True, use -n to avoid executing commands - """ - cmd = [MAKE] - if args: - cmd.extend(args) - # Use -s to reduce noise, -n to avoid executing commands - flags = "-sn" if dry_run else "-s" - cmd.insert(1, flags) - logger.info("Running command: %s", " ".join(cmd)) - result = subprocess.run(cmd, capture_output=True, text=True) - logger.debug("make exited with code %d", result.returncode) - if result.stdout: - logger.debug("make stdout (truncated to 500 chars):\n%s", result.stdout[:500]) - if result.stderr: - logger.debug("make stderr (truncated to 500 chars):\n%s", result.stderr[:500]) - if check and result.returncode != 0: - msg = f"make failed with code {result.returncode}:\nSTDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}" - raise AssertionError(msg) - return result - - -def setup_rhiza_git_repo(): - """Initialize a git repository and set remote to rhiza.""" - subprocess.run([GIT, "init"], check=True, capture_output=True) - subprocess.run( - [GIT, "remote", "add", "origin", "https://github.com/jebel-quant/rhiza"], - check=True, - capture_output=True, - ) - +# Import shared helpers from test_utils (no __init__.py needed) +from test_utils import GIT, MAKE, run_make, setup_rhiza_git_repo, strip_ansi # noqa: F401 MOCK_MAKE_SCRIPT = """#!/usr/bin/env python3 import sys @@ -192,18 +143,18 @@ def git_repo(root, 
tmp_path, monkeypatch): # 1. Create bare remote remote_dir.mkdir() - subprocess.run([GIT, "init", "--bare", str(remote_dir)], check=True) + subprocess.run([GIT, "init", "--bare", str(remote_dir)], check=True) # nosec B603 # Ensure the remote's default HEAD points to master for predictable behavior - subprocess.run([GIT, "symbolic-ref", "HEAD", "refs/heads/master"], cwd=remote_dir, check=True) + subprocess.run([GIT, "symbolic-ref", "HEAD", "refs/heads/master"], cwd=remote_dir, check=True) # nosec B603 # 2. Clone to local - subprocess.run([GIT, "clone", str(remote_dir), str(local_dir)], check=True) + subprocess.run([GIT, "clone", str(remote_dir), str(local_dir)], check=True) # nosec B603 # Use monkeypatch to safely change cwd for the duration of the test monkeypatch.chdir(local_dir) # Ensure local default branch is 'master' to match test expectations - subprocess.run([GIT, "checkout", "-b", "master"], check=True) + subprocess.run([GIT, "checkout", "-b", "master"], check=True) # nosec B603 # Create pyproject.toml with open("pyproject.toml", "w") as f: @@ -252,10 +203,10 @@ def git_repo(root, tmp_path, monkeypatch): (script_dir / "release.sh").chmod(0o755) # Commit and push initial state - subprocess.run([GIT, "config", "user.email", "test@example.com"], check=True) - subprocess.run([GIT, "config", "user.name", "Test User"], check=True) - subprocess.run([GIT, "add", "."], check=True) - subprocess.run([GIT, "commit", "-m", "Initial commit"], check=True) - subprocess.run([GIT, "push", "origin", "master"], check=True) + subprocess.run([GIT, "config", "user.email", "test@example.com"], check=True) # nosec B603 + subprocess.run([GIT, "config", "user.name", "Test User"], check=True) # nosec B603 + subprocess.run([GIT, "add", "."], check=True) # nosec B603 + subprocess.run([GIT, "commit", "-m", "Initial commit"], check=True) # nosec B603 + subprocess.run([GIT, "push", "origin", "master"], check=True) # nosec B603 return local_dir diff --git 
a/.rhiza/tests/deps/test_dependency_health.py b/.rhiza/tests/deps/test_dependency_health.py new file mode 100644 index 0000000..e3de07d --- /dev/null +++ b/.rhiza/tests/deps/test_dependency_health.py @@ -0,0 +1,111 @@ +"""Dependency health tests — validate requirements files and pyproject.toml content.""" + +import re +import tomllib + + +def test_pyproject_has_requires_python(root): + """Verify that pyproject.toml declares requires-python in [project].""" + pyproject_path = root / "pyproject.toml" + assert pyproject_path.exists(), "pyproject.toml not found" + + with pyproject_path.open("rb") as f: + pyproject = tomllib.load(f) + + assert "project" in pyproject, "[project] section missing from pyproject.toml" + assert "requires-python" in pyproject["project"], "requires-python missing from [project] section" + + requires_python = pyproject["project"]["requires-python"] + assert isinstance(requires_python, str), "requires-python must be a string" + assert requires_python.strip(), "requires-python cannot be empty" + + +def test_requirements_files_are_valid_pip_specifiers(root): + """Verify that all lines in requirements files are valid pip requirement specifiers.""" + requirements_dir = root / ".rhiza" / "requirements" + assert requirements_dir.exists(), ".rhiza/requirements directory not found" + + # Pattern for valid requirement specifier (simplified check) + # Matches: package, package>=1.0, package[extra], git+https://... 
+ valid_specifier_pattern = re.compile( + r"^([a-zA-Z0-9]([a-zA-Z0-9._-]*[a-zA-Z0-9])?|git\+https?://)", + re.IGNORECASE, + ) + + for req_file in requirements_dir.glob("*.txt"): + if req_file.name == "README.md": + continue + + with req_file.open() as f: + for line_num, line in enumerate(f, start=1): + line = line.strip() + + # Skip empty lines and comments + if not line or line.startswith("#"): + continue + + # Basic validation: line should start with a valid package name or git URL + assert valid_specifier_pattern.match(line), ( + f"{req_file.name}:{line_num} - Invalid requirement specifier: {line}" + ) + + +def test_no_duplicate_packages_across_requirements(root): + """Verify that no package appears in multiple requirements files.""" + requirements_dir = root / ".rhiza" / "requirements" + assert requirements_dir.exists(), ".rhiza/requirements directory not found" + + # Known packages that intentionally appear in multiple files + # python-dotenv is used by both test infrastructure and development tools + allowed_duplicates = {"python-dotenv"} + + # Map of package name (lowercase) to list of files it appears in + package_locations = {} + + # Pattern to extract package name from requirement line + # Matches the package name before any version specifier, extra, or URL fragment + package_name_pattern = re.compile(r"^([a-zA-Z0-9][a-zA-Z0-9._-]*)", re.IGNORECASE) + + for req_file in requirements_dir.glob("*.txt"): + if req_file.name == "README.md": + continue + + with req_file.open() as f: + for line in f: + line = line.strip() + + # Skip empty lines and comments + if not line or line.startswith("#"): + continue + + # Extract package name + match = package_name_pattern.match(line) + if match: + package_name = match.group(1).lower() + + if package_name not in package_locations: + package_locations[package_name] = [] + + package_locations[package_name].append(req_file.name) + + # Find duplicates (excluding allowed ones) + duplicates = { + pkg: files for pkg, files in 
package_locations.items() if len(files) > 1 and pkg not in allowed_duplicates + } + + if duplicates: + duplicate_list = [f"{pkg} ({', '.join(files)})" for pkg, files in duplicates.items()] + msg = f"Packages found in multiple requirements files: {', '.join(duplicate_list)}" + raise AssertionError(msg) + + +def test_dotenv_in_test_requirements(root): + """Verify that python-dotenv is listed in tests.txt (test suite depends on it).""" + tests_req_path = root / ".rhiza" / "requirements" / "tests.txt" + assert tests_req_path.exists(), "tests.txt not found" + + with tests_req_path.open() as f: + content = f.read().lower() + + # Check for python-dotenv (case-insensitive) + assert "python-dotenv" in content, "python-dotenv not found in tests.txt (required by test suite)" diff --git a/tests/test_rhiza/test_book.py b/.rhiza/tests/integration/test_book_targets.py similarity index 80% rename from tests/test_rhiza/test_book.py rename to .rhiza/tests/integration/test_book_targets.py index 12fe210..5507571 100644 --- a/tests/test_rhiza/test_book.py +++ b/.rhiza/tests/integration/test_book_targets.py @@ -1,7 +1,7 @@ """Tests for book-related Makefile targets and their resilience.""" import shutil -import subprocess +import subprocess # nosec import pytest @@ -15,6 +15,10 @@ def test_no_book_folder(git_repo): available but check internally for the existence of the book folder. Using dry-run (-n) to test the target logic without actually executing. 
""" + # Skip if book.mk is not present in the repository + if not (git_repo / ".rhiza" / "make.d" / "book.mk").exists(): + pytest.skip("book.mk not found, skipping test") + if (git_repo / "book").exists(): shutil.rmtree(git_repo / "book") assert not (git_repo / "book").exists() @@ -22,7 +26,7 @@ def test_no_book_folder(git_repo): # Targets are now always defined via .rhiza/make.d/ # Use dry-run to verify they exist and can be parsed for target in ["book", "docs", "marimushka"]: - result = subprocess.run([MAKE, "-n", target], cwd=git_repo, capture_output=True, text=True) + result = subprocess.run([MAKE, "-n", target], cwd=git_repo, capture_output=True, text=True) # nosec # Target should exist (not "no rule to make target") assert "no rule to make target" not in result.stderr.lower(), ( f"Target {target} should be defined in .rhiza/make.d/" @@ -32,9 +36,13 @@ def test_no_book_folder(git_repo): def test_book_folder_but_no_mk(git_repo): """Test behavior when book folder exists but is empty. - With the new architecture, targets are always defined in .rhiza/make.d/02-book.mk, + With the new architecture, targets are always defined in .rhiza/make.d/book.mk, so they should exist regardless of the book folder contents. 
""" + # Skip if book.mk is not present in the repository + if not (git_repo / ".rhiza" / "make.d" / "book.mk").exists(): + pytest.skip("book.mk not found, skipping test") + # ensure book folder exists but is empty if (git_repo / "book").exists(): shutil.rmtree(git_repo / "book") @@ -49,7 +57,7 @@ def test_book_folder_but_no_mk(git_repo): # Targets are now always defined via .rhiza/make.d/ # Use dry-run to verify they exist and can be parsed for target in ["book", "docs", "marimushka"]: - result = subprocess.run([MAKE, "-n", target], cwd=git_repo, capture_output=True, text=True) + result = subprocess.run([MAKE, "-n", target], cwd=git_repo, capture_output=True, text=True) # nosec # Target should exist (not "no rule to make target") assert "no rule to make target" not in result.stderr.lower(), ( f"Target {target} should be defined in .rhiza/make.d/" @@ -57,18 +65,18 @@ def test_book_folder_but_no_mk(git_repo): def test_book_folder(git_repo): - """Test that .rhiza/make.d/02-book.mk defines the expected phony targets.""" + """Test that .rhiza/make.d/book.mk defines the expected phony targets.""" # Check for the new location of book targets - makefile = git_repo / ".rhiza" / "make.d" / "02-book.mk" + makefile = git_repo / ".rhiza" / "make.d" / "book.mk" if not makefile.exists(): - pytest.skip("02-book.mk not found, skipping test") + pytest.skip("book.mk not found, skipping test") content = makefile.read_text() # get the list of phony targets from the Makefile phony_targets = [line.strip() for line in content.splitlines() if line.startswith(".PHONY:")] if not phony_targets: - pytest.skip("No .PHONY targets found in 02-book.mk") + pytest.skip("No .PHONY targets found in book.mk") # Collect all targets from all .PHONY lines all_targets = set() @@ -76,7 +84,7 @@ def test_book_folder(git_repo): targets = phony_line.split(":")[1].strip().split() all_targets.update(targets) - expected_targets = {"book", "docs", "marimushka"} + expected_targets = {"book", "marimushka", 
"mkdocs-build"} assert expected_targets.issubset(all_targets), ( f"Expected phony targets to include {expected_targets}, got {all_targets}" ) @@ -88,6 +96,10 @@ def test_book_without_logo_file(git_repo): The build should succeed gracefully without a logo, and the generated HTML template should hide the logo element via onerror handler. """ + # Skip if book.mk is not present in the repository + if not (git_repo / ".rhiza" / "make.d" / "book.mk").exists(): + pytest.skip("book.mk not found, skipping test") + makefile = git_repo / "Makefile" if not makefile.exists(): pytest.skip("Makefile not found") @@ -107,7 +119,7 @@ def test_book_without_logo_file(git_repo): makefile.write_text("\n".join(new_lines)) # Dry-run the book target - it should still be valid - result = subprocess.run([MAKE, "-n", "book"], cwd=git_repo, capture_output=True, text=True) + result = subprocess.run([MAKE, "-n", "book"], cwd=git_repo, capture_output=True, text=True) # nosec assert "no rule to make target" not in result.stderr.lower(), "book target should work without LOGO_FILE" # Should not have errors about missing logo variable assert result.returncode == 0, f"Dry-run failed: {result.stderr}" @@ -118,6 +130,10 @@ def test_book_with_missing_logo_file(git_repo): The build should succeed but emit a warning about the missing logo. 
""" + # Skip if book.mk is not present in the repository + if not (git_repo / ".rhiza" / "make.d" / "book.mk").exists(): + pytest.skip("book.mk not found, skipping test") + makefile = git_repo / "Makefile" if not makefile.exists(): pytest.skip("Makefile not found") @@ -142,5 +158,5 @@ def test_book_with_missing_logo_file(git_repo): makefile.write_text("\n".join(new_lines)) # Dry-run should still succeed - result = subprocess.run([MAKE, "-n", "book"], cwd=git_repo, capture_output=True, text=True) + result = subprocess.run([MAKE, "-n", "book"], cwd=git_repo, capture_output=True, text=True) # nosec assert result.returncode == 0, f"Dry-run failed with missing logo: {result.stderr}" diff --git a/tests/test_rhiza/test_marimushka_target.py b/.rhiza/tests/integration/test_marimushka.py similarity index 96% rename from tests/test_rhiza/test_marimushka_target.py rename to .rhiza/tests/integration/test_marimushka.py index 074dde4..6abdaa9 100644 --- a/tests/test_rhiza/test_marimushka_target.py +++ b/.rhiza/tests/integration/test_marimushka.py @@ -8,7 +8,7 @@ import os import shutil -import subprocess +import subprocess # nosec import pytest @@ -59,7 +59,7 @@ def test_marimushka_target_success(git_repo): # Override UVX_BIN to use our mock marimushka CLI env["UVX_BIN"] = str(git_repo / "bin" / "marimushka") - result = subprocess.run([MAKE, "marimushka"], env=env, cwd=git_repo, capture_output=True, text=True) + result = subprocess.run([MAKE, "marimushka"], env=env, cwd=git_repo, capture_output=True, text=True) # nosec assert result.returncode == 0 assert "Exporting notebooks" in result.stdout @@ -87,7 +87,7 @@ def test_marimushka_no_python_files(git_repo): env["MARIMO_FOLDER"] = "book/marimo/notebooks" env["MARIMUSHKA_OUTPUT"] = "_marimushka" - result = subprocess.run([MAKE, "marimushka"], env=env, cwd=git_repo, capture_output=True, text=True) + result = subprocess.run([MAKE, "marimushka"], env=env, cwd=git_repo, capture_output=True, text=True) # nosec assert result.returncode 
== 0 assert (output_folder / "index.html").exists() diff --git a/tests/test_rhiza/test_notebooks.py b/.rhiza/tests/integration/test_notebook_execution.py similarity index 91% rename from tests/test_rhiza/test_notebooks.py rename to .rhiza/tests/integration/test_notebook_execution.py index f21d176..12036f9 100644 --- a/tests/test_rhiza/test_notebooks.py +++ b/.rhiza/tests/integration/test_notebook_execution.py @@ -1,7 +1,7 @@ """Tests for Marimo notebooks.""" import shutil -import subprocess +import subprocess # nosec from pathlib import Path import pytest @@ -36,6 +36,12 @@ def collect_marimo_notebooks(env_path: Path = RHIZA_ENV_PATH): NOTEBOOK_PATHS = collect_marimo_notebooks() +def test_notebooks_discovered(): + """At least one notebook should be discovered for parametrized tests to run.""" + if not NOTEBOOK_PATHS: + pytest.skip("No Marimo notebooks found — check MARIMO_FOLDER in .rhiza/.env") + + @pytest.mark.parametrize("notebook_path", NOTEBOOK_PATHS, ids=lambda p: p.name) def test_notebook_execution(notebook_path: Path): """Test if a Marimo notebook can be executed without errors. 
@@ -66,7 +72,7 @@ def test_notebook_execution(notebook_path: Path): "/dev/null", # We don't need the actual HTML output ] - result = subprocess.run(cmd, capture_output=True, text=True, cwd=notebook_path.parent) + result = subprocess.run(cmd, capture_output=True, text=True, cwd=notebook_path.parent) # nosec # Ensure process exit code indicates success assert result.returncode == 0, ( diff --git a/tests/test_rhiza/test_release_script.py b/.rhiza/tests/integration/test_release.py similarity index 86% rename from tests/test_rhiza/test_release_script.py rename to .rhiza/tests/integration/test_release.py index 090add2..f0b4df5 100644 --- a/tests/test_rhiza/test_release_script.py +++ b/.rhiza/tests/integration/test_release.py @@ -9,7 +9,7 @@ """ import shutil -import subprocess +import subprocess # nosec # Get absolute paths for executables to avoid S607 warnings from CodeFactor/Bandit SHELL = shutil.which("sh") or "/bin/sh" @@ -23,12 +23,12 @@ def test_release_creates_tag(git_repo): # Run release # 1. Prompts to create tag -> y # 2. 
Prompts to push tag -> y - result = subprocess.run([SHELL, str(script)], cwd=git_repo, input="y\ny\n", capture_output=True, text=True) + result = subprocess.run([SHELL, str(script)], cwd=git_repo, input="y\ny\n", capture_output=True, text=True) # nosec assert result.returncode == 0 assert "Tag 'v0.1.0' created locally" in result.stdout # Verify the tag exists - verify_result = subprocess.run( + verify_result = subprocess.run( # nosec [GIT, "tag", "-l", "v0.1.0"], cwd=git_repo, capture_output=True, @@ -42,10 +42,10 @@ def test_release_fails_if_local_tag_exists(git_repo): script = git_repo / ".rhiza" / "scripts" / "release.sh" # Create a local tag that matches current version - subprocess.run([GIT, "tag", "v0.1.0"], cwd=git_repo, check=True) + subprocess.run([GIT, "tag", "v0.1.0"], cwd=git_repo, check=True) # nosec # Input 'n' to abort - result = subprocess.run([SHELL, str(script)], cwd=git_repo, input="n\n", capture_output=True, text=True) + result = subprocess.run([SHELL, str(script)], cwd=git_repo, input="n\n", capture_output=True, text=True) # nosec assert result.returncode == 0 assert "Tag 'v0.1.0' already exists locally" in result.stdout @@ -57,10 +57,10 @@ def test_release_fails_if_remote_tag_exists(git_repo): script = git_repo / ".rhiza" / "scripts" / "release.sh" # Create tag locally and push to remote - subprocess.run([GIT, "tag", "v0.1.0"], cwd=git_repo, check=True) - subprocess.run([GIT, "push", "origin", "v0.1.0"], cwd=git_repo, check=True) + subprocess.run([GIT, "tag", "v0.1.0"], cwd=git_repo, check=True) # nosec + subprocess.run([GIT, "push", "origin", "v0.1.0"], cwd=git_repo, check=True) # nosec - result = subprocess.run([SHELL, str(script)], cwd=git_repo, input="y\n", capture_output=True, text=True) + result = subprocess.run([SHELL, str(script)], cwd=git_repo, input="y\n", capture_output=True, text=True) # nosec assert result.returncode == 1 assert "already exists on remote" in result.stdout @@ -74,7 +74,7 @@ def 
test_release_uncommitted_changes_failure(git_repo): with open(git_repo / "pyproject.toml", "a") as f: f.write("\n# comment") - result = subprocess.run([SHELL, str(script)], cwd=git_repo, capture_output=True, text=True) + result = subprocess.run([SHELL, str(script)], cwd=git_repo, capture_output=True, text=True) # nosec assert result.returncode == 1 assert "You have uncommitted changes" in result.stdout @@ -87,14 +87,14 @@ def test_release_pushes_if_ahead_of_remote(git_repo): # Create a commit locally that isn't on remote tracked_file = git_repo / "file.txt" tracked_file.touch() - subprocess.run([GIT, "add", "file.txt"], cwd=git_repo, check=True) - subprocess.run([GIT, "commit", "-m", "Local commit"], cwd=git_repo, check=True) + subprocess.run([GIT, "add", "file.txt"], cwd=git_repo, check=True) # nosec + subprocess.run([GIT, "commit", "-m", "Local commit"], cwd=git_repo, check=True) # nosec # Run release # 1. Prompts to push -> y # 2. Prompts to create tag -> y # 3. Prompts to push tag -> y - result = subprocess.run([SHELL, str(script)], cwd=git_repo, input="y\ny\ny\n", capture_output=True, text=True) + result = subprocess.run([SHELL, str(script)], cwd=git_repo, input="y\ny\ny\n", capture_output=True, text=True) # nosec assert result.returncode == 0 assert "Your branch is ahead" in result.stdout @@ -110,21 +110,21 @@ def test_release_fails_if_behind_remote(git_repo): # Create a commit on remote that isn't local # We need to clone another repo to push to remote other_clone = git_repo.parent / "other_clone" - subprocess.run([GIT, "clone", str(git_repo.parent / "remote.git"), str(other_clone)], check=True) + subprocess.run([GIT, "clone", str(git_repo.parent / "remote.git"), str(other_clone)], check=True) # nosec # Configure git user for other_clone (needed in CI) - subprocess.run([GIT, "config", "user.email", "test@example.com"], cwd=other_clone, check=True) - subprocess.run([GIT, "config", "user.name", "Test User"], cwd=other_clone, check=True) + subprocess.run([GIT, 
"config", "user.email", "test@example.com"], cwd=other_clone, check=True) # nosec + subprocess.run([GIT, "config", "user.name", "Test User"], cwd=other_clone, check=True) # nosec # Commit and push from other clone with open(other_clone / "other.txt", "w") as f: f.write("content") - subprocess.run([GIT, "add", "other.txt"], cwd=other_clone, check=True) - subprocess.run([GIT, "commit", "-m", "Remote commit"], cwd=other_clone, check=True) - subprocess.run([GIT, "push"], cwd=other_clone, check=True) + subprocess.run([GIT, "add", "other.txt"], cwd=other_clone, check=True) # nosec + subprocess.run([GIT, "commit", "-m", "Remote commit"], cwd=other_clone, check=True) # nosec + subprocess.run([GIT, "push"], cwd=other_clone, check=True) # nosec # Run release (it will fetch and see it's behind) - result = subprocess.run([SHELL, str(script)], cwd=git_repo, capture_output=True, text=True) + result = subprocess.run([SHELL, str(script)], cwd=git_repo, capture_output=True, text=True) # nosec assert result.returncode == 1 assert "Your branch is behind" in result.stdout @@ -135,7 +135,7 @@ def test_dry_run_flag_recognized(git_repo): script = git_repo / ".rhiza" / "scripts" / "release.sh" # Run with --dry-run flag - result = subprocess.run([SHELL, str(script), "--dry-run"], cwd=git_repo, capture_output=True, text=True) + result = subprocess.run([SHELL, str(script), "--dry-run"], cwd=git_repo, capture_output=True, text=True) # nosec # Should exit successfully assert result.returncode == 0 @@ -148,7 +148,7 @@ def test_dry_run_no_git_operations(git_repo): script = git_repo / ".rhiza" / "scripts" / "release.sh" # Get initial git state - tags_before = subprocess.run( + tags_before = subprocess.run( # nosec [GIT, "tag", "-l"], cwd=git_repo, capture_output=True, @@ -156,12 +156,12 @@ def test_dry_run_no_git_operations(git_repo): ).stdout # Run with --dry-run - result = subprocess.run([SHELL, str(script), "--dry-run"], cwd=git_repo, capture_output=True, text=True) + result = 
subprocess.run([SHELL, str(script), "--dry-run"], cwd=git_repo, capture_output=True, text=True) # nosec assert result.returncode == 0 # Verify no tags were created - tags_after = subprocess.run( + tags_after = subprocess.run( # nosec [GIT, "tag", "-l"], cwd=git_repo, capture_output=True, @@ -170,7 +170,7 @@ def test_dry_run_no_git_operations(git_repo): assert tags_before == tags_after # Verify tag doesn't exist using consistent pattern with other tests - tag_check = subprocess.run( + tag_check = subprocess.run( # nosec [GIT, "tag", "-l", "v0.1.0"], cwd=git_repo, capture_output=True, @@ -183,7 +183,7 @@ def test_dry_run_shows_appropriate_messages(git_repo): """Test that appropriate DRY-RUN messages are displayed.""" script = git_repo / ".rhiza" / "scripts" / "release.sh" - result = subprocess.run([SHELL, str(script), "--dry-run"], cwd=git_repo, capture_output=True, text=True) + result = subprocess.run([SHELL, str(script), "--dry-run"], cwd=git_repo, capture_output=True, text=True) # nosec assert result.returncode == 0 @@ -202,13 +202,13 @@ def test_dry_run_exits_successfully_without_creating_tags(git_repo): script = git_repo / ".rhiza" / "scripts" / "release.sh" # Run with --dry-run - result = subprocess.run([SHELL, str(script), "--dry-run"], cwd=git_repo, capture_output=True, text=True) + result = subprocess.run([SHELL, str(script), "--dry-run"], cwd=git_repo, capture_output=True, text=True) # nosec # Should exit successfully assert result.returncode == 0 # Verify no local tag was created - local_tag_check = subprocess.run( + local_tag_check = subprocess.run( # nosec [GIT, "tag", "-l", "v0.1.0"], cwd=git_repo, capture_output=True, @@ -217,7 +217,7 @@ def test_dry_run_exits_successfully_without_creating_tags(git_repo): assert "v0.1.0" not in local_tag_check.stdout # Verify no remote tag was pushed - remote_tag_check = subprocess.run( + remote_tag_check = subprocess.run( # nosec [GIT, "ls-remote", "--tags", "origin", "v0.1.0"], cwd=git_repo, capture_output=True, diff 
--git a/.rhiza/tests/structure/test_project_layout.py b/.rhiza/tests/structure/test_project_layout.py new file mode 100644 index 0000000..1ceac5f --- /dev/null +++ b/.rhiza/tests/structure/test_project_layout.py @@ -0,0 +1,57 @@ +"""Tests for the root pytest fixture that yields the repository root Path. + +This file and its associated tests flow down via a SYNC action from the jebel-quant/rhiza repository +(https://github.com/jebel-quant/rhiza). + +This module ensures the fixture resolves to the true project root and that +expected files/directories exist, enabling other tests to locate resources +reliably. +""" + +import pytest + + +class TestRootFixture: + """Tests for the root fixture that provides repository root path.""" + + def test_root_resolves_correctly_from_nested_location(self, root): + """Root should correctly resolve to repository root from .rhiza/tests/.""" + conftest_path = root / ".rhiza" / "tests" / "conftest.py" + assert conftest_path.exists() + + def test_root_contains_expected_directories(self, root): + """Root should contain all expected project directories.""" + required_dirs = [".rhiza"] + optional_dirs = ["src", "tests", "book"] # src/ is optional (rhiza itself doesn't have one) + + for dirname in required_dirs: + assert (root / dirname).exists(), f"Required directory {dirname} not found" + + # Check that at least one CI directory exists (.github or .gitlab) + ci_dirs = [".github", ".gitlab"] + if not any((root / ci_dir).exists() for ci_dir in ci_dirs): + pytest.fail(f"At least one CI directory from {ci_dirs} must exist") + + for dirname in optional_dirs: + if not (root / dirname).exists(): + pytest.skip(f"Optional directory {dirname} not present in this project") + + def test_root_contains_expected_files(self, root): + """Root should contain all expected configuration files.""" + required_files = [ + "pyproject.toml", + "README.md", + "Makefile", + ] + optional_files = [ + "ruff.toml", + ".gitignore", + ".editorconfig", + ] + + for filename 
in required_files: + assert (root / filename).exists(), f"Required file {filename} not found" + + for filename in optional_files: + if not (root / filename).exists(): + pytest.skip(f"Optional file {filename} not present in this project") diff --git a/tests/test_rhiza/test_requirements_folder.py b/.rhiza/tests/structure/test_requirements.py similarity index 90% rename from tests/test_rhiza/test_requirements_folder.py rename to .rhiza/tests/structure/test_requirements.py index e9450b8..1bf9d04 100644 --- a/tests/test_rhiza/test_requirements_folder.py +++ b/.rhiza/tests/structure/test_requirements.py @@ -4,14 +4,16 @@ requirement files for development dependencies. """ +from typing import ClassVar + class TestRequirementsFolder: """Tests for the .rhiza/requirements folder structure.""" # Expected requirements files - EXPECTED_REQUIREMENTS_FILES = [ - "tests.txt", - "marimo.txt", + EXPECTED_REQUIREMENTS_FILES: ClassVar[list[str]] = [ + # "tests.txt", # may not be present in all repositories + # "marimo.txt", # may not be present in all repositories "docs.txt", "tools.txt", ] diff --git a/.rhiza/tests/sync/conftest.py b/.rhiza/tests/sync/conftest.py new file mode 100644 index 0000000..14a2f66 --- /dev/null +++ b/.rhiza/tests/sync/conftest.py @@ -0,0 +1,91 @@ +"""Shared fixtures and helpers for sync tests. + +Provides environment setup for template sync, workflow versioning, +and content validation tests. +""" + +from __future__ import annotations + +import os +import shutil +from pathlib import Path + +import pytest + +# Import from test_utils instead of relative import +from test_utils import setup_rhiza_git_repo + + +@pytest.fixture(autouse=True) +def setup_sync_env(logger, root, tmp_path: Path): + """Set up a temporary environment for sync tests with Makefile, templates, and git. 
+ + This fixture creates a complete test environment with: + - Makefile and rhiza.mk configuration + - .rhiza-version file and .env configuration + - template.yml and pyproject.toml + - Initialized git repository (configured as rhiza origin) + - src/ and tests/ directories to satisfy validate target + """ + logger.debug("Setting up sync test environment: %s", tmp_path) + + # Copy the main Makefile into the temporary working directory + shutil.copy(root / "Makefile", tmp_path / "Makefile") + + # Copy core Rhiza Makefiles and version file + (tmp_path / ".rhiza").mkdir(exist_ok=True) + shutil.copy(root / ".rhiza" / "rhiza.mk", tmp_path / ".rhiza" / "rhiza.mk") + + # Copy split Makefiles from make.d directory + split_makefiles = [ + "bootstrap.mk", + "quality.mk", + "releasing.mk", + "test.mk", + "book.mk", + "marimo.mk", + "presentation.mk", + "github.mk", + "agentic.mk", + "docker.mk", + "docs.mk", + ] + (tmp_path / ".rhiza" / "make.d").mkdir(parents=True, exist_ok=True) + for mk_file in split_makefiles: + source_path = root / ".rhiza" / "make.d" / mk_file + if source_path.exists(): + shutil.copy(source_path, tmp_path / ".rhiza" / "make.d" / mk_file) + + # Copy .rhiza-version if it exists + if (root / ".rhiza" / ".rhiza-version").exists(): + shutil.copy(root / ".rhiza" / ".rhiza-version", tmp_path / ".rhiza" / ".rhiza-version") + + # Create a minimal, deterministic .rhiza/.env for tests + env_content = "SCRIPTS_FOLDER=.rhiza/scripts\nCUSTOM_SCRIPTS_FOLDER=.rhiza/customisations/scripts\n" + (tmp_path / ".rhiza" / ".env").write_text(env_content) + + logger.debug("Copied Makefile from %s to %s", root / "Makefile", tmp_path / "Makefile") + + # Create a minimal .rhiza/template.yml + (tmp_path / ".rhiza" / "template.yml").write_text("repository: Jebel-Quant/rhiza\nref: main\n") + + # Sort out pyproject.toml + (tmp_path / "pyproject.toml").write_text('[project]\nname = "test-project"\nversion = "0.1.0"\n') + + # Move into tmp directory for isolation + old_cwd = Path.cwd() + 
os.chdir(tmp_path) + logger.debug("Changed working directory to %s", tmp_path) + + # Initialize a git repo so that commands checking for it (like materialize) don't fail validation + setup_rhiza_git_repo() + + # Create src and tests directories to satisfy validate + (tmp_path / "src").mkdir(exist_ok=True) + (tmp_path / "tests").mkdir(exist_ok=True) + + try: + yield + finally: + os.chdir(old_cwd) + logger.debug("Restored working directory to %s", old_cwd) diff --git a/tests/test_rhiza/test_docstrings.py b/.rhiza/tests/sync/test_docstrings.py similarity index 93% rename from tests/test_rhiza/test_docstrings.py rename to .rhiza/tests/sync/test_docstrings.py index 6d5f4ed..231e57b 100644 --- a/tests/test_rhiza/test_docstrings.py +++ b/.rhiza/tests/sync/test_docstrings.py @@ -14,6 +14,10 @@ from pathlib import Path import pytest +from dotenv import dotenv_values + +# Read .rhiza/.env at collection time (no environment side-effects). +RHIZA_ENV_PATH = Path(".rhiza/.env") def _iter_modules_from_path(logger, package_path: Path, src_path: Path): @@ -47,7 +51,9 @@ def _find_packages(src_path: Path): def test_doctests(logger, root, monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture[str]): """Run doctests for each package directory.""" - src_path = root / "src" + values = dotenv_values(root / RHIZA_ENV_PATH) if (root / RHIZA_ENV_PATH).exists() else {} + source_folder = values.get("SOURCE_FOLDER", "src") + src_path = root / source_folder logger.info("Starting doctest discovery in: %s", src_path) if not src_path.exists(): diff --git a/tests/test_rhiza/test_readme.py b/.rhiza/tests/sync/test_readme_validation.py similarity index 91% rename from tests/test_rhiza/test_readme.py rename to .rhiza/tests/sync/test_readme_validation.py index 6150b07..29f1b0e 100644 --- a/tests/test_rhiza/test_readme.py +++ b/.rhiza/tests/sync/test_readme_validation.py @@ -40,7 +40,7 @@ def test_readme_runs(logger, root): # Trust boundary: we execute Python snippets sourced from README.md in 
this repo. # The README is part of the trusted repository content and reviewed in PRs. logger.debug("Executing README code via %s -c ...", sys.executable) - result = subprocess.run([sys.executable, "-c", code], capture_output=True, text=True, cwd=root) + result = subprocess.run([sys.executable, "-c", code], capture_output=True, text=True, cwd=root) # nosec stdout = result.stdout logger.debug("Execution finished with return code %d", result.returncode) @@ -86,15 +86,6 @@ def test_readme_code_is_syntactically_valid(self, root): class TestReadmeBashFragments: """Tests for bash code fragments in README.""" - def test_bash_blocks_are_non_empty(self, root): - """Bash code blocks in README should not be empty.""" - readme = root / "README.md" - content = readme.read_text(encoding="utf-8") - bash_blocks = BASH_BLOCK.findall(content) - - for i, code in enumerate(bash_blocks): - assert code.strip(), f"Bash block {i} should not be empty" - def test_bash_blocks_basic_syntax(self, root, logger): """Bash code blocks should have basic valid syntax (can be parsed by bash -n).""" readme = root / "README.md" @@ -120,7 +111,7 @@ def test_bash_blocks_basic_syntax(self, root, logger): # Use bash -n to check syntax without executing # Trust boundary: we use bash -n which only parses without executing - result = subprocess.run( + result = subprocess.run( # nosec [BASH, "-n"], input=code, capture_output=True, diff --git a/tests/test_rhiza/test_rhiza_workflows.py b/.rhiza/tests/sync/test_rhiza_version.py similarity index 56% rename from tests/test_rhiza/test_rhiza_workflows.py rename to .rhiza/tests/sync/test_rhiza_version.py index b3ee926..d2e0d10 100644 --- a/tests/test_rhiza/test_rhiza_workflows.py +++ b/.rhiza/tests/sync/test_rhiza_version.py @@ -11,64 +11,8 @@ from __future__ import annotations -import os -import shutil -import subprocess -from pathlib import Path - -import pytest - -from .conftest import run_make, setup_rhiza_git_repo, strip_ansi - - -@pytest.fixture(autouse=True) 
-def setup_tmp_makefile(logger, root, tmp_path: Path): - """Copy the Makefile and necessary files into a temp directory and chdir there. - - We rely on `make -n` so that no real commands are executed. - """ - logger.debug("Setting up temporary Makefile test dir: %s", tmp_path) - - # Copy the main Makefile into the temporary working directory - shutil.copy(root / "Makefile", tmp_path / "Makefile") - - # Copy core Rhiza Makefiles and version file - (tmp_path / ".rhiza").mkdir(exist_ok=True) - shutil.copy(root / ".rhiza" / "rhiza.mk", tmp_path / ".rhiza" / "rhiza.mk") - - # Copy .rhiza-version if it exists - if (root / ".rhiza" / ".rhiza-version").exists(): - shutil.copy(root / ".rhiza" / ".rhiza-version", tmp_path / ".rhiza" / ".rhiza-version") - - # Create a minimal, deterministic .rhiza/.env for tests - env_content = "SCRIPTS_FOLDER=.rhiza/scripts\nCUSTOM_SCRIPTS_FOLDER=.rhiza/customisations/scripts\n" - (tmp_path / ".rhiza" / ".env").write_text(env_content) - - logger.debug("Copied Makefile from %s to %s", root / "Makefile", tmp_path / "Makefile") - - # Create a minimal .rhiza/template.yml - (tmp_path / ".rhiza" / "template.yml").write_text("repository: Jebel-Quant/rhiza\nref: main\n") - - # Sort out pyproject.toml - (tmp_path / "pyproject.toml").write_text('[project]\nname = "test-project"\nversion = "0.1.0"\n') - - # Move into tmp directory for isolation - old_cwd = Path.cwd() - os.chdir(tmp_path) - logger.debug("Changed working directory to %s", tmp_path) - - # Initialize a git repo so that commands checking for it (like materialize) don't fail validation - setup_rhiza_git_repo() - - # Create src and tests directories to satisfy validate - (tmp_path / "src").mkdir(exist_ok=True) - (tmp_path / "tests").mkdir(exist_ok=True) - - try: - yield - finally: - os.chdir(old_cwd) - logger.debug("Restored working directory to %s", old_cwd) +# Import from test_utils instead of relative import +from test_utils import run_make, strip_ansi class TestRhizaVersion: @@ -95,15 
+39,24 @@ def test_rhiza_version_exported_in_makefile(self, logger): assert any(char.isdigit() for char in out) def test_rhiza_version_defaults_to_0_9_0_without_file(self, logger, tmp_path): - """RHIZA_VERSION should default to 0.9.0 if .rhiza-version doesn't exist.""" + """RHIZA_VERSION should default to 0.10.2 if .rhiza-version doesn't exist.""" # Remove the .rhiza-version file version_file = tmp_path / ".rhiza" / ".rhiza-version" if version_file.exists(): version_file.unlink() - proc = run_make(logger, ["print-RHIZA_VERSION"], dry_run=False) + # Clear RHIZA_VERSION from environment to test the default value + import os + import subprocess + + env = os.environ.copy() + env.pop("RHIZA_VERSION", None) + + cmd = ["/usr/bin/make", "-s", "print-RHIZA_VERSION"] + logger.info("Running command: %s", " ".join(cmd)) + proc = subprocess.run(cmd, capture_output=True, text=True, env=env) out = strip_ansi(proc.stdout) - assert "Value of RHIZA_VERSION:\n0.9.0" in out + assert "Value of RHIZA_VERSION:\n0.10.2" in out def test_rhiza_version_used_in_sync_target(self, logger): """Sync target should use RHIZA_VERSION from .rhiza-version.""" @@ -164,49 +117,6 @@ def test_summarise_sync_requires_install_uv(self, logger): # This might be implicit via the dependency chain assert "rhiza" in out - -class TestWorkflowSync: - """Tests to validate the workflow pattern used in .github/workflows/rhiza_sync.yml.""" - - def test_workflow_version_reading_pattern(self, logger, tmp_path): - """Test the pattern used in workflow to read Rhiza version.""" - # Create .rhiza-version file - version_file = tmp_path / ".rhiza" / ".rhiza-version" - version_file.write_text("0.9.5\n") - - # Simulate the workflow's version reading step - result = subprocess.run( - [shutil.which("cat") or "cat", str(version_file)], - capture_output=True, - text=True, - check=True, - ) - version = result.stdout.strip() - - assert version == "0.9.5" - - def test_workflow_version_fallback_pattern(self, logger, tmp_path): - """Test 
the fallback pattern when .rhiza-version doesn't exist.""" - # Ensure .rhiza-version doesn't exist - version_file = tmp_path / ".rhiza" / ".rhiza-version" - if version_file.exists(): - version_file.unlink() - - # Simulate the workflow's version reading with fallback using proper subprocess - try: - result = subprocess.run( - [shutil.which("cat") or "cat", str(version_file)], - capture_output=True, - text=True, - check=True, - ) - version = result.stdout.strip() - except subprocess.CalledProcessError: - # File doesn't exist, use fallback - version = "0.9.0" - - assert version == "0.9.0" - def test_workflow_uvx_command_format(self, logger): """Test that the uvx command format matches workflow expectations.""" # This test validates the command format used in both Makefile and workflow diff --git a/.rhiza/tests/test_utils.py b/.rhiza/tests/test_utils.py new file mode 100644 index 0000000..d90db5e --- /dev/null +++ b/.rhiza/tests/test_utils.py @@ -0,0 +1,63 @@ +"""Shared test utilities. + +Helper functions used across the test suite. Extracted from conftest.py to avoid +relative imports and __init__.py requirements in test directories. + +This file and its associated utilities flow down via a SYNC action from the +jebel-quant/rhiza repository (https://github.com/jebel-quant/rhiza). +""" + +import re +import shutil +import subprocess # nosec B404 + +# Get absolute paths for executables to avoid S607 warnings +GIT = shutil.which("git") or "/usr/bin/git" +MAKE = shutil.which("make") or "/usr/bin/make" + + +def strip_ansi(text: str) -> str: + """Strip ANSI escape sequences from text.""" + ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") + return ansi_escape.sub("", text) + + +def run_make( + logger, args: list[str] | None = None, check: bool = True, dry_run: bool = True, env: dict[str, str] | None = None +) -> subprocess.CompletedProcess: + """Run `make` with optional arguments and return the completed process. 
+ + Args: + logger: Logger used to emit diagnostic messages during the run + args: Additional arguments for make + check: If True, raise on non-zero return code + dry_run: If True, use -n to avoid executing commands + env: Optional environment variables to pass to the subprocess + """ + cmd = [MAKE] + if args: + cmd.extend(args) + # Use -s to reduce noise, -n to avoid executing commands + flags = "-sn" if dry_run else "-s" + cmd.insert(1, flags) + logger.info("Running command: %s", " ".join(cmd)) + result = subprocess.run(cmd, capture_output=True, text=True, env=env) # nosec B603 + logger.debug("make exited with code %d", result.returncode) + if result.stdout: + logger.debug("make stdout (truncated to 500 chars):\n%s", result.stdout[:500]) + if result.stderr: + logger.debug("make stderr (truncated to 500 chars):\n%s", result.stderr[:500]) + if check and result.returncode != 0: + msg = f"make failed with code {result.returncode}:\nSTDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}" + raise AssertionError(msg) + return result + + +def setup_rhiza_git_repo(): + """Initialize a git repository and set remote to rhiza.""" + subprocess.run([GIT, "init"], check=True, capture_output=True) # nosec B603 + subprocess.run( # nosec B603 + [GIT, "remote", "add", "origin", "https://github.com/jebel-quant/rhiza"], + check=True, + capture_output=True, + ) diff --git a/.rhiza/tests/utils/conftest.py b/.rhiza/tests/utils/conftest.py new file mode 100644 index 0000000..c69f973 --- /dev/null +++ b/.rhiza/tests/utils/conftest.py @@ -0,0 +1,12 @@ +"""Utility test fixtures and setup. + +This conftest sets up the Python path to allow imports from .rhiza/utils +for testing utility scripts and helpers. 
+""" + +import sys +from pathlib import Path + +# Add the utils directory to the path for imports +# From .rhiza/tests/utils/conftest.py, .rhiza/utils is 3 levels up then down into utils +sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent / "utils")) diff --git a/tests/test_rhiza/test_git_repo_fixture.py b/.rhiza/tests/utils/test_git_repo_fixture.py similarity index 100% rename from tests/test_rhiza/test_git_repo_fixture.py rename to .rhiza/tests/utils/test_git_repo_fixture.py diff --git a/tests/test_rhiza/test_version_matrix.py b/.rhiza/tests/utils/test_version_matrix.py similarity index 98% rename from tests/test_rhiza/test_version_matrix.py rename to .rhiza/tests/utils/test_version_matrix.py index 6c7bbdc..18604f2 100644 --- a/tests/test_rhiza/test_version_matrix.py +++ b/.rhiza/tests/utils/test_version_matrix.py @@ -4,14 +4,7 @@ for malformed inputs. """ -import sys -from pathlib import Path - import pytest - -# Add the utils directory to the path for imports -sys.path.insert(0, str(Path(__file__).parent.parent.parent / ".rhiza" / "utils")) - from version_matrix import ( CANDIDATES, PyProjectError, diff --git a/pytest.ini b/pytest.ini index a39a4c5..5706c38 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,4 +1,5 @@ [pytest] +testpaths = tests # Enable live logs on console log_cli = true # Show DEBUG+ messages diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..ae9263f --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,11 @@ +"""Configuration for pytest.""" + +from pathlib import Path + +import pytest + + +@pytest.fixture(scope="session") +def root(): + """Fixture for the root directory of the project.""" + return Path(__file__).parent.parent diff --git a/tests/test_rhiza/README.md b/tests/test_rhiza/README.md deleted file mode 100644 index 33942d8..0000000 --- a/tests/test_rhiza/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# Rhiza Test Suite - -This directory contains the core test suite that flows down via SYNC 
action from the [jebel-quant/rhiza](https://github.com/jebel-quant/rhiza) repository. - -## Purpose - -These tests validate the foundational infrastructure and workflows that are shared across all Rhiza-synchronized projects: - -- **Git-based workflows**: Version bumping, releasing, and tagging -- **Project structure**: Ensuring required files and directories exist -- **Build automation**: Makefile targets and commands -- **Documentation**: README code examples and docstring validation -- **Synchronization**: Template file exclusion and sync script behavior -- **Development tools**: Mock fixtures for testing in isolation - -## Test Organization - -- `conftest.py` - Pytest fixtures including the `git_repo` fixture for sandboxed testing -- `test_bump_script.py` - Tests for version bumping workflow -- `test_docstrings.py` - Doctest validation across all modules -- `test_git_repo_fixture.py` - Validation of the mock git repository fixture -- `test_makefile.py` - Makefile target validation using dry-runs -- `test_marimushka_script.py` - Testing the marimushka Makefile target (exports notebooks to static HTML) -- `test_readme.py` - README code example execution and validation -- `test_release_script.py` - Release and tagging workflow tests -- `test_structure.py` - Project structure and file existence checks -- `test_updatereadme_script` - Testing our abilities to embed the output of make directly in markdown files. - -## Exclusion from Sync - -While it is **technically possible** to exclude these tests from synchronization by adding them to the `exclude` section of your `template.yml` file, this is **not recommended**. - -These tests ensure that the shared infrastructure components work correctly in your project. 
Excluding them means: - -- ❌ No validation of version bumping and release workflows -- ❌ No automated checks for project structure requirements -- ❌ Missing critical integration tests for synced scripts -- ❌ Potential breakage when shared components are updated - -## When to Exclude - -You should only consider excluding specific tests if: - -1. Your project has fundamentally different workflow requirements -2. You've replaced the synced scripts with custom implementations -3. You have equivalent or better test coverage for the same functionality - -If you must exclude tests, do so selectively rather than excluding the entire `test_rhiza/` directory. - -## Running the Tests - -```bash -# Run all Rhiza tests -make test - -# Run specific test files -pytest tests/test_rhiza/test_bump_script.py -v - -# Run tests with detailed output -pytest tests/test_rhiza/ -vv -``` - -## Customization - -If you need to customize or extend these tests for your project-specific needs, consider: - -1. Creating additional test files in `tests/` (outside `test_rhiza/`) -2. Adding project-specific fixtures to a separate `conftest.py` -3. Keeping the synced tests intact for baseline validation - -This approach maintains the safety net of standardized tests while accommodating your unique requirements. diff --git a/tests/test_rhiza/__init__.py b/tests/test_rhiza/__init__.py deleted file mode 100644 index 20e4136..0000000 --- a/tests/test_rhiza/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Test package for rhiza tests. - -This file makes test_rhiza a Python package, enabling relative imports -within the test modules (e.g., from .conftest import ...). -""" diff --git a/tests/test_rhiza/test_bundles.py b/tests/test_rhiza/test_bundles.py new file mode 100644 index 0000000..dd8e8bc --- /dev/null +++ b/tests/test_rhiza/test_bundles.py @@ -0,0 +1,97 @@ +"""Tests for the template bundles configuration file. 
+ +This file validates the structure and consistency of .rhiza/template-bundles.yml, +ensuring all bundle definitions are properly formatted and reference existing files. +""" + +# This test file should not(!) be copied into repositories further downstream +from __future__ import annotations + +import tomllib + +import pytest +import yaml + + +@pytest.fixture +def template_bundles_path(root): + """Return path to template-bundles.yml.""" + return root / ".rhiza" / "template-bundles.yml" + + +@pytest.fixture +def template_bundles(template_bundles_path): + """Load and return template bundles configuration.""" + with open(template_bundles_path) as f: + return yaml.safe_load(f) + + +class TestTemplateBundlesStructure: + """Tests for template bundles YAML structure.""" + + def test_template_bundles_file_exists(self, template_bundles_path): + """Template bundles configuration file should exist.""" + assert template_bundles_path.exists() + + def test_template_bundles_is_valid_yaml(self, template_bundles_path): + """Template bundles file should be valid YAML.""" + with open(template_bundles_path) as f: + data = yaml.safe_load(f) + assert data is not None + + def test_has_version_field(self, template_bundles): + """Template bundles should have a version field.""" + assert "version" in template_bundles + assert isinstance(template_bundles["version"], str) + + def test_version_matches_pyproject(self, template_bundles, root): + """Template bundles version should match pyproject.toml version.""" + pyproject_path = root / "pyproject.toml" + with open(pyproject_path, "rb") as f: + pyproject = tomllib.load(f) + + pyproject_version = pyproject["project"]["version"] + bundles_version = template_bundles["version"] + + assert bundles_version == pyproject_version, ( + f"Version mismatch: template-bundles.yml has '{bundles_version}' " + f"but pyproject.toml has '{pyproject_version}'. " + "Run 'make bump' to sync versions." 
+ ) + + def test_has_bundles_section(self, template_bundles): + """Template bundles should have a bundles section.""" + assert "bundles" in template_bundles + assert isinstance(template_bundles["bundles"], dict) + + +class TestTemplateBundleDefinitions: + """Tests for individual bundle definitions.""" + + def test_all_bundles_have_required_fields(self, template_bundles): + """Each bundle should have required fields.""" + bundles = template_bundles.get("bundles", {}) + required_fields = {"description", "files"} + + for bundle_name, bundle_config in bundles.items(): + assert isinstance(bundle_config, dict), f"Bundle {bundle_name} should be a dict" + for field in required_fields: + assert field in bundle_config, f"Bundle {bundle_name} missing {field}" + + def test_bundle_descriptions_are_strings(self, template_bundles): + """Bundle descriptions should be strings.""" + bundles = template_bundles.get("bundles", {}) + for bundle_name, bundle_config in bundles.items(): + assert isinstance(bundle_config["description"], str), f"Bundle {bundle_name} description should be a string" + + def test_bundle_files_are_lists(self, template_bundles): + """Bundle files should be lists.""" + bundles = template_bundles.get("bundles", {}) + for bundle_name, bundle_config in bundles.items(): + assert isinstance(bundle_config["files"], list), f"Bundle {bundle_name} files should be a list" + + def test_core_bundle_is_marked_required(self, template_bundles): + """Core bundle should be marked as required.""" + bundles = template_bundles.get("bundles", {}) + assert "core" in bundles + assert bundles["core"].get("required") is True diff --git a/tests/test_rhiza/test_makefile_gh.py b/tests/test_rhiza/test_makefile_gh.py deleted file mode 100644 index 3675009..0000000 --- a/tests/test_rhiza/test_makefile_gh.py +++ /dev/null @@ -1,122 +0,0 @@ -"""Tests for the GitHub Makefile targets using safe dry-runs. 
- -These tests validate that the .github/github.mk targets are correctly exposed -and emit the expected commands without actually executing them. -""" - -from __future__ import annotations - -import os -import shutil -import subprocess -from pathlib import Path - -import pytest - -# Get absolute paths for executables to avoid S607 warnings from CodeFactor/Bandit -MAKE = shutil.which("make") or "/usr/bin/make" - -# We need to copy these files to the temp dir for the tests to work -REQUIRED_FILES = [ - ".rhiza/make.d/05-github.mk", -] - - -@pytest.fixture(autouse=True) -def setup_gh_makefile(logger, root, tmp_path: Path): - """Copy the Makefile and GitHub Makefile into a temp directory.""" - logger.debug("Setting up temporary GitHub Makefile test dir: %s", tmp_path) - - # Copy the main Makefile - if (root / "Makefile").exists(): - shutil.copy(root / "Makefile", tmp_path / "Makefile") - - # Copy core Rhiza Makefiles - if (root / ".rhiza" / "rhiza.mk").exists(): - (tmp_path / ".rhiza").mkdir(exist_ok=True) - shutil.copy(root / ".rhiza" / "rhiza.mk", tmp_path / ".rhiza" / "rhiza.mk") - - if (root / ".rhiza" / ".env").exists(): - (tmp_path / ".rhiza").mkdir(exist_ok=True) - shutil.copy(root / ".rhiza" / ".env", tmp_path / ".rhiza" / ".env") - - # Copy the entire .rhiza/make.d directory (rhiza.mk includes *.mk from there) - make_d_src = root / ".rhiza" / "make.d" - if make_d_src.exists(): - make_d_dst = tmp_path / ".rhiza" / "make.d" - shutil.copytree(make_d_src, make_d_dst, dirs_exist_ok=True) - logger.debug("Copied %s to %s", make_d_src, make_d_dst) - else: - pytest.skip(".rhiza/make.d directory not found") - - # Move into tmp directory - old_cwd = Path.cwd() - os.chdir(tmp_path) - try: - yield - finally: - os.chdir(old_cwd) - - -def run_make( - logger, args: list[str] | None = None, check: bool = True, dry_run: bool = True -) -> subprocess.CompletedProcess: - """Run `make` with optional arguments.""" - cmd = [MAKE] - if args: - cmd.extend(args) - flags = "-sn" if 
dry_run else "-s" - cmd.insert(1, flags) - - logger.info("Running command: %s", " ".join(cmd)) - result = subprocess.run(cmd, capture_output=True, text=True) - - if check and result.returncode != 0: - msg = f"make failed with code {result.returncode}:\nSTDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}" - raise AssertionError(msg) - return result - - -def test_gh_targets_exist(logger): - """Verify that GitHub targets are listed in help.""" - result = run_make(logger, ["help"], dry_run=False) - output = result.stdout - - expected_targets = ["gh-install", "view-prs", "view-issues", "failed-workflows", "whoami"] - - for target in expected_targets: - assert target in output, f"Target {target} not found in help output" - - -def test_gh_install_dry_run(logger): - """Verify gh-install target dry-run.""" - result = run_make(logger, ["gh-install"]) - # In dry-run, we expect to see the shell commands that would be executed. - # Since the recipe uses @if, make -n might verify the syntax or show the command if not silenced. - # However, with -s (silent), make -n might not show much for @ commands unless they are echoed. - # But we mainly want to ensure it runs without error. 
- assert result.returncode == 0 - - -def test_view_prs_dry_run(logger): - """Verify view-prs target dry-run.""" - result = run_make(logger, ["view-prs"]) - assert result.returncode == 0 - - -def test_view_issues_dry_run(logger): - """Verify view-issues target dry-run.""" - result = run_make(logger, ["view-issues"]) - assert result.returncode == 0 - - -def test_failed_workflows_dry_run(logger): - """Verify failed-workflows target dry-run.""" - result = run_make(logger, ["failed-workflows"]) - assert result.returncode == 0 - - -def test_whoami_dry_run(logger): - """Verify whoami target dry-run.""" - result = run_make(logger, ["whoami"]) - assert result.returncode == 0 diff --git a/tests/test_rhiza/test_structure.py b/tests/test_rhiza/test_structure.py deleted file mode 100644 index f48ce66..0000000 --- a/tests/test_rhiza/test_structure.py +++ /dev/null @@ -1,58 +0,0 @@ -"""Tests for the root pytest fixture that yields the repository root Path. - -This file and its associated tests flow down via a SYNC action from the jebel-quant/rhiza repository -(https://github.com/jebel-quant/rhiza). - -This module ensures the fixture resolves to the true project root and that -expected files/directories exist, enabling other tests to locate resources -reliably. 
-""" - -import warnings -from pathlib import Path - - -class TestRootFixture: - """Tests for the root fixture that provides repository root path.""" - - def test_root_returns_pathlib_path(self, root): - """Root fixture should return a pathlib.Path object.""" - assert isinstance(root, Path) - - def test_root_is_absolute_path(self, root): - """Root fixture should return an absolute path.""" - assert root.is_absolute() - - def test_root_resolves_correctly_from_nested_location(self, root): - """Root should correctly resolve to repository root from tests/test_config_templates/.""" - conftest_path = root / "tests" / "test_rhiza" / "conftest.py" - assert conftest_path.exists() - - def test_root_contains_expected_directories(self, root): - """Root should contain all expected project directories.""" - expected_dirs = [".rhiza", "src", "tests", "book"] - for dirname in expected_dirs: - if not (root / dirname).exists(): - warnings.warn(f"Expected directory {dirname} not found", stacklevel=2) - - def test_root_contains_expected_files(self, root): - """Root should contain all expected configuration files.""" - expected_files = [ - "pyproject.toml", - "README.md", - "Makefile", - "ruff.toml", - ".gitignore", - ".editorconfig", - ] - for filename in expected_files: - if not (root / filename).exists(): - warnings.warn(f"Expected file {filename} not found", stacklevel=2) - - def test_root_can_locate_github_scripts(self, root): - """Root should allow locating GitHub scripts.""" - scripts_dir = root / ".rhiza" / "scripts" - if not scripts_dir.exists(): - warnings.warn("GitHub scripts directory not found", stacklevel=2) - elif not (scripts_dir / "release.sh").exists(): - warnings.warn("Expected script release.sh not found", stacklevel=2)