Feat/split tests #3
name: Robot Framework Tests (PR - Label Triggered)

on:
  pull_request:
    types: [labeled, synchronize]

permissions:
  contents: read
  pull-requests: write
  issues: write
  pages: write
  id-token: write

jobs:
  pr-full-tests:
    # Only run if PR has the 'test-with-api-keys' label
    if: contains(github.event.pull_request.labels.*.name, 'test-with-api-keys')
    runs-on: ubuntu-latest
    timeout-minutes: 30
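    # Label gate: with `types: [labeled, synchronize]`, the full suite runs when the
    # 'test-with-api-keys' label is added and again on every push while the label stays
    # on the PR. The label can be added, for example, with the GitHub CLI
    # (PR number below is a placeholder):
    #   gh pr edit 123 --add-label "test-with-api-keys"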
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Verify required secrets
        env:
          DEEPGRAM_API_KEY: ${{ secrets.DEEPGRAM_API_KEY }}
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          echo "Verifying required secrets for label-triggered full test run..."
          if [ -z "$DEEPGRAM_API_KEY" ]; then
            echo "❌ ERROR: DEEPGRAM_API_KEY secret is not set"
            exit 1
          fi
          if [ -z "$OPENAI_API_KEY" ]; then
            echo "❌ ERROR: OPENAI_API_KEY secret is not set"
            exit 1
          fi
          if [ -z "$HF_TOKEN" ]; then
            echo "⚠️ WARNING: HF_TOKEN secret is not set (speaker recognition will be disabled)"
          else
            echo "✓ HF_TOKEN is set (length: ${#HF_TOKEN})"
          fi
          echo "✓ DEEPGRAM_API_KEY is set (length: ${#DEEPGRAM_API_KEY})"
          echo "✓ OPENAI_API_KEY is set (length: ${#OPENAI_API_KEY})"
          echo "✓ Required secrets verified"
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          driver-opts: |
            image=moby/buildkit:latest
            network=host

      - name: Cache Docker layers
        uses: actions/cache@v4
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ hashFiles('backends/advanced/Dockerfile', 'backends/advanced/pyproject.toml') }}
          restore-keys: |
            ${{ runner.os }}-buildx-
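      # The cache key tracks the backend Dockerfile and pyproject.toml, so cached layers
      # are reused until either file changes. Note that /tmp/.buildx-cache is only
      # populated and consumed if the image build is wired to it via cache-from/cache-to;
      # this is assumed to happen inside the test script's compose build.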
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.12"

      - name: Install uv
        uses: astral-sh/setup-uv@v4
        with:
          version: "latest"

      - name: Install Robot Framework and dependencies
        run: |
          uv pip install --system robotframework robotframework-requests python-dotenv websockets
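      # `uv pip install --system` installs into the runner's Python 3.12 set up above
      # instead of creating a virtual environment, so the Robot Framework entry points
      # are on PATH for the test script.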
      - name: Create test config.yml
        run: |
          echo "Copying test configuration file..."
          mkdir -p config
          cp tests/configs/deepgram-openai.yml config/config.yml
          echo "✓ Test config.yml created from tests/configs/deepgram-openai.yml"
          ls -lh config/config.yml

      - name: Create plugins.yml from template
        run: |
          echo "Creating plugins.yml from template..."
          if [ -f "config/plugins.yml.template" ]; then
            cp config/plugins.yml.template config/plugins.yml
            echo "✓ plugins.yml created from template"
            ls -lh config/plugins.yml
          else
            echo "❌ ERROR: config/plugins.yml.template not found"
            exit 1
          fi
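      # Both generated files live under config/ at the repository root, which is where
      # the compose services are assumed to read their configuration from.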
      - name: Run Full Robot Framework tests
        working-directory: tests
        env:
          # Required for test runner script
          DEEPGRAM_API_KEY: ${{ secrets.DEEPGRAM_API_KEY }}
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          CLEANUP_CONTAINERS: "false"  # Don't cleanup in CI - handled by workflow
        run: |
          # Use the full test script (includes all tests with API keys).
          # Capture the exit code with `|| ...` so the default `bash -e` behaviour
          # doesn't abort the step before the code is recorded.
          TEST_EXIT_CODE=0
          ./run-robot-tests.sh || TEST_EXIT_CODE=$?
          echo "test_exit_code=$TEST_EXIT_CODE" >> $GITHUB_ENV
          # Don't fail here; the final step fails the workflow after artifacts are uploaded.
          exit 0
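      # The step above always exits 0 so the log, report, and artifact steps below still
      # run; the exit code recorded in $GITHUB_ENV is what fails the workflow in the
      # final step.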
      - name: Show service logs
        if: always()
        working-directory: backends/advanced
        run: |
          echo "=== Backend Logs (last 50 lines) ==="
          docker compose -f docker-compose-test.yml logs --tail=50 chronicle-backend-test
          echo ""
          echo "=== Worker Logs (last 50 lines) ==="
          docker compose -f docker-compose-test.yml logs --tail=50 workers-test

      - name: Check if test results exist
        if: always()
        id: check_results
        run: |
          if [ -f tests/results/output.xml ]; then
            echo "results_exist=true" >> $GITHUB_OUTPUT
          else
            echo "results_exist=false" >> $GITHUB_OUTPUT
            echo "⚠️ No test results found in tests/results/"
            ls -la tests/results/ || echo "Results directory doesn't exist"
          fi
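      # Every report/publish step below is gated on `steps.check_results.outputs.results_exist`,
      # so they are skipped cleanly if the run never produced tests/results/output.xml.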
      - name: Upload Robot Framework HTML reports
        if: always() && steps.check_results.outputs.results_exist == 'true'
        uses: actions/upload-artifact@v4
        with:
          name: robot-test-reports-html-pr-labeled
          path: |
            tests/results/report.html
            tests/results/log.html
          retention-days: 30

      - name: Publish HTML Report as GitHub Pages artifact
        if: always() && steps.check_results.outputs.results_exist == 'true'
        uses: actions/upload-pages-artifact@v3
        with:
          path: tests/results

      - name: Deploy to GitHub Pages
        if: always() && steps.check_results.outputs.results_exist == 'true'
        uses: actions/deploy-pages@v4
        id: deployment
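      # Deploying from this job relies on the `pages: write` and `id-token: write`
      # permissions granted at the top of the workflow; the repository's Pages source is
      # assumed to be set to "GitHub Actions".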
      - name: Generate test summary
        if: always() && steps.check_results.outputs.results_exist == 'true'
        id: test_summary
        run: |
          # Parse test results
          python3 << 'PYTHON_SCRIPT' > test_summary.txt
          import xml.etree.ElementTree as ET
          tree = ET.parse('tests/results/output.xml')
          root = tree.getroot()
          stats = root.find('.//total/stat')
          if stats is not None:
              passed = stats.get("pass", "0")
              failed = stats.get("fail", "0")
              total = int(passed) + int(failed)
              print(f"PASSED={passed}")
              print(f"FAILED={failed}")
              print(f"TOTAL={total}")
          PYTHON_SCRIPT
          # Source the variables
          source test_summary.txt
          # Set outputs
          echo "passed=$PASSED" >> $GITHUB_OUTPUT
          echo "failed=$FAILED" >> $GITHUB_OUTPUT
          echo "total=$TOTAL" >> $GITHUB_OUTPUT
      - name: Post PR comment with test results
        if: always() && steps.check_results.outputs.results_exist == 'true'
        uses: actions/github-script@v7
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            const passed = '${{ steps.test_summary.outputs.passed }}';
            const failed = '${{ steps.test_summary.outputs.failed }}';
            const total = '${{ steps.test_summary.outputs.total }}';
            const runUrl = `https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}`;
            const pagesUrl = '${{ steps.deployment.outputs.page_url }}';
            const status = failed === '0' ? '✅ All tests passed!' : '❌ Some tests failed';
            const emoji = failed === '0' ? '🎉' : '⚠️';
            const comment = `## ${emoji} Robot Framework Test Results (Label-Triggered Full Suite)

            **Status**: ${status}

            🏷️ **Note**: This run was triggered by the \`test-with-api-keys\` label.
            All tests including API-dependent tests have been executed.

            | Metric | Count |
            |--------|-------|
            | ✅ Passed | ${passed} |
            | ❌ Failed | ${failed} |
            | 📊 Total | ${total} |

            ### 📊 View Reports

            **GitHub Pages (Live Reports):**
            - [📋 Test Report](${pagesUrl}report.html)
            - [📝 Detailed Log](${pagesUrl}log.html)

            **Download Artifacts:**
            - [robot-test-reports-html-pr-labeled](${runUrl}) - HTML reports
            - [robot-test-results-xml-pr-labeled](${runUrl}) - XML output

            ---

            *[View full workflow run](${runUrl})*`;

            github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: comment
            });
      - name: Upload Robot Framework XML output
        if: always() && steps.check_results.outputs.results_exist == 'true'
        uses: actions/upload-artifact@v4
        with:
          name: robot-test-results-xml-pr-labeled
          path: tests/results/output.xml
          retention-days: 30

      - name: Upload logs on failure
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: robot-test-logs-pr-labeled
          path: |
            backends/advanced/.env
            tests/setup/.env.test
          retention-days: 7
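      # Because the test step itself always exits 0, `if: failure()` here only fires when
      # a setup or infrastructure step fails, not when individual Robot tests fail.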
      - name: Display test results summary
        if: always()
        run: |
          if [ -f tests/results/output.xml ]; then
            echo "Label-triggered full test results generated successfully"
            echo "========================================"
            python3 << 'PYTHON_SCRIPT'
          import xml.etree.ElementTree as ET
          tree = ET.parse('tests/results/output.xml')
          root = tree.getroot()
          stats = root.find('.//total/stat')
          if stats is not None:
              passed = stats.get("pass", "0")
              failed = stats.get("fail", "0")
              print(f'✅ Passed: {passed}')
              print(f'❌ Failed: {failed}')
              print(f'📊 Total: {int(passed) + int(failed)}')
          PYTHON_SCRIPT
            echo "========================================"
            echo ""
            echo "🏷️ This run was triggered by the 'test-with-api-keys' label"
            echo "ℹ️ Full test suite including API-dependent tests"
            echo ""
            echo "📊 FULL TEST REPORTS AVAILABLE:"
            echo "  1. Go to the 'Summary' tab at the top of this page"
            echo "  2. Scroll down to 'Artifacts' section"
            echo "  3. Download 'robot-test-reports-html-pr-labeled'"
            echo "  4. Extract and open report.html or log.html in your browser"
            echo ""
          fi
      - name: Cleanup
        if: always()
        working-directory: backends/advanced
        run: |
          docker compose -f docker-compose-test.yml down -v
      - name: Fail workflow if tests failed
        if: always()
        run: |
          if [ "${{ env.test_exit_code }}" != "0" ]; then
            echo "❌ Tests failed with exit code ${{ env.test_exit_code }}"
            exit 1
          else
            echo "✅ All tests passed"
          fi