Refactor audio processing and job handling for transcription workflows #182
Workflow file for this run

name: Robot Framework Tests (No API Keys)

on:
  pull_request:
    paths:
      - 'tests/**/*.robot'
      - 'tests/**/*.py'
      - 'backends/advanced/src/**'
      - '.github/workflows/robot-tests.yml'
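
# pages + id-token are required by actions/deploy-pages; pull-requests and
# issues allow the job to post the results comment back to the PR.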
permissions:
  contents: read
  pull-requests: write
  issues: write
  pages: write
  id-token: write

jobs:
  robot-tests:
    runs-on: ubuntu-latest
    timeout-minutes: 30

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          driver-opts: |
            image=moby/buildkit:latest
            network=host
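
      # Reuse BuildKit layers between runs: the key changes whenever the
      # Dockerfile or the Python dependency manifest changes, and
      # restore-keys falls back to the most recent cache on a partial match.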
      - name: Cache Docker layers
        uses: actions/cache@v4
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ hashFiles('backends/advanced/Dockerfile', 'backends/advanced/pyproject.toml') }}
          restore-keys: |
            ${{ runner.os }}-buildx-

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.12"

      - name: Install uv
        uses: astral-sh/setup-uv@v4
        with:
          version: "latest"

      - name: Install Robot Framework and dependencies
        run: |
          uv pip install --system robotframework robotframework-requests python-dotenv websockets
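
      # tests/configs/mock-services.yml disables the external transcription
      # and LLM dependencies, so the whole suite runs without API keys.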
      - name: Create test config.yml
        run: |
          echo "Copying mock services configuration file..."
          mkdir -p config
          cp tests/configs/mock-services.yml config/config.yml
          echo "✓ Test config.yml created from tests/configs/mock-services.yml"
          echo "ℹ️ This config disables external API dependencies (transcription, LLM)"
          ls -lh config/config.yml

      - name: Create plugins.yml from template
        run: |
          echo "Creating plugins.yml from template..."
          if [ -f "config/plugins.yml.template" ]; then
            cp config/plugins.yml.template config/plugins.yml
            echo "✓ plugins.yml created from template"
            ls -lh config/plugins.yml
          else
            echo "❌ ERROR: config/plugins.yml.template not found"
            exit 1
          fi

      - name: Run Robot Framework tests (No API Keys)
        working-directory: tests
        env:
          CLEANUP_CONTAINERS: "false" # Don't cleanup in CI - handled by workflow
        run: |
          # Use the no-API test script (excludes tests tagged with requires-api-keys).
          # The default shell runs with `set -e`, so disable it around the test
          # invocation; otherwise a failure would abort the step before the exit
          # code is captured.
          set +e
          ./run-no-api-tests.sh
          TEST_EXIT_CODE=$?
          set -e
          echo "test_exit_code=$TEST_EXIT_CODE" >> $GITHUB_ENV
          exit 0 # Don't fail here; we fail at the end after uploading artifacts
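
      # The diagnostic and publishing steps below use if: always() so logs
      # and reports are collected even when tests fail.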
      - name: Show service logs
        if: always()
        working-directory: backends/advanced
        run: |
          echo "=== Backend Logs (last 50 lines) ==="
          docker compose -f docker-compose-test.yml logs --tail=50 chronicle-backend-test
          echo ""
          echo "=== Worker Logs (last 50 lines) ==="
          docker compose -f docker-compose-test.yml logs --tail=50 workers-test

      - name: Check if test results exist
        if: always()
        id: check_results
        run: |
          if [ -f tests/results-no-api/output.xml ]; then
            echo "results_exist=true" >> $GITHUB_OUTPUT
          else
            echo "results_exist=false" >> $GITHUB_OUTPUT
            echo "⚠️ No test results found in tests/results-no-api/"
            ls -la tests/results-no-api/ || echo "Results directory doesn't exist"
          fi
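
      # Reports are published three ways: downloadable artifacts, a live
      # GitHub Pages deployment, and a summary comment on the PR.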
      - name: Upload Robot Framework HTML reports
        if: always() && steps.check_results.outputs.results_exist == 'true'
        uses: actions/upload-artifact@v4
        with:
          name: robot-test-reports-html-no-api
          path: |
            tests/results-no-api/report.html
            tests/results-no-api/log.html
          retention-days: 30

      - name: Publish HTML Report as GitHub Pages artifact
        if: always() && steps.check_results.outputs.results_exist == 'true'
        uses: actions/upload-pages-artifact@v3
        with:
          path: tests/results-no-api

      - name: Deploy to GitHub Pages
        if: always() && steps.check_results.outputs.results_exist == 'true'
        uses: actions/deploy-pages@v4
        id: deployment
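
      # output.xml stores totals in <statistics><total><stat>; the heredoc
      # extracts them as sourceable KEY=value lines for the step outputs.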
      - name: Generate test summary
        if: always() && steps.check_results.outputs.results_exist == 'true'
        id: test_summary
        run: |
          # Parse test results
          python3 << 'PYTHON_SCRIPT' > test_summary.txt
          import xml.etree.ElementTree as ET

          tree = ET.parse('tests/results-no-api/output.xml')
          root = tree.getroot()
          stats = root.find('.//total/stat')
          if stats is not None:
              passed = stats.get("pass", "0")
              failed = stats.get("fail", "0")
              total = int(passed) + int(failed)
              print(f"PASSED={passed}")
              print(f"FAILED={failed}")
              print(f"TOTAL={total}")
          PYTHON_SCRIPT

          # Source the variables
          source test_summary.txt

          # Set outputs
          echo "passed=$PASSED" >> $GITHUB_OUTPUT
          echo "failed=$FAILED" >> $GITHUB_OUTPUT
          echo "total=$TOTAL" >> $GITHUB_OUTPUT
      - name: Post PR comment with test results
        if: always() && steps.check_results.outputs.results_exist == 'true'
        uses: actions/github-script@v7
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            const passed = '${{ steps.test_summary.outputs.passed }}';
            const failed = '${{ steps.test_summary.outputs.failed }}';
            const total = '${{ steps.test_summary.outputs.total }}';
            const runUrl = `https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}`;
            const pagesUrl = '${{ steps.deployment.outputs.page_url }}';

            const status = failed === '0' ? '✅ All tests passed!' : '❌ Some tests failed';
            const emoji = failed === '0' ? '🎉' : '⚠️';

            // Blank lines matter here: GitHub-flavored markdown needs them
            // for the table and section breaks to render correctly.
            const comment = `## ${emoji} Robot Framework Test Results (No API Keys)

            **Status**: ${status}

            ℹ️ **Note**: This run excludes tests requiring external API keys (Deepgram, OpenAI).
            Tests tagged with \`requires-api-keys\` will run on dev/main branches.

            | Metric | Count |
            |--------|-------|
            | ✅ Passed | ${passed} |
            | ❌ Failed | ${failed} |
            | 📊 Total | ${total} |

            ### 📊 View Reports

            **GitHub Pages (Live Reports):**
            - [📋 Test Report](${pagesUrl}report.html)
            - [📝 Detailed Log](${pagesUrl}log.html)

            **Download Artifacts:**
            - [robot-test-reports-html-no-api](${runUrl}) - HTML reports
            - [robot-test-results-xml-no-api](${runUrl}) - XML output

            ---
            *[View full workflow run](${runUrl})*`;

            await github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: comment
            });

      - name: Upload Robot Framework XML output
        if: always() && steps.check_results.outputs.results_exist == 'true'
        uses: actions/upload-artifact@v4
        with:
          name: robot-test-results-xml-no-api
          path: tests/results-no-api/output.xml
          retention-days: 30

      - name: Upload logs on failure
        if: failure()
        uses: actions/upload-artifact@v4
        with:
          name: robot-test-logs
          path: |
            backends/advanced/.env
            tests/setup/.env.test
          retention-days: 7
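
      # Human-readable recap in the job log; the HTML artifacts above carry
      # the full detail.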
      - name: Display test results summary
        if: always()
        run: |
          if [ -f tests/results-no-api/output.xml ]; then
            echo "Test results generated successfully (No API Keys mode)"
            echo "========================================"
            # The heredoc body is dedented to column 0 so Python does not
            # see spurious leading indentation.
            python3 << 'PYTHON_SCRIPT'
          import xml.etree.ElementTree as ET
          tree = ET.parse('tests/results-no-api/output.xml')
          root = tree.getroot()
          stats = root.find('.//total/stat')
          if stats is not None:
              passed = stats.get("pass", "0")
              failed = stats.get("fail", "0")
              print(f'✅ Passed: {passed}')
              print(f'❌ Failed: {failed}')
              print(f'📊 Total: {int(passed) + int(failed)}')
          PYTHON_SCRIPT
            echo "========================================"
            echo ""
            echo "ℹ️ Tests excluded: requires-api-keys (run on dev/main branches)"
            echo ""
            echo "📊 FULL TEST REPORTS AVAILABLE:"
            echo "  1. Go to the 'Summary' tab at the top of this page"
            echo "  2. Scroll down to the 'Artifacts' section"
            echo "  3. Download 'robot-test-reports-html-no-api'"
            echo "  4. Extract and open report.html or log.html in your browser"
            echo ""
            echo "The HTML reports provide:"
            echo "  - report.html: Executive summary with statistics"
            echo "  - log.html: Detailed step-by-step execution log"
            echo ""
          fi

      - name: Cleanup
        if: always()
        working-directory: backends/advanced
        run: |
          docker compose -f docker-compose-test.yml down -v
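
      # Deferred failure: now that logs and reports are uploaded, enforce
      # the exit code captured by the test step.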
      - name: Fail workflow if tests failed
        if: always()
        run: |
          if [ "${{ env.test_exit_code }}" != "0" ]; then
            echo "❌ Tests failed with exit code ${{ env.test_exit_code }}"
            exit 1
          else
            echo "✅ All tests passed"
          fi