diff --git a/.github/workflows/secret-digger-claude.lock.yml b/.github/workflows/secret-digger-claude.lock.yml index a267e295..06bafe06 100644 --- a/.github/workflows/secret-digger-claude.lock.yml +++ b/.github/workflows/secret-digger-claude.lock.yml @@ -738,7 +738,6 @@ jobs: sudo -E awf --enable-chroot --tty --env-all --container-workdir "${GITHUB_WORKSPACE}" --allow-domains '*.githubusercontent.com,anthropic.com,api.anthropic.com,api.github.com,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,cdn.playwright.dev,codeload.github.com,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,files.pythonhosted.org,ghcr.io,github-cloud.githubusercontent.com,github-cloud.s3.amazonaws.com,github.com,github.githubassets.com,host.docker.internal,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,lfs.github.com,objects.githubusercontent.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,playwright.download.prss.microsoft.com,ppa.launchpad.net,pypi.org,raw.githubusercontent.com,registry.npmjs.org,s.symcb.com,s.symcd.com,security.ubuntu.com,sentry.io,statsig.anthropic.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com' --log-level info --proxy-logs-dir /tmp/gh-aw/sandbox/firewall/logs --enable-host-access --build-local \ -- /bin/bash -c 'export PATH="$(find /opt/hostedtoolcache -maxdepth 4 -type d -name bin 2>/dev/null | tr '\''\n'\'' '\'':'\'')$PATH"; [ -n "$GOROOT" ] && export PATH="$GOROOT/bin:$PATH" || true && claude --print --disable-slash-commands --no-chrome --mcp-config /tmp/gh-aw/mcp-config/mcp-servers.json --allowed-tools 
'\''Bash,BashOutput,Edit,Edit(/tmp/gh-aw/cache-memory/*),ExitPlanMode,Glob,Grep,KillBash,LS,MultiEdit,MultiEdit(/tmp/gh-aw/cache-memory/*),NotebookEdit,NotebookRead,Read,Read(/tmp/gh-aw/cache-memory/*),Task,TodoWrite,Write,Write(/tmp/gh-aw/cache-memory/*),mcp__github__download_workflow_run_artifact,mcp__github__get_code_scanning_alert,mcp__github__get_commit,mcp__github__get_dependabot_alert,mcp__github__get_discussion,mcp__github__get_discussion_comments,mcp__github__get_file_contents,mcp__github__get_job_logs,mcp__github__get_label,mcp__github__get_latest_release,mcp__github__get_me,mcp__github__get_notification_details,mcp__github__get_pull_request,mcp__github__get_pull_request_comments,mcp__github__get_pull_request_diff,mcp__github__get_pull_request_files,mcp__github__get_pull_request_review_comments,mcp__github__get_pull_request_reviews,mcp__github__get_pull_request_status,mcp__github__get_release_by_tag,mcp__github__get_secret_scanning_alert,mcp__github__get_tag,mcp__github__get_workflow_run,mcp__github__get_workflow_run_logs,mcp__github__get_workflow_run_usage,mcp__github__issue_read,mcp__github__list_branches,mcp__github__list_code_scanning_alerts,mcp__github__list_commits,mcp__github__list_dependabot_alerts,mcp__github__list_discussion_categories,mcp__github__list_discussions,mcp__github__list_issue_types,mcp__github__list_issues,mcp__github__list_label,mcp__github__list_notifications,mcp__github__list_pull_requests,mcp__github__list_releases,mcp__github__list_secret_scanning_alerts,mcp__github__list_starred_repositories,mcp__github__list_tags,mcp__github__list_workflow_jobs,mcp__github__list_workflow_run_artifacts,mcp__github__list_workflow_runs,mcp__github__list_workflows,mcp__github__pull_request_read,mcp__github__search_code,mcp__github__search_issues,mcp__github__search_orgs,mcp__github__search_pull_requests,mcp__github__search_repositories,mcp__github__search_users'\'' --debug-file /tmp/gh-aw/agent-stdio.log --verbose --permission-mode bypassPermissions --output-format stream-json "$(cat /tmp/gh-aw/aw-prompts/prompt.txt)"${GH_AW_MODEL_AGENT_CLAUDE:+ --model "$GH_AW_MODEL_AGENT_CLAUDE"}' 2>&1 | tee -a /tmp/gh-aw/agent-stdio.log env: - ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} BASH_DEFAULT_TIMEOUT_MS: 1800000 BASH_MAX_TIMEOUT_MS: 1800000 CLAUDE_CODE_OAUTH_TOKEN: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }} @@ -1068,7 +1067,6 @@ jobs: # Execute Claude Code CLI with prompt from file claude --print --disable-slash-commands --no-chrome --allowed-tools 'Bash(cat),Bash(grep),Bash(head),Bash(jq),Bash(ls),Bash(tail),Bash(wc),BashOutput,ExitPlanMode,Glob,Grep,KillBash,LS,NotebookRead,Read,Task,TodoWrite' --debug-file /tmp/gh-aw/threat-detection/detection.log --verbose --permission-mode bypassPermissions --output-format stream-json "$(cat /tmp/gh-aw/aw-prompts/prompt.txt)"${GH_AW_MODEL_DETECTION_CLAUDE:+ --model "$GH_AW_MODEL_DETECTION_CLAUDE"} 2>&1 | tee -a /tmp/gh-aw/threat-detection/detection.log env: - ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} BASH_DEFAULT_TIMEOUT_MS: 1800000 BASH_MAX_TIMEOUT_MS: 1800000 CLAUDE_CODE_OAUTH_TOKEN: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }} diff --git a/.github/workflows/security-guard.lock.yml b/.github/workflows/security-guard.lock.yml index 44510b07..efed0cf4 100644 --- a/.github/workflows/security-guard.lock.yml +++ b/.github/workflows/security-guard.lock.yml @@ -667,7 +667,6 @@ jobs: sudo -E awf --tty --env-all --container-workdir "${GITHUB_WORKSPACE}" --allow-domains 
'*.githubusercontent.com,anthropic.com,api.anthropic.com,api.github.com,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,cdn.playwright.dev,codeload.github.com,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,files.pythonhosted.org,ghcr.io,github-cloud.githubusercontent.com,github-cloud.s3.amazonaws.com,github.com,host.docker.internal,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,lfs.github.com,objects.githubusercontent.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,playwright.download.prss.microsoft.com,ppa.launchpad.net,pypi.org,raw.githubusercontent.com,registry.npmjs.org,s.symcb.com,s.symcd.com,security.ubuntu.com,sentry.io,statsig.anthropic.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com' --log-level info --proxy-logs-dir /tmp/gh-aw/sandbox/firewall/logs --enable-host-access --build-local \ -- /bin/bash -c 'export PATH="$(find /opt/hostedtoolcache -maxdepth 4 -type d -name bin 2>/dev/null | tr '\''\n'\'' '\'':'\'')$PATH"; [ -n "$GOROOT" ] && export PATH="$GOROOT/bin:$PATH" || true && claude --print --disable-slash-commands --no-chrome --mcp-config /tmp/gh-aw/mcp-config/mcp-servers.json --allowed-tools Bash,BashOutput,Edit,ExitPlanMode,Glob,Grep,KillBash,LS,MultiEdit,NotebookEdit,NotebookRead,Read,Task,TodoWrite,Write,mcp__github__download_workflow_run_artifact,mcp__github__get_code_scanning_alert,mcp__github__get_commit,mcp__github__get_dependabot_alert,mcp__github__get_discussion,mcp__github__get_discussion_comments,mcp__github__get_file_contents,mcp__github__get_job_logs,mcp__github__get_label,mcp__github__get_latest_release,mcp__github__get_me,mcp__github__get_notification_details,mcp__github__get_pull_request,mcp__github__get_pull_request_comments,mcp__github__get_pull_request_diff,mcp__github__get_pull_request_files,mcp__github__get_pull_request_review_comments,mcp__github__get_pull_request_reviews,mcp__github__get_pull_request_status,mcp__github__get_release_by_tag,mcp__github__get_secret_scanning_alert,mcp__github__get_tag,mcp__github__get_workflow_run,mcp__github__get_workflow_run_logs,mcp__github__get_workflow_run_usage,mcp__github__issue_read,mcp__github__list_branches,mcp__github__list_code_scanning_alerts,mcp__github__list_commits,mcp__github__list_dependabot_alerts,mcp__github__list_discussion_categories,mcp__github__list_discussions,mcp__github__list_issue_types,mcp__github__list_issues,mcp__github__list_label,mcp__github__list_notifications,mcp__github__list_pull_requests,mcp__github__list_releases,mcp__github__list_secret_scanning_alerts,mcp__github__list_starred_repositories,mcp__github__list_tags,mcp__github__list_workflow_jobs,mcp__github__list_workflow_run_artifacts,mcp__github__list_workflow_runs,mcp__github__list_workflows,mcp__github__pull_request_read,mcp__github__search_code,mcp__github__search_issues,mcp__github__search_orgs,mcp__github__search_pull_requests,mcp__github__search_repositories,mcp__github__search_users --debug-file /tmp/gh-aw/agent-stdio.log --verbose --permission-mode bypassPermissions --output-format stream-json "$(cat /tmp/gh-aw/aw-prompts/prompt.txt)"${GH_AW_MODEL_AGENT_CLAUDE:+ --model "$GH_AW_MODEL_AGENT_CLAUDE"}' 2>&1 | tee -a /tmp/gh-aw/agent-stdio.log env: - ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} 
BASH_DEFAULT_TIMEOUT_MS: 60000 BASH_MAX_TIMEOUT_MS: 60000 CLAUDE_CODE_OAUTH_TOKEN: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }} @@ -988,7 +987,6 @@ jobs: # Execute Claude Code CLI with prompt from file claude --print --disable-slash-commands --no-chrome --allowed-tools 'Bash(cat),Bash(grep),Bash(head),Bash(jq),Bash(ls),Bash(tail),Bash(wc),BashOutput,ExitPlanMode,Glob,Grep,KillBash,LS,NotebookRead,Read,Task,TodoWrite' --debug-file /tmp/gh-aw/threat-detection/detection.log --verbose --permission-mode bypassPermissions --output-format stream-json "$(cat /tmp/gh-aw/aw-prompts/prompt.txt)"${GH_AW_MODEL_DETECTION_CLAUDE:+ --model "$GH_AW_MODEL_DETECTION_CLAUDE"} 2>&1 | tee -a /tmp/gh-aw/threat-detection/detection.log env: - ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} BASH_DEFAULT_TIMEOUT_MS: 60000 BASH_MAX_TIMEOUT_MS: 60000 CLAUDE_CODE_OAUTH_TOKEN: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }} diff --git a/.github/workflows/smoke-codex.lock.yml b/.github/workflows/smoke-codex.lock.yml index ce77a62e..4bc836f3 100644 --- a/.github/workflows/smoke-codex.lock.yml +++ b/.github/workflows/smoke-codex.lock.yml @@ -1381,11 +1381,10 @@ jobs: run: | set -o pipefail mkdir -p "$CODEX_HOME/logs" - sudo -E awf --env-all --container-workdir "${GITHUB_WORKSPACE}" --allow-domains '*.githubusercontent.com,172.30.0.1,api.openai.com,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,cdn.playwright.dev,codeload.github.com,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,github-cloud.githubusercontent.com,github-cloud.s3.amazonaws.com,github.githubassets.com,host.docker.internal,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,lfs.github.com,mcp.tavily.com,objects.githubusercontent.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,openai.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,playwright.download.prss.microsoft.com,ppa.launchpad.net,raw.githubusercontent.com,s.symcb.com,s.symcd.com,security.ubuntu.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com' --log-level info --proxy-logs-dir /tmp/gh-aw/sandbox/firewall/logs --enable-host-access --build-local \ + sudo -E awf --env-all --container-workdir "${GITHUB_WORKSPACE}" --allow-domains '*.githubusercontent.com,172.30.0.30,api.openai.com,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,cdn.playwright.dev,codeload.github.com,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,github-cloud.githubusercontent.com,github-cloud.s3.amazonaws.com,github.githubassets.com,host.docker.internal,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,lfs.github.com,mcp.tavily.com,objects.githubusercontent.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,openai.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,playwright.download.prss.microsoft.com,ppa.launchpad.net,raw.githubusercontent.com,s.symcb.com,s.symcd.com,security.ubuntu.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com' --log-level info --proxy-logs-dir /tmp/gh-aw/sandbox/firewall/logs --enable-host-access --build-local \ -- /bin/bash -c 'export PATH="$(find /opt/hostedtoolcache -maxdepth 4 -type 
d -name bin 2>/dev/null | tr '\''\n'\'' '\'':'\'')$PATH"; [ -n "$GOROOT" ] && export PATH="$GOROOT/bin:$PATH" || true && INSTRUCTION="$(cat /tmp/gh-aw/aw-prompts/prompt.txt)" && codex ${GH_AW_MODEL_AGENT_CODEX:+-c model="$GH_AW_MODEL_AGENT_CODEX" }exec --dangerously-bypass-approvals-and-sandbox --skip-git-repo-check "$INSTRUCTION"' \ 2>&1 | tee /tmp/gh-aw/agent-stdio.log env: - CODEX_API_KEY: ${{ secrets.CODEX_API_KEY || secrets.OPENAI_API_KEY }} CODEX_HOME: /tmp/gh-aw/mcp-config GH_AW_MCP_CONFIG: /tmp/gh-aw/mcp-config/config.toml GH_AW_MODEL_AGENT_CODEX: ${{ vars.GH_AW_MODEL_AGENT_CODEX || '' }} @@ -1393,7 +1392,7 @@ jobs: GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }} GH_DEBUG: 1 GITHUB_STEP_SUMMARY: ${{ env.GITHUB_STEP_SUMMARY }} - OPENAI_API_KEY: ${{ secrets.CODEX_API_KEY || secrets.OPENAI_API_KEY }} + OPENAI_BASE_URL: http://172.30.0.30:8000 RUST_LOG: trace,hyper_util=info,mio=info,reqwest=info,os_info=info,codex_otel=warn,codex_core=debug,ocodex_exec=debug - name: Configure Git credentials env: @@ -1444,7 +1443,7 @@ jobs: uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8 env: GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }} - GH_AW_ALLOWED_DOMAINS: "*.githubusercontent.com,172.30.0.1,api.openai.com,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,cdn.playwright.dev,codeload.github.com,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,github-cloud.githubusercontent.com,github-cloud.s3.amazonaws.com,github.githubassets.com,host.docker.internal,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,lfs.github.com,objects.githubusercontent.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,openai.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,playwright.download.prss.microsoft.com,ppa.launchpad.net,raw.githubusercontent.com,s.symcb.com,s.symcd.com,security.ubuntu.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com" + GH_AW_ALLOWED_DOMAINS: "*.githubusercontent.com,172.30.0.30,api.openai.com,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,cdn.playwright.dev,codeload.github.com,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,github-cloud.githubusercontent.com,github-cloud.s3.amazonaws.com,github.githubassets.com,host.docker.internal,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,lfs.github.com,objects.githubusercontent.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,openai.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,playwright.download.prss.microsoft.com,ppa.launchpad.net,raw.githubusercontent.com,s.symcb.com,s.symcd.com,security.ubuntu.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com" GITHUB_SERVER_URL: ${{ github.server_url }} GITHUB_API_URL: ${{ github.api_url }} with: diff --git a/containers/agent/setup-iptables.sh b/containers/agent/setup-iptables.sh index 6214b69d..529dbaf8 100644 --- a/containers/agent/setup-iptables.sh +++ b/containers/agent/setup-iptables.sh @@ -127,6 +127,21 @@ fi echo "[iptables] Allow traffic to Squid proxy (${SQUID_IP}:${SQUID_PORT})..." 
 iptables -t nat -A OUTPUT -d "$SQUID_IP" -j RETURN
+# Bypass Squid for api-proxy when API proxy IP is configured.
+# The agent needs to connect directly to api-proxy (not through Squid).
+# The api-proxy then routes outbound traffic through Squid to enforce domain whitelisting.
+# Architecture: agent -> api-proxy (direct) -> Squid -> internet
+# Use AWF_API_PROXY_IP environment variable set by docker-manager (172.30.0.30)
+if [ -n "$AWF_API_PROXY_IP" ]; then
+  if is_valid_ipv4 "$AWF_API_PROXY_IP"; then
+    echo "[iptables] Allow direct traffic to api-proxy (${AWF_API_PROXY_IP}) - bypassing Squid..."
+    # NAT: skip DNAT to Squid for all traffic to api-proxy
+    iptables -t nat -A OUTPUT -d "$AWF_API_PROXY_IP" -j RETURN
+  else
+    echo "[iptables] WARNING: AWF_API_PROXY_IP has invalid format '${AWF_API_PROXY_IP}', skipping api-proxy bypass"
+  fi
+fi
+
 # Bypass Squid for host.docker.internal when host access is enabled.
 # MCP gateway traffic to host.docker.internal gets DNAT'd to Squid,
 # where Squid fails with "Invalid URL" because rmcp sends relative URLs.
@@ -263,6 +278,14 @@
 iptables -A OUTPUT -p tcp -d 127.0.0.11 --dport 53 -j ACCEPT
 # Allow traffic to Squid proxy (after NAT redirection)
 iptables -A OUTPUT -p tcp -d "$SQUID_IP" -j ACCEPT
+# Allow traffic to Kong API Gateway sidecar (port 8000 for OpenAI proxy, 8001 for admin API)
+# Must be added before the final DROP rule
+if [ -n "$AWF_API_PROXY_IP" ]; then
+  echo "[iptables] Allow traffic to Kong Gateway (${AWF_API_PROXY_IP}) ports 8000, 8001..."
+  iptables -A OUTPUT -p tcp -d "$AWF_API_PROXY_IP" --dport 8000 -j ACCEPT
+  iptables -A OUTPUT -p tcp -d "$AWF_API_PROXY_IP" --dport 8001 -j ACCEPT
+fi
+
 # Drop all other TCP traffic (default deny policy)
 # This ensures that only explicitly allowed ports can be accessed
 echo "[iptables] Drop all non-redirected TCP traffic (default deny)..."
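Taken together, these two hunks open a narrow path from the agent to the Kong sidecar while all other traffic still flows through Squid: the NAT RETURN rule keeps packets destined for $AWF_API_PROXY_IP from being DNAT'd to Squid, and the filter ACCEPT rules admit only ports 8000 and 8001 ahead of the default-deny DROP. A minimal verification sketch, assuming the agent container is named `awf-agent` and `AWF_API_PROXY_IP` is `172.30.0.30` as set by docker-manager; the container name and exact rule positions are assumptions for illustration:

```sh
# Run from the host after the containers are up (assumes the agent container
# is named "awf-agent" and the api-proxy sidecar sits at 172.30.0.30).
docker exec awf-agent sh -c '
  # NAT table: a RETURN rule for the api-proxy IP should appear before the Squid DNAT
  iptables -t nat -L OUTPUT -n --line-numbers | grep 172.30.0.30

  # Filter table: ACCEPT rules for 172.30.0.30 on ports 8000/8001 should precede the final DROP
  iptables -L OUTPUT -n --line-numbers | grep 172.30.0.30

  # Kong admin /status should be reachable directly, bypassing Squid (expect 200)
  curl -s -o /dev/null -w "%{http_code}\n" http://172.30.0.30:8001/status
'
```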
diff --git a/containers/api-proxy/Dockerfile b/containers/api-proxy/Dockerfile index 4e62e688..2f4bd57f 100644 --- a/containers/api-proxy/Dockerfile +++ b/containers/api-proxy/Dockerfile @@ -1,32 +1,28 @@ -# Node.js API proxy for credential management +# Kong API Gateway for credential management # Routes through Squid to respect domain whitelisting -FROM node:22-alpine +FROM kong:3.5-alpine -# Install curl for healthchecks -RUN apk add --no-cache curl +# Install curl for healthchecks and envsubst for config templating +USER root +RUN apk add --no-cache curl gettext -# Create app directory -WORKDIR /app +# Create configuration directory +RUN mkdir -p /etc/kong -# Copy package files -COPY package*.json ./ +# Copy Kong declarative configuration template +COPY kong.yml.template /etc/kong/kong.yml.template -# Install dependencies -RUN npm ci --only=production +# Copy entrypoint script that generates config from environment +COPY entrypoint.sh /entrypoint.sh +RUN chmod +x /entrypoint.sh -# Copy application files -COPY server.js ./ - -# Create non-root user -RUN addgroup -S apiproxy && adduser -S apiproxy -G apiproxy - -# Switch to non-root user -USER apiproxy +# Switch back to kong user +USER kong # Expose ports -# 10000 - OpenAI API proxy -# 10001 - Anthropic API proxy -EXPOSE 10000 10001 +# 8000 - HTTP proxy port (we'll use this for OpenAI API) +# 8001 - Admin API (for health checks) +EXPOSE 8000 8001 -# Start the proxy server -CMD ["node", "server.js"] +# Use our custom entrypoint that generates config from env vars +ENTRYPOINT ["/entrypoint.sh"] diff --git a/containers/api-proxy/README.md b/containers/api-proxy/README.md index b6b8805a..f9665656 100644 --- a/containers/api-proxy/README.md +++ b/containers/api-proxy/README.md @@ -6,7 +6,7 @@ Node.js-based API proxy that keeps LLM API credentials isolated from the agent c ``` Agent Container (172.30.0.20) - ↓ HTTP request to api-proxy:10000 + ↓ HTTP request to 172.30.0.30:10000 API Proxy Sidecar (172.30.0.30) ↓ Injects Authorization header ↓ Routes via HTTP_PROXY (172.30.0.10:3128) diff --git a/containers/api-proxy/entrypoint.sh b/containers/api-proxy/entrypoint.sh new file mode 100644 index 00000000..cd5ea995 --- /dev/null +++ b/containers/api-proxy/entrypoint.sh @@ -0,0 +1,44 @@ +#!/bin/sh +set -e + +echo "[Kong] Starting AWF Kong API Gateway..." +echo "[Kong] HTTP_PROXY: ${HTTP_PROXY:-not configured}" +echo "[Kong] HTTPS_PROXY: ${HTTPS_PROXY:-not configured}" + +if [ -n "$OPENAI_API_KEY" ]; then + echo "[Kong] OpenAI API key configured" +else + echo "[Kong] WARNING: OpenAI API key not configured" +fi + +# Generate Kong configuration from template with environment variable substitution +# This injects the OPENAI_API_KEY into the config file +echo "[Kong] Generating Kong configuration from template..." +envsubst < /etc/kong/kong.yml.template > /etc/kong/kong.yml + +# Validate the generated configuration +echo "[Kong] Validating Kong configuration..." +if ! 
kong config parse /etc/kong/kong.yml 2>/dev/null; then
+  echo "[Kong] ERROR: Invalid Kong configuration"
+  cat /etc/kong/kong.yml
+  exit 1
+fi
+
+echo "[Kong] Configuration validated successfully"
+
+# Set Kong environment variables
+export KONG_DATABASE=off
+export KONG_DECLARATIVE_CONFIG=/etc/kong/kong.yml
+export KONG_PROXY_LISTEN="0.0.0.0:8000"
+export KONG_ADMIN_LISTEN="0.0.0.0:8001"
+export KONG_LOG_LEVEL=info
+
+# Kong will automatically use HTTP_PROXY and HTTPS_PROXY environment variables
+# for routing upstream requests through Squid
+if [ -n "$HTTPS_PROXY" ]; then
+  echo "[Kong] Routing upstream HTTPS requests through Squid proxy"
+fi
+
+# Start Kong in foreground mode
+echo "[Kong] Starting Kong Gateway..."
+exec kong start --v
diff --git a/containers/api-proxy/kong.yml.template b/containers/api-proxy/kong.yml.template
new file mode 100644
index 00000000..be5c8ed2
--- /dev/null
+++ b/containers/api-proxy/kong.yml.template
@@ -0,0 +1,36 @@
+_format_version: "3.0"
+
+# Services define the upstream APIs Kong will proxy to
+services:
+  - name: openai-api
+    url: https://api.openai.com
+    # Route traffic through Squid proxy for domain whitelisting
+    # Kong will use HTTP_PROXY/HTTPS_PROXY environment variables
+    connect_timeout: 60000
+    write_timeout: 60000
+    read_timeout: 60000
+
+    routes:
+      - name: openai-route
+        # Match all paths - Kong listens on port 8000
+        paths:
+          - /
+        # Strip the path prefix if needed
+        strip_path: false
+
+    plugins:
+      # Inject Authorization header with OpenAI API key
+      - name: request-transformer
+        config:
+          add:
+            headers:
+              - "Authorization: Bearer ${OPENAI_API_KEY}"
+
+      # Add correlation ID for tracking
+      - name: correlation-id
+        config:
+          header_name: X-Kong-Request-ID
+          generator: uuid
+
+# Enable proxy support (Kong uses HTTP_PROXY/HTTPS_PROXY env vars automatically)
+# No explicit plugin needed - Kong natively supports HTTP_PROXY
diff --git a/containers/api-proxy/package.json b/containers/api-proxy/package.json
deleted file mode 100644
index f5d48013..00000000
--- a/containers/api-proxy/package.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-  "name": "awf-api-proxy",
-  "version": "1.0.0",
-  "description": "API proxy sidecar for AWF - routes LLM API requests through Squid while injecting authentication headers",
-  "main": "server.js",
-  "scripts": {
-    "start": "node server.js"
-  },
-  "dependencies": {
-    "express": "^4.18.2",
-    "http-proxy-middleware": "^2.0.6"
-  },
-  "engines": {
-    "node": ">=18.0.0"
-  }
-}
diff --git a/containers/api-proxy/server.js b/containers/api-proxy/server.js
deleted file mode 100644
index 4dc1d7f3..00000000
--- a/containers/api-proxy/server.js
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/usr/bin/env node
-
-/**
- * AWF API Proxy Sidecar
- *
- * Node.js-based proxy that:
- * 1. Keeps LLM API credentials isolated from agent container
- * 2. Routes all traffic through Squid via HTTP_PROXY/HTTPS_PROXY
- * 3. Injects authentication headers (Authorization, x-api-key)
- * 4.
Respects domain whitelisting enforced by Squid - */ - -const express = require('express'); -const { createProxyMiddleware } = require('http-proxy-middleware'); - -// Read API keys from environment (set by docker-compose) -const OPENAI_API_KEY = process.env.OPENAI_API_KEY; -const ANTHROPIC_API_KEY = process.env.ANTHROPIC_API_KEY; - -// Squid proxy configuration (set via HTTP_PROXY/HTTPS_PROXY in docker-compose) -const HTTP_PROXY = process.env.HTTP_PROXY; -const HTTPS_PROXY = process.env.HTTPS_PROXY; - -console.log('[API Proxy] Starting AWF API proxy sidecar...'); -console.log(`[API Proxy] HTTP_PROXY: ${HTTP_PROXY}`); -console.log(`[API Proxy] HTTPS_PROXY: ${HTTPS_PROXY}`); -if (OPENAI_API_KEY) { - console.log('[API Proxy] OpenAI API key configured'); -} -if (ANTHROPIC_API_KEY) { - console.log('[API Proxy] Anthropic API key configured'); -} - -// Create Express app -const app = express(); - -// Health check endpoint -app.get('/health', (req, res) => { - res.status(200).json({ - status: 'healthy', - service: 'awf-api-proxy', - squid_proxy: HTTP_PROXY || 'not configured', - providers: { - openai: !!OPENAI_API_KEY, - anthropic: !!ANTHROPIC_API_KEY - } - }); -}); - -// OpenAI API proxy (port 10000) -if (OPENAI_API_KEY) { - app.use(createProxyMiddleware({ - target: 'https://api.openai.com', - changeOrigin: true, - secure: true, - onProxyReq: (proxyReq, req, res) => { - // Inject Authorization header - proxyReq.setHeader('Authorization', `Bearer ${OPENAI_API_KEY}`); - console.log(`[OpenAI Proxy] ${req.method} ${req.url}`); - }, - onError: (err, req, res) => { - console.error(`[OpenAI Proxy] Error: ${err.message}`); - res.status(502).json({ error: 'Proxy error', message: err.message }); - } - })); - - app.listen(10000, '0.0.0.0', () => { - console.log('[API Proxy] OpenAI proxy listening on port 10000'); - console.log('[API Proxy] Routing through Squid to api.openai.com'); - }); -} - -// Anthropic API proxy (port 10001) -if (ANTHROPIC_API_KEY) { - const anthropicApp = express(); - - anthropicApp.get('/health', (req, res) => { - res.status(200).json({ status: 'healthy', service: 'anthropic-proxy' }); - }); - - anthropicApp.use(createProxyMiddleware({ - target: 'https://api.anthropic.com', - changeOrigin: true, - secure: true, - onProxyReq: (proxyReq, req, res) => { - // Inject Anthropic authentication headers - proxyReq.setHeader('x-api-key', ANTHROPIC_API_KEY); - proxyReq.setHeader('anthropic-version', '2023-06-01'); - console.log(`[Anthropic Proxy] ${req.method} ${req.url}`); - }, - onError: (err, req, res) => { - console.error(`[Anthropic Proxy] Error: ${err.message}`); - res.status(502).json({ error: 'Proxy error', message: err.message }); - } - })); - - anthropicApp.listen(10001, '0.0.0.0', () => { - console.log('[API Proxy] Anthropic proxy listening on port 10001'); - console.log('[API Proxy] Routing through Squid to api.anthropic.com'); - }); -} - -// Graceful shutdown -process.on('SIGTERM', () => { - console.log('[API Proxy] Received SIGTERM, shutting down gracefully...'); - process.exit(0); -}); - -process.on('SIGINT', () => { - console.log('[API Proxy] Received SIGINT, shutting down gracefully...'); - process.exit(0); -}); diff --git a/docs/api-proxy-sidecar.md b/docs/api-proxy-sidecar.md index 0c1058b8..b4d86f73 100644 --- a/docs/api-proxy-sidecar.md +++ b/docs/api-proxy-sidecar.md @@ -1,6 +1,6 @@ # API Proxy Sidecar for Credential Management -The AWF firewall supports an optional Node.js-based API proxy sidecar that securely holds LLM API credentials and automatically injects 
authentication headers while routing all traffic through Squid to respect domain whitelisting. +The AWF firewall includes a Node.js-based API proxy sidecar (enabled by default) that securely holds LLM API credentials and automatically injects authentication headers while routing all traffic through Squid to respect domain whitelisting. ## Overview @@ -26,9 +26,9 @@ When enabled, the API proxy sidecar: │ │ │ Agent Container │ │ │ │ │ 172.30.0.20 │ │ │ │ │ OPENAI_BASE_URL= │ │ -│ │ │ http://api-proxy:10000 │────┘ +│ │ │ http://172.30.0.30:10000 │────┘ │ │ │ ANTHROPIC_BASE_URL= │ -│ │ │ http://api-proxy:10001 │ +│ │ │ http://172.30.0.30:10001 │ │ │ └──────────────────────────────┘ │ │ └─────────┼─────────────────────────────────────┘ @@ -38,7 +38,7 @@ When enabled, the API proxy sidecar: ``` **Traffic Flow:** -1. Agent makes request to `api-proxy:10000` or `api-proxy:10001` +1. Agent makes request to `172.30.0.30:10000` or `172.30.0.30:10001` 2. API proxy injects authentication headers 3. API proxy routes through Squid via HTTP_PROXY/HTTPS_PROXY 4. Squid enforces domain whitelist (only allowed domains pass) @@ -48,17 +48,30 @@ When enabled, the API proxy sidecar: ### Basic Usage +The API proxy is **enabled by default** and automatically deploys when API keys are present: + ```bash # Set API keys in environment export OPENAI_API_KEY="sk-..." export ANTHROPIC_API_KEY="sk-ant-..." -# Enable API proxy sidecar +# API proxy automatically enabled (no flag needed) +awf --allow-domains api.openai.com,api.anthropic.com \ + -- your-command + +# Explicitly enable if needed (same as default) awf --enable-api-proxy \ --allow-domains api.openai.com,api.anthropic.com \ -- your-command ``` +To disable the API proxy when not needed: +```bash +awf --no-enable-api-proxy \ + --allow-domains github.com \ + -- your-command +``` + ### Codex (OpenAI) Example ```bash @@ -69,7 +82,7 @@ awf --enable-api-proxy \ -- npx @openai/codex -p "write a hello world function" ``` -The agent container will automatically use `http://api-proxy:10000` as the base URL. +The agent container will automatically use `http://172.30.0.30:10000` as the base URL. ### Claude Code Example @@ -81,7 +94,7 @@ awf --enable-api-proxy \ -- claude-code "write a hello world function" ``` -The agent container will automatically use `http://api-proxy:10001` as the base URL. +The agent container will automatically use `http://172.30.0.30:10001` as the base URL. ### Both Providers @@ -100,8 +113,8 @@ When API keys are provided, the sidecar sets these environment variables in the | Variable | Value | When Set | Description | |----------|-------|----------|-------------| -| `OPENAI_BASE_URL` | `http://api-proxy:10000` | When `OPENAI_API_KEY` is provided | OpenAI API proxy endpoint | -| `ANTHROPIC_BASE_URL` | `http://api-proxy:10001` | When `ANTHROPIC_API_KEY` is provided | Anthropic API proxy endpoint | +| `OPENAI_BASE_URL` | `http://172.30.0.30:10000` | When `OPENAI_API_KEY` is provided | OpenAI API proxy endpoint | +| `ANTHROPIC_BASE_URL` | `http://172.30.0.30:10001` | When `ANTHROPIC_API_KEY` is provided | Anthropic API proxy endpoint | These are standard environment variables recognized by: - OpenAI Python SDK @@ -141,7 +154,7 @@ The sidecar has strict resource constraints: ### 1. Container Startup -When `--enable-api-proxy` is set: +When API keys are present (or `--enable-api-proxy` is explicitly set): 1. Node.js API proxy starts at 172.30.0.30 2. API keys passed via environment variables 3. 
HTTP_PROXY/HTTPS_PROXY configured to route through Squid @@ -151,7 +164,7 @@ When `--enable-api-proxy` is set: ``` Agent Code - ↓ (makes HTTP request to api-proxy:10000) + ↓ (makes HTTP request to 172.30.0.30:10000) Node.js API Proxy ↓ (injects Authorization: Bearer $OPENAI_API_KEY) ↓ (routes via HTTP_PROXY to Squid) @@ -172,10 +185,14 @@ The Node.js proxy automatically adds: ### CLI Options ```bash -awf --enable-api-proxy [OPTIONS] -- COMMAND +awf [OPTIONS] -- COMMAND ``` -**Required environment variables** (at least one): +**API Proxy behavior** (enabled by default): +- Automatically deploys when `OPENAI_API_KEY` or `ANTHROPIC_API_KEY` is present +- Use `--no-enable-api-proxy` to disable explicitly + +**Environment variables** (at least one needed for deployment): - `OPENAI_API_KEY` - OpenAI API key - `ANTHROPIC_API_KEY` - Anthropic API key diff --git a/scripts/ci/postprocess-smoke-workflows.ts b/scripts/ci/postprocess-smoke-workflows.ts index 8cf3a2df..1c885a1c 100644 --- a/scripts/ci/postprocess-smoke-workflows.ts +++ b/scripts/ci/postprocess-smoke-workflows.ts @@ -94,6 +94,11 @@ const shallowDepthRegex = /^(\s+)depth: 1\n/gm; // instead of pre-built GHCR images that may be stale. const imageTagRegex = /--image-tag\s+[0-9.]+\s+--skip-pull/g; +// Remove ANTHROPIC_API_KEY from agent environment (security: API key should only be in api-proxy sidecar) +// The API key is passed to the api-proxy container by awf CLI when --enable-api-proxy is set. +// Match the env key + value line with any indentation +const anthropicApiKeyRegex = /^(\s*)ANTHROPIC_API_KEY:\s+\$\{\{\s*secrets\.ANTHROPIC_API_KEY\s*\}\}\n/gm; + for (const workflowPath of workflowPaths) { let content = fs.readFileSync(workflowPath, 'utf-8'); let modified = false; @@ -139,6 +144,14 @@ for (const workflowPath of workflowPaths) { console.log(` Replaced ${imageTagMatches.length} --image-tag/--skip-pull with --build-local`); } + // Remove ANTHROPIC_API_KEY from agent environment (security issue: key should only be in api-proxy) + const apiKeyMatches = content.match(anthropicApiKeyRegex); + if (apiKeyMatches) { + content = content.replace(anthropicApiKeyRegex, ''); + modified = true; + console.log(` Removed ${apiKeyMatches.length} ANTHROPIC_API_KEY env var(s) from agent`); + } + if (modified) { fs.writeFileSync(workflowPath, content); console.log(`Updated ${workflowPath}`); diff --git a/src/cli-workflow.ts b/src/cli-workflow.ts index 20079872..d14edb45 100644 --- a/src/cli-workflow.ts +++ b/src/cli-workflow.ts @@ -2,7 +2,7 @@ import { WrapperConfig } from './types'; export interface WorkflowDependencies { ensureFirewallNetwork: () => Promise<{ squidIp: string; agentIp: string; proxyIp: string; subnet: string }>; - setupHostIptables: (squidIp: string, port: number, dnsServers: string[], apiProxyIp?: string) => Promise; + setupHostIptables: (squidIp: string, port: number, dnsServers: string[]) => Promise; writeConfigs: (config: WrapperConfig) => Promise; startContainers: (workDir: string, allowedDomains: string[], proxyLogsDir?: string, skipPull?: boolean) => Promise; runAgentCommand: ( @@ -43,7 +43,7 @@ export async function runMainWorkflow( logger.info('Setting up host-level firewall network and iptables rules...'); const networkConfig = await dependencies.ensureFirewallNetwork(); const dnsServers = config.dnsServers || ['8.8.8.8', '8.8.4.4']; - // API proxy (when enabled) does NOT get a firewall exemption - it routes through Squid + // API proxy routes through Squid via https-proxy-agent, no firewall exemption needed await 
dependencies.setupHostIptables(networkConfig.squidIp, 3128, dnsServers); onHostIptablesSetup?.(); diff --git a/src/cli.ts b/src/cli.ts index 60b6b8a8..8b5c42d6 100644 --- a/src/cli.ts +++ b/src/cli.ts @@ -718,10 +718,10 @@ program ) .option( '--enable-api-proxy', - 'Enable API proxy sidecar for holding authentication credentials.\n' + - ' Deploys a Node.js proxy that injects API keys securely.\n' + - ' Supports OpenAI (Codex) and Anthropic (Claude) APIs.', - false + 'Enable Kong API Gateway sidecar for holding authentication credentials.\n' + + ' Deploys Kong Gateway that injects API keys securely.\n' + + ' Currently supports OpenAI (Codex) API only.', + true ) .argument('[args...]', 'Command and arguments to execute (use -- to separate from options)') .action(async (args: string[], options) => { diff --git a/src/docker-manager.test.ts b/src/docker-manager.test.ts index 7b115021..7752bd59 100644 --- a/src/docker-manager.test.ts +++ b/src/docker-manager.test.ts @@ -1450,22 +1450,20 @@ describe('docker-manager', () => { expect((proxy.networks as any)['awf-net'].ipv4_address).toBe('172.30.0.30'); }); - it('should include api-proxy service when enableApiProxy is true with Anthropic key', () => { + it('should NOT include api-proxy service when only Anthropic key is provided', () => { const configWithProxy = { ...mockConfig, enableApiProxy: true, anthropicApiKey: 'sk-ant-test-key' }; const result = generateDockerCompose(configWithProxy, mockNetworkConfigWithProxy); - expect(result.services['api-proxy']).toBeDefined(); - const proxy = result.services['api-proxy']; - expect(proxy.container_name).toBe('awf-api-proxy'); + expect(result.services['api-proxy']).toBeUndefined(); }); - it('should include api-proxy service with both keys', () => { + it('should include api-proxy service with OpenAI key only (Anthropic key goes to agent)', () => { const configWithProxy = { ...mockConfig, enableApiProxy: true, openaiApiKey: 'sk-test-openai-key', anthropicApiKey: 'sk-ant-test-key' }; const result = generateDockerCompose(configWithProxy, mockNetworkConfigWithProxy); expect(result.services['api-proxy']).toBeDefined(); const proxy = result.services['api-proxy']; const env = proxy.environment as Record; expect(env.OPENAI_API_KEY).toBe('sk-test-openai-key'); - expect(env.ANTHROPIC_API_KEY).toBe('sk-ant-test-key'); + expect(env.ANTHROPIC_API_KEY).toBeUndefined(); }); it('should only pass OpenAI key when only OpenAI key is provided', () => { @@ -1477,21 +1475,19 @@ describe('docker-manager', () => { expect(env.ANTHROPIC_API_KEY).toBeUndefined(); }); - it('should only pass Anthropic key when only Anthropic key is provided', () => { + it('should NOT deploy api-proxy when only Anthropic key is provided', () => { const configWithProxy = { ...mockConfig, enableApiProxy: true, anthropicApiKey: 'sk-ant-test-key' }; const result = generateDockerCompose(configWithProxy, mockNetworkConfigWithProxy); - const proxy = result.services['api-proxy']; - const env = proxy.environment as Record; - expect(env.ANTHROPIC_API_KEY).toBe('sk-ant-test-key'); - expect(env.OPENAI_API_KEY).toBeUndefined(); + expect(result.services['api-proxy']).toBeUndefined(); }); - it('should use GHCR image by default', () => { + it('should always build api-proxy locally (GHCR image not yet published)', () => { const configWithProxy = { ...mockConfig, enableApiProxy: true, openaiApiKey: 'sk-test-key', buildLocal: false }; const result = generateDockerCompose(configWithProxy, mockNetworkConfigWithProxy); const proxy = result.services['api-proxy']; - 
expect(proxy.image).toBe('ghcr.io/github/gh-aw-firewall/api-proxy:latest'); - expect(proxy.build).toBeUndefined(); + expect(proxy.build).toBeDefined(); + expect((proxy.build as any).context).toContain('containers/api-proxy'); + expect(proxy.image).toBeUndefined(); }); it('should build locally when buildLocal is true', () => { @@ -1503,11 +1499,49 @@ describe('docker-manager', () => { expect(proxy.image).toBeUndefined(); }); - it('should use custom registry and tag', () => { + it('should always build api-proxy locally regardless of registry/tag settings', () => { const configWithProxy = { ...mockConfig, enableApiProxy: true, openaiApiKey: 'sk-test-key', buildLocal: false, imageRegistry: 'my-registry.com', imageTag: 'v1.0.0' }; const result = generateDockerCompose(configWithProxy, mockNetworkConfigWithProxy); const proxy = result.services['api-proxy']; - expect(proxy.image).toBe('my-registry.com/api-proxy:v1.0.0'); + expect(proxy.build).toBeDefined(); + expect((proxy.build as any).context).toContain('containers/api-proxy'); + expect(proxy.image).toBeUndefined(); + }); + + it('should add api-proxy to NO_PROXY so agent traffic bypasses Squid', () => { + const configWithProxy = { ...mockConfig, enableApiProxy: true, openaiApiKey: 'sk-test-key' }; + const result = generateDockerCompose(configWithProxy, mockNetworkConfigWithProxy); + const agent = result.services.agent; + const env = agent.environment as Record; + expect(env.NO_PROXY).toContain('127.0.0.1'); + expect(env.NO_PROXY).toContain('localhost'); + expect(env.NO_PROXY).toContain('api-proxy'); + expect(env.NO_PROXY).toContain('172.30.0.30'); + expect(env.NO_PROXY).toContain('172.30.0.0/16'); + expect(env.no_proxy).toBe(env.NO_PROXY); + }); + + it('should append api-proxy to existing NO_PROXY when host access is enabled', () => { + const configWithProxy = { ...mockConfig, enableApiProxy: true, openaiApiKey: 'sk-test-key', enableHostAccess: true }; + const result = generateDockerCompose(configWithProxy, mockNetworkConfigWithProxy); + const agent = result.services.agent; + const env = agent.environment as Record; + // Should contain both the host access NO_PROXY entries and api-proxy + expect(env.NO_PROXY).toContain('127.0.0.1'); + expect(env.NO_PROXY).toContain('localhost'); + expect(env.NO_PROXY).toContain('host.docker.internal'); + expect(env.NO_PROXY).toContain('api-proxy'); + expect(env.NO_PROXY).toContain('172.30.0.30'); + expect(env.NO_PROXY).toContain('172.30.0.0/16'); + expect(env.no_proxy).toBe(env.NO_PROXY); + }); + + it('should pass AWF_API_PROXY_IP to agent environment', () => { + const configWithProxy = { ...mockConfig, enableApiProxy: true, openaiApiKey: 'sk-test-key' }; + const result = generateDockerCompose(configWithProxy, mockNetworkConfigWithProxy); + const agent = result.services.agent; + const env = agent.environment as Record; + expect(env.AWF_API_PROXY_IP).toBe('172.30.0.30'); }); it('should configure healthcheck for api-proxy', () => { @@ -1515,7 +1549,7 @@ describe('docker-manager', () => { const result = generateDockerCompose(configWithProxy, mockNetworkConfigWithProxy); const proxy = result.services['api-proxy']; expect(proxy.healthcheck).toBeDefined(); - expect((proxy.healthcheck as any).test).toEqual(['CMD', 'curl', '-f', 'http://localhost:10000/health']); + expect((proxy.healthcheck as any).test).toEqual(['CMD', 'curl', '-f', 'http://localhost:8001/status']); }); it('should drop all capabilities', () => { @@ -1550,7 +1584,7 @@ describe('docker-manager', () => { const result = 
generateDockerCompose(configWithProxy, mockNetworkConfigWithProxy); const agent = result.services.agent; const env = agent.environment as Record; - expect(env.OPENAI_BASE_URL).toBe('http://api-proxy:10000'); + expect(env.OPENAI_BASE_URL).toBe('http://172.30.0.30:8000'); }); it('should configure HTTP_PROXY and HTTPS_PROXY in api-proxy to route through Squid', () => { @@ -1562,30 +1596,30 @@ describe('docker-manager', () => { expect(env.HTTPS_PROXY).toBe('http://172.30.0.10:3128'); }); - it('should set ANTHROPIC_BASE_URL in agent when Anthropic key is provided', () => { + it('should NOT set ANTHROPIC_BASE_URL in agent when Anthropic key is provided', () => { const configWithProxy = { ...mockConfig, enableApiProxy: true, anthropicApiKey: 'sk-ant-test-key' }; const result = generateDockerCompose(configWithProxy, mockNetworkConfigWithProxy); const agent = result.services.agent; const env = agent.environment as Record; - expect(env.ANTHROPIC_BASE_URL).toBe('http://api-proxy:10001'); + expect(env.ANTHROPIC_BASE_URL).toBeUndefined(); }); - it('should set both BASE_URL variables when both keys are provided', () => { + it('should set only OPENAI_BASE_URL when both keys are provided', () => { const configWithProxy = { ...mockConfig, enableApiProxy: true, openaiApiKey: 'sk-test-openai-key', anthropicApiKey: 'sk-ant-test-key' }; const result = generateDockerCompose(configWithProxy, mockNetworkConfigWithProxy); const agent = result.services.agent; const env = agent.environment as Record; - expect(env.OPENAI_BASE_URL).toBe('http://api-proxy:10000'); - expect(env.ANTHROPIC_BASE_URL).toBe('http://api-proxy:10001'); + expect(env.OPENAI_BASE_URL).toBe('http://172.30.0.30:8000'); + expect(env.ANTHROPIC_BASE_URL).toBeUndefined(); }); - it('should not set OPENAI_BASE_URL in agent when only Anthropic key is provided', () => { + it('should not set OPENAI_BASE_URL or ANTHROPIC_BASE_URL in agent when only Anthropic key is provided', () => { const configWithProxy = { ...mockConfig, enableApiProxy: true, anthropicApiKey: 'sk-ant-test-key' }; const result = generateDockerCompose(configWithProxy, mockNetworkConfigWithProxy); const agent = result.services.agent; const env = agent.environment as Record; expect(env.OPENAI_BASE_URL).toBeUndefined(); - expect(env.ANTHROPIC_BASE_URL).toBe('http://api-proxy:10001'); + expect(env.ANTHROPIC_BASE_URL).toBeUndefined(); }); it('should not set ANTHROPIC_BASE_URL in agent when only OpenAI key is provided', () => { @@ -1594,7 +1628,7 @@ describe('docker-manager', () => { const agent = result.services.agent; const env = agent.environment as Record; expect(env.ANTHROPIC_BASE_URL).toBeUndefined(); - expect(env.OPENAI_BASE_URL).toBe('http://api-proxy:10000'); + expect(env.OPENAI_BASE_URL).toBe('http://172.30.0.30:8000'); }); }); }); @@ -1854,6 +1888,58 @@ describe('docker-manager', () => { process.env.SUDO_USER = originalSudoUser; } }); + + it('should include api-proxy in allowed domains when enableApiProxy is true', async () => { + const config: WrapperConfig = { + allowedDomains: ['github.com'], + agentCommand: 'echo test', + logLevel: 'info', + keepContainers: false, + workDir: testDir, + enableApiProxy: true, + openaiApiKey: 'sk-test-key', + }; + + try { + await writeConfigs(config); + } catch { + // May fail after writing configs + } + + // Verify squid.conf includes api-proxy hostname and IP in allowed domains + const squidConfPath = path.join(testDir, 'squid.conf'); + if (fs.existsSync(squidConfPath)) { + const content = fs.readFileSync(squidConfPath, 'utf-8'); + 
expect(content).toContain('github.com'); + expect(content).toContain('api-proxy'); + expect(content).toContain('172.30.0.30'); // api-proxy IP address + } + }); + + it('should not include api-proxy in allowed domains when enableApiProxy is false', async () => { + const config: WrapperConfig = { + allowedDomains: ['github.com'], + agentCommand: 'echo test', + logLevel: 'info', + keepContainers: false, + workDir: testDir, + enableApiProxy: false, + }; + + try { + await writeConfigs(config); + } catch { + // May fail after writing configs + } + + // Verify squid.conf does not include api-proxy when disabled + const squidConfPath = path.join(testDir, 'squid.conf'); + if (fs.existsSync(squidConfPath)) { + const content = fs.readFileSync(squidConfPath, 'utf-8'); + expect(content).toContain('github.com'); + expect(content).not.toContain('api-proxy'); + } + }); }); describe('startContainers', () => { diff --git a/src/docker-manager.ts b/src/docker-manager.ts index 4e5bbe3e..4e60d845 100644 --- a/src/docker-manager.ts +++ b/src/docker-manager.ts @@ -320,6 +320,14 @@ export function generateDockerCompose( 'SUDO_GID', // Sudo metadata ]); + // When api-proxy is enabled, exclude OpenAI API key from agent environment + // The key is passed to the api-proxy sidecar only (not to the agent) + // Note: ANTHROPIC_API_KEY is NOT excluded - Claude uses it directly in the agent container + const willUseApiProxy = config.enableApiProxy && config.openaiApiKey; + if (willUseApiProxy) { + EXCLUDED_ENV_VARS.add('OPENAI_API_KEY'); + } + // Start with required/overridden environment variables // Use the real user's home (not /root when running with sudo) const homeDir = getRealUserHome(); @@ -390,7 +398,10 @@ export function generateDockerCompose( if (process.env.GH_TOKEN) environment.GH_TOKEN = process.env.GH_TOKEN; if (process.env.GITHUB_PERSONAL_ACCESS_TOKEN) environment.GITHUB_PERSONAL_ACCESS_TOKEN = process.env.GITHUB_PERSONAL_ACCESS_TOKEN; // Anthropic API key for Claude Code - if (process.env.ANTHROPIC_API_KEY) environment.ANTHROPIC_API_KEY = process.env.ANTHROPIC_API_KEY; + // Claude always uses the key directly in the agent container (not via api-proxy) + if (process.env.ANTHROPIC_API_KEY) { + environment.ANTHROPIC_API_KEY = process.env.ANTHROPIC_API_KEY; + } if (process.env.USER) environment.USER = process.env.USER; if (process.env.TERM) environment.TERM = process.env.TERM; if (process.env.XDG_CONFIG_HOME) environment.XDG_CONFIG_HOME = process.env.XDG_CONFIG_HOME; @@ -844,6 +855,12 @@ export function generateDockerCompose( environment.AWF_ENABLE_HOST_ACCESS = '1'; } + // Pass API proxy flag to agent for iptables configuration + // Only set when api-proxy will actually be deployed (i.e., at least one API key is provided) + if (config.enableApiProxy && networkConfig.proxyIp && (config.openaiApiKey || config.anthropicApiKey)) { + environment.AWF_ENABLE_API_PROXY = '1'; + } + // Use GHCR image or build locally // Priority: GHCR preset images > local build (when requested) > custom images // For presets ('default', 'act'), use GHCR images @@ -898,8 +915,9 @@ export function generateDockerCompose( 'agent': agentService, }; - // Add Node.js API proxy sidecar if enabled - if (config.enableApiProxy && networkConfig.proxyIp) { + // Add Kong API Gateway sidecar if enabled and OpenAI API key is provided + // The api-proxy service is only used for OpenAI/Codex (Claude uses ANTHROPIC_API_KEY directly in agent) + if (config.enableApiProxy && networkConfig.proxyIp && config.openaiApiKey) { const proxyService: any = { 
container_name: 'awf-api-proxy', networks: { @@ -908,19 +926,18 @@ export function generateDockerCompose( }, }, environment: { - // Pass API keys securely to sidecar (not visible to agent) - ...(config.openaiApiKey && { OPENAI_API_KEY: config.openaiApiKey }), - ...(config.anthropicApiKey && { ANTHROPIC_API_KEY: config.anthropicApiKey }), + // Pass OpenAI API key securely to sidecar (not visible to agent) + OPENAI_API_KEY: config.openaiApiKey, // Route through Squid to respect domain whitelisting HTTP_PROXY: `http://${networkConfig.squidIp}:${SQUID_PORT}`, HTTPS_PROXY: `http://${networkConfig.squidIp}:${SQUID_PORT}`, }, healthcheck: { - test: ['CMD', 'curl', '-f', 'http://localhost:10000/health'], + test: ['CMD', 'curl', '-f', 'http://localhost:8001/status'], interval: '5s', timeout: '3s', retries: 5, - start_period: '5s', + start_period: '10s', }, // Security hardening: Drop all capabilities cap_drop: ['ALL'], @@ -934,15 +951,12 @@ export function generateDockerCompose( cpu_shares: 512, }; - // Use GHCR image or build locally - if (useGHCR) { - proxyService.image = `${registry}/api-proxy:${tag}`; - } else { - proxyService.build = { - context: path.join(projectRoot, 'containers/api-proxy'), - dockerfile: 'Dockerfile', - }; - } + // Always build api-proxy locally since it's not published to GHCR yet + // TODO: Once api-proxy image is published to GHCR, change this to use useGHCR like other containers + proxyService.build = { + context: path.join(projectRoot, 'containers/api-proxy'), + dockerfile: 'Dockerfile', + }; services['api-proxy'] = proxyService; @@ -951,18 +965,33 @@ export function generateDockerCompose( condition: 'service_healthy', }; - // Set environment variables in agent to use the proxy - if (config.openaiApiKey) { - environment.OPENAI_BASE_URL = `http://api-proxy:10000`; - logger.debug('OpenAI API will be proxied through sidecar at http://api-proxy:10000'); + // Add api-proxy to NO_PROXY so agent traffic goes directly to the sidecar + // instead of routing through Squid (which would block the "api-proxy" hostname) + // Include localhost, the specific IP, and the network CIDR to ensure all tools can bypass Squid + const proxyNoProxy = `127.0.0.1,localhost,${networkConfig.proxyIp},172.30.0.0/16,api-proxy`; + if (environment.NO_PROXY) { + environment.NO_PROXY += `,${proxyNoProxy}`; + environment.no_proxy = environment.NO_PROXY; + } else { + environment.NO_PROXY = proxyNoProxy; + environment.no_proxy = proxyNoProxy; } - if (config.anthropicApiKey) { - environment.ANTHROPIC_BASE_URL = `http://api-proxy:10001`; - logger.debug('Anthropic API will be proxied through sidecar at http://api-proxy:10001'); + + // Pass api-proxy IP to iptables setup so it can allow direct traffic + // Without this, the final DROP rule in setup-iptables.sh blocks ports 8000/8001 + environment.AWF_API_PROXY_IP = networkConfig.proxyIp; + + // Set environment variables in agent to use the Kong Gateway proxy + // Use IP address instead of hostname to avoid DNS resolution issues + // Kong listens on port 8000 for proxy traffic + // Note: ANTHROPIC_BASE_URL is NOT set - Claude uses ANTHROPIC_API_KEY directly + if (config.openaiApiKey) { + environment.OPENAI_BASE_URL = `http://${networkConfig.proxyIp}:8000`; + logger.debug(`OpenAI API will be proxied through Kong Gateway at http://${networkConfig.proxyIp}:8000`); } - logger.info('API proxy sidecar enabled - API keys will be held securely in sidecar container'); - logger.info('API proxy will route through Squid to respect domain whitelisting'); + logger.info('Kong 
API Gateway enabled for OpenAI/Codex - key will be held securely in Kong container'); + logger.info('Kong will route through Squid to respect domain whitelisting'); } return { @@ -1119,8 +1148,21 @@ export async function writeConfigs(config: WrapperConfig): Promise { // Write Squid config // Note: Use container path for SSL database since it's mounted at /var/spool/squid_ssl_db + // When API proxy is enabled and has API keys, add api-proxy hostname and IP to allowed domains/IPs so agent can communicate with it + // The IP address (172.30.0.30) must be separate from domains because Squid uses different ACL types: + // - domains use 'dstdomain' ACL (for DNS names like 'api-proxy') + // - IP addresses use 'dst' ACL (for IPs like '172.30.0.30') + const shouldAddApiProxyAccess = config.enableApiProxy && networkConfig.proxyIp && config.openaiApiKey; + const domainsForSquid = shouldAddApiProxyAccess + ? [...config.allowedDomains, 'api-proxy'] + : config.allowedDomains; + const ipsForSquid = shouldAddApiProxyAccess && networkConfig.proxyIp + ? [networkConfig.proxyIp] + : undefined; + const squidConfig = generateSquidConfig({ - domains: config.allowedDomains, + domains: domainsForSquid, + allowedIPs: ipsForSquid, blockedDomains: config.blockedDomains, port: SQUID_PORT, sslBump: config.sslBump, @@ -1129,6 +1171,7 @@ export async function writeConfigs(config: WrapperConfig): Promise { urlPatterns, enableHostAccess: config.enableHostAccess, allowHostPorts: config.allowHostPorts, + enableApiProxy: config.enableApiProxy, }); const squidConfigPath = path.join(config.workDir, 'squid.conf'); fs.writeFileSync(squidConfigPath, squidConfig, { mode: 0o600 }); @@ -1141,6 +1184,19 @@ export async function writeConfigs(config: WrapperConfig): Promise { const dockerComposePath = path.join(config.workDir, 'docker-compose.yml'); fs.writeFileSync(dockerComposePath, yaml.dump(dockerCompose), { mode: 0o600 }); logger.debug(`Docker Compose config written to: ${dockerComposePath}`); + + // Log BASE_URL environment variables for debugging + const agentEnv = dockerCompose.services['awf-agent']?.environment || {}; + if (agentEnv.ANTHROPIC_BASE_URL) { + logger.info(`Agent ANTHROPIC_BASE_URL set to: ${agentEnv.ANTHROPIC_BASE_URL}`); + } else { + logger.info('Agent ANTHROPIC_BASE_URL: not set (using default)'); + } + if (agentEnv.OPENAI_BASE_URL) { + logger.info(`Agent OPENAI_BASE_URL set to: ${agentEnv.OPENAI_BASE_URL}`); + } else { + logger.info('Agent OPENAI_BASE_URL: not set (using default)'); + } } /** diff --git a/src/host-iptables.ts b/src/host-iptables.ts index 30ad419d..abb154d5 100644 --- a/src/host-iptables.ts +++ b/src/host-iptables.ts @@ -247,9 +247,10 @@ export async function setupHostIptables(squidIp: string, squidPort: number, dnsS '-j', 'ACCEPT', ]); - // Note: API proxy sidecar (when enabled) does NOT get a firewall exemption. - // It routes through Squid via HTTP_PROXY/HTTPS_PROXY environment variables, - // ensuring domain whitelisting is enforced by Squid ACLs. + // Note: API proxy sidecar does NOT get a firewall exemption. + // It routes through Squid via https-proxy-agent, ensuring domain whitelisting + // is enforced by Squid ACLs. The api-proxy only needs to reach Squid (allowed + // by the Squid proxy rule below) for its outbound HTTPS connections. // 2. 
Allow established and related connections (return traffic)
await execa('iptables', [
diff --git a/src/squid-config.test.ts b/src/squid-config.test.ts
index 19a4c3c6..f3379e10 100644
--- a/src/squid-config.test.ts
+++ b/src/squid-config.test.ts
@@ -129,6 +129,46 @@ describe('generateSquidConfig', () => {
     });
   });

+  describe('Bare Hostname Handling', () => {
+    it('should handle bare hostnames without adding leading dot', () => {
+      // Bare hostnames (no dots) like Docker container names should not get a leading dot
+      // because they have no subdomains to match
+      const config: SquidConfig = {
+        domains: ['api-proxy', 'localhost'],
+        port: defaultPort,
+      };
+      const result = generateSquidConfig(config);
+      expect(result).toContain('acl allowed_domains dstdomain api-proxy');
+      expect(result).toContain('acl allowed_domains dstdomain localhost');
+      expect(result).not.toContain('.api-proxy');
+      expect(result).not.toContain('.localhost');
+    });
+
+    it('should handle mixed bare hostnames and FQDNs', () => {
+      const config: SquidConfig = {
+        domains: ['api-proxy', 'github.com', 'localhost', 'example.org'],
+        port: defaultPort,
+      };
+      const result = generateSquidConfig(config);
+      // Bare hostnames without leading dot
+      expect(result).toContain('acl allowed_domains dstdomain api-proxy');
+      expect(result).toContain('acl allowed_domains dstdomain localhost');
+      // FQDNs with leading dot
+      expect(result).toContain('acl allowed_domains dstdomain .github.com');
+      expect(result).toContain('acl allowed_domains dstdomain .example.org');
+    });
+
+    it('should handle bare hostnames with protocol prefixes', () => {
+      const config: SquidConfig = {
+        domains: ['http://api-proxy'],
+        port: defaultPort,
+      };
+      const result = generateSquidConfig(config);
+      expect(result).toContain('acl allowed_http_only dstdomain api-proxy');
+      expect(result).not.toContain('.api-proxy');
+    });
+  });
+
   describe('Redundant Subdomain Removal', () => {
     it('should remove subdomain when parent domain is present', () => {
       const config: SquidConfig = {
@@ -293,12 +333,14 @@ describe('generateSquidConfig', () => {
     });

     it('should handle TLD-only domain (edge case)', () => {
+      // TLD-only (e.g., 'com') is a bare hostname with no dots, so no leading dot
       const config: SquidConfig = {
         domains: ['com'],
         port: defaultPort,
       };
       const result = generateSquidConfig(config);
-      expect(result).toContain('acl allowed_domains dstdomain .com');
+      expect(result).toContain('acl allowed_domains dstdomain com');
+      expect(result).not.toContain('.com');
     });
   });

@@ -1494,3 +1536,128 @@ describe('Empty Domain List', () => {
     expect(result).not.toContain('acl allowed_https_only');
   });
 });
+
+describe('Kong Gateway Port Configuration', () => {
+  it('should add ports 8000 and 8001 to Safe_ports when enableApiProxy is true', () => {
+    const config: SquidConfig = {
+      domains: ['github.com'],
+      port: 3128,
+      enableApiProxy: true,
+    };
+    const result = generateSquidConfig(config);
+    expect(result).toContain('acl Safe_ports port 8000 # Kong Gateway - OpenAI proxy');
+    expect(result).toContain('acl Safe_ports port 8001 # Kong Gateway - Admin API');
+  });
+
+  it('should NOT add ports 8000 and 8001 when enableApiProxy is false', () => {
+    const config: SquidConfig = {
+      domains: ['github.com'],
+      port: 3128,
+      enableApiProxy: false,
+    };
+    const result = generateSquidConfig(config);
+    expect(result).not.toContain('acl Safe_ports port 8000');
+    expect(result).not.toContain('acl Safe_ports port 8001');
+  });
+
+  it('should NOT add ports 8000 and 8001 when enableApiProxy is undefined', () => {
+    const config: SquidConfig = {
+      domains: ['github.com'],
+      port: 3128,
+    };
+    const result = generateSquidConfig(config);
+    expect(result).not.toContain('acl Safe_ports port 8000');
+    expect(result).not.toContain('acl Safe_ports port 8001');
+  });
+
+  it('should add Kong Gateway ports along with user-specified ports', () => {
+    const config: SquidConfig = {
+      domains: ['github.com'],
+      port: 3128,
+      enableApiProxy: true,
+      enableHostAccess: true,
+      allowHostPorts: '3000,8080',
+    };
+    const result = generateSquidConfig(config);
+    expect(result).toContain('acl Safe_ports port 8000 # Kong Gateway - OpenAI proxy');
+    expect(result).toContain('acl Safe_ports port 8001 # Kong Gateway - Admin API');
+    expect(result).toContain('acl Safe_ports port 3000 # User-specified via --allow-host-ports');
+    expect(result).toContain('acl Safe_ports port 8080 # User-specified via --allow-host-ports');
+  });
+});
+
+describe('IP Address ACL Support', () => {
+  it('should generate dst ACL for IP addresses', () => {
+    const config: SquidConfig = {
+      domains: ['github.com'],
+      allowedIPs: ['172.30.0.30', '10.0.0.5'],
+      port: 3128,
+    };
+    const result = generateSquidConfig(config);
+    expect(result).toContain('# ACL definitions for allowed IP addresses (HTTP and HTTPS)');
+    expect(result).toContain('acl allowed_ips dst 172.30.0.30');
+    expect(result).toContain('acl allowed_ips dst 10.0.0.5');
+  });
+
+  it('should include IPs in deny rule with domains', () => {
+    const config: SquidConfig = {
+      domains: ['github.com'],
+      allowedIPs: ['172.30.0.30'],
+      port: 3128,
+    };
+    const result = generateSquidConfig(config);
+    expect(result).toContain('http_access deny !allowed_domains !allowed_ips');
+  });
+
+  it('should include IPs in deny rule with patterns', () => {
+    const config: SquidConfig = {
+      domains: ['*.github.com'],
+      allowedIPs: ['172.30.0.30'],
+      port: 3128,
+    };
+    const result = generateSquidConfig(config);
+    expect(result).toContain('http_access deny !allowed_domains_regex !allowed_ips');
+  });
+
+  it('should include IPs in deny rule with both domains and patterns', () => {
+    const config: SquidConfig = {
+      domains: ['github.com', '*.example.com'],
+      allowedIPs: ['172.30.0.30'],
+      port: 3128,
+    };
+    const result = generateSquidConfig(config);
+    expect(result).toContain('http_access deny !allowed_domains !allowed_domains_regex !allowed_ips');
+  });
+
+  it('should work with only IPs (no domains)', () => {
+    const config: SquidConfig = {
+      domains: [],
+      allowedIPs: ['172.30.0.30', '10.0.0.5'],
+      port: 3128,
+    };
+    const result = generateSquidConfig(config);
+    expect(result).toContain('acl allowed_ips dst 172.30.0.30');
+    expect(result).toContain('acl allowed_ips dst 10.0.0.5');
+    expect(result).toContain('http_access deny !allowed_ips');
+    expect(result).not.toContain('allowed_domains');
+  });
+
+  it('should not generate IP ACLs when allowedIPs is undefined', () => {
+    const config: SquidConfig = {
+      domains: ['github.com'],
+      port: 3128,
+    };
+    const result = generateSquidConfig(config);
+    expect(result).not.toContain('allowed_ips');
+  });
+
+  it('should not generate IP ACLs when allowedIPs is empty array', () => {
+    const config: SquidConfig = {
+      domains: ['github.com'],
+      allowedIPs: [],
+      port: 3128,
+    };
+    const result = generateSquidConfig(config);
+    expect(result).not.toContain('allowed_ips');
+  });
+});
diff --git a/src/squid-config.ts b/src/squid-config.ts
index 5e1478d6..1c7e03c0 100644
--- a/src/squid-config.ts
+++ b/src/squid-config.ts
@@ -53,10 +53,31 @@ interface PatternsByProtocol {
 }

 /**
- * Helper to add leading dot to domain for Squid subdomain matching
+ * Helper to format domain for Squid ACL matching
+ *
+ * For fully qualified domains (containing dots), adds a leading dot to enable
+ * subdomain matching (e.g., .github.com matches both github.com and api.github.com).
+ *
+ * For bare hostnames (no dots, like Docker container names), returns as-is without
+ * a leading dot since bare hostnames have no subdomains to match.
+ *
+ * @param domain - Domain or hostname to format
+ * @returns Formatted string for Squid dstdomain ACL
  */
 function formatDomainForSquid(domain: string): string {
-  return domain.startsWith('.') ? domain : `.${domain}`;
+  // Already has leading dot - return as-is
+  if (domain.startsWith('.')) {
+    return domain;
+  }
+
+  // Bare hostname (no dots) - return as-is (e.g., 'api-proxy', 'localhost')
+  // These are typically Docker container names or single-word hostnames
+  if (!domain.includes('.')) {
+    return domain;
+  }
+
+  // Fully qualified domain - add leading dot for subdomain matching
+  return `.${domain}`;
 }

 /**
@@ -205,7 +226,7 @@ ${urlAclSection}${urlAccessRules}`;
  * // Blocked: internal.example.com -> acl blocked_domains dstdomain .internal.example.com
  */
 export function generateSquidConfig(config: SquidConfig): string {
-  const { domains, blockedDomains, port, sslBump, caFiles, sslDbPath, urlPatterns, enableHostAccess, allowHostPorts } = config;
+  const { domains, blockedDomains, port, sslBump, caFiles, sslDbPath, urlPatterns, enableHostAccess, allowHostPorts, enableApiProxy, allowedIPs } = config;

   // Parse domains into plain domains and wildcard patterns
   // Note: parseDomainList extracts and preserves protocol info from prefixes (http://, https://)
@@ -293,6 +314,17 @@ export function generateSquidConfig(config: SquidConfig): string {
     }
   }

+  // === IP ADDRESSES (BOTH PROTOCOLS) ===
+  // IP addresses must use 'dst' ACL type, not 'dstdomain'
+  // This is required for api-proxy and other container-to-container communication
+  if (allowedIPs && allowedIPs.length > 0) {
+    aclLines.push('');
+    aclLines.push('# ACL definitions for allowed IP addresses (HTTP and HTTPS)');
+    for (const ip of allowedIPs) {
+      aclLines.push(`acl allowed_ips dst ${ip}`);
+    }
+  }
+
   // Build access rules
   // Order matters: allow rules come before deny rules

@@ -325,6 +357,7 @@ export function generateSquidConfig(config: SquidConfig): string {
   // Build the deny rule based on configured domains and their protocols
   const hasBothDomains = domainsByProto.both.length > 0;
   const hasBothPatterns = patternsByProto.both.length > 0;
+  const hasAllowedIPs = allowedIPs && allowedIPs.length > 0;

   // Process blocked domains (optional) - blocklist takes precedence over allowlist
   const blockedAclLines: string[] = [];
@@ -361,12 +394,20 @@ export function generateSquidConfig(config: SquidConfig): string {

   // Build the deny rule based on configured domains and their protocols
   let denyRule: string;
-  if (hasBothDomains && hasBothPatterns) {
+  if (hasBothDomains && hasBothPatterns && hasAllowedIPs) {
+    denyRule = 'http_access deny !allowed_domains !allowed_domains_regex !allowed_ips';
+  } else if (hasBothDomains && hasBothPatterns) {
     denyRule = 'http_access deny !allowed_domains !allowed_domains_regex';
+  } else if (hasBothDomains && hasAllowedIPs) {
+    denyRule = 'http_access deny !allowed_domains !allowed_ips';
+  } else if (hasBothPatterns && hasAllowedIPs) {
+    denyRule = 'http_access deny !allowed_domains_regex !allowed_ips';
   } else if (hasBothDomains) {
     denyRule = 'http_access deny !allowed_domains';
   } else if (hasBothPatterns) {
     denyRule = 'http_access deny !allowed_domains_regex';
+  } else if (hasAllowedIPs) {
+    denyRule = 'http_access deny !allowed_ips';
   } else if (hasHttpOnly || hasHttpsOnly) {
     // Only protocol-specific domains - deny all by default
     // The allow rules above will permit the specific traffic
@@ -437,6 +478,12 @@ acl SSL_ports port 443
 acl Safe_ports port 80 # HTTP
 acl Safe_ports port 443 # HTTPS`;

+  // Add Kong Gateway ports when enabled (port 8000 for OpenAI proxy, 8001 for admin API)
+  if (enableApiProxy) {
+    portAclsSection += `\nacl Safe_ports port 8000 # Kong Gateway - OpenAI proxy`;
+    portAclsSection += `\nacl Safe_ports port 8001 # Kong Gateway - Admin API`;
+  }
+
   // Add user-specified ports if --allow-host-ports was provided
   if (enableHostAccess && allowHostPorts) {
     // Parse comma-separated ports/ranges and add to ACL
diff --git a/src/types.ts b/src/types.ts
index bf73cbbc..6299cb6a 100644
--- a/src/types.ts
+++ b/src/types.ts
@@ -384,26 +384,26 @@ export interface WrapperConfig {
   /**
    * Enable API proxy sidecar for holding authentication credentials
    *
-   * When true, deploys a Node.js proxy sidecar container that:
-   * - Holds OpenAI and Anthropic API keys securely
+   * When true, deploys a Kong API Gateway sidecar container that:
+   * - Holds OpenAI API key securely
    * - Automatically injects authentication headers
    * - Routes all traffic through Squid to respect domain whitelisting
    * - Proxies requests to LLM providers
    *
-   * The sidecar exposes two endpoints accessible from the agent container:
-   * - http://api-proxy:10000 - OpenAI API proxy (for Codex)
-   * - http://api-proxy:10001 - Anthropic API proxy (for Claude)
+   * The sidecar exposes Kong Gateway accessible from the agent container:
+   * - http://172.30.0.30:8000 - OpenAI API proxy (for Codex)
    *
-   * When the corresponding API key is provided, the following environment
-   * variables are set in the agent container:
-   * - OPENAI_BASE_URL=http://api-proxy:10000 (set when OPENAI_API_KEY is provided)
-   * - ANTHROPIC_BASE_URL=http://api-proxy:10001 (set when ANTHROPIC_API_KEY is provided)
+   * When OPENAI_API_KEY is provided, the following environment variable is set:
+   * - OPENAI_BASE_URL=http://172.30.0.30:8000
+   *
+   * Note: Anthropic/Claude API key (ANTHROPIC_API_KEY) is passed directly to the
+   * agent container and does not use the api-proxy sidecar.
    *
    * API keys are passed via environment variables:
-   * - OPENAI_API_KEY - Optional OpenAI API key for Codex
-   * - ANTHROPIC_API_KEY - Optional Anthropic API key for Claude
+   * - OPENAI_API_KEY - Optional OpenAI API key for Codex (passed to Kong)
+   * - ANTHROPIC_API_KEY - Optional Anthropic API key for Claude (passed to agent)
    *
-   * @default false
+   * @default true
    * @example
    * ```bash
    * # Enable API proxy with keys from environment
@@ -546,6 +546,32 @@ export interface SquidConfig {
    * @example "3000-3010,8000-8090"
    */
   allowHostPorts?: string;
+
+  /**
+   * Whether Kong API Gateway sidecar is enabled
+   *
+   * When true, adds ports 8000 (OpenAI proxy) and 8001 (Kong admin) to Safe_ports ACL
+   * to allow traffic to the Kong Gateway sidecar container.
+   *
+   * @default false
+   */
+  enableApiProxy?: boolean;
+
+  /**
+   * List of IP addresses to allow (in addition to domains)
+   *
+   * IP addresses must be specified separately from domains because
+   * Squid uses different ACL types:
+   * - domains use 'dstdomain' ACL type
+   * - IP addresses use 'dst' ACL type
+   *
+   * This is typically used for:
+   * - api-proxy sidecar (172.30.0.30)
+   * - Other container-to-container communication
+   *
+   * @example ['172.30.0.30', '10.0.0.5']
+   */
+  allowedIPs?: string[];
 }

 /**
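Editor's note, not part of the patch: a minimal usage sketch of the new SquidConfig surface introduced above (enableApiProxy, allowedIPs, and bare-hostname handling). The import paths are assumptions based on the files touched in this diff; the expected config fragments are taken from the tests above.

// Hypothetical caller; paths assumed to mirror src/squid-config.ts and src/types.ts.
import { generateSquidConfig } from './squid-config';
import type { SquidConfig } from './types';

const config: SquidConfig = {
  // 'github.com' is formatted as '.github.com' (subdomain matching);
  // the bare hostname 'api-proxy' is kept as-is (no leading dot).
  domains: ['github.com', 'api-proxy'],
  // IPs are matched with a 'dst' ACL rather than 'dstdomain'.
  allowedIPs: ['172.30.0.30'],
  // Adds Safe_ports 8000/8001 for the Kong Gateway sidecar.
  enableApiProxy: true,
  port: 3128,
};

const squidConf = generateSquidConfig(config);
// Per the tests above, squidConf should contain lines such as:
//   acl allowed_domains dstdomain .github.com
//   acl allowed_domains dstdomain api-proxy
//   acl allowed_ips dst 172.30.0.30
//   acl Safe_ports port 8000 # Kong Gateway - OpenAI proxy
//   http_access deny !allowed_domains !allowed_ips
console.log(squidConf);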