mirror of https://github.com/anthropics/claude-code-action.git
synced 2026-01-23 23:14:13 +08:00

Compare commits: v1.0.19 ... claude/sla (2 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | b86acc4a28 |  |
|  | 8a85f207b9 |  |
@@ -17,7 +17,6 @@ TASK OVERVIEW:

1. First, fetch the list of labels available in this repository by running: `gh label list`. Run exactly this command with nothing else.

2. Next, use gh commands to get context about the issue:

   - Use `gh issue view ${{ github.event.issue.number }}` to retrieve the current issue's details
   - Use `gh search issues` to find similar issues that might provide context for proper categorization
   - You have access to these Bash commands:

@@ -27,7 +26,6 @@ TASK OVERVIEW:

   - Bash(gh search:\*) - to search for similar issues

3. Analyze the issue content, considering:

   - The issue title and description
   - The type of issue (bug report, feature request, question, etc.)
   - Technical areas mentioned

@@ -36,7 +34,6 @@ TASK OVERVIEW:

   - Components affected

4. Select appropriate labels from the available labels list provided above:

   - Choose labels that accurately reflect the issue's nature
   - Be specific but comprehensive
   - IMPORTANT: Add a priority label (P1, P2, or P3) based on the label descriptions from gh label list
2 .github/workflows/claude-review.yml vendored

@@ -2,7 +2,7 @@ name: PR Review

on:
  pull_request:
    types: [opened]
    types: [opened, synchronize, ready_for_review, reopened]

jobs:
  review:
307 .github/workflows/test-structured-output.yml vendored

@@ -1,307 +0,0 @@

name: Test Structured Outputs

on:
  push:
    branches:
      - main
  pull_request:
  workflow_dispatch:

permissions:
  contents: read

jobs:
  test-basic-types:
    name: Test Basic Type Conversions
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4

      - name: Test with explicit values
        id: test
        uses: ./base-action
        with:
          prompt: |
            Run this command: echo "test"

            Then return EXACTLY these values:
            - text_field: "hello"
            - number_field: 42
            - boolean_true: true
            - boolean_false: false
          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
          claude_args: |
            --allowedTools Bash
            --json-schema '{"type":"object","properties":{"text_field":{"type":"string"},"number_field":{"type":"number"},"boolean_true":{"type":"boolean"},"boolean_false":{"type":"boolean"}},"required":["text_field","number_field","boolean_true","boolean_false"]}'

      - name: Verify outputs
        run: |
          # Parse the structured_output JSON
          OUTPUT='${{ steps.test.outputs.structured_output }}'

          # Test string pass-through
          TEXT_FIELD=$(echo "$OUTPUT" | jq -r '.text_field')
          if [ "$TEXT_FIELD" != "hello" ]; then
            echo "❌ String: expected 'hello', got '$TEXT_FIELD'"
            exit 1
          fi

          # Test number → string conversion
          NUMBER_FIELD=$(echo "$OUTPUT" | jq -r '.number_field')
          if [ "$NUMBER_FIELD" != "42" ]; then
            echo "❌ Number: expected '42', got '$NUMBER_FIELD'"
            exit 1
          fi

          # Test boolean → "true" conversion
          BOOLEAN_TRUE=$(echo "$OUTPUT" | jq -r '.boolean_true')
          if [ "$BOOLEAN_TRUE" != "true" ]; then
            echo "❌ Boolean true: expected 'true', got '$BOOLEAN_TRUE'"
            exit 1
          fi

          # Test boolean → "false" conversion
          BOOLEAN_FALSE=$(echo "$OUTPUT" | jq -r '.boolean_false')
          if [ "$BOOLEAN_FALSE" != "false" ]; then
            echo "❌ Boolean false: expected 'false', got '$BOOLEAN_FALSE'"
            exit 1
          fi

          echo "✅ All basic type conversions correct"

  test-complex-types:
    name: Test Arrays and Objects
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4

      - name: Test complex types
        id: test
        uses: ./base-action
        with:
          prompt: |
            Run: echo "ready"

            Return EXACTLY:
            - items: ["apple", "banana", "cherry"]
            - config: {"key": "value", "count": 3}
            - empty_array: []
          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
          claude_args: |
            --allowedTools Bash
            --json-schema '{"type":"object","properties":{"items":{"type":"array","items":{"type":"string"}},"config":{"type":"object"},"empty_array":{"type":"array"}},"required":["items","config","empty_array"]}'

      - name: Verify JSON stringification
        run: |
          # Parse the structured_output JSON
          OUTPUT='${{ steps.test.outputs.structured_output }}'

          # Arrays should be JSON stringified
          if ! echo "$OUTPUT" | jq -e '.items | length == 3' > /dev/null; then
            echo "❌ Array not properly formatted"
            echo "$OUTPUT" | jq '.items'
            exit 1
          fi

          # Objects should be JSON stringified
          if ! echo "$OUTPUT" | jq -e '.config.key == "value"' > /dev/null; then
            echo "❌ Object not properly formatted"
            echo "$OUTPUT" | jq '.config'
            exit 1
          fi

          # Empty arrays should work
          if ! echo "$OUTPUT" | jq -e '.empty_array | length == 0' > /dev/null; then
            echo "❌ Empty array not properly formatted"
            echo "$OUTPUT" | jq '.empty_array'
            exit 1
          fi

          echo "✅ All complex types handled correctly"

  test-edge-cases:
    name: Test Edge Cases
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4

      - name: Test edge cases
        id: test
        uses: ./base-action
        with:
          prompt: |
            Run: echo "test"

            Return EXACTLY:
            - zero: 0
            - empty_string: ""
            - negative: -5
            - decimal: 3.14
          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
          claude_args: |
            --allowedTools Bash
            --json-schema '{"type":"object","properties":{"zero":{"type":"number"},"empty_string":{"type":"string"},"negative":{"type":"number"},"decimal":{"type":"number"}},"required":["zero","empty_string","negative","decimal"]}'

      - name: Verify edge cases
        run: |
          # Parse the structured_output JSON
          OUTPUT='${{ steps.test.outputs.structured_output }}'

          # Zero should be "0", not empty or falsy
          ZERO=$(echo "$OUTPUT" | jq -r '.zero')
          if [ "$ZERO" != "0" ]; then
            echo "❌ Zero: expected '0', got '$ZERO'"
            exit 1
          fi

          # Empty string should be empty (not "null" or missing)
          EMPTY_STRING=$(echo "$OUTPUT" | jq -r '.empty_string')
          if [ "$EMPTY_STRING" != "" ]; then
            echo "❌ Empty string: expected '', got '$EMPTY_STRING'"
            exit 1
          fi

          # Negative numbers should work
          NEGATIVE=$(echo "$OUTPUT" | jq -r '.negative')
          if [ "$NEGATIVE" != "-5" ]; then
            echo "❌ Negative: expected '-5', got '$NEGATIVE'"
            exit 1
          fi

          # Decimals should preserve precision
          DECIMAL=$(echo "$OUTPUT" | jq -r '.decimal')
          if [ "$DECIMAL" != "3.14" ]; then
            echo "❌ Decimal: expected '3.14', got '$DECIMAL'"
            exit 1
          fi

          echo "✅ All edge cases handled correctly"

  test-name-sanitization:
    name: Test Output Name Sanitization
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4

      - name: Test special characters in field names
        id: test
        uses: ./base-action
        with:
          prompt: |
            Run: echo "test"
            Return EXACTLY: {test-result: "passed", item_count: 10}
          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
          claude_args: |
            --allowedTools Bash
            --json-schema '{"type":"object","properties":{"test-result":{"type":"string"},"item_count":{"type":"number"}},"required":["test-result","item_count"]}'

      - name: Verify sanitized names work
        run: |
          # Parse the structured_output JSON
          OUTPUT='${{ steps.test.outputs.structured_output }}'

          # Hyphens should be preserved in the JSON
          TEST_RESULT=$(echo "$OUTPUT" | jq -r '.["test-result"]')
          if [ "$TEST_RESULT" != "passed" ]; then
            echo "❌ Hyphenated name failed: expected 'passed', got '$TEST_RESULT'"
            exit 1
          fi

          # Underscores should work
          ITEM_COUNT=$(echo "$OUTPUT" | jq -r '.item_count')
          if [ "$ITEM_COUNT" != "10" ]; then
            echo "❌ Underscore name failed: expected '10', got '$ITEM_COUNT'"
            exit 1
          fi

          echo "✅ Name sanitization works"

  test-execution-file-structure:
    name: Test Execution File Format
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4

      - name: Run with structured output
        id: test
        uses: ./base-action
        with:
          prompt: "Run: echo 'complete'. Return: {done: true}"
          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
          claude_args: |
            --allowedTools Bash
            --json-schema '{"type":"object","properties":{"done":{"type":"boolean"}},"required":["done"]}'

      - name: Verify execution file contains structured_output
        run: |
          FILE="${{ steps.test.outputs.execution_file }}"

          # Check file exists
          if [ ! -f "$FILE" ]; then
            echo "❌ Execution file missing"
            exit 1
          fi

          # Check for structured_output field
          if ! jq -e '.[] | select(.type == "result") | .structured_output' "$FILE" > /dev/null; then
            echo "❌ No structured_output in execution file"
            cat "$FILE"
            exit 1
          fi

          # Verify the actual value
          DONE=$(jq -r '.[] | select(.type == "result") | .structured_output.done' "$FILE")
          if [ "$DONE" != "true" ]; then
            echo "❌ Wrong value in execution file"
            exit 1
          fi

          echo "✅ Execution file format correct"

  test-summary:
    name: Summary
    runs-on: ubuntu-latest
    needs:
      - test-basic-types
      - test-complex-types
      - test-edge-cases
      - test-name-sanitization
      - test-execution-file-structure
    if: always()
    steps:
      - name: Generate Summary
        run: |
          echo "# Structured Output Tests (Optimized)" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "Fast, deterministic tests using explicit prompts" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "| Test | Result |" >> $GITHUB_STEP_SUMMARY
          echo "|------|--------|" >> $GITHUB_STEP_SUMMARY
          echo "| Basic Types | ${{ needs.test-basic-types.result == 'success' && '✅ PASS' || '❌ FAIL' }} |" >> $GITHUB_STEP_SUMMARY
          echo "| Complex Types | ${{ needs.test-complex-types.result == 'success' && '✅ PASS' || '❌ FAIL' }} |" >> $GITHUB_STEP_SUMMARY
          echo "| Edge Cases | ${{ needs.test-edge-cases.result == 'success' && '✅ PASS' || '❌ FAIL' }} |" >> $GITHUB_STEP_SUMMARY
          echo "| Name Sanitization | ${{ needs.test-name-sanitization.result == 'success' && '✅ PASS' || '❌ FAIL' }} |" >> $GITHUB_STEP_SUMMARY
          echo "| Execution File | ${{ needs.test-execution-file-structure.result == 'success' && '✅ PASS' || '❌ FAIL' }} |" >> $GITHUB_STEP_SUMMARY

          # Check if all passed
          ALL_PASSED=${{
            needs.test-basic-types.result == 'success' &&
            needs.test-complex-types.result == 'success' &&
            needs.test-edge-cases.result == 'success' &&
            needs.test-name-sanitization.result == 'success' &&
            needs.test-execution-file-structure.result == 'success'
          }}

          if [ "$ALL_PASSED" = "true" ]; then
            echo "" >> $GITHUB_STEP_SUMMARY
            echo "## ✅ All Tests Passed" >> $GITHUB_STEP_SUMMARY
          else
            echo "" >> $GITHUB_STEP_SUMMARY
            echo "## ❌ Some Tests Failed" >> $GITHUB_STEP_SUMMARY
            exit 1
          fi
@@ -2,7 +2,7 @@

# Claude Code Action

A general-purpose [Claude Code](https://claude.ai/code) action for GitHub PRs and issues that can answer questions and implement code changes. This action intelligently detects when to activate based on your workflow context—whether responding to @claude mentions, issue assignments, or executing automation tasks with explicit prompts. It supports multiple authentication methods including Anthropic direct API, Amazon Bedrock, Google Vertex AI, and Microsoft Foundry.
A general-purpose [Claude Code](https://claude.ai/code) action for GitHub PRs and issues that can answer questions and implement code changes. This action intelligently detects when to activate based on your workflow context—whether responding to @claude mentions, issue assignments, or executing automation tasks with explicit prompts. It supports multiple authentication methods including Anthropic direct API, Amazon Bedrock, and Google Vertex AI.

## Features

@@ -13,7 +13,6 @@ A general-purpose [Claude Code](https://claude.ai/code) action for GitHub PRs an

- 💬 **PR/Issue Integration**: Works seamlessly with GitHub comments and PR reviews
- 🛠️ **Flexible Tool Access**: Access to GitHub APIs and file operations (additional tools can be enabled via configuration)
- 📋 **Progress Tracking**: Visual progress indicators with checkboxes that dynamically update as Claude completes tasks
- 📊 **Structured Outputs**: Get validated JSON results that automatically become GitHub Action outputs for complex automations
- 🏃 **Runs on Your Infrastructure**: The action executes entirely on your own GitHub runner (Anthropic API calls go to your chosen provider)
- ⚙️ **Simplified Configuration**: Unified `prompt` and `claude_args` inputs provide clean, powerful configuration aligned with Claude Code SDK

@@ -30,7 +29,7 @@ This command will guide you through setting up the GitHub app and required secre

**Note**:

- You must be a repository admin to install the GitHub app and add secrets
- This quickstart method is only available for direct Anthropic API users. For AWS Bedrock, Google Vertex AI, or Microsoft Foundry setup, see [docs/cloud-providers.md](./docs/cloud-providers.md).
- This quickstart method is only available for direct Anthropic API users. For AWS Bedrock or Google Vertex AI setup, see [docs/cloud-providers.md](./docs/cloud-providers.md).

## 📚 Solutions & Use Cases

@@ -50,6 +49,7 @@ Each solution includes complete working examples, configuration details, and exp

## Documentation

- **[Examples](./examples/)** - **🔧 Complete workflow examples** - Ready-to-use GitHub Actions workflow files demonstrating various automation patterns
- **[Solutions Guide](./docs/solutions.md)** - **🎯 Ready-to-use automation patterns**
- **[Migration Guide](./docs/migration-guide.md)** - **⭐ Upgrading from v0.x to v1.0**
- [Setup Guide](./docs/setup.md) - Manual setup, custom GitHub apps, and security best practices

@@ -57,7 +57,7 @@ Each solution includes complete working examples, configuration details, and exp

- [Custom Automations](./docs/custom-automations.md) - Examples of automated workflows and custom prompts
- [Configuration](./docs/configuration.md) - MCP servers, permissions, environment variables, and advanced settings
- [Experimental Features](./docs/experimental.md) - Execution modes and network restrictions
- [Cloud Providers](./docs/cloud-providers.md) - AWS Bedrock, Google Vertex AI, and Microsoft Foundry setup
- [Cloud Providers](./docs/cloud-providers.md) - AWS Bedrock and Google Vertex AI setup
- [Capabilities & Limitations](./docs/capabilities-and-limitations.md) - What Claude can and cannot do
- [Security](./docs/security.md) - Access control, permissions, and commit signing
- [FAQ](./docs/faq.md) - Common questions and troubleshooting
20 action.yml

@@ -44,7 +44,7 @@ inputs:

  # Auth configuration
  anthropic_api_key:
    description: "Anthropic API key (required for direct API, not needed for Bedrock/Vertex/Foundry)"
    description: "Anthropic API key (required for direct API, not needed for Bedrock/Vertex)"
    required: false
  claude_code_oauth_token:
    description: "Claude Code OAuth token (alternative to anthropic_api_key)"

@@ -60,10 +60,6 @@ inputs:

    description: "Use Google Vertex AI with OIDC authentication instead of direct Anthropic API"
    required: false
    default: "false"
  use_foundry:
    description: "Use Microsoft Foundry with OIDC authentication instead of direct Anthropic API"
    required: false
    default: "false"

  claude_args:
    description: "Additional arguments to pass directly to Claude CLI"

@@ -128,9 +124,6 @@ outputs:

  github_token:
    description: "The GitHub token used by the action (Claude App token if available)"
    value: ${{ steps.prepare.outputs.github_token }}
  structured_output:
    description: "JSON string containing all structured output fields when --json-schema is provided in claude_args. Use fromJSON() to parse: fromJSON(steps.id.outputs.structured_output).field_name"
    value: ${{ steps.claude-code.outputs.structured_output }}

runs:
  using: "composite"

@@ -196,7 +189,7 @@ runs:

        # Install Claude Code if no custom executable is provided
        if [ -z "${{ inputs.path_to_claude_code_executable }}" ]; then
          echo "Installing Claude Code..."
          curl -fsSL https://claude.ai/install.sh | bash -s 2.0.49
          curl -fsSL https://claude.ai/install.sh | bash -s 2.0.37
          echo "$HOME/.local/bin" >> "$GITHUB_PATH"
        else
          echo "Using custom Claude Code executable: ${{ inputs.path_to_claude_code_executable }}"

@@ -248,14 +241,12 @@ runs:

        ANTHROPIC_CUSTOM_HEADERS: ${{ env.ANTHROPIC_CUSTOM_HEADERS }}
        CLAUDE_CODE_USE_BEDROCK: ${{ inputs.use_bedrock == 'true' && '1' || '' }}
        CLAUDE_CODE_USE_VERTEX: ${{ inputs.use_vertex == 'true' && '1' || '' }}
        CLAUDE_CODE_USE_FOUNDRY: ${{ inputs.use_foundry == 'true' && '1' || '' }}

        # AWS configuration
        AWS_REGION: ${{ env.AWS_REGION }}
        AWS_ACCESS_KEY_ID: ${{ env.AWS_ACCESS_KEY_ID }}
        AWS_SECRET_ACCESS_KEY: ${{ env.AWS_SECRET_ACCESS_KEY }}
        AWS_SESSION_TOKEN: ${{ env.AWS_SESSION_TOKEN }}
        AWS_BEARER_TOKEN_BEDROCK: ${{ env.AWS_BEARER_TOKEN_BEDROCK }}
        ANTHROPIC_BEDROCK_BASE_URL: ${{ env.ANTHROPIC_BEDROCK_BASE_URL || (env.AWS_REGION && format('https://bedrock-runtime.{0}.amazonaws.com', env.AWS_REGION)) }}

        # GCP configuration

@@ -269,13 +260,6 @@ runs:

        VERTEX_REGION_CLAUDE_3_5_SONNET: ${{ env.VERTEX_REGION_CLAUDE_3_5_SONNET }}
        VERTEX_REGION_CLAUDE_3_7_SONNET: ${{ env.VERTEX_REGION_CLAUDE_3_7_SONNET }}

        # Microsoft Foundry configuration
        ANTHROPIC_FOUNDRY_RESOURCE: ${{ env.ANTHROPIC_FOUNDRY_RESOURCE }}
        ANTHROPIC_FOUNDRY_BASE_URL: ${{ env.ANTHROPIC_FOUNDRY_BASE_URL }}
        ANTHROPIC_DEFAULT_SONNET_MODEL: ${{ env.ANTHROPIC_DEFAULT_SONNET_MODEL }}
        ANTHROPIC_DEFAULT_HAIKU_MODEL: ${{ env.ANTHROPIC_DEFAULT_HAIKU_MODEL }}
        ANTHROPIC_DEFAULT_OPUS_MODEL: ${{ env.ANTHROPIC_DEFAULT_OPUS_MODEL }}

    - name: Update comment with job link
      if: steps.prepare.outputs.contains_trigger == 'true' && steps.prepare.outputs.claude_comment_id && always()
      shell: bash
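The `structured_output` output described in the hunk above is exposed as a single JSON string. A minimal sketch of a downstream step consuming it, assuming the action ran in a step with `id: claude` and the configured schema includes a `summary` string field (both names are illustrative, not taken from this diff):

```yaml
- name: Use structured output
  # Hypothetical consumer step: "claude" is the id of the step that ran the action,
  # and "summary" is a field assumed to exist in the --json-schema you supplied.
  env:
    RESULT_JSON: ${{ steps.claude.outputs.structured_output }}
  run: |
    # The whole payload is one JSON string; jq pulls out individual fields.
    echo "$RESULT_JSON" | jq -r '.summary'
```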
@@ -57,7 +57,6 @@ Thank you for your interest in contributing to Claude Code Base Action! This doc

```

This script:

- Installs `act` if not present (requires Homebrew on macOS)
- Runs the GitHub Action workflow locally using Docker
- Requires your `ANTHROPIC_API_KEY` to be set
@@ -490,7 +490,6 @@ This example shows how to use OIDC authentication with GCP Vertex AI:

To securely use your Anthropic API key:

1. Add your API key as a repository secret:

   - Go to your repository's Settings
   - Navigate to "Secrets and variables" → "Actions"
   - Click "New repository secret"
@@ -42,10 +42,6 @@ inputs:

    description: "Use Google Vertex AI with OIDC authentication instead of direct Anthropic API"
    required: false
    default: "false"
  use_foundry:
    description: "Use Microsoft Foundry with OIDC authentication instead of direct Anthropic API"
    required: false
    default: "false"

  use_node_cache:
    description: "Whether to use Node.js dependency caching (set to true only for Node.js projects with lock files)"

@@ -79,9 +75,6 @@ outputs:

  execution_file:
    description: "Path to the JSON file containing Claude Code execution log"
    value: ${{ steps.run_claude.outputs.execution_file }}
  structured_output:
    description: "JSON string containing all structured output fields when --json-schema is provided in claude_args (use fromJSON() or jq to parse)"
    value: ${{ steps.run_claude.outputs.structured_output }}

runs:
  using: "composite"

@@ -118,7 +111,7 @@ runs:

      run: |
        if [ -z "${{ inputs.path_to_claude_code_executable }}" ]; then
          echo "Installing Claude Code..."
          curl -fsSL https://claude.ai/install.sh | bash -s 2.0.49
          curl -fsSL https://claude.ai/install.sh | bash -s 2.0.37
        else
          echo "Using custom Claude Code executable: ${{ inputs.path_to_claude_code_executable }}"
          # Add the directory containing the custom executable to PATH

@@ -157,14 +150,12 @@ runs:

        # Only set provider flags if explicitly true, since any value (including "false") is truthy
        CLAUDE_CODE_USE_BEDROCK: ${{ inputs.use_bedrock == 'true' && '1' || '' }}
        CLAUDE_CODE_USE_VERTEX: ${{ inputs.use_vertex == 'true' && '1' || '' }}
        CLAUDE_CODE_USE_FOUNDRY: ${{ inputs.use_foundry == 'true' && '1' || '' }}

        # AWS configuration
        AWS_REGION: ${{ env.AWS_REGION }}
        AWS_ACCESS_KEY_ID: ${{ env.AWS_ACCESS_KEY_ID }}
        AWS_SECRET_ACCESS_KEY: ${{ env.AWS_SECRET_ACCESS_KEY }}
        AWS_SESSION_TOKEN: ${{ env.AWS_SESSION_TOKEN }}
        AWS_BEARER_TOKEN_BEDROCK: ${{ env.AWS_BEARER_TOKEN_BEDROCK }}
        ANTHROPIC_BEDROCK_BASE_URL: ${{ env.ANTHROPIC_BEDROCK_BASE_URL || (env.AWS_REGION && format('https://bedrock-runtime.{0}.amazonaws.com', env.AWS_REGION)) }}

        # GCP configuration

@@ -172,10 +163,3 @@ runs:

        CLOUD_ML_REGION: ${{ env.CLOUD_ML_REGION }}
        GOOGLE_APPLICATION_CREDENTIALS: ${{ env.GOOGLE_APPLICATION_CREDENTIALS }}
        ANTHROPIC_VERTEX_BASE_URL: ${{ env.ANTHROPIC_VERTEX_BASE_URL }}

        # Microsoft Foundry configuration
        ANTHROPIC_FOUNDRY_RESOURCE: ${{ env.ANTHROPIC_FOUNDRY_RESOURCE }}
        ANTHROPIC_FOUNDRY_BASE_URL: ${{ env.ANTHROPIC_FOUNDRY_BASE_URL }}
        ANTHROPIC_DEFAULT_SONNET_MODEL: ${{ env.ANTHROPIC_DEFAULT_SONNET_MODEL }}
        ANTHROPIC_DEFAULT_HAIKU_MODEL: ${{ env.ANTHROPIC_DEFAULT_HAIKU_MODEL }}
        ANTHROPIC_DEFAULT_OPUS_MODEL: ${{ env.ANTHROPIC_DEFAULT_OPUS_MODEL }}
@@ -1,7 +1,7 @@

import * as core from "@actions/core";
import { exec } from "child_process";
import { promisify } from "util";
import { unlink, writeFile, stat, readFile } from "fs/promises";
import { unlink, writeFile, stat } from "fs/promises";
import { createWriteStream } from "fs";
import { spawn } from "child_process";
import { parse as parseShellArgs } from "shell-quote";

@@ -122,54 +122,9 @@ export function prepareRunConfig(

  };
}

/**
 * Parses structured_output from execution file and sets GitHub Action outputs
 * Only runs if --json-schema was explicitly provided in claude_args
 * Exported for testing
 */
export async function parseAndSetStructuredOutputs(
  executionFile: string,
): Promise<void> {
  try {
    const content = await readFile(executionFile, "utf-8");
    const messages = JSON.parse(content) as {
      type: string;
      structured_output?: Record<string, unknown>;
    }[];

    // Search backwards - result is typically last or second-to-last message
    const result = messages.findLast(
      (m) => m.type === "result" && m.structured_output,
    );

    if (!result?.structured_output) {
      throw new Error(
        `--json-schema was provided but Claude did not return structured_output.\n` +
          `Found ${messages.length} messages. Result exists: ${!!result}\n`,
      );
    }

    // Set the complete structured output as a single JSON string
    // This works around GitHub Actions limitation that composite actions can't have dynamic outputs
    const structuredOutputJson = JSON.stringify(result.structured_output);
    core.setOutput("structured_output", structuredOutputJson);
    core.info(
      `Set structured_output with ${Object.keys(result.structured_output).length} field(s)`,
    );
  } catch (error) {
    if (error instanceof Error) {
      throw error; // Preserve original error and stack trace
    }
    throw new Error(`Failed to parse structured outputs: ${error}`);
  }
}

export async function runClaude(promptPath: string, options: ClaudeOptions) {
  const config = prepareRunConfig(promptPath, options);

  // Detect if --json-schema is present in claude args
  const hasJsonSchema = options.claudeArgs?.includes("--json-schema") ?? false;

  // Create a named pipe
  try {
    await unlink(PIPE_PATH);

@@ -353,23 +308,8 @@ export async function runClaude(promptPath: string, options: ClaudeOptions) {

      core.warning(`Failed to process output for execution metrics: ${e}`);
    }

    core.setOutput("execution_file", EXECUTION_FILE);

    // Parse and set structured outputs only if user provided --json-schema in claude_args
    if (hasJsonSchema) {
      try {
        await parseAndSetStructuredOutputs(EXECUTION_FILE);
      } catch (error) {
        const errorMessage =
          error instanceof Error ? error.message : String(error);
        core.setFailed(errorMessage);
        core.setOutput("conclusion", "failure");
        process.exit(1);
      }
    }

    // Set conclusion to success if we reached here
    core.setOutput("conclusion", "success");
    core.setOutput("execution_file", EXECUTION_FILE);
  } else {
    core.setOutput("conclusion", "failure");
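As a cross-check on the parsing logic above, the same `result` message can be inspected by hand from the `execution_file` output. A small sketch, assuming the action step has `id: claude` (the jq filter mirrors the deleted test workflow earlier in this compare):

```yaml
- name: Inspect execution file
  run: |
    # The execution log is a JSON array of messages; the "result" message
    # carries the structured_output object that the action re-exposes.
    FILE="${{ steps.claude.outputs.execution_file }}"
    jq '.[] | select(.type == "result") | .structured_output' "$FILE"
```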
@@ -1,50 +1,39 @@

/**
 * Validates the environment variables required for running Claude Code
 * based on the selected provider (Anthropic API, AWS Bedrock, Google Vertex AI, or Microsoft Foundry)
 * based on the selected provider (Anthropic API, AWS Bedrock, or Google Vertex AI)
 */
export function validateEnvironmentVariables() {
  const useBedrock = process.env.CLAUDE_CODE_USE_BEDROCK === "1";
  const useVertex = process.env.CLAUDE_CODE_USE_VERTEX === "1";
  const useFoundry = process.env.CLAUDE_CODE_USE_FOUNDRY === "1";
  const anthropicApiKey = process.env.ANTHROPIC_API_KEY;
  const claudeCodeOAuthToken = process.env.CLAUDE_CODE_OAUTH_TOKEN;

  const errors: string[] = [];

  // Check for mutual exclusivity between providers
  const activeProviders = [useBedrock, useVertex, useFoundry].filter(Boolean);
  if (activeProviders.length > 1) {
  if (useBedrock && useVertex) {
    errors.push(
      "Cannot use multiple providers simultaneously. Please set only one of: CLAUDE_CODE_USE_BEDROCK, CLAUDE_CODE_USE_VERTEX, or CLAUDE_CODE_USE_FOUNDRY.",
      "Cannot use both Bedrock and Vertex AI simultaneously. Please set only one provider.",
    );
  }

  if (!useBedrock && !useVertex && !useFoundry) {
  if (!useBedrock && !useVertex) {
    if (!anthropicApiKey && !claudeCodeOAuthToken) {
      errors.push(
        "Either ANTHROPIC_API_KEY or CLAUDE_CODE_OAUTH_TOKEN is required when using direct Anthropic API.",
      );
    }
  } else if (useBedrock) {
    const awsRegion = process.env.AWS_REGION;
    const awsAccessKeyId = process.env.AWS_ACCESS_KEY_ID;
    const awsSecretAccessKey = process.env.AWS_SECRET_ACCESS_KEY;
    const awsBearerToken = process.env.AWS_BEARER_TOKEN_BEDROCK;
    const requiredBedrockVars = {
      AWS_REGION: process.env.AWS_REGION,
      AWS_ACCESS_KEY_ID: process.env.AWS_ACCESS_KEY_ID,
      AWS_SECRET_ACCESS_KEY: process.env.AWS_SECRET_ACCESS_KEY,
    };

    // AWS_REGION is always required for Bedrock
    if (!awsRegion) {
      errors.push("AWS_REGION is required when using AWS Bedrock.");
    }

    // Either bearer token OR access key credentials must be provided
    const hasAccessKeyCredentials = awsAccessKeyId && awsSecretAccessKey;
    const hasBearerToken = awsBearerToken;

    if (!hasAccessKeyCredentials && !hasBearerToken) {
      errors.push(
        "Either AWS_BEARER_TOKEN_BEDROCK or both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are required when using AWS Bedrock.",
      );
    }
    Object.entries(requiredBedrockVars).forEach(([key, value]) => {
      if (!value) {
        errors.push(`${key} is required when using AWS Bedrock.`);
      }
    });
  } else if (useVertex) {
    const requiredVertexVars = {
      ANTHROPIC_VERTEX_PROJECT_ID: process.env.ANTHROPIC_VERTEX_PROJECT_ID,

@@ -56,16 +45,6 @@ export function validateEnvironmentVariables() {

        errors.push(`${key} is required when using Google Vertex AI.`);
      }
    });
  } else if (useFoundry) {
    const foundryResource = process.env.ANTHROPIC_FOUNDRY_RESOURCE;
    const foundryBaseUrl = process.env.ANTHROPIC_FOUNDRY_BASE_URL;

    // Either resource name or base URL is required
    if (!foundryResource && !foundryBaseUrl) {
      errors.push(
        "Either ANTHROPIC_FOUNDRY_RESOURCE or ANTHROPIC_FOUNDRY_BASE_URL is required when using Microsoft Foundry.",
      );
    }
  }

  if (errors.length > 0) {
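For reference, a minimal environment that would satisfy the Bedrock branch of this validation, assuming the variant of the check that accepts either a bearer token or an access-key pair (the secret name below is illustrative):

```yaml
# Illustrative only: env block for a workflow step running the base action with Bedrock.
env:
  CLAUDE_CODE_USE_BEDROCK: "1"
  AWS_REGION: us-east-1 # always required for Bedrock
  # Either a bearer token ...
  AWS_BEARER_TOKEN_BEDROCK: ${{ secrets.AWS_BEARER_TOKEN_BEDROCK }}
  # ... or both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY would pass the check.
```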
@@ -78,19 +78,5 @@ describe("prepareRunConfig", () => {

        "stream-json",
      ]);
    });

    test("should include json-schema flag when provided", () => {
      const options: ClaudeOptions = {
        claudeArgs:
          '--json-schema \'{"type":"object","properties":{"result":{"type":"boolean"}}}\'',
      };

      const prepared = prepareRunConfig("/tmp/test-prompt.txt", options);

      expect(prepared.claudeArgs).toContain("--json-schema");
      expect(prepared.claudeArgs).toContain(
        '{"type":"object","properties":{"result":{"type":"boolean"}}}',
      );
    });
  });
});
@@ -1,158 +0,0 @@

#!/usr/bin/env bun

import { describe, test, expect, afterEach, beforeEach, spyOn } from "bun:test";
import { writeFile, unlink } from "fs/promises";
import { tmpdir } from "os";
import { join } from "path";
import { parseAndSetStructuredOutputs } from "../src/run-claude";
import * as core from "@actions/core";

// Mock execution file path
const TEST_EXECUTION_FILE = join(tmpdir(), "test-execution-output.json");

// Helper to create mock execution file with structured output
async function createMockExecutionFile(
  structuredOutput?: Record<string, unknown>,
  includeResult: boolean = true,
): Promise<void> {
  const messages: any[] = [
    { type: "system", subtype: "init" },
    { type: "turn", content: "test" },
  ];

  if (includeResult) {
    messages.push({
      type: "result",
      cost_usd: 0.01,
      duration_ms: 1000,
      structured_output: structuredOutput,
    });
  }

  await writeFile(TEST_EXECUTION_FILE, JSON.stringify(messages));
}

// Spy on core functions
let setOutputSpy: any;
let infoSpy: any;

beforeEach(() => {
  setOutputSpy = spyOn(core, "setOutput").mockImplementation(() => {});
  infoSpy = spyOn(core, "info").mockImplementation(() => {});
});

describe("parseAndSetStructuredOutputs", () => {
  afterEach(async () => {
    setOutputSpy?.mockRestore();
    infoSpy?.mockRestore();
    try {
      await unlink(TEST_EXECUTION_FILE);
    } catch {
      // Ignore if file doesn't exist
    }
  });

  test("should set structured_output with valid data", async () => {
    await createMockExecutionFile({
      is_flaky: true,
      confidence: 0.85,
      summary: "Test looks flaky",
    });

    await parseAndSetStructuredOutputs(TEST_EXECUTION_FILE);

    expect(setOutputSpy).toHaveBeenCalledWith(
      "structured_output",
      '{"is_flaky":true,"confidence":0.85,"summary":"Test looks flaky"}',
    );
    expect(infoSpy).toHaveBeenCalledWith(
      "Set structured_output with 3 field(s)",
    );
  });

  test("should handle arrays and nested objects", async () => {
    await createMockExecutionFile({
      items: ["a", "b", "c"],
      config: { key: "value", nested: { deep: true } },
    });

    await parseAndSetStructuredOutputs(TEST_EXECUTION_FILE);

    const callArgs = setOutputSpy.mock.calls[0];
    expect(callArgs[0]).toBe("structured_output");
    const parsed = JSON.parse(callArgs[1]);
    expect(parsed).toEqual({
      items: ["a", "b", "c"],
      config: { key: "value", nested: { deep: true } },
    });
  });

  test("should handle special characters in field names", async () => {
    await createMockExecutionFile({
      "test-result": "passed",
      "item.count": 10,
      "user@email": "test",
    });

    await parseAndSetStructuredOutputs(TEST_EXECUTION_FILE);

    const callArgs = setOutputSpy.mock.calls[0];
    const parsed = JSON.parse(callArgs[1]);
    expect(parsed["test-result"]).toBe("passed");
    expect(parsed["item.count"]).toBe(10);
    expect(parsed["user@email"]).toBe("test");
  });

  test("should throw error when result exists but structured_output is undefined", async () => {
    const messages = [
      { type: "system", subtype: "init" },
      { type: "result", cost_usd: 0.01, duration_ms: 1000 },
    ];
    await writeFile(TEST_EXECUTION_FILE, JSON.stringify(messages));

    await expect(
      parseAndSetStructuredOutputs(TEST_EXECUTION_FILE),
    ).rejects.toThrow(
      "--json-schema was provided but Claude did not return structured_output",
    );
  });

  test("should throw error when no result message exists", async () => {
    const messages = [
      { type: "system", subtype: "init" },
      { type: "turn", content: "test" },
    ];
    await writeFile(TEST_EXECUTION_FILE, JSON.stringify(messages));

    await expect(
      parseAndSetStructuredOutputs(TEST_EXECUTION_FILE),
    ).rejects.toThrow(
      "--json-schema was provided but Claude did not return structured_output",
    );
  });

  test("should throw error with malformed JSON", async () => {
    await writeFile(TEST_EXECUTION_FILE, "{ invalid json");

    await expect(
      parseAndSetStructuredOutputs(TEST_EXECUTION_FILE),
    ).rejects.toThrow();
  });

  test("should throw error when file does not exist", async () => {
    await expect(
      parseAndSetStructuredOutputs("/nonexistent/file.json"),
    ).rejects.toThrow();
  });

  test("should handle empty structured_output object", async () => {
    await createMockExecutionFile({});

    await parseAndSetStructuredOutputs(TEST_EXECUTION_FILE);

    expect(setOutputSpy).toHaveBeenCalledWith("structured_output", "{}");
    expect(infoSpy).toHaveBeenCalledWith(
      "Set structured_output with 0 field(s)",
    );
  });
});
@@ -13,19 +13,15 @@ describe("validateEnvironmentVariables", () => {

    delete process.env.ANTHROPIC_API_KEY;
    delete process.env.CLAUDE_CODE_USE_BEDROCK;
    delete process.env.CLAUDE_CODE_USE_VERTEX;
    delete process.env.CLAUDE_CODE_USE_FOUNDRY;
    delete process.env.AWS_REGION;
    delete process.env.AWS_ACCESS_KEY_ID;
    delete process.env.AWS_SECRET_ACCESS_KEY;
    delete process.env.AWS_SESSION_TOKEN;
    delete process.env.AWS_BEARER_TOKEN_BEDROCK;
    delete process.env.ANTHROPIC_BEDROCK_BASE_URL;
    delete process.env.ANTHROPIC_VERTEX_PROJECT_ID;
    delete process.env.CLOUD_ML_REGION;
    delete process.env.GOOGLE_APPLICATION_CREDENTIALS;
    delete process.env.ANTHROPIC_VERTEX_BASE_URL;
    delete process.env.ANTHROPIC_FOUNDRY_RESOURCE;
    delete process.env.ANTHROPIC_FOUNDRY_BASE_URL;
  });

  afterEach(() => {

@@ -96,58 +92,31 @@ describe("validateEnvironmentVariables", () => {

    );
  });

  test("should fail when only AWS_SECRET_ACCESS_KEY is provided without bearer token", () => {
  test("should fail when AWS_ACCESS_KEY_ID is missing", () => {
    process.env.CLAUDE_CODE_USE_BEDROCK = "1";
    process.env.AWS_REGION = "us-east-1";
    process.env.AWS_SECRET_ACCESS_KEY = "test-secret-key";

    expect(() => validateEnvironmentVariables()).toThrow(
      "Either AWS_BEARER_TOKEN_BEDROCK or both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are required when using AWS Bedrock.",
      "AWS_ACCESS_KEY_ID is required when using AWS Bedrock.",
    );
  });

  test("should fail when only AWS_ACCESS_KEY_ID is provided without bearer token", () => {
  test("should fail when AWS_SECRET_ACCESS_KEY is missing", () => {
    process.env.CLAUDE_CODE_USE_BEDROCK = "1";
    process.env.AWS_REGION = "us-east-1";
    process.env.AWS_ACCESS_KEY_ID = "test-access-key";

    expect(() => validateEnvironmentVariables()).toThrow(
      "Either AWS_BEARER_TOKEN_BEDROCK or both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are required when using AWS Bedrock.",
      "AWS_SECRET_ACCESS_KEY is required when using AWS Bedrock.",
    );
  });

  test("should pass when AWS_BEARER_TOKEN_BEDROCK is provided instead of access keys", () => {
    process.env.CLAUDE_CODE_USE_BEDROCK = "1";
    process.env.AWS_REGION = "us-east-1";
    process.env.AWS_BEARER_TOKEN_BEDROCK = "test-bearer-token";

    expect(() => validateEnvironmentVariables()).not.toThrow();
  });

  test("should pass when both bearer token and access keys are provided", () => {
    process.env.CLAUDE_CODE_USE_BEDROCK = "1";
    process.env.AWS_REGION = "us-east-1";
    process.env.AWS_BEARER_TOKEN_BEDROCK = "test-bearer-token";
    process.env.AWS_ACCESS_KEY_ID = "test-access-key";
    process.env.AWS_SECRET_ACCESS_KEY = "test-secret-key";

    expect(() => validateEnvironmentVariables()).not.toThrow();
  });

  test("should fail when no authentication method is provided", () => {
    process.env.CLAUDE_CODE_USE_BEDROCK = "1";
    process.env.AWS_REGION = "us-east-1";

    expect(() => validateEnvironmentVariables()).toThrow(
      "Either AWS_BEARER_TOKEN_BEDROCK or both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are required when using AWS Bedrock.",
    );
  });

  test("should report missing region and authentication", () => {
  test("should report all missing Bedrock variables", () => {
    process.env.CLAUDE_CODE_USE_BEDROCK = "1";

    expect(() => validateEnvironmentVariables()).toThrow(
      /AWS_REGION is required when using AWS Bedrock.*Either AWS_BEARER_TOKEN_BEDROCK or both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are required when using AWS Bedrock/s,
      /AWS_REGION is required when using AWS Bedrock.*AWS_ACCESS_KEY_ID is required when using AWS Bedrock.*AWS_SECRET_ACCESS_KEY is required when using AWS Bedrock/s,
    );
  });
});

@@ -198,56 +167,6 @@ describe("validateEnvironmentVariables", () => {

    });
  });

  describe("Microsoft Foundry", () => {
    test("should pass when ANTHROPIC_FOUNDRY_RESOURCE is provided", () => {
      process.env.CLAUDE_CODE_USE_FOUNDRY = "1";
      process.env.ANTHROPIC_FOUNDRY_RESOURCE = "test-resource";

      expect(() => validateEnvironmentVariables()).not.toThrow();
    });

    test("should pass when ANTHROPIC_FOUNDRY_BASE_URL is provided", () => {
      process.env.CLAUDE_CODE_USE_FOUNDRY = "1";
      process.env.ANTHROPIC_FOUNDRY_BASE_URL =
        "https://test-resource.services.ai.azure.com";

      expect(() => validateEnvironmentVariables()).not.toThrow();
    });

    test("should pass when both resource and base URL are provided", () => {
      process.env.CLAUDE_CODE_USE_FOUNDRY = "1";
      process.env.ANTHROPIC_FOUNDRY_RESOURCE = "test-resource";
      process.env.ANTHROPIC_FOUNDRY_BASE_URL =
        "https://custom.services.ai.azure.com";

      expect(() => validateEnvironmentVariables()).not.toThrow();
    });

    test("should construct Foundry base URL from resource name when ANTHROPIC_FOUNDRY_BASE_URL is not provided", () => {
      // This test verifies our action.yml change, which constructs:
      // ANTHROPIC_FOUNDRY_BASE_URL: ${{ env.ANTHROPIC_FOUNDRY_BASE_URL || (env.ANTHROPIC_FOUNDRY_RESOURCE && format('https://{0}.services.ai.azure.com', env.ANTHROPIC_FOUNDRY_RESOURCE)) }}

      process.env.CLAUDE_CODE_USE_FOUNDRY = "1";
      process.env.ANTHROPIC_FOUNDRY_RESOURCE = "my-foundry-resource";
      // ANTHROPIC_FOUNDRY_BASE_URL is intentionally not set

      // The actual URL construction happens in the composite action in action.yml
      // This test is a placeholder to document the behavior
      expect(() => validateEnvironmentVariables()).not.toThrow();

      // In the actual action, ANTHROPIC_FOUNDRY_BASE_URL would be:
      // https://my-foundry-resource.services.ai.azure.com
    });

    test("should fail when neither ANTHROPIC_FOUNDRY_RESOURCE nor ANTHROPIC_FOUNDRY_BASE_URL is provided", () => {
      process.env.CLAUDE_CODE_USE_FOUNDRY = "1";

      expect(() => validateEnvironmentVariables()).toThrow(
        "Either ANTHROPIC_FOUNDRY_RESOURCE or ANTHROPIC_FOUNDRY_BASE_URL is required when using Microsoft Foundry.",
      );
    });
  });

  describe("Multiple providers", () => {
    test("should fail when both Bedrock and Vertex are enabled", () => {
      process.env.CLAUDE_CODE_USE_BEDROCK = "1";

@@ -260,51 +179,7 @@ describe("validateEnvironmentVariables", () => {

      process.env.CLOUD_ML_REGION = "us-central1";

      expect(() => validateEnvironmentVariables()).toThrow(
        "Cannot use multiple providers simultaneously. Please set only one of: CLAUDE_CODE_USE_BEDROCK, CLAUDE_CODE_USE_VERTEX, or CLAUDE_CODE_USE_FOUNDRY.",
      );
    });

    test("should fail when both Bedrock and Foundry are enabled", () => {
      process.env.CLAUDE_CODE_USE_BEDROCK = "1";
      process.env.CLAUDE_CODE_USE_FOUNDRY = "1";
      // Provide all required vars to isolate the mutual exclusion error
      process.env.AWS_REGION = "us-east-1";
      process.env.AWS_ACCESS_KEY_ID = "test-access-key";
      process.env.AWS_SECRET_ACCESS_KEY = "test-secret-key";
      process.env.ANTHROPIC_FOUNDRY_RESOURCE = "test-resource";

      expect(() => validateEnvironmentVariables()).toThrow(
        "Cannot use multiple providers simultaneously. Please set only one of: CLAUDE_CODE_USE_BEDROCK, CLAUDE_CODE_USE_VERTEX, or CLAUDE_CODE_USE_FOUNDRY.",
      );
    });

    test("should fail when both Vertex and Foundry are enabled", () => {
      process.env.CLAUDE_CODE_USE_VERTEX = "1";
      process.env.CLAUDE_CODE_USE_FOUNDRY = "1";
      // Provide all required vars to isolate the mutual exclusion error
      process.env.ANTHROPIC_VERTEX_PROJECT_ID = "test-project";
      process.env.CLOUD_ML_REGION = "us-central1";
      process.env.ANTHROPIC_FOUNDRY_RESOURCE = "test-resource";

      expect(() => validateEnvironmentVariables()).toThrow(
        "Cannot use multiple providers simultaneously. Please set only one of: CLAUDE_CODE_USE_BEDROCK, CLAUDE_CODE_USE_VERTEX, or CLAUDE_CODE_USE_FOUNDRY.",
      );
    });

    test("should fail when all three providers are enabled", () => {
      process.env.CLAUDE_CODE_USE_BEDROCK = "1";
      process.env.CLAUDE_CODE_USE_VERTEX = "1";
      process.env.CLAUDE_CODE_USE_FOUNDRY = "1";
      // Provide all required vars to isolate the mutual exclusion error
      process.env.AWS_REGION = "us-east-1";
      process.env.AWS_ACCESS_KEY_ID = "test-access-key";
      process.env.AWS_SECRET_ACCESS_KEY = "test-secret-key";
      process.env.ANTHROPIC_VERTEX_PROJECT_ID = "test-project";
      process.env.CLOUD_ML_REGION = "us-central1";
      process.env.ANTHROPIC_FOUNDRY_RESOURCE = "test-resource";

      expect(() => validateEnvironmentVariables()).toThrow(
        "Cannot use multiple providers simultaneously. Please set only one of: CLAUDE_CODE_USE_BEDROCK, CLAUDE_CODE_USE_VERTEX, or CLAUDE_CODE_USE_FOUNDRY.",
        "Cannot use both Bedrock and Vertex AI simultaneously. Please set only one provider.",
      );
    });
  });

@@ -329,7 +204,10 @@ describe("validateEnvironmentVariables", () => {

      " - AWS_REGION is required when using AWS Bedrock.",
    );
    expect(error!.message).toContain(
      " - Either AWS_BEARER_TOKEN_BEDROCK or both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are required when using AWS Bedrock.",
      " - AWS_ACCESS_KEY_ID is required when using AWS Bedrock.",
    );
    expect(error!.message).toContain(
      " - AWS_SECRET_ACCESS_KEY is required when using AWS Bedrock.",
    );
  });
});
@@ -1,17 +1,16 @@

# Cloud Providers

You can authenticate with Claude using any of these four methods:
You can authenticate with Claude using any of these three methods:

1. Direct Anthropic API (default)
2. Amazon Bedrock with OIDC authentication
3. Google Vertex AI with OIDC authentication
4. Microsoft Foundry with OIDC authentication

For detailed setup instructions for AWS Bedrock and Google Vertex AI, see the [official documentation](https://docs.anthropic.com/en/docs/claude-code/github-actions#using-with-aws-bedrock-%26-google-vertex-ai).

**Note**:

- Bedrock, Vertex, and Microsoft Foundry use OIDC authentication exclusively
- Bedrock and Vertex use OIDC authentication exclusively
- AWS Bedrock automatically uses cross-region inference profiles for certain models
- For cross-region inference profile models, you need to request and be granted access to the Claude models in all regions that the inference profile uses

@@ -41,19 +40,11 @@ Use provider-specific model names based on your chosen provider:

    claude_args: |
      --model claude-4-0-sonnet@20250805
    # ... other inputs

# For Microsoft Foundry with OIDC
- uses: anthropics/claude-code-action@v1
  with:
    use_foundry: "true"
    claude_args: |
      --model claude-sonnet-4-5
    # ... other inputs
```

## OIDC Authentication for Cloud Providers
## OIDC Authentication for Bedrock and Vertex

AWS Bedrock, GCP Vertex AI, and Microsoft Foundry all support OIDC authentication.
Both AWS Bedrock and GCP Vertex AI require OIDC authentication.

```yaml
# For AWS Bedrock with OIDC

@@ -106,36 +97,3 @@ AWS Bedrock, GCP Vertex AI, and Microsoft Foundry all support OIDC authenticatio

permissions:
  id-token: write # Required for OIDC
```

```yaml
# For Microsoft Foundry with OIDC
- name: Authenticate to Azure
  uses: azure/login@v2
  with:
    client-id: ${{ secrets.AZURE_CLIENT_ID }}
    tenant-id: ${{ secrets.AZURE_TENANT_ID }}
    subscription-id: ${{ secrets.AZURE_SUBSCRIPTION_ID }}

- name: Generate GitHub App token
  id: app-token
  uses: actions/create-github-app-token@v2
  with:
    app-id: ${{ secrets.APP_ID }}
    private-key: ${{ secrets.APP_PRIVATE_KEY }}

- uses: anthropics/claude-code-action@v1
  with:
    use_foundry: "true"
    claude_args: |
      --model claude-sonnet-4-5
    # ... other inputs
  env:
    ANTHROPIC_FOUNDRY_BASE_URL: https://my-resource.services.ai.azure.com

permissions:
  id-token: write # Required for OIDC
```

## Microsoft Foundry Setup

For detailed setup instructions for Microsoft Foundry, see the [official documentation](https://docs.anthropic.com/en/docs/claude-code/microsoft-foundry).
@@ -116,7 +116,6 @@ The `additional_permissions` input allows Claude to access GitHub Actions workfl

To allow Claude to view workflow run results, job logs, and CI status:

1. **Grant the necessary permission to your GitHub token**:

   - When using the default `GITHUB_TOKEN`, add the `actions: read` permission to your workflow:

     ```yaml
@@ -228,12 +228,10 @@ jobs:

The action now automatically detects the appropriate mode:

1. **If `prompt` is provided** → Runs in **automation mode**

   - Executes immediately without waiting for @claude mentions
   - Perfect for scheduled tasks, PR automation, etc.

2. **If no `prompt` but @claude is mentioned** → Runs in **interactive mode**

   - Waits for and responds to @claude mentions
   - Creates tracking comments with progress
@@ -38,7 +38,7 @@ The following permissions are requested but not yet actively used. These will en

## Commit Signing

Commits made by Claude through this action are no longer automatically signed with commit signatures. To enable commit signing set `use_commit_signing: True` in the workflow(s). This ensures the authenticity and integrity of commits, providing a verifiable trail of changes made by the action.
All commits made by Claude through this action are automatically signed with commit signatures. This ensures the authenticity and integrity of commits, providing a verifiable trail of changes made by the action.

## ⚠️ Authentication Protection
@@ -31,27 +31,23 @@ The fastest way to create a custom GitHub App is using our pre-configured manife

   **🚀 [Download the Quick Setup Tool](./create-app.html)** (Right-click → "Save Link As" or "Download Linked File")

   After downloading, open `create-app.html` in your web browser:

   - **For Personal Accounts:** Click the "Create App for Personal Account" button
   - **For Organizations:** Enter your organization name and click "Create App for Organization"

   The tool will automatically configure all required permissions and submit the manifest.

   Alternatively, you can use the manifest file directly:

   - Use the [`github-app-manifest.json`](../github-app-manifest.json) file from this repository
   - Visit https://github.com/settings/apps/new (for personal) or your organization's app settings
   - Look for the "Create from manifest" option and paste the JSON content

2. **Complete the creation flow:**

   - GitHub will show you a preview of the app configuration
   - Confirm the app name (you can customize it)
   - Click "Create GitHub App"
   - The app will be created with all required permissions automatically configured

3. **Generate and download a private key:**

   - After creating the app, you'll be redirected to the app settings
   - Scroll down to "Private keys"
   - Click "Generate a private key"

@@ -64,7 +60,6 @@ The fastest way to create a custom GitHub App is using our pre-configured manife

If you prefer to configure the app manually or need custom permissions:

1. **Create a new GitHub App:**

   - Go to https://github.com/settings/apps (for personal apps) or your organization's settings
   - Click "New GitHub App"
   - Configure the app with these minimum permissions:

@@ -77,19 +72,16 @@ If you prefer to configure the app manually or need custom permissions:

   - Create the app

2. **Generate and download a private key:**

   - After creating the app, scroll down to "Private keys"
   - Click "Generate a private key"
   - Download the `.pem` file (keep this secure!)

3. **Install the app on your repository:**

   - Go to the app's settings page
   - Click "Install App"
   - Select the repositories where you want to use Claude

4. **Add the app credentials to your repository secrets:**

   - Go to your repository's Settings → Secrets and variables → Actions
   - Add these secrets:
     - `APP_ID`: Your GitHub App's ID (found in the app settings)

@@ -138,7 +130,6 @@ For more information on creating GitHub Apps, see the [GitHub documentation](htt

To securely use your Anthropic API key:

1. Add your API key as a repository secret:

   - Go to your repository's Settings
   - Navigate to "Secrets and variables" → "Actions"
   - Click "New repository secret"
@@ -185,74 +185,6 @@ For a comprehensive guide on migrating from v0.x to v1.0, including step-by-step

Focus on the changed files in this PR.
```

## Structured Outputs

Get validated JSON results from Claude that automatically become GitHub Action outputs. This enables building complex automation workflows where Claude analyzes data and subsequent steps use the results.

### Basic Example

```yaml
- name: Detect flaky tests
  id: analyze
  uses: anthropics/claude-code-action@v1
  with:
    anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
    prompt: |
      Check the CI logs and determine if this is a flaky test.
      Return: is_flaky (boolean), confidence (0-1), summary (string)
    claude_args: |
      --json-schema '{"type":"object","properties":{"is_flaky":{"type":"boolean"},"confidence":{"type":"number"},"summary":{"type":"string"}},"required":["is_flaky"]}'

- name: Retry if flaky
  if: fromJSON(steps.analyze.outputs.structured_output).is_flaky == true
  run: gh workflow run CI
```

### How It Works

1. **Define Schema**: Provide a JSON schema via `--json-schema` flag in `claude_args`
2. **Claude Executes**: Claude uses tools to complete your task
3. **Validated Output**: Result is validated against your schema
4. **JSON Output**: All fields are returned in a single `structured_output` JSON string

### Accessing Structured Outputs

All structured output fields are available in the `structured_output` output as a JSON string:

**In GitHub Actions expressions:**

```yaml
if: fromJSON(steps.analyze.outputs.structured_output).is_flaky == true
run: |
  CONFIDENCE=${{ fromJSON(steps.analyze.outputs.structured_output).confidence }}
```

**In bash with jq:**

```yaml
- name: Process results
  run: |
    OUTPUT='${{ steps.analyze.outputs.structured_output }}'
    IS_FLAKY=$(echo "$OUTPUT" | jq -r '.is_flaky')
    SUMMARY=$(echo "$OUTPUT" | jq -r '.summary')
```

**Note**: Due to GitHub Actions limitations, composite actions cannot expose dynamic outputs. All fields are bundled in the single `structured_output` JSON string.

### Complete Example

See `examples/test-failure-analysis.yml` for a working example that:

- Detects flaky test failures
- Uses confidence thresholds in conditionals
- Auto-retries workflows
- Comments on PRs

### Documentation

For complete details on JSON Schema syntax and Agent SDK structured outputs:
https://docs.claude.com/en/docs/agent-sdk/structured-outputs

## Ways to Tag @claude

These examples show how to interact with Claude using comments in PRs and issues. By default, Claude will be triggered anytime you mention `@claude`, but you can customize the exact trigger phrase using the `trigger_phrase` input in the workflow.
@@ -1,114 +0,0 @@

name: Auto-Retry Flaky Tests

# This example demonstrates using structured outputs to detect flaky test failures
# and automatically retry them, reducing noise from intermittent failures.
#
# Use case: When CI fails, automatically determine if it's likely flaky and retry if so.

on:
  workflow_run:
    workflows: ["CI"]
    types: [completed]

permissions:
  contents: read
  actions: write

jobs:
  detect-flaky:
    runs-on: ubuntu-latest
    if: ${{ github.event.workflow_run.conclusion == 'failure' }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Detect flaky test failures
        id: detect
        uses: anthropics/claude-code-action@main
        with:
          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
          prompt: |
            The CI workflow failed: ${{ github.event.workflow_run.html_url }}

            Check the logs: gh run view ${{ github.event.workflow_run.id }} --log-failed

            Determine if this looks like a flaky test failure by checking for:
            - Timeout errors
            - Race conditions
            - Network errors
            - "Expected X but got Y" intermittent failures
            - Tests that passed in previous commits

            Return:
            - is_flaky: true if likely flaky, false if real bug
            - confidence: number 0-1 indicating confidence level
            - summary: brief one-sentence explanation
          claude_args: |
            --json-schema '{"type":"object","properties":{"is_flaky":{"type":"boolean","description":"Whether this appears to be a flaky test failure"},"confidence":{"type":"number","minimum":0,"maximum":1,"description":"Confidence level in the determination"},"summary":{"type":"string","description":"One-sentence explanation of the failure"}},"required":["is_flaky","confidence","summary"]}'

      # Auto-retry only if flaky AND high confidence (>= 0.7)
      - name: Retry flaky tests
        if: |
          fromJSON(steps.detect.outputs.structured_output).is_flaky == true &&
          fromJSON(steps.detect.outputs.structured_output).confidence >= 0.7
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          OUTPUT='${{ steps.detect.outputs.structured_output }}'
          CONFIDENCE=$(echo "$OUTPUT" | jq -r '.confidence')
          SUMMARY=$(echo "$OUTPUT" | jq -r '.summary')

          echo "🔄 Flaky test detected (confidence: $CONFIDENCE)"
          echo "Summary: $SUMMARY"
          echo ""
          echo "Triggering automatic retry..."

          gh workflow run "${{ github.event.workflow_run.name }}" \
            --ref "${{ github.event.workflow_run.head_branch }}"

      # Low confidence flaky detection - skip retry
      - name: Low confidence detection
        if: |
          fromJSON(steps.detect.outputs.structured_output).is_flaky == true &&
          fromJSON(steps.detect.outputs.structured_output).confidence < 0.7
        run: |
          OUTPUT='${{ steps.detect.outputs.structured_output }}'
          CONFIDENCE=$(echo "$OUTPUT" | jq -r '.confidence')

          echo "⚠️ Possible flaky test but confidence too low ($CONFIDENCE)"
          echo "Not retrying automatically - manual review recommended"

      # Comment on PR if this was a PR build
      - name: Comment on PR
        if: github.event.workflow_run.event == 'pull_request'
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          OUTPUT='${{ steps.detect.outputs.structured_output }}'
          IS_FLAKY=$(echo "$OUTPUT" | jq -r '.is_flaky')
          CONFIDENCE=$(echo "$OUTPUT" | jq -r '.confidence')
          SUMMARY=$(echo "$OUTPUT" | jq -r '.summary')

          pr_number=$(gh pr list --head "${{ github.event.workflow_run.head_branch }}" --json number --jq '.[0].number')

          if [ -n "$pr_number" ]; then
            if [ "$IS_FLAKY" = "true" ]; then
              TITLE="🔄 Flaky Test Detected"
              ACTION="✅ Automatically retrying the workflow"
            else
              TITLE="❌ Test Failure"
              ACTION="⚠️ This appears to be a real bug - manual intervention needed"
            fi

            gh pr comment "$pr_number" --body "$(cat <<EOF
          ## $TITLE

          **Analysis**: $SUMMARY
          **Confidence**: $CONFIDENCE

          $ACTION

          [View workflow run](${{ github.event.workflow_run.html_url }})
          EOF
            )"
          fi