feat: integrate claude-code-base-action as local subaction (#285)

* feat: integrate claude-code-base-action as local subaction

- Copy claude-code-base-action into base-action/ directory
- Update action.yml to reference ./base-action instead of external repo
- Preserve complete base action structure for future refactoring

This eliminates the external dependency while maintaining modularity.

* feat: consolidate CI workflows and add version bump workflow

- Move base-action test workflows to main .github/workflows/
- Update workflow references to use ./base-action
- Add CI jobs for base-action (test, typecheck, prettier)
- Add bump-claude-code-version workflow for base-action
- Remove redundant .github directory from base-action

This consolidates all CI workflows in one place while maintaining
full test coverage for both the main action and base-action.

* tsc

* copy again

* fix tests

* fix: use absolute path for base-action reference

Replace relative path ./base-action with ${{ github.action_path }}/base-action
to ensure the action works correctly when used in other repositories.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

* fix: inline base-action execution to support usage in other repos

Replace uses: ./base-action with direct shell execution since GitHub Actions
doesn't support dynamic paths in composite actions. This ensures the action
works correctly when used in other repositories.

Changes:
- Install Claude Code globally before execution
- Run base-action's index.ts directly with bun
- Pass all required INPUT_* environment variables
- Maintain base-action for future separate publishing
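
A condensed sketch of the inlined step (the full version, with the complete set of INPUT_* variables, appears in the action.yml diff below):

```yaml
- name: Run Claude Code
  shell: bash
  run: |
    # Install Claude Code globally, then run the vendored base-action directly.
    npm install -g @anthropic-ai/claude-code@1.0.53
    cd ${GITHUB_ACTION_PATH}/base-action
    bun install
    bun run src/index.ts
  env:
    # Only a few of the INPUT_* variables are shown here.
    INPUT_PROMPT_FILE: ${{ runner.temp }}/claude-prompts/claude-prompt.txt
    INPUT_TIMEOUT_MINUTES: ${{ inputs.timeout_minutes }}
    ANTHROPIC_API_KEY: ${{ inputs.anthropic_api_key }}
```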

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>

---------

Co-authored-by: Claude <noreply@anthropic.com>
Author: Ashwin Bhat
Date: 2025-07-18 13:52:56 -07:00
Committed by: GitHub
Parent: 00b4a23551
Commit: 8335bda243
38 changed files with 3586 additions and 17 deletions


@@ -0,0 +1,132 @@
name: Bump Claude Code Version
on:
repository_dispatch:
types: [bump_claude_code_version]
workflow_dispatch:
inputs:
version:
description: "Claude Code version to bump to"
required: true
type: string
permissions:
contents: write
jobs:
bump-version:
name: Bump Claude Code Version
runs-on: ubuntu-latest
environment: release
timeout-minutes: 5
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4
with:
token: ${{ secrets.RELEASE_PAT }}
fetch-depth: 0
- name: Get version from event payload
id: get_version
run: |
# Get version from either repository_dispatch or workflow_dispatch
if [ "${{ github.event_name }}" = "repository_dispatch" ]; then
NEW_VERSION="${CLIENT_PAYLOAD_VERSION}"
else
NEW_VERSION="${INPUT_VERSION}"
fi
# Sanitize the version to avoid issues caused by problematic characters
NEW_VERSION=$(echo "$NEW_VERSION" | tr -d '`;$(){}[]|&<>' | tr -s ' ' '-')
if [ -z "$NEW_VERSION" ]; then
echo "Error: version not provided"
exit 1
fi
echo "NEW_VERSION=$NEW_VERSION" >> $GITHUB_ENV
echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT
env:
INPUT_VERSION: ${{ inputs.version }}
CLIENT_PAYLOAD_VERSION: ${{ github.event.client_payload.version }}
- name: Create branch and update base-action/action.yml
run: |
# Variables
TIMESTAMP=$(date +'%Y%m%d-%H%M%S')
BRANCH_NAME="bump-claude-code-${{ env.NEW_VERSION }}-$TIMESTAMP"
echo "BRANCH_NAME=$BRANCH_NAME" >> $GITHUB_ENV
# Get the default branch
DEFAULT_BRANCH=$(gh api repos/${GITHUB_REPOSITORY} --jq '.default_branch')
echo "DEFAULT_BRANCH=$DEFAULT_BRANCH" >> $GITHUB_ENV
# Get the latest commit SHA from the default branch
BASE_SHA=$(gh api repos/${GITHUB_REPOSITORY}/git/refs/heads/$DEFAULT_BRANCH --jq '.object.sha')
# Create a new branch
gh api \
--method POST \
repos/${GITHUB_REPOSITORY}/git/refs \
-f ref="refs/heads/$BRANCH_NAME" \
-f sha="$BASE_SHA"
# Get the current base-action/action.yml content
ACTION_CONTENT=$(gh api repos/${GITHUB_REPOSITORY}/contents/base-action/action.yml?ref=$DEFAULT_BRANCH --jq '.content' | base64 -d)
# Update the Claude Code version in the npm install command
UPDATED_CONTENT=$(echo "$ACTION_CONTENT" | sed -E "s/(npm install -g @anthropic-ai\/claude-code@)[0-9]+\.[0-9]+\.[0-9]+/\1${{ env.NEW_VERSION }}/")
# Verify the change would be made
if ! echo "$UPDATED_CONTENT" | grep -q "@anthropic-ai/claude-code@${{ env.NEW_VERSION }}"; then
echo "Error: Failed to update Claude Code version in content"
exit 1
fi
# Get the current SHA of base-action/action.yml for the update API call
FILE_SHA=$(gh api repos/${GITHUB_REPOSITORY}/contents/base-action/action.yml?ref=$DEFAULT_BRANCH --jq '.sha')
# Create the updated base-action/action.yml content in base64
echo "$UPDATED_CONTENT" | base64 > action.yml.b64
# Commit the updated base-action/action.yml via GitHub API
gh api \
--method PUT \
repos/${GITHUB_REPOSITORY}/contents/base-action/action.yml \
-f message="chore: bump Claude Code version to ${{ env.NEW_VERSION }}" \
-F content=@action.yml.b64 \
-f sha="$FILE_SHA" \
-f branch="$BRANCH_NAME"
echo "Successfully created branch and updated Claude Code version to ${{ env.NEW_VERSION }}"
env:
GH_TOKEN: ${{ secrets.RELEASE_PAT }}
GITHUB_REPOSITORY: ${{ github.repository }}
- name: Create Pull Request
run: |
# Determine trigger type for PR body
if [ "${{ github.event_name }}" = "repository_dispatch" ]; then
TRIGGER_INFO="repository dispatch event"
else
TRIGGER_INFO="manual workflow dispatch by @${GITHUB_ACTOR}"
fi
# Create PR body with proper YAML escape
printf -v PR_BODY "## Bump Claude Code to ${{ env.NEW_VERSION }}\n\nThis PR updates the Claude Code version in base-action/action.yml to ${{ env.NEW_VERSION }}.\n\n### Changes\n- Updated Claude Code version from current to \`${{ env.NEW_VERSION }}\`\n\n### Triggered by\n- $TRIGGER_INFO\n\n🤖 This PR was automatically created by the bump-claude-code-version workflow."
echo "Creating PR with gh pr create command"
PR_URL=$(gh pr create \
--repo "${GITHUB_REPOSITORY}" \
--title "chore: bump Claude Code version to ${{ env.NEW_VERSION }}" \
--body "$PR_BODY" \
--base "${DEFAULT_BRANCH}" \
--head "${BRANCH_NAME}")
echo "PR created successfully: $PR_URL"
env:
GH_TOKEN: ${{ secrets.RELEASE_PAT }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_ACTOR: ${{ github.actor }}
DEFAULT_BRANCH: ${{ env.DEFAULT_BRANCH }}
BRANCH_NAME: ${{ env.BRANCH_NAME }}

.github/workflows/test-base-action.yml (new file, 122 lines)

@@ -0,0 +1,122 @@
name: Test Claude Code Action
on:
push:
branches:
- main
pull_request:
workflow_dispatch:
inputs:
test_prompt:
description: "Test prompt for Claude"
required: false
default: "List the files in the current directory starting with 'package'"
jobs:
test-inline-prompt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Test with inline prompt
id: inline-test
uses: ./base-action
with:
prompt: ${{ github.event.inputs.test_prompt || 'List the files in the current directory starting with "package"' }}
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
allowed_tools: "LS,Read"
timeout_minutes: "3"
- name: Verify inline prompt output
run: |
OUTPUT_FILE="${{ steps.inline-test.outputs.execution_file }}"
CONCLUSION="${{ steps.inline-test.outputs.conclusion }}"
echo "Conclusion: $CONCLUSION"
echo "Output file: $OUTPUT_FILE"
if [ "$CONCLUSION" = "success" ]; then
echo "✅ Action completed successfully"
else
echo "❌ Action failed"
exit 1
fi
if [ -f "$OUTPUT_FILE" ]; then
if [ -s "$OUTPUT_FILE" ]; then
echo "✅ Execution log file created successfully with content"
echo "Validating JSON format:"
if jq . "$OUTPUT_FILE" > /dev/null 2>&1; then
echo "✅ Output is valid JSON"
echo "Content preview:"
head -c 200 "$OUTPUT_FILE"
else
echo "❌ Output is not valid JSON"
exit 1
fi
else
echo "❌ Execution log file is empty"
exit 1
fi
else
echo "❌ Execution log file not found"
exit 1
fi
test-prompt-file:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Create test prompt file
run: |
cat > test-prompt.txt << EOF
${PROMPT}
EOF
env:
PROMPT: ${{ github.event.inputs.test_prompt || 'List the files in the current directory starting with "package"' }}
- name: Test with prompt file and allowed tools
id: prompt-file-test
uses: ./base-action
with:
prompt_file: "test-prompt.txt"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
allowed_tools: "LS,Read"
timeout_minutes: "3"
- name: Verify prompt file output
run: |
OUTPUT_FILE="${{ steps.prompt-file-test.outputs.execution_file }}"
CONCLUSION="${{ steps.prompt-file-test.outputs.conclusion }}"
echo "Conclusion: $CONCLUSION"
echo "Output file: $OUTPUT_FILE"
if [ "$CONCLUSION" = "success" ]; then
echo "✅ Action completed successfully"
else
echo "❌ Action failed"
exit 1
fi
if [ -f "$OUTPUT_FILE" ]; then
if [ -s "$OUTPUT_FILE" ]; then
echo "✅ Execution log file created successfully with content"
echo "Validating JSON format:"
if jq . "$OUTPUT_FILE" > /dev/null 2>&1; then
echo "✅ Output is valid JSON"
echo "Content preview:"
head -c 200 "$OUTPUT_FILE"
else
echo "❌ Output is not valid JSON"
exit 1
fi
else
echo "❌ Execution log file is empty"
exit 1
fi
else
echo "❌ Execution log file not found"
exit 1
fi

.github/workflows/test-claude-env.yml (new file, 47 lines)

@@ -0,0 +1,47 @@
name: Test Claude Env Feature
on:
push:
branches:
- main
pull_request:
workflow_dispatch:
jobs:
test-claude-env-with-comments:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Test with comments in env
id: comment-test
uses: ./base-action
with:
prompt: |
Use the Bash tool to run: echo "VAR1: $VAR1" && echo "VAR2: $VAR2"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
claude_env: |
# This is a comment
VAR1: value1
# Another comment
VAR2: value2
# Empty lines above should be ignored
allowed_tools: "Bash(echo:*)"
timeout_minutes: "2"
- name: Verify comment handling
run: |
OUTPUT_FILE="${{ steps.comment-test.outputs.execution_file }}"
if [ "${{ steps.comment-test.outputs.conclusion }}" = "success" ]; then
echo "✅ Comments in claude_env handled correctly"
if grep -q "value1" "$OUTPUT_FILE" && grep -q "value2" "$OUTPUT_FILE"; then
echo "✅ Environment variables set correctly despite comments"
else
echo "❌ Environment variables not found"
exit 1
fi
else
echo "❌ Failed with comments in claude_env"
exit 1
fi

.github/workflows/test-mcp-servers.yml (new file, 160 lines)

@@ -0,0 +1,160 @@
name: Test MCP Servers
on:
push:
branches: [main]
pull_request:
branches: [main]
workflow_dispatch:
jobs:
test-mcp-integration:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4
- name: Setup Bun
uses: oven-sh/setup-bun@735343b667d3e6f658f44d0eca948eb6282f2b76 #v2
- name: Install dependencies
run: |
bun install
cd base-action/test/mcp-test
bun install
- name: Run Claude Code with MCP test
uses: ./base-action
id: claude-test
with:
prompt: "List all available tools"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
env:
# Change to test directory so it finds .mcp.json
CLAUDE_WORKING_DIR: ${{ github.workspace }}/base-action/test/mcp-test
- name: Check MCP server output
run: |
echo "Checking Claude output for MCP servers..."
# Parse the JSON output
OUTPUT_FILE="${RUNNER_TEMP}/claude-execution-output.json"
if [ ! -f "$OUTPUT_FILE" ]; then
echo "Error: Output file not found!"
exit 1
fi
echo "Output file contents:"
cat $OUTPUT_FILE
# Check if mcp_servers field exists in the init event
if jq -e '.[] | select(.type == "system" and .subtype == "init") | .mcp_servers' "$OUTPUT_FILE" > /dev/null; then
echo "✓ Found mcp_servers in output"
# Check if test-server is connected
if jq -e '.[] | select(.type == "system" and .subtype == "init") | .mcp_servers[] | select(.name == "test-server" and .status == "connected")' "$OUTPUT_FILE" > /dev/null; then
echo "✓ test-server is connected"
else
echo "✗ test-server not found or not connected"
jq '.[] | select(.type == "system" and .subtype == "init") | .mcp_servers' "$OUTPUT_FILE"
exit 1
fi
# Check if mcp tools are available
if jq -e '.[] | select(.type == "system" and .subtype == "init") | .tools[] | select(. == "mcp__test-server__test_tool")' "$OUTPUT_FILE" > /dev/null; then
echo "✓ MCP test tool found"
else
echo "✗ MCP test tool not found"
jq '.[] | select(.type == "system" and .subtype == "init") | .tools' "$OUTPUT_FILE"
exit 1
fi
else
echo "✗ No mcp_servers field found in init event"
jq '.[] | select(.type == "system" and .subtype == "init")' "$OUTPUT_FILE"
exit 1
fi
echo "✓ All MCP server checks passed!"
test-mcp-config-flag:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4
- name: Setup Bun
uses: oven-sh/setup-bun@735343b667d3e6f658f44d0eca948eb6282f2b76 #v2
- name: Install dependencies
run: |
bun install
cd base-action/test/mcp-test
bun install
- name: Debug environment paths (--mcp-config test)
run: |
echo "=== Environment Variables (--mcp-config test) ==="
echo "HOME: $HOME"
echo ""
echo "=== Expected Config Paths ==="
echo "GitHub action writes to: $HOME/.claude/settings.json"
echo "Claude should read from: $HOME/.claude/settings.json"
echo ""
echo "=== Actual File System ==="
ls -la $HOME/.claude/ || echo "No $HOME/.claude directory"
- name: Run Claude Code with --mcp-config flag
uses: ./base-action
id: claude-config-test
with:
prompt: "List all available tools"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
mcp_config: '{"mcpServers":{"test-server":{"type":"stdio","command":"bun","args":["simple-mcp-server.ts"],"env":{}}}}'
env:
# Change to test directory so bun can find the MCP server script
CLAUDE_WORKING_DIR: ${{ github.workspace }}/base-action/test/mcp-test
- name: Check MCP server output with --mcp-config
run: |
echo "Checking Claude output for MCP servers with --mcp-config flag..."
# Parse the JSON output
OUTPUT_FILE="${RUNNER_TEMP}/claude-execution-output.json"
if [ ! -f "$OUTPUT_FILE" ]; then
echo "Error: Output file not found!"
exit 1
fi
echo "Output file contents:"
cat $OUTPUT_FILE
# Check if mcp_servers field exists in the init event
if jq -e '.[] | select(.type == "system" and .subtype == "init") | .mcp_servers' "$OUTPUT_FILE" > /dev/null; then
echo "✓ Found mcp_servers in output"
# Check if test-server is connected
if jq -e '.[] | select(.type == "system" and .subtype == "init") | .mcp_servers[] | select(.name == "test-server" and .status == "connected")' "$OUTPUT_FILE" > /dev/null; then
echo "✓ test-server is connected"
else
echo "✗ test-server not found or not connected"
jq '.[] | select(.type == "system" and .subtype == "init") | .mcp_servers' "$OUTPUT_FILE"
exit 1
fi
# Check if mcp tools are available
if jq -e '.[] | select(.type == "system" and .subtype == "init") | .tools[] | select(. == "mcp__test-server__test_tool")' "$OUTPUT_FILE" > /dev/null; then
echo "✓ MCP test tool found"
else
echo "✗ MCP test tool not found"
jq '.[] | select(.type == "system" and .subtype == "init") | .tools' "$OUTPUT_FILE"
exit 1
fi
else
echo "✗ No mcp_servers field found in init event"
jq '.[] | select(.type == "system" and .subtype == "init")' "$OUTPUT_FILE"
exit 1
fi
echo "✓ All MCP server checks passed with --mcp-config flag!"

.github/workflows/test-settings.yml (new file, 185 lines)

@@ -0,0 +1,185 @@
name: Test Settings Feature
on:
push:
branches:
- main
pull_request:
workflow_dispatch:
jobs:
test-settings-inline-allow:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Test with inline settings JSON (echo allowed)
id: inline-settings-test
uses: ./base-action
with:
prompt: |
Use Bash to echo "Hello from settings test"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
settings: |
{
"permissions": {
"allow": ["Bash(echo:*)"]
}
}
timeout_minutes: "2"
- name: Verify echo worked
run: |
OUTPUT_FILE="${{ steps.inline-settings-test.outputs.execution_file }}"
CONCLUSION="${{ steps.inline-settings-test.outputs.conclusion }}"
echo "Conclusion: $CONCLUSION"
if [ "$CONCLUSION" = "success" ]; then
echo "✅ Action completed successfully"
else
echo "❌ Action failed"
exit 1
fi
# Check that permission was NOT denied
if grep -q "Permission to use Bash with command echo.*has been denied" "$OUTPUT_FILE"; then
echo "❌ Echo command was denied when it should have been allowed"
cat "$OUTPUT_FILE"
exit 1
fi
# Check if the echo command worked
if grep -q "Hello from settings test" "$OUTPUT_FILE"; then
echo "✅ Bash echo command worked (allowed by permissions)"
else
echo "❌ Bash echo command didn't work"
cat "$OUTPUT_FILE"
exit 1
fi
test-settings-inline-deny:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Test with inline settings JSON (echo denied)
id: inline-settings-test
uses: ./base-action
with:
prompt: |
Use Bash to echo "This should not work"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
settings: |
{
"permissions": {
"deny": ["Bash(echo:*)"]
}
}
timeout_minutes: "2"
- name: Verify echo was denied
run: |
OUTPUT_FILE="${{ steps.inline-settings-test.outputs.execution_file }}"
# Check that permission was denied in the tool_result
if grep -q "Permission to use Bash with command echo.*has been denied" "$OUTPUT_FILE"; then
echo "✅ Echo command was correctly denied by permissions"
else
echo "❌ Expected permission denied message not found"
cat "$OUTPUT_FILE"
exit 1
fi
test-settings-file-allow:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Create settings file (echo allowed)
run: |
cat > test-settings.json << EOF
{
"permissions": {
"allow": ["Bash(echo:*)"]
}
}
EOF
- name: Test with settings file
id: file-settings-test
uses: ./base-action
with:
prompt: |
Use Bash to echo "Hello from settings file test"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
settings: "test-settings.json"
timeout_minutes: "2"
- name: Verify echo worked
run: |
OUTPUT_FILE="${{ steps.file-settings-test.outputs.execution_file }}"
CONCLUSION="${{ steps.file-settings-test.outputs.conclusion }}"
echo "Conclusion: $CONCLUSION"
if [ "$CONCLUSION" = "success" ]; then
echo "✅ Action completed successfully"
else
echo "❌ Action failed"
exit 1
fi
# Check that permission was NOT denied
if grep -q "Permission to use Bash with command echo.*has been denied" "$OUTPUT_FILE"; then
echo "❌ Echo command was denied when it should have been allowed"
cat "$OUTPUT_FILE"
exit 1
fi
# Check if the echo command worked
if grep -q "Hello from settings file test" "$OUTPUT_FILE"; then
echo "✅ Bash echo command worked (allowed by permissions)"
else
echo "❌ Bash echo command didn't work"
cat "$OUTPUT_FILE"
exit 1
fi
test-settings-file-deny:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Create settings file (echo denied)
run: |
cat > test-settings.json << EOF
{
"permissions": {
"deny": ["Bash(echo:*)"]
}
}
EOF
- name: Test with settings file
id: file-settings-test
uses: ./base-action
with:
prompt: |
Use Bash to echo "This should not work from file"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
settings: "test-settings.json"
timeout_minutes: "2"
- name: Verify echo was denied
run: |
OUTPUT_FILE="${{ steps.file-settings-test.outputs.execution_file }}"
# Check that permission was denied in the tool_result
if grep -q "Permission to use Bash with command echo.*has been denied" "$OUTPUT_FILE"; then
echo "✅ Echo command was correctly denied by permissions"
else
echo "❌ Expected permission denied message not found"
cat "$OUTPUT_FILE"
exit 1
fi


@@ -185,30 +185,41 @@ runs:
- name: Run Claude Code
id: claude-code
if: steps.prepare.outputs.contains_trigger == 'true'
uses: anthropics/claude-code-base-action@03e2a2d6923a9187c8e93b04ef2f8dae3219d0b1 # v0.0.36
with:
prompt_file: ${{ runner.temp }}/claude-prompts/claude-prompt.txt
allowed_tools: ${{ env.ALLOWED_TOOLS }}
disallowed_tools: ${{ env.DISALLOWED_TOOLS }}
timeout_minutes: ${{ inputs.timeout_minutes }}
max_turns: ${{ inputs.max_turns }}
model: ${{ inputs.model || inputs.anthropic_model }}
fallback_model: ${{ inputs.fallback_model }}
mcp_config: ${{ steps.prepare.outputs.mcp_config }}
use_bedrock: ${{ inputs.use_bedrock }}
use_vertex: ${{ inputs.use_vertex }}
anthropic_api_key: ${{ inputs.anthropic_api_key }}
claude_code_oauth_token: ${{ inputs.claude_code_oauth_token }}
claude_env: ${{ inputs.claude_env }}
settings: ${{ inputs.settings }}
shell: bash
run: |
# Install Claude Code globally
npm install -g @anthropic-ai/claude-code@1.0.53
# Run the base-action
cd ${GITHUB_ACTION_PATH}/base-action
bun install
bun run src/index.ts
env:
# Base-action inputs
CLAUDE_CODE_ACTION: "1"
INPUT_PROMPT_FILE: ${{ runner.temp }}/claude-prompts/claude-prompt.txt
INPUT_ALLOWED_TOOLS: ${{ env.ALLOWED_TOOLS }}
INPUT_DISALLOWED_TOOLS: ${{ env.DISALLOWED_TOOLS }}
INPUT_MAX_TURNS: ${{ inputs.max_turns }}
INPUT_MCP_CONFIG: ${{ steps.prepare.outputs.mcp_config }}
INPUT_SETTINGS: ${{ inputs.settings }}
INPUT_SYSTEM_PROMPT: ""
INPUT_APPEND_SYSTEM_PROMPT: ""
INPUT_TIMEOUT_MINUTES: ${{ inputs.timeout_minutes }}
INPUT_CLAUDE_ENV: ${{ inputs.claude_env }}
INPUT_FALLBACK_MODEL: ${{ inputs.fallback_model }}
# Model configuration
ANTHROPIC_MODEL: ${{ inputs.model || inputs.anthropic_model }}
GITHUB_TOKEN: ${{ steps.prepare.outputs.GITHUB_TOKEN }}
NODE_VERSION: ${{ env.NODE_VERSION }}
# Provider configuration
ANTHROPIC_API_KEY: ${{ inputs.anthropic_api_key }}
CLAUDE_CODE_OAUTH_TOKEN: ${{ inputs.claude_code_oauth_token }}
ANTHROPIC_BASE_URL: ${{ env.ANTHROPIC_BASE_URL }}
CLAUDE_CODE_USE_BEDROCK: ${{ inputs.use_bedrock == 'true' && '1' || '' }}
CLAUDE_CODE_USE_VERTEX: ${{ inputs.use_vertex == 'true' && '1' || '' }}
# AWS configuration
AWS_REGION: ${{ env.AWS_REGION }}

base-action/.gitignore (new file, 4 lines)

@@ -0,0 +1,4 @@
.DS_Store
node_modules
**/.claude/settings.local.json

base-action/.npmrc (new file, 2 lines)

@@ -0,0 +1,2 @@
engine-strict=true
registry=https://registry.npmjs.org/

base-action/.prettierrc (new file, 1 line)

@@ -0,0 +1 @@
{}

base-action/CLAUDE.md (new file, 60 lines)

@@ -0,0 +1,60 @@
# CLAUDE.md
## Common Commands
### Development Commands
- Build/Type check: `bun run typecheck`
- Format code: `bun run format`
- Check formatting: `bun run format:check`
- Run tests: `bun test`
- Install dependencies: `bun install`
### Action Testing
- Test action locally: `./test-local.sh`
- Test specific file: `bun test test/prepare-prompt.test.ts`
## Architecture Overview
This is a GitHub Action that allows running Claude Code within GitHub workflows. The action consists of:
### Core Components
1. **Action Definition** (`action.yml`): Defines inputs, outputs, and the composite action steps
2. **Prompt Preparation** (`src/index.ts`): Runs Claude Code with specified arguments
### Key Design Patterns
- Uses Bun runtime for development and execution
- Named pipes for IPC between prompt input and Claude process
- JSON streaming output format for execution logs
- Composite action pattern to orchestrate multiple steps
- Provider-agnostic design supporting Anthropic API, AWS Bedrock, and Google Vertex AI
## Provider Authentication
1. **Anthropic API** (default): Requires API key via `anthropic_api_key` input
2. **AWS Bedrock**: Uses OIDC authentication when `use_bedrock: true`
3. **Google Vertex AI**: Uses OIDC authentication when `use_vertex: true`
## Testing Strategy
### Local Testing
- Use `act` tool to run GitHub Actions workflows locally
- `test-local.sh` script automates local testing setup
- Requires `ANTHROPIC_API_KEY` environment variable
### Test Structure
- Unit tests for configuration logic
- Integration tests for prompt preparation
- Full workflow tests in `.github/workflows/test-action.yml`
## Important Technical Details
- Uses `mkfifo` to create named pipes for prompt input
- Outputs execution logs as JSON to `/tmp/claude-execution-output.json`
- Timeout enforcement via `timeout` command wrapper
- Strict TypeScript configuration with Bun-specific settings
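
A rough illustration of the named-pipe and timeout pattern described above, written as a workflow step (a sketch only, not the actual `src/index.ts` implementation; the `claude` CLI flags shown are assumptions):

```yaml
- name: Run Claude Code through a named pipe (illustrative sketch)
  shell: bash
  run: |
    PIPE="$RUNNER_TEMP/claude_prompt_pipe"
    mkfifo "$PIPE"                    # named pipe for the prompt input
    cat "$PROMPT_FILE" > "$PIPE" &    # feed the prompt from the background
    # Enforce the limit with the `timeout` wrapper and stream JSON output
    # to the execution log; the exact CLI flags are assumptions.
    timeout "${TIMEOUT_MINUTES}m" claude -p --output-format stream-json \
      < "$PIPE" > /tmp/claude-execution-output.json
  env:
    PROMPT_FILE: prompt.txt
    TIMEOUT_MINUTES: "10"
```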


@@ -0,0 +1,128 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
- Demonstrating empathy and kindness toward other people
- Being respectful of differing opinions, viewpoints, and experiences
- Giving and gracefully accepting constructive feedback
- Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
- Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
- The use of sexualized language or imagery, and sexual attention or
advances of any kind
- Trolling, insulting or derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or email
address, without their explicit permission
- Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
claude-code-action-coc@anthropic.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.

base-action/CONTRIBUTING.md (new file, 136 lines)

@@ -0,0 +1,136 @@
# Contributing to Claude Code Base Action
Thank you for your interest in contributing to Claude Code Base Action! This document provides guidelines and instructions for contributing to the project.
## Getting Started
### Prerequisites
- [Bun](https://bun.sh/) runtime
- [Docker](https://www.docker.com/) (for running GitHub Actions locally)
- [act](https://github.com/nektos/act) (installed automatically by our test script)
- An Anthropic API key (for testing)
### Setup
1. Fork the repository on GitHub and clone your fork:
```bash
git clone https://github.com/your-username/claude-code-base-action.git
cd claude-code-base-action
```
2. Install dependencies:
```bash
bun install
```
3. Set up your Anthropic API key:
```bash
export ANTHROPIC_API_KEY="your-api-key-here"
```
## Development
### Available Scripts
- `bun test` - Run all tests
- `bun run typecheck` - Type check the code
- `bun run format` - Format code with Prettier
- `bun run format:check` - Check code formatting
## Testing
### Running Tests Locally
1. **Unit Tests**:
```bash
bun test
```
2. **Integration Tests** (using GitHub Actions locally):
```bash
./test-local.sh
```
This script:
- Installs `act` if not present (requires Homebrew on macOS)
- Runs the GitHub Action workflow locally using Docker
- Requires your `ANTHROPIC_API_KEY` to be set
On Apple Silicon Macs, the script automatically adds the `--container-architecture linux/amd64` flag to avoid compatibility issues.
## Pull Request Process
1. Create a new branch from `main`:
```bash
git checkout -b feature/your-feature-name
```
2. Make your changes and commit them:
```bash
git add .
git commit -m "feat: add new feature"
```
3. Run tests and formatting:
```bash
bun test
bun run typecheck
bun run format:check
```
4. Push your branch and create a Pull Request:
```bash
git push origin feature/your-feature-name
```
5. Ensure all CI checks pass
6. Request review from maintainers
## Action Development
### Testing Your Changes
When modifying the action:
1. Test locally with the test script:
```bash
./test-local.sh
```
2. Test in a real GitHub Actions workflow by:
- Creating a test repository
- Using your branch as the action source:
```yaml
uses: your-username/claude-code-base-action@your-branch
```
### Debugging
- Use `console.log` for debugging in development
- Check GitHub Actions logs for runtime issues
- Use `act` with `-v` flag for verbose output:
```bash
act push -v --secret ANTHROPIC_API_KEY="$ANTHROPIC_API_KEY"
```
## Common Issues
### Docker Issues
Make sure Docker is running before using `act`. You can check with:
```bash
docker ps
```

base-action/LICENSE (new file, 21 lines)

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2025 Anthropic, PBC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

base-action/README.md (new file, 523 lines)

@@ -0,0 +1,523 @@
# Claude Code Base Action
This GitHub Action allows you to run [Claude Code](https://www.anthropic.com/claude-code) within your GitHub Actions workflows. You can use this to build any custom workflow on top of Claude Code.
For simply tagging @claude in issues and PRs out of the box, [check out the Claude Code action and GitHub app](https://github.com/anthropics/claude-code-action).
## Usage
Add the following to your workflow file:
```yaml
# Using a direct prompt
- name: Run Claude Code with direct prompt
uses: anthropics/claude-code-base-action@beta
with:
prompt: "Your prompt here"
allowed_tools: "Bash(git:*),View,GlobTool,GrepTool,BatchTool"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
# Or using a prompt from a file
- name: Run Claude Code with prompt file
uses: anthropics/claude-code-base-action@beta
with:
prompt_file: "/path/to/prompt.txt"
allowed_tools: "Bash(git:*),View,GlobTool,GrepTool,BatchTool"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
# Or limiting the conversation turns
- name: Run Claude Code with limited turns
uses: anthropics/claude-code-base-action@beta
with:
prompt: "Your prompt here"
allowed_tools: "Bash(git:*),View,GlobTool,GrepTool,BatchTool"
max_turns: "5" # Limit conversation to 5 turns
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
# Using custom system prompts
- name: Run Claude Code with custom system prompt
uses: anthropics/claude-code-base-action@beta
with:
prompt: "Build a REST API"
system_prompt: "You are a senior backend engineer. Focus on security, performance, and maintainability."
allowed_tools: "Bash(git:*),View,GlobTool,GrepTool,BatchTool"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
# Or appending to the default system prompt
- name: Run Claude Code with appended system prompt
uses: anthropics/claude-code-base-action@beta
with:
prompt: "Create a database schema"
append_system_prompt: "After writing code, be sure to code review yourself."
allowed_tools: "Bash(git:*),View,GlobTool,GrepTool,BatchTool"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
# Using custom environment variables
- name: Run Claude Code with custom environment variables
uses: anthropics/claude-code-base-action@beta
with:
prompt: "Deploy to staging environment"
claude_env: |
ENVIRONMENT: staging
API_URL: https://api-staging.example.com
DEBUG: true
allowed_tools: "Bash(git:*),View,GlobTool,GrepTool,BatchTool"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
# Using fallback model for handling API errors
- name: Run Claude Code with fallback model
uses: anthropics/claude-code-base-action@beta
with:
prompt: "Review and fix TypeScript errors"
model: "claude-opus-4-20250514"
fallback_model: "claude-sonnet-4-20250514"
allowed_tools: "Bash(git:*),View,GlobTool,GrepTool,BatchTool"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
# Using OAuth token instead of API key
- name: Run Claude Code with OAuth token
uses: anthropics/claude-code-base-action@beta
with:
prompt: "Update dependencies"
allowed_tools: "Bash(git:*),View,GlobTool,GrepTool,BatchTool"
claude_code_oauth_token: ${{ secrets.CLAUDE_CODE_OAUTH_TOKEN }}
```
## Inputs
| Input | Description | Required | Default |
| ------------------------- | ------------------------------------------------------------------------------------------------- | -------- | ---------------------------- |
| `prompt` | The prompt to send to Claude Code | No\* | '' |
| `prompt_file` | Path to a file containing the prompt to send to Claude Code | No\* | '' |
| `allowed_tools` | Comma-separated list of allowed tools for Claude Code to use | No | '' |
| `disallowed_tools` | Comma-separated list of disallowed tools that Claude Code cannot use | No | '' |
| `max_turns` | Maximum number of conversation turns (default: no limit) | No | '' |
| `mcp_config` | Path to the MCP configuration JSON file, or MCP configuration JSON string | No | '' |
| `settings` | Path to Claude Code settings JSON file, or settings JSON string | No | '' |
| `system_prompt` | Override system prompt | No | '' |
| `append_system_prompt` | Append to system prompt | No | '' |
| `claude_env` | Custom environment variables to pass to Claude Code execution (YAML multiline format) | No | '' |
| `model` | Model to use (provider-specific format required for Bedrock/Vertex) | No | 'claude-4-0-sonnet-20250219' |
| `anthropic_model` | DEPRECATED: Use 'model' instead | No | 'claude-4-0-sonnet-20250219' |
| `fallback_model` | Enable automatic fallback to specified model when default model is overloaded | No | '' |
| `timeout_minutes` | Timeout in minutes for Claude Code execution | No | '10' |
| `anthropic_api_key` | Anthropic API key (required for direct Anthropic API) | No | '' |
| `claude_code_oauth_token` | Claude Code OAuth token (alternative to anthropic_api_key) | No | '' |
| `use_bedrock` | Use Amazon Bedrock with OIDC authentication instead of direct Anthropic API | No | 'false' |
| `use_vertex` | Use Google Vertex AI with OIDC authentication instead of direct Anthropic API | No | 'false' |
| `use_node_cache` | Whether to use Node.js dependency caching (set to true only for Node.js projects with lock files) | No | 'false' |
\*Either `prompt` or `prompt_file` must be provided, but not both.
## Outputs
| Output | Description |
| ---------------- | ---------------------------------------------------------- |
| `conclusion` | Execution status of Claude Code ('success' or 'failure') |
| `execution_file` | Path to the JSON file containing Claude Code execution log |
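
For example, a follow-up step can read these outputs to check the result and inspect the execution log (the step id `claude-run` is just a placeholder):

```yaml
- name: Run Claude Code
  id: claude-run
  uses: anthropics/claude-code-base-action@beta
  with:
    prompt: "Your prompt here"
    anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}

- name: Inspect Claude Code result
  if: always()
  run: |
    echo "Conclusion: ${{ steps.claude-run.outputs.conclusion }}"
    cat "${{ steps.claude-run.outputs.execution_file }}"
```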
## Environment Variables
The following environment variables can be used to configure the action:
| Variable | Description | Default |
| -------------- | ----------------------------------------------------- | ------- |
| `NODE_VERSION` | Node.js version to use (e.g., '18.x', '20.x', '22.x') | '18.x' |
Example usage:
```yaml
- name: Run Claude Code with Node.js 20
uses: anthropics/claude-code-base-action@beta
env:
NODE_VERSION: "20.x"
with:
prompt: "Your prompt here"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
```
## Custom Environment Variables
You can pass custom environment variables to Claude Code execution using the `claude_env` input. This allows Claude to access environment-specific configuration during its execution.
The `claude_env` input accepts YAML multiline format with key-value pairs:
```yaml
- name: Deploy with custom environment
uses: anthropics/claude-code-base-action@beta
with:
prompt: "Deploy the application to the staging environment"
claude_env: |
ENVIRONMENT: staging
API_BASE_URL: https://api-staging.example.com
DATABASE_URL: ${{ secrets.STAGING_DB_URL }}
DEBUG: true
LOG_LEVEL: debug
allowed_tools: "Bash(git:*),View,GlobTool,GrepTool,BatchTool"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
```
### Features:
- **YAML Format**: Use standard YAML key-value syntax (`KEY: value`)
- **Multiline Support**: Define multiple environment variables in a single input
- **Comments**: Lines starting with `#` are ignored
- **GitHub Secrets**: Can reference GitHub secrets using `${{ secrets.SECRET_NAME }}`
- **Runtime Access**: Environment variables are available to Claude during execution
### Example Use Cases:
```yaml
# Development configuration
claude_env: |
NODE_ENV: development
API_URL: http://localhost:3000
DEBUG: true
# Production deployment
claude_env: |
NODE_ENV: production
API_URL: https://api.example.com
DATABASE_URL: ${{ secrets.PROD_DB_URL }}
REDIS_URL: ${{ secrets.REDIS_URL }}
# Feature flags and configuration
claude_env: |
FEATURE_NEW_UI: enabled
MAX_RETRIES: 3
TIMEOUT_MS: 5000
```
## Using Settings Configuration
You can provide Claude Code settings configuration in two ways:
### Option 1: Settings Configuration File
Provide a path to a JSON file containing Claude Code settings:
```yaml
- name: Run Claude Code with settings file
uses: anthropics/claude-code-base-action@beta
with:
prompt: "Your prompt here"
settings: "path/to/settings.json"
allowed_tools: "Bash(git:*),View,GlobTool,GrepTool,BatchTool"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
```
### Option 2: Inline Settings Configuration
Provide the settings configuration directly as a JSON string:
```yaml
- name: Run Claude Code with inline settings
uses: anthropics/claude-code-base-action@beta
with:
prompt: "Your prompt here"
settings: |
{
"model": "claude-opus-4-20250514",
"env": {
"DEBUG": "true",
"API_URL": "https://api.example.com"
},
"permissions": {
"allow": ["Bash", "Read"],
"deny": ["WebFetch"]
},
"hooks": {
"PreToolUse": [{
"matcher": "Bash",
"hooks": [{
"type": "command",
"command": "echo Running bash command..."
}]
}]
}
}
allowed_tools: "Bash(git:*),View,GlobTool,GrepTool,BatchTool"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
```
The settings file supports all Claude Code settings options including:
- `model`: Override the default model
- `env`: Environment variables for the session
- `permissions`: Tool usage permissions
- `hooks`: Pre/post tool execution hooks
- `includeCoAuthoredBy`: Include co-authored-by in git commits
- And more...
**Note**: The `enableAllProjectMcpServers` setting is always set to `true` by this action to ensure MCP servers work correctly.
## Using MCP Config
You can provide MCP configuration in two ways:
### Option 1: MCP Configuration File
Provide a path to a JSON file containing MCP configuration:
```yaml
- name: Run Claude Code with MCP config file
uses: anthropics/claude-code-base-action@beta
with:
prompt: "Your prompt here"
mcp_config: "path/to/mcp-config.json"
allowed_tools: "Bash(git:*),View,GlobTool,GrepTool,BatchTool"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
```
### Option 2: Inline MCP Configuration
Provide the MCP configuration directly as a JSON string:
```yaml
- name: Run Claude Code with inline MCP config
uses: anthropics/claude-code-base-action@beta
with:
prompt: "Your prompt here"
mcp_config: |
{
"mcpServers": {
"server-name": {
"command": "node",
"args": ["./server.js"],
"env": {
"API_KEY": "your-api-key"
}
}
}
}
allowed_tools: "Bash(git:*),View,GlobTool,GrepTool,BatchTool"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
```
The MCP config file should follow this format:
```json
{
"mcpServers": {
"server-name": {
"command": "node",
"args": ["./server.js"],
"env": {
"API_KEY": "your-api-key"
}
}
}
}
```
You can combine MCP config with other inputs like allowed tools:
```yaml
# Using multiple inputs together
- name: Run Claude Code with MCP and custom tools
uses: anthropics/claude-code-base-action@beta
with:
prompt: "Access the custom MCP server and use its tools"
mcp_config: "mcp-config.json"
allowed_tools: "Bash(git:*),View,mcp__server-name__custom_tool"
timeout_minutes: "15"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
```
## Example: PR Code Review
```yaml
name: Claude Code Review
on:
pull_request:
types: [opened, synchronize]
jobs:
code-review:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Run Code Review with Claude
id: code-review
uses: anthropics/claude-code-base-action@beta
with:
prompt: "Review the PR changes. Focus on code quality, potential bugs, and performance issues. Suggest improvements where appropriate. Write your review as markdown text."
allowed_tools: "Bash(git diff --name-only HEAD~1),Bash(git diff HEAD~1),View,GlobTool,GrepTool,Write"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
- name: Extract and Comment PR Review
if: steps.code-review.outputs.conclusion == 'success'
uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const fs = require('fs');
const executionFile = '${{ steps.code-review.outputs.execution_file }}';
const executionLog = JSON.parse(fs.readFileSync(executionFile, 'utf8'));
// Extract the review content from the execution log
// The execution log contains the full conversation including Claude's responses
let review = '';
// Find the last assistant message which should contain the review
for (let i = executionLog.length - 1; i >= 0; i--) {
if (executionLog[i].role === 'assistant') {
review = executionLog[i].content;
break;
}
}
if (review) {
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: "## Claude Code Review\n\n" + review + "\n\n*Generated by Claude Code*"
});
}
```
Check out additional examples in [`./examples`](./examples).
## Using Cloud Providers
You can authenticate with Claude using any of these methods:
1. Direct Anthropic API (default) - requires API key or OAuth token
2. Amazon Bedrock - requires OIDC authentication and automatically uses cross-region inference profiles
3. Google Vertex AI - requires OIDC authentication
**Note**:
- Bedrock and Vertex use OIDC authentication exclusively
- AWS Bedrock automatically uses cross-region inference profiles for certain models
- For cross-region inference profile models, you need to request and be granted access to the Claude models in all regions that the inference profile uses
- The Bedrock API endpoint URL is automatically constructed using the AWS_REGION environment variable (e.g., `https://bedrock-runtime.us-west-2.amazonaws.com`)
- You can override the Bedrock API endpoint URL by setting the `ANTHROPIC_BEDROCK_BASE_URL` environment variable (see the example below)
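
For example, the endpoint override can be supplied as an environment variable on the step (the URL below is a placeholder):

```yaml
- name: Run Claude Code with Bedrock (custom endpoint)
  uses: anthropics/claude-code-base-action@beta
  with:
    prompt: "Your prompt here"
    model: "anthropic.claude-3-7-sonnet-20250219-v1:0"
    use_bedrock: "true"
  env:
    # Overrides the default https://bedrock-runtime.<AWS_REGION>.amazonaws.com URL
    ANTHROPIC_BEDROCK_BASE_URL: "https://bedrock-runtime.us-west-2.amazonaws.com"
```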
### Model Configuration
Use provider-specific model names based on your chosen provider:
```yaml
# For direct Anthropic API (default)
- name: Run Claude Code with Anthropic API
uses: anthropics/claude-code-base-action@beta
with:
prompt: "Your prompt here"
model: "claude-3-7-sonnet-20250219"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
# For Amazon Bedrock (requires OIDC authentication)
- name: Configure AWS Credentials (OIDC)
uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: ${{ secrets.AWS_ROLE_TO_ASSUME }}
aws-region: us-west-2
- name: Run Claude Code with Bedrock
uses: anthropics/claude-code-base-action@beta
with:
prompt: "Your prompt here"
model: "anthropic.claude-3-7-sonnet-20250219-v1:0"
use_bedrock: "true"
# For Google Vertex AI (requires OIDC authentication)
- name: Authenticate to Google Cloud
uses: google-github-actions/auth@v2
with:
workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}
service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
- name: Run Claude Code with Vertex AI
uses: anthropics/claude-code-base-action@beta
with:
prompt: "Your prompt here"
model: "claude-3-7-sonnet@20250219"
use_vertex: "true"
```
## Example: Using OIDC Authentication for AWS Bedrock
This example shows how to use OIDC authentication with AWS Bedrock:
```yaml
- name: Configure AWS Credentials (OIDC)
uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: ${{ secrets.AWS_ROLE_TO_ASSUME }}
aws-region: us-west-2
- name: Run Claude Code with AWS OIDC
uses: anthropics/claude-code-base-action@beta
with:
prompt: "Your prompt here"
use_bedrock: "true"
model: "anthropic.claude-3-7-sonnet-20250219-v1:0"
allowed_tools: "Bash(git:*),View,GlobTool,GrepTool,BatchTool"
```
## Example: Using OIDC Authentication for GCP Vertex AI
This example shows how to use OIDC authentication with GCP Vertex AI:
```yaml
- name: Authenticate to Google Cloud
uses: google-github-actions/auth@v2
with:
workload_identity_provider: ${{ secrets.GCP_WORKLOAD_IDENTITY_PROVIDER }}
service_account: ${{ secrets.GCP_SERVICE_ACCOUNT }}
- name: Run Claude Code with GCP OIDC
uses: anthropics/claude-code-base-action@beta
with:
prompt: "Your prompt here"
use_vertex: "true"
model: "claude-3-7-sonnet@20250219"
allowed_tools: "Bash(git:*),View,GlobTool,GrepTool,BatchTool"
```
## Security Best Practices
**⚠️ IMPORTANT: Never commit API keys directly to your repository! Always use GitHub Actions secrets.**
To securely use your Anthropic API key:
1. Add your API key as a repository secret:
- Go to your repository's Settings
- Navigate to "Secrets and variables" → "Actions"
- Click "New repository secret"
- Name it `ANTHROPIC_API_KEY`
- Paste your API key as the value
2. Reference the secret in your workflow:
```yaml
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
```
**Never do this:**
```yaml
# ❌ WRONG - Exposes your API key
anthropic_api_key: "sk-ant-..."
```
**Always do this:**
```yaml
# ✅ CORRECT - Uses GitHub secrets
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
```
This applies to all sensitive values including API keys, access tokens, and credentials.
We also recommend that you always use short-lived tokens when possible.
## License
This project is licensed under the MIT License—see the LICENSE file for details.

base-action/action.yml (new file, 166 lines)

@@ -0,0 +1,166 @@
name: "Claude Code Base Action"
description: "Run Claude Code in GitHub Actions workflows"
branding:
icon: "code"
color: "orange"
inputs:
# Claude Code arguments
prompt:
description: "The prompt to send to Claude Code (mutually exclusive with prompt_file)"
required: false
default: ""
prompt_file:
description: "Path to a file containing the prompt to send to Claude Code (mutually exclusive with prompt)"
required: false
default: ""
allowed_tools:
description: "Comma-separated list of allowed tools for Claude Code to use"
required: false
default: ""
disallowed_tools:
description: "Comma-separated list of disallowed tools that Claude Code cannot use"
required: false
default: ""
max_turns:
description: "Maximum number of conversation turns (default: no limit)"
required: false
default: ""
mcp_config:
description: "MCP configuration as JSON string or path to MCP configuration JSON file"
required: false
default: ""
settings:
description: "Claude Code settings as JSON string or path to settings JSON file"
required: false
default: ""
system_prompt:
description: "Override system prompt"
required: false
default: ""
append_system_prompt:
description: "Append to system prompt"
required: false
default: ""
model:
description: "Model to use (provider-specific format required for Bedrock/Vertex)"
required: false
anthropic_model:
description: "DEPRECATED: Use 'model' instead. Model to use (provider-specific format required for Bedrock/Vertex)"
required: false
fallback_model:
description: "Enable automatic fallback to specified model when default model is unavailable"
required: false
claude_env:
description: "Custom environment variables to pass to Claude Code execution (YAML multiline format)"
required: false
default: ""
# Action settings
timeout_minutes:
description: "Timeout in minutes for Claude Code execution"
required: false
default: "10"
# Authentication settings
anthropic_api_key:
description: "Anthropic API key (required for direct Anthropic API)"
required: false
default: ""
claude_code_oauth_token:
description: "Claude Code OAuth token (alternative to anthropic_api_key)"
required: false
default: ""
use_bedrock:
description: "Use Amazon Bedrock with OIDC authentication instead of direct Anthropic API"
required: false
default: "false"
use_vertex:
description: "Use Google Vertex AI with OIDC authentication instead of direct Anthropic API"
required: false
default: "false"
use_node_cache:
description: "Whether to use Node.js dependency caching (set to true only for Node.js projects with lock files)"
required: false
default: "false"
outputs:
conclusion:
description: "Execution status of Claude Code ('success' or 'failure')"
value: ${{ steps.run_claude.outputs.conclusion }}
execution_file:
description: "Path to the JSON file containing Claude Code execution log"
value: ${{ steps.run_claude.outputs.execution_file }}
runs:
using: "composite"
steps:
- name: Setup Node.js
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # https://github.com/actions/setup-node/releases/tag/v4.4.0
with:
node-version: ${{ env.NODE_VERSION || '18.x' }}
cache: ${{ inputs.use_node_cache == 'true' && 'npm' || '' }}
- name: Install Bun
uses: oven-sh/setup-bun@735343b667d3e6f658f44d0eca948eb6282f2b76 # https://github.com/oven-sh/setup-bun/releases/tag/v2.0.2
with:
bun-version: 1.2.11
- name: Install Dependencies
shell: bash
run: |
cd ${GITHUB_ACTION_PATH}
bun install
- name: Install Claude Code
shell: bash
run: npm install -g @anthropic-ai/claude-code@1.0.53
- name: Run Claude Code Action
shell: bash
id: run_claude
run: |
# Change to CLAUDE_WORKING_DIR if set (for running in custom directories)
if [ -n "$CLAUDE_WORKING_DIR" ]; then
echo "Changing directory to CLAUDE_WORKING_DIR: $CLAUDE_WORKING_DIR"
cd "$CLAUDE_WORKING_DIR"
fi
bun run ${GITHUB_ACTION_PATH}/src/index.ts
env:
# Model configuration
CLAUDE_CODE_ACTION: "1"
ANTHROPIC_MODEL: ${{ inputs.model || inputs.anthropic_model }}
INPUT_PROMPT: ${{ inputs.prompt }}
INPUT_PROMPT_FILE: ${{ inputs.prompt_file }}
INPUT_ALLOWED_TOOLS: ${{ inputs.allowed_tools }}
INPUT_DISALLOWED_TOOLS: ${{ inputs.disallowed_tools }}
INPUT_MAX_TURNS: ${{ inputs.max_turns }}
INPUT_MCP_CONFIG: ${{ inputs.mcp_config }}
INPUT_SETTINGS: ${{ inputs.settings }}
INPUT_SYSTEM_PROMPT: ${{ inputs.system_prompt }}
INPUT_APPEND_SYSTEM_PROMPT: ${{ inputs.append_system_prompt }}
INPUT_TIMEOUT_MINUTES: ${{ inputs.timeout_minutes }}
INPUT_CLAUDE_ENV: ${{ inputs.claude_env }}
INPUT_FALLBACK_MODEL: ${{ inputs.fallback_model }}
# Provider configuration
ANTHROPIC_API_KEY: ${{ inputs.anthropic_api_key }}
CLAUDE_CODE_OAUTH_TOKEN: ${{ inputs.claude_code_oauth_token }}
ANTHROPIC_BASE_URL: ${{ env.ANTHROPIC_BASE_URL }}
# Only set provider flags if explicitly true, since any value (including "false") is truthy
CLAUDE_CODE_USE_BEDROCK: ${{ inputs.use_bedrock == 'true' && '1' || '' }}
CLAUDE_CODE_USE_VERTEX: ${{ inputs.use_vertex == 'true' && '1' || '' }}
# AWS configuration
AWS_REGION: ${{ env.AWS_REGION }}
AWS_ACCESS_KEY_ID: ${{ env.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ env.AWS_SECRET_ACCESS_KEY }}
AWS_SESSION_TOKEN: ${{ env.AWS_SESSION_TOKEN }}
ANTHROPIC_BEDROCK_BASE_URL: ${{ env.ANTHROPIC_BEDROCK_BASE_URL || (env.AWS_REGION && format('https://bedrock-runtime.{0}.amazonaws.com', env.AWS_REGION)) }}
# GCP configuration
ANTHROPIC_VERTEX_PROJECT_ID: ${{ env.ANTHROPIC_VERTEX_PROJECT_ID }}
CLOUD_ML_REGION: ${{ env.CLOUD_ML_REGION }}
GOOGLE_APPLICATION_CREDENTIALS: ${{ env.GOOGLE_APPLICATION_CREDENTIALS }}
ANTHROPIC_VERTEX_BASE_URL: ${{ env.ANTHROPIC_VERTEX_BASE_URL }}

base-action/bun.lock (new file, 44 lines)

@@ -0,0 +1,44 @@
{
"lockfileVersion": 1,
"workspaces": {
"": {
"name": "@anthropic-ai/claude-code-base-action",
"dependencies": {
"@actions/core": "^1.10.1",
},
"devDependencies": {
"@types/bun": "^1.2.12",
"@types/node": "^20.0.0",
"prettier": "3.5.3",
"typescript": "^5.8.3",
},
},
},
"packages": {
"@actions/core": ["@actions/core@1.11.1", "", { "dependencies": { "@actions/exec": "^1.1.1", "@actions/http-client": "^2.0.1" } }, "sha512-hXJCSrkwfA46Vd9Z3q4cpEpHB1rL5NG04+/rbqW9d3+CSvtB1tYe8UTpAlixa1vj0m/ULglfEK2UKxMGxCxv5A=="],
"@actions/exec": ["@actions/exec@1.1.1", "", { "dependencies": { "@actions/io": "^1.0.1" } }, "sha512-+sCcHHbVdk93a0XT19ECtO/gIXoxvdsgQLzb2fE2/5sIZmWQuluYyjPQtrtTHdU1YzTZ7bAPN4sITq2xi1679w=="],
"@actions/http-client": ["@actions/http-client@2.2.3", "", { "dependencies": { "tunnel": "^0.0.6", "undici": "^5.25.4" } }, "sha512-mx8hyJi/hjFvbPokCg4uRd4ZX78t+YyRPtnKWwIl+RzNaVuFpQHfmlGVfsKEJN8LwTCvL+DfVgAM04XaHkm6bA=="],
"@actions/io": ["@actions/io@1.1.3", "", {}, "sha512-wi9JjgKLYS7U/z8PPbco+PvTb/nRWjeoFlJ1Qer83k/3C5PHQi28hiVdeE2kHXmIL99mQFawx8qt/JPjZilJ8Q=="],
"@fastify/busboy": ["@fastify/busboy@2.1.1", "", {}, "sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA=="],
"@types/bun": ["@types/bun@1.2.12", "", { "dependencies": { "bun-types": "1.2.12" } }, "sha512-lY/GQTXDGsolT/TiH72p1tuyUORuRrdV7VwOTOjDOt8uTBJQOJc5zz3ufwwDl0VBaoxotSk4LdP0hhjLJ6ypIQ=="],
"@types/node": ["@types/node@20.17.32", "", { "dependencies": { "undici-types": "~6.19.2" } }, "sha512-zeMXFn8zQ+UkjK4ws0RiOC9EWByyW1CcVmLe+2rQocXRsGEDxUCwPEIVgpsGcLHS/P8JkT0oa3839BRABS0oPw=="],
"bun-types": ["bun-types@1.2.12", "", { "dependencies": { "@types/node": "*" } }, "sha512-tvWMx5vPqbRXgE8WUZI94iS1xAYs8bkqESR9cxBB1Wi+urvfTrF1uzuDgBHFAdO0+d2lmsbG3HmeKMvUyj6pWA=="],
"prettier": ["prettier@3.5.3", "", { "bin": "bin/prettier.cjs" }, "sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw=="],
"tunnel": ["tunnel@0.0.6", "", {}, "sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg=="],
"typescript": ["typescript@5.8.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ=="],
"undici": ["undici@5.29.0", "", { "dependencies": { "@fastify/busboy": "^2.0.0" } }, "sha512-raqeBD6NQK4SkWhQzeYKd1KmIG6dllBOTt55Rmkt4HtI9mwdWtJljnrXjAFUBLTSN67HWrOIZ3EPF4kjUw80Bg=="],
"undici-types": ["undici-types@6.19.8", "", {}, "sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw=="],
}
}


@@ -0,0 +1,108 @@
name: Claude Issue Triage Example
description: Run Claude Code for issue triage in GitHub Actions
on:
issues:
types: [opened]
jobs:
triage-issue:
runs-on: ubuntu-latest
timeout-minutes: 10
permissions:
contents: read
issues: write
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
with:
fetch-depth: 0
- name: Setup GitHub MCP Server
run: |
mkdir -p /tmp/mcp-config
cat > /tmp/mcp-config/mcp-servers.json << 'EOF'
{
"mcpServers": {
"github": {
"command": "docker",
"args": [
"run",
"-i",
"--rm",
"-e",
"GITHUB_PERSONAL_ACCESS_TOKEN",
"ghcr.io/github/github-mcp-server:sha-7aced2b"
],
"env": {
"GITHUB_PERSONAL_ACCESS_TOKEN": "${{ secrets.GITHUB_TOKEN }}"
}
}
}
}
EOF
- name: Create triage prompt
run: |
mkdir -p /tmp/claude-prompts
cat > /tmp/claude-prompts/triage-prompt.txt << 'EOF'
You're an issue triage assistant for GitHub issues. Your task is to analyze the issue and select appropriate labels from the provided list.
IMPORTANT: Don't post any comments or messages to the issue. Your only action should be to apply labels.
Issue Information:
- REPO: ${GITHUB_REPOSITORY}
- ISSUE_NUMBER: ${{ github.event.issue.number }}
TASK OVERVIEW:
1. First, fetch the list of labels available in this repository by running: `gh label list`. Run exactly this command with nothing else.
2. Next, use the GitHub tools to get context about the issue:
- You have access to these tools:
- mcp__github__get_issue: Use this to retrieve the current issue's details including title, description, and existing labels
- mcp__github__get_issue_comments: Use this to read any discussion or additional context provided in the comments
- mcp__github__update_issue: Use this to apply labels to the issue (do not use this for commenting)
- mcp__github__search_issues: Use this to find similar issues that might provide context for proper categorization and to identify potential duplicate issues
- mcp__github__list_issues: Use this to understand patterns in how other issues are labeled
- Start by using mcp__github__get_issue to get the issue details
3. Analyze the issue content, considering:
- The issue title and description
- The type of issue (bug report, feature request, question, etc.)
- Technical areas mentioned
- Severity or priority indicators
- User impact
- Components affected
4. Select appropriate labels from the list of available labels fetched in step 1:
- Choose labels that accurately reflect the issue's nature
- Be specific but comprehensive
- Select priority labels if you can determine urgency (high-priority, med-priority, or low-priority)
- Consider platform labels (android, ios) if applicable
- If you find similar issues using mcp__github__search_issues, consider using a "duplicate" label if appropriate. Only do so if the issue is a duplicate of another OPEN issue.
5. Apply the selected labels:
- Use mcp__github__update_issue to apply your selected labels
- DO NOT post any comments explaining your decision
- DO NOT communicate directly with users
- If no labels are clearly applicable, do not apply any labels
IMPORTANT GUIDELINES:
- Be thorough in your analysis
- Only select labels that exist in the list fetched in step 1
- DO NOT post any comments to the issue
- Your ONLY action should be to apply labels using mcp__github__update_issue
- It's okay to not add any labels if none are clearly applicable
EOF
env:
GITHUB_REPOSITORY: ${{ github.repository }}
- name: Run Claude Code for Issue Triage
uses: anthropics/claude-code-base-action@beta
with:
prompt_file: /tmp/claude-prompts/triage-prompt.txt
allowed_tools: "Bash(gh label list),mcp__github__get_issue,mcp__github__get_issue_comments,mcp__github__update_issue,mcp__github__search_issues,mcp__github__list_issues"
mcp_config: /tmp/mcp-config/mcp-servers.json
timeout_minutes: "5"
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}

base-action/package.json Normal file

@@ -0,0 +1,21 @@
{
"name": "@anthropic-ai/claude-code-base-action",
"version": "1.0.0",
"private": true,
"scripts": {
"format": "prettier --write .",
"format:check": "prettier --check .",
"install-hooks": "bun run scripts/install-hooks.sh",
"test": "bun test",
"typecheck": "tsc --noEmit"
},
"dependencies": {
"@actions/core": "^1.10.1"
},
"devDependencies": {
"@types/bun": "^1.2.12",
"@types/node": "^20.0.0",
"prettier": "3.5.3",
"typescript": "^5.8.3"
}
}


@@ -0,0 +1,13 @@
#!/bin/sh
# Install git hooks
echo "Installing git hooks..."
# Make sure hooks directory exists
mkdir -p .git/hooks
# Install pre-push hook
cp scripts/pre-push .git/hooks/pre-push
chmod +x .git/hooks/pre-push
echo "Git hooks installed successfully!"


@@ -0,0 +1,46 @@
#!/bin/sh
# Check if files need formatting before push
echo "Checking code formatting..."
# First check if any files need formatting
if ! bun run format:check; then
echo "Code formatting errors found. Running formatter..."
bun run format
# Check if there are any staged changes after formatting
if git diff --name-only --exit-code; then
echo "All files are now properly formatted."
else
echo ""
echo "ERROR: Code has been formatted but changes need to be committed!"
echo "Please commit the formatted files and try again."
echo ""
echo "The following files were modified:"
git diff --name-only
echo ""
exit 1
fi
else
echo "Code formatting is already correct."
fi
# Run type checking
echo "Running type checking..."
if ! bun run typecheck; then
echo "Type checking failed. Please fix the type errors and try again."
exit 1
else
echo "Type checking passed."
fi
# Run tests
echo "Running tests..."
if ! bun run test; then
echo "Tests failed. Please fix the failing tests and try again."
exit 1
else
echo "All tests passed."
fi
exit 0

base-action/src/index.ts Normal file

@@ -0,0 +1,39 @@
#!/usr/bin/env bun
import * as core from "@actions/core";
import { preparePrompt } from "./prepare-prompt";
import { runClaude } from "./run-claude";
import { setupClaudeCodeSettings } from "./setup-claude-code-settings";
import { validateEnvironmentVariables } from "./validate-env";
async function run() {
try {
validateEnvironmentVariables();
await setupClaudeCodeSettings(process.env.INPUT_SETTINGS);
const promptConfig = await preparePrompt({
prompt: process.env.INPUT_PROMPT || "",
promptFile: process.env.INPUT_PROMPT_FILE || "",
});
await runClaude(promptConfig.path, {
allowedTools: process.env.INPUT_ALLOWED_TOOLS,
disallowedTools: process.env.INPUT_DISALLOWED_TOOLS,
maxTurns: process.env.INPUT_MAX_TURNS,
mcpConfig: process.env.INPUT_MCP_CONFIG,
systemPrompt: process.env.INPUT_SYSTEM_PROMPT,
appendSystemPrompt: process.env.INPUT_APPEND_SYSTEM_PROMPT,
claudeEnv: process.env.INPUT_CLAUDE_ENV,
fallbackModel: process.env.INPUT_FALLBACK_MODEL,
});
} catch (error) {
core.setFailed(`Action failed with error: ${error}`);
core.setOutput("conclusion", "failure");
process.exit(1);
}
}
if (import.meta.main) {
run();
}

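Since the entrypoint is driven entirely by INPUT_* environment variables plus provider credentials, it can also be exercised outside of GitHub Actions. A rough local-invocation sketch, assuming bun is installed, the Claude Code CLI has been installed globally as the action does above, and an Anthropic API key is available; the prompt and timeout values are illustrative:

import { spawnSync } from "child_process";

// RUNNER_TEMP is normally provided by the Actions runner; run-claude.ts uses it
// for its named pipe and execution log, so point it at a writable directory locally.
const result = spawnSync("bun", ["run", "base-action/src/index.ts"], {
  stdio: "inherit",
  env: {
    ...process.env,
    RUNNER_TEMP: process.env.RUNNER_TEMP ?? "/tmp",
    ANTHROPIC_API_KEY: process.env.ANTHROPIC_API_KEY ?? "",
    INPUT_PROMPT: "Say hello", // or INPUT_PROMPT_FILE, but not both
    INPUT_TIMEOUT_MINUTES: "5",
  },
});

process.exit(result.status ?? 1);
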

@@ -0,0 +1,82 @@
import { existsSync, statSync } from "fs";
import { mkdir, writeFile } from "fs/promises";
export type PreparePromptInput = {
prompt: string;
promptFile: string;
};
export type PreparePromptConfig = {
type: "file" | "inline";
path: string;
};
async function validateAndPreparePrompt(
input: PreparePromptInput,
): Promise<PreparePromptConfig> {
// Validate inputs
if (!input.prompt && !input.promptFile) {
throw new Error(
"Neither 'prompt' nor 'prompt_file' was provided. At least one is required.",
);
}
if (input.prompt && input.promptFile) {
throw new Error(
"Both 'prompt' and 'prompt_file' were provided. Please specify only one.",
);
}
// Handle prompt file
if (input.promptFile) {
if (!existsSync(input.promptFile)) {
throw new Error(`Prompt file '${input.promptFile}' does not exist.`);
}
// Validate that the file is not empty
const stats = statSync(input.promptFile);
if (stats.size === 0) {
throw new Error(
"Prompt file is empty. Please provide a non-empty prompt.",
);
}
return {
type: "file",
path: input.promptFile,
};
}
// Handle inline prompt
if (!input.prompt || input.prompt.trim().length === 0) {
throw new Error("Prompt is empty. Please provide a non-empty prompt.");
}
const inlinePath = "/tmp/claude-action/prompt.txt";
return {
type: "inline",
path: inlinePath,
};
}
async function createTemporaryPromptFile(
prompt: string,
promptPath: string,
): Promise<void> {
// Create the directory path
const dirPath = promptPath.substring(0, promptPath.lastIndexOf("/"));
await mkdir(dirPath, { recursive: true });
await writeFile(promptPath, prompt);
}
export async function preparePrompt(
input: PreparePromptInput,
): Promise<PreparePromptConfig> {
const config = await validateAndPreparePrompt(input);
if (config.type === "inline") {
await createTemporaryPromptFile(input.prompt, config.path);
}
return config;
}

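For reference, this is how preparePrompt behaves for an inline prompt, mirroring the integration tests later in this diff; the import path assumes the snippet sits alongside base-action/src/prepare-prompt.ts:

import { preparePrompt } from "./prepare-prompt";

// An inline prompt is written to a fixed temporary file; a prompt_file is used as-is.
const config = await preparePrompt({
  prompt: "Summarize the open issues in this repository",
  promptFile: "",
});

console.log(config);
// -> { type: "inline", path: "/tmp/claude-action/prompt.txt" }
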

@@ -0,0 +1,327 @@
import * as core from "@actions/core";
import { exec, spawn } from "child_process";
import { promisify } from "util";
import { unlink, writeFile, stat } from "fs/promises";
import { createWriteStream } from "fs";
const execAsync = promisify(exec);
const PIPE_PATH = `${process.env.RUNNER_TEMP}/claude_prompt_pipe`;
const EXECUTION_FILE = `${process.env.RUNNER_TEMP}/claude-execution-output.json`;
const BASE_ARGS = ["-p", "--verbose", "--output-format", "stream-json"];
export type ClaudeOptions = {
allowedTools?: string;
disallowedTools?: string;
maxTurns?: string;
mcpConfig?: string;
systemPrompt?: string;
appendSystemPrompt?: string;
claudeEnv?: string;
fallbackModel?: string;
timeoutMinutes?: string;
};
type PreparedConfig = {
claudeArgs: string[];
promptPath: string;
env: Record<string, string>;
};
function parseCustomEnvVars(claudeEnv?: string): Record<string, string> {
if (!claudeEnv || claudeEnv.trim() === "") {
return {};
}
const customEnv: Record<string, string> = {};
// Split by lines and parse each line as KEY: VALUE
const lines = claudeEnv.split("\n");
for (const line of lines) {
const trimmedLine = line.trim();
if (trimmedLine === "" || trimmedLine.startsWith("#")) {
continue; // Skip empty lines and comments
}
const colonIndex = trimmedLine.indexOf(":");
if (colonIndex === -1) {
continue; // Skip lines without colons
}
const key = trimmedLine.substring(0, colonIndex).trim();
const value = trimmedLine.substring(colonIndex + 1).trim();
if (key) {
customEnv[key] = value;
}
}
return customEnv;
}
export function prepareRunConfig(
promptPath: string,
options: ClaudeOptions,
): PreparedConfig {
const claudeArgs = [...BASE_ARGS];
if (options.allowedTools) {
claudeArgs.push("--allowedTools", options.allowedTools);
}
if (options.disallowedTools) {
claudeArgs.push("--disallowedTools", options.disallowedTools);
}
if (options.maxTurns) {
const maxTurnsNum = parseInt(options.maxTurns, 10);
if (isNaN(maxTurnsNum) || maxTurnsNum <= 0) {
throw new Error(
`maxTurns must be a positive number, got: ${options.maxTurns}`,
);
}
claudeArgs.push("--max-turns", options.maxTurns);
}
if (options.mcpConfig) {
claudeArgs.push("--mcp-config", options.mcpConfig);
}
if (options.systemPrompt) {
claudeArgs.push("--system-prompt", options.systemPrompt);
}
if (options.appendSystemPrompt) {
claudeArgs.push("--append-system-prompt", options.appendSystemPrompt);
}
if (options.fallbackModel) {
claudeArgs.push("--fallback-model", options.fallbackModel);
}
if (options.timeoutMinutes) {
const timeoutMinutesNum = parseInt(options.timeoutMinutes, 10);
if (isNaN(timeoutMinutesNum) || timeoutMinutesNum <= 0) {
throw new Error(
`timeoutMinutes must be a positive number, got: ${options.timeoutMinutes}`,
);
}
}
// Parse custom environment variables
const customEnv = parseCustomEnvVars(options.claudeEnv);
return {
claudeArgs,
promptPath,
env: customEnv,
};
}
export async function runClaude(promptPath: string, options: ClaudeOptions) {
const config = prepareRunConfig(promptPath, options);
// Remove any stale pipe left over from a previous run
try {
await unlink(PIPE_PATH);
} catch (e) {
// Ignore if file doesn't exist
}
// Create the named pipe
await execAsync(`mkfifo "${PIPE_PATH}"`);
// Log prompt file size
let promptSize = "unknown";
try {
const stats = await stat(config.promptPath);
promptSize = stats.size.toString();
} catch (e) {
// Ignore error
}
console.log(`Prompt file size: ${promptSize} bytes`);
// Log custom environment variables if any
if (Object.keys(config.env).length > 0) {
const envKeys = Object.keys(config.env).join(", ");
console.log(`Custom environment variables: ${envKeys}`);
}
// Output to console
console.log(`Running Claude with prompt from file: ${config.promptPath}`);
// Start sending prompt to pipe in background
const catProcess = spawn("cat", [config.promptPath], {
stdio: ["ignore", "pipe", "inherit"],
});
const pipeStream = createWriteStream(PIPE_PATH);
catProcess.stdout.pipe(pipeStream);
catProcess.on("error", (error) => {
console.error("Error reading prompt file:", error);
pipeStream.destroy();
});
const claudeProcess = spawn("claude", config.claudeArgs, {
stdio: ["pipe", "pipe", "inherit"],
env: {
...process.env,
...config.env,
},
});
// Handle Claude process errors
claudeProcess.on("error", (error) => {
console.error("Error spawning Claude process:", error);
pipeStream.destroy();
});
// Capture output for parsing execution metrics
let output = "";
claudeProcess.stdout.on("data", (data) => {
const text = data.toString();
// Try to parse as JSON and pretty print if it's on a single line
const lines = text.split("\n");
lines.forEach((line: string, index: number) => {
if (line.trim() === "") return;
try {
// Check if this line is a JSON object
const parsed = JSON.parse(line);
const prettyJson = JSON.stringify(parsed, null, 2);
process.stdout.write(prettyJson);
if (index < lines.length - 1 || text.endsWith("\n")) {
process.stdout.write("\n");
}
} catch (e) {
// Not a JSON object, print as is
process.stdout.write(line);
if (index < lines.length - 1 || text.endsWith("\n")) {
process.stdout.write("\n");
}
}
});
output += text;
});
// Handle stdout errors
claudeProcess.stdout.on("error", (error) => {
console.error("Error reading Claude stdout:", error);
});
// Pipe from named pipe to Claude
const pipeProcess = spawn("cat", [PIPE_PATH]);
pipeProcess.stdout.pipe(claudeProcess.stdin);
// Handle pipe process errors
pipeProcess.on("error", (error) => {
console.error("Error reading from named pipe:", error);
claudeProcess.kill("SIGTERM");
});
// Wait for Claude to finish with timeout
let timeoutMs = 10 * 60 * 1000; // Default 10 minutes
if (options.timeoutMinutes) {
timeoutMs = parseInt(options.timeoutMinutes, 10) * 60 * 1000;
} else if (process.env.INPUT_TIMEOUT_MINUTES) {
const envTimeout = parseInt(process.env.INPUT_TIMEOUT_MINUTES, 10);
if (isNaN(envTimeout) || envTimeout <= 0) {
throw new Error(
`INPUT_TIMEOUT_MINUTES must be a positive number, got: ${process.env.INPUT_TIMEOUT_MINUTES}`,
);
}
timeoutMs = envTimeout * 60 * 1000;
}
const exitCode = await new Promise<number>((resolve) => {
let resolved = false;
// Set a timeout for the process
const timeoutId = setTimeout(() => {
if (!resolved) {
console.error(
`Claude process timed out after ${timeoutMs / 1000} seconds`,
);
claudeProcess.kill("SIGTERM");
// Give it 5 seconds to terminate gracefully, then force kill
setTimeout(() => {
try {
claudeProcess.kill("SIGKILL");
} catch (e) {
// Process may already be dead
}
}, 5000);
resolved = true;
resolve(124); // Standard timeout exit code
}
}, timeoutMs);
claudeProcess.on("close", (code) => {
if (!resolved) {
clearTimeout(timeoutId);
resolved = true;
resolve(code || 0);
}
});
claudeProcess.on("error", (error) => {
if (!resolved) {
console.error("Claude process error:", error);
clearTimeout(timeoutId);
resolved = true;
resolve(1);
}
});
});
// Clean up processes
try {
catProcess.kill("SIGTERM");
} catch (e) {
// Process may already be dead
}
try {
pipeProcess.kill("SIGTERM");
} catch (e) {
// Process may already be dead
}
// Clean up pipe file
try {
await unlink(PIPE_PATH);
} catch (e) {
// Ignore errors during cleanup
}
// Set conclusion based on exit code
if (exitCode === 0) {
// Try to process the output and save execution metrics
try {
await writeFile("output.txt", output);
// Process output.txt into JSON and save to execution file
const { stdout: jsonOutput } = await execAsync("jq -s '.' output.txt");
await writeFile(EXECUTION_FILE, jsonOutput);
console.log(`Log saved to ${EXECUTION_FILE}`);
} catch (e) {
core.warning(`Failed to process output for execution metrics: ${e}`);
}
core.setOutput("conclusion", "success");
core.setOutput("execution_file", EXECUTION_FILE);
} else {
core.setOutput("conclusion", "failure");
// Still try to save execution file if we have output
if (output) {
try {
await writeFile("output.txt", output);
const { stdout: jsonOutput } = await execAsync("jq -s '.' output.txt");
await writeFile(EXECUTION_FILE, jsonOutput);
core.setOutput("execution_file", EXECUTION_FILE);
} catch (e) {
// Ignore errors when processing output during failure
}
}
process.exit(exitCode);
}
}

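prepareRunConfig only assembles CLI arguments and parses the claude_env block (one KEY: VALUE pair per line, with comments and blank lines skipped); it does not spawn anything, so it is safe to call directly. A small sketch based on the unit tests further down, assuming it sits next to base-action/src/run-claude.ts:

import { prepareRunConfig } from "./run-claude";

const prepared = prepareRunConfig("/tmp/test-prompt.txt", {
  allowedTools: "Bash,Read",
  maxTurns: "3",
  claudeEnv: "API_KEY: secret123\n# comments and blank lines are skipped\nDEBUG: true",
});

console.log(prepared.claudeArgs);
// -> ["-p", "--verbose", "--output-format", "stream-json",
//     "--allowedTools", "Bash,Read", "--max-turns", "3"]
console.log(prepared.env);
// -> { API_KEY: "secret123", DEBUG: "true" }
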

@@ -0,0 +1,68 @@
import { $ } from "bun";
import { homedir } from "os";
import { readFile } from "fs/promises";
export async function setupClaudeCodeSettings(
settingsInput?: string,
homeDir?: string,
) {
const home = homeDir ?? homedir();
const settingsPath = `${home}/.claude/settings.json`;
console.log(`Setting up Claude settings at: ${settingsPath}`);
// Ensure .claude directory exists
console.log(`Creating .claude directory...`);
await $`mkdir -p ${home}/.claude`.quiet();
let settings: Record<string, unknown> = {};
try {
const existingSettings = await $`cat ${settingsPath}`.quiet().text();
if (existingSettings.trim()) {
settings = JSON.parse(existingSettings);
console.log(
`Found existing settings:`,
JSON.stringify(settings, null, 2),
);
} else {
console.log(`Settings file exists but is empty`);
}
} catch (e) {
console.log(`No existing settings file found, creating new one`);
}
// Handle settings input (either file path or JSON string)
if (settingsInput && settingsInput.trim()) {
console.log(`Processing settings input...`);
let inputSettings: Record<string, unknown> = {};
try {
// First try to parse as JSON
inputSettings = JSON.parse(settingsInput);
console.log(`Parsed settings input as JSON`);
} catch (e) {
// If not JSON, treat as file path
console.log(
`Settings input is not JSON, treating as file path: ${settingsInput}`,
);
try {
const fileContent = await readFile(settingsInput, "utf-8");
inputSettings = JSON.parse(fileContent);
console.log(`Successfully read and parsed settings from file`);
} catch (fileError) {
console.error(`Failed to read or parse settings file: ${fileError}`);
throw new Error(`Failed to process settings input: ${fileError}`);
}
}
// Merge input settings with existing settings
settings = { ...settings, ...inputSettings };
console.log(`Merged settings with input settings`);
}
// Always set enableAllProjectMcpServers to true
settings.enableAllProjectMcpServers = true;
console.log(`Updated settings with enableAllProjectMcpServers: true`);
await $`echo ${JSON.stringify(settings, null, 2)} > ${settingsPath}`.quiet();
console.log(`Settings saved successfully`);
}

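The optional second parameter makes the merge behaviour easy to observe against a throwaway home directory, as the tests near the end of this diff do. A sketch under Bun (the directory below is illustrative); note that enableAllProjectMcpServers is forced to true regardless of the input:

import { setupClaudeCodeSettings } from "./setup-claude-code-settings";
import { readFile } from "fs/promises";

const demoHome = "/tmp/claude-settings-demo"; // illustrative throwaway directory

await setupClaudeCodeSettings(
  JSON.stringify({
    model: "claude-sonnet-4-20250514",
    enableAllProjectMcpServers: false, // gets overridden
  }),
  demoHome,
);

const saved = JSON.parse(
  await readFile(`${demoHome}/.claude/settings.json`, "utf-8"),
);
console.log(saved);
// -> { model: "claude-sonnet-4-20250514", enableAllProjectMcpServers: true }
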

@@ -0,0 +1,54 @@
/**
* Validates the environment variables required for running Claude Code
* based on the selected provider (Anthropic API, AWS Bedrock, or Google Vertex AI)
*/
export function validateEnvironmentVariables() {
const useBedrock = process.env.CLAUDE_CODE_USE_BEDROCK === "1";
const useVertex = process.env.CLAUDE_CODE_USE_VERTEX === "1";
const anthropicApiKey = process.env.ANTHROPIC_API_KEY;
const claudeCodeOAuthToken = process.env.CLAUDE_CODE_OAUTH_TOKEN;
const errors: string[] = [];
if (useBedrock && useVertex) {
errors.push(
"Cannot use both Bedrock and Vertex AI simultaneously. Please set only one provider.",
);
}
if (!useBedrock && !useVertex) {
if (!anthropicApiKey && !claudeCodeOAuthToken) {
errors.push(
"Either ANTHROPIC_API_KEY or CLAUDE_CODE_OAUTH_TOKEN is required when using direct Anthropic API.",
);
}
} else if (useBedrock) {
const requiredBedrockVars = {
AWS_REGION: process.env.AWS_REGION,
AWS_ACCESS_KEY_ID: process.env.AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY: process.env.AWS_SECRET_ACCESS_KEY,
};
Object.entries(requiredBedrockVars).forEach(([key, value]) => {
if (!value) {
errors.push(`${key} is required when using AWS Bedrock.`);
}
});
} else if (useVertex) {
const requiredVertexVars = {
ANTHROPIC_VERTEX_PROJECT_ID: process.env.ANTHROPIC_VERTEX_PROJECT_ID,
CLOUD_ML_REGION: process.env.CLOUD_ML_REGION,
};
Object.entries(requiredVertexVars).forEach(([key, value]) => {
if (!value) {
errors.push(`${key} is required when using Google Vertex AI.`);
}
});
}
if (errors.length > 0) {
const errorMessage = `Environment variable validation failed:\n${errors.map((e) => ` - ${e}`).join("\n")}`;
throw new Error(errorMessage);
}
}

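A quick sketch of the Bedrock branch, mirroring the tests at the end of this diff: once CLAUDE_CODE_USE_BEDROCK is "1", the AWS variables are required instead of an Anthropic API key.

import { validateEnvironmentVariables } from "./validate-env";

process.env.CLAUDE_CODE_USE_BEDROCK = "1";
process.env.AWS_REGION = "us-east-1";
process.env.AWS_ACCESS_KEY_ID = "test-access-key";
process.env.AWS_SECRET_ACCESS_KEY = "test-secret-key";

validateEnvironmentVariables(); // passes: all required Bedrock variables are set

delete process.env.AWS_REGION;
try {
  validateEnvironmentVariables();
} catch (e) {
  // The error lists: "AWS_REGION is required when using AWS Bedrock."
  console.error(e);
}
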
base-action/test-local.sh Executable file

@@ -0,0 +1,12 @@
#!/bin/bash
# Install act if not already installed
if ! command -v act &> /dev/null; then
echo "Installing act..."
brew install act
fi
# Run the test workflow locally
# You'll need to provide your ANTHROPIC_API_KEY
echo "Running action locally with act..."
act push --secret ANTHROPIC_API_KEY="$ANTHROPIC_API_KEY" -W .github/workflows/test-action.yml --container-architecture linux/amd64

base-action/test-mcp-local.sh Executable file

@@ -0,0 +1,18 @@
#!/bin/bash
# Install act if not already installed
if ! command -v act &> /dev/null; then
echo "Installing act..."
brew install act
fi
# Check if ANTHROPIC_API_KEY is set
if [ -z "$ANTHROPIC_API_KEY" ]; then
echo "Error: ANTHROPIC_API_KEY environment variable is not set"
echo "Please export your API key: export ANTHROPIC_API_KEY='your-key-here'"
exit 1
fi
# Run the MCP test workflow locally
echo "Running MCP server test locally with act..."
act push --secret ANTHROPIC_API_KEY="$ANTHROPIC_API_KEY" -W .github/workflows/test-mcp-servers.yml --container-architecture linux/amd64


@@ -0,0 +1,10 @@
{
"mcpServers": {
"test-server": {
"type": "stdio",
"command": "bun",
"args": ["simple-mcp-server.ts"],
"env": {}
}
}
}


@@ -0,0 +1,2 @@
engine-strict=true
registry=https://registry.npmjs.org/


@@ -0,0 +1,186 @@
{
"lockfileVersion": 1,
"workspaces": {
"": {
"name": "mcp-test",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.11.0",
},
},
},
"packages": {
"@modelcontextprotocol/sdk": ["@modelcontextprotocol/sdk@1.12.0", "", { "dependencies": { "ajv": "^6.12.6", "content-type": "^1.0.5", "cors": "^2.8.5", "cross-spawn": "^7.0.5", "eventsource": "^3.0.2", "express": "^5.0.1", "express-rate-limit": "^7.5.0", "pkce-challenge": "^5.0.0", "raw-body": "^3.0.0", "zod": "^3.23.8", "zod-to-json-schema": "^3.24.1" } }, "sha512-m//7RlINx1F3sz3KqwY1WWzVgTcYX52HYk4bJ1hkBXV3zccAEth+jRvG8DBRrdaQuRsPAJOx2MH3zaHNCKL7Zg=="],
"accepts": ["accepts@2.0.0", "", { "dependencies": { "mime-types": "^3.0.0", "negotiator": "^1.0.0" } }, "sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng=="],
"ajv": ["ajv@6.12.6", "", { "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" } }, "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g=="],
"body-parser": ["body-parser@2.2.0", "", { "dependencies": { "bytes": "^3.1.2", "content-type": "^1.0.5", "debug": "^4.4.0", "http-errors": "^2.0.0", "iconv-lite": "^0.6.3", "on-finished": "^2.4.1", "qs": "^6.14.0", "raw-body": "^3.0.0", "type-is": "^2.0.0" } }, "sha512-02qvAaxv8tp7fBa/mw1ga98OGm+eCbqzJOKoRt70sLmfEEi+jyBYVTDGfCL/k06/4EMk/z01gCe7HoCH/f2LTg=="],
"bytes": ["bytes@3.1.2", "", {}, "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg=="],
"call-bind-apply-helpers": ["call-bind-apply-helpers@1.0.2", "", { "dependencies": { "es-errors": "^1.3.0", "function-bind": "^1.1.2" } }, "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ=="],
"call-bound": ["call-bound@1.0.4", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.2", "get-intrinsic": "^1.3.0" } }, "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg=="],
"content-disposition": ["content-disposition@1.0.0", "", { "dependencies": { "safe-buffer": "5.2.1" } }, "sha512-Au9nRL8VNUut/XSzbQA38+M78dzP4D+eqg3gfJHMIHHYa3bg067xj1KxMUWj+VULbiZMowKngFFbKczUrNJ1mg=="],
"content-type": ["content-type@1.0.5", "", {}, "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA=="],
"cookie": ["cookie@0.7.2", "", {}, "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w=="],
"cookie-signature": ["cookie-signature@1.2.2", "", {}, "sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg=="],
"cors": ["cors@2.8.5", "", { "dependencies": { "object-assign": "^4", "vary": "^1" } }, "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g=="],
"cross-spawn": ["cross-spawn@7.0.6", "", { "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", "which": "^2.0.1" } }, "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA=="],
"debug": ["debug@4.4.1", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ=="],
"depd": ["depd@2.0.0", "", {}, "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw=="],
"dunder-proto": ["dunder-proto@1.0.1", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.1", "es-errors": "^1.3.0", "gopd": "^1.2.0" } }, "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A=="],
"ee-first": ["ee-first@1.1.1", "", {}, "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow=="],
"encodeurl": ["encodeurl@2.0.0", "", {}, "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg=="],
"es-define-property": ["es-define-property@1.0.1", "", {}, "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g=="],
"es-errors": ["es-errors@1.3.0", "", {}, "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw=="],
"es-object-atoms": ["es-object-atoms@1.1.1", "", { "dependencies": { "es-errors": "^1.3.0" } }, "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA=="],
"escape-html": ["escape-html@1.0.3", "", {}, "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow=="],
"etag": ["etag@1.8.1", "", {}, "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg=="],
"eventsource": ["eventsource@3.0.7", "", { "dependencies": { "eventsource-parser": "^3.0.1" } }, "sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA=="],
"eventsource-parser": ["eventsource-parser@3.0.2", "", {}, "sha512-6RxOBZ/cYgd8usLwsEl+EC09Au/9BcmCKYF2/xbml6DNczf7nv0MQb+7BA2F+li6//I+28VNlQR37XfQtcAJuA=="],
"express": ["express@5.1.0", "", { "dependencies": { "accepts": "^2.0.0", "body-parser": "^2.2.0", "content-disposition": "^1.0.0", "content-type": "^1.0.5", "cookie": "^0.7.1", "cookie-signature": "^1.2.1", "debug": "^4.4.0", "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "etag": "^1.8.1", "finalhandler": "^2.1.0", "fresh": "^2.0.0", "http-errors": "^2.0.0", "merge-descriptors": "^2.0.0", "mime-types": "^3.0.0", "on-finished": "^2.4.1", "once": "^1.4.0", "parseurl": "^1.3.3", "proxy-addr": "^2.0.7", "qs": "^6.14.0", "range-parser": "^1.2.1", "router": "^2.2.0", "send": "^1.1.0", "serve-static": "^2.2.0", "statuses": "^2.0.1", "type-is": "^2.0.1", "vary": "^1.1.2" } }, "sha512-DT9ck5YIRU+8GYzzU5kT3eHGA5iL+1Zd0EutOmTE9Dtk+Tvuzd23VBU+ec7HPNSTxXYO55gPV/hq4pSBJDjFpA=="],
"express-rate-limit": ["express-rate-limit@7.5.0", "", { "peerDependencies": { "express": "^4.11 || 5 || ^5.0.0-beta.1" } }, "sha512-eB5zbQh5h+VenMPM3fh+nw1YExi5nMr6HUCR62ELSP11huvxm/Uir1H1QEyTkk5QX6A58pX6NmaTMceKZ0Eodg=="],
"fast-deep-equal": ["fast-deep-equal@3.1.3", "", {}, "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="],
"fast-json-stable-stringify": ["fast-json-stable-stringify@2.1.0", "", {}, "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw=="],
"finalhandler": ["finalhandler@2.1.0", "", { "dependencies": { "debug": "^4.4.0", "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "on-finished": "^2.4.1", "parseurl": "^1.3.3", "statuses": "^2.0.1" } }, "sha512-/t88Ty3d5JWQbWYgaOGCCYfXRwV1+be02WqYYlL6h0lEiUAMPM8o8qKGO01YIkOHzka2up08wvgYD0mDiI+q3Q=="],
"forwarded": ["forwarded@0.2.0", "", {}, "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow=="],
"fresh": ["fresh@2.0.0", "", {}, "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A=="],
"function-bind": ["function-bind@1.1.2", "", {}, "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="],
"get-intrinsic": ["get-intrinsic@1.3.0", "", { "dependencies": { "call-bind-apply-helpers": "^1.0.2", "es-define-property": "^1.0.1", "es-errors": "^1.3.0", "es-object-atoms": "^1.1.1", "function-bind": "^1.1.2", "get-proto": "^1.0.1", "gopd": "^1.2.0", "has-symbols": "^1.1.0", "hasown": "^2.0.2", "math-intrinsics": "^1.1.0" } }, "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ=="],
"get-proto": ["get-proto@1.0.1", "", { "dependencies": { "dunder-proto": "^1.0.1", "es-object-atoms": "^1.0.0" } }, "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g=="],
"gopd": ["gopd@1.2.0", "", {}, "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg=="],
"has-symbols": ["has-symbols@1.1.0", "", {}, "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ=="],
"hasown": ["hasown@2.0.2", "", { "dependencies": { "function-bind": "^1.1.2" } }, "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ=="],
"http-errors": ["http-errors@2.0.0", "", { "dependencies": { "depd": "2.0.0", "inherits": "2.0.4", "setprototypeof": "1.2.0", "statuses": "2.0.1", "toidentifier": "1.0.1" } }, "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ=="],
"iconv-lite": ["iconv-lite@0.6.3", "", { "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" } }, "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw=="],
"inherits": ["inherits@2.0.4", "", {}, "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="],
"ipaddr.js": ["ipaddr.js@1.9.1", "", {}, "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g=="],
"is-promise": ["is-promise@4.0.0", "", {}, "sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ=="],
"isexe": ["isexe@2.0.0", "", {}, "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="],
"json-schema-traverse": ["json-schema-traverse@0.4.1", "", {}, "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg=="],
"math-intrinsics": ["math-intrinsics@1.1.0", "", {}, "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g=="],
"media-typer": ["media-typer@1.1.0", "", {}, "sha512-aisnrDP4GNe06UcKFnV5bfMNPBUw4jsLGaWwWfnH3v02GnBuXX2MCVn5RbrWo0j3pczUilYblq7fQ7Nw2t5XKw=="],
"merge-descriptors": ["merge-descriptors@2.0.0", "", {}, "sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g=="],
"mime-db": ["mime-db@1.54.0", "", {}, "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ=="],
"mime-types": ["mime-types@3.0.1", "", { "dependencies": { "mime-db": "^1.54.0" } }, "sha512-xRc4oEhT6eaBpU1XF7AjpOFD+xQmXNB5OVKwp4tqCuBpHLS/ZbBDrc07mYTDqVMg6PfxUjjNp85O6Cd2Z/5HWA=="],
"ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="],
"negotiator": ["negotiator@1.0.0", "", {}, "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg=="],
"object-assign": ["object-assign@4.1.1", "", {}, "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg=="],
"object-inspect": ["object-inspect@1.13.4", "", {}, "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew=="],
"on-finished": ["on-finished@2.4.1", "", { "dependencies": { "ee-first": "1.1.1" } }, "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg=="],
"once": ["once@1.4.0", "", { "dependencies": { "wrappy": "1" } }, "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w=="],
"parseurl": ["parseurl@1.3.3", "", {}, "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ=="],
"path-key": ["path-key@3.1.1", "", {}, "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q=="],
"path-to-regexp": ["path-to-regexp@8.2.0", "", {}, "sha512-TdrF7fW9Rphjq4RjrW0Kp2AW0Ahwu9sRGTkS6bvDi0SCwZlEZYmcfDbEsTz8RVk0EHIS/Vd1bv3JhG+1xZuAyQ=="],
"pkce-challenge": ["pkce-challenge@5.0.0", "", {}, "sha512-ueGLflrrnvwB3xuo/uGob5pd5FN7l0MsLf0Z87o/UQmRtwjvfylfc9MurIxRAWywCYTgrvpXBcqjV4OfCYGCIQ=="],
"proxy-addr": ["proxy-addr@2.0.7", "", { "dependencies": { "forwarded": "0.2.0", "ipaddr.js": "1.9.1" } }, "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg=="],
"punycode": ["punycode@2.3.1", "", {}, "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg=="],
"qs": ["qs@6.14.0", "", { "dependencies": { "side-channel": "^1.1.0" } }, "sha512-YWWTjgABSKcvs/nWBi9PycY/JiPJqOD4JA6o9Sej2AtvSGarXxKC3OQSk4pAarbdQlKAh5D4FCQkJNkW+GAn3w=="],
"range-parser": ["range-parser@1.2.1", "", {}, "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg=="],
"raw-body": ["raw-body@3.0.0", "", { "dependencies": { "bytes": "3.1.2", "http-errors": "2.0.0", "iconv-lite": "0.6.3", "unpipe": "1.0.0" } }, "sha512-RmkhL8CAyCRPXCE28MMH0z2PNWQBNk2Q09ZdxM9IOOXwxwZbN+qbWaatPkdkWIKL2ZVDImrN/pK5HTRz2PcS4g=="],
"router": ["router@2.2.0", "", { "dependencies": { "debug": "^4.4.0", "depd": "^2.0.0", "is-promise": "^4.0.0", "parseurl": "^1.3.3", "path-to-regexp": "^8.0.0" } }, "sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ=="],
"safe-buffer": ["safe-buffer@5.2.1", "", {}, "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ=="],
"safer-buffer": ["safer-buffer@2.1.2", "", {}, "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="],
"send": ["send@1.2.0", "", { "dependencies": { "debug": "^4.3.5", "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "etag": "^1.8.1", "fresh": "^2.0.0", "http-errors": "^2.0.0", "mime-types": "^3.0.1", "ms": "^2.1.3", "on-finished": "^2.4.1", "range-parser": "^1.2.1", "statuses": "^2.0.1" } }, "sha512-uaW0WwXKpL9blXE2o0bRhoL2EGXIrZxQ2ZQ4mgcfoBxdFmQold+qWsD2jLrfZ0trjKL6vOw0j//eAwcALFjKSw=="],
"serve-static": ["serve-static@2.2.0", "", { "dependencies": { "encodeurl": "^2.0.0", "escape-html": "^1.0.3", "parseurl": "^1.3.3", "send": "^1.2.0" } }, "sha512-61g9pCh0Vnh7IutZjtLGGpTA355+OPn2TyDv/6ivP2h/AdAVX9azsoxmg2/M6nZeQZNYBEwIcsne1mJd9oQItQ=="],
"setprototypeof": ["setprototypeof@1.2.0", "", {}, "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw=="],
"shebang-command": ["shebang-command@2.0.0", "", { "dependencies": { "shebang-regex": "^3.0.0" } }, "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA=="],
"shebang-regex": ["shebang-regex@3.0.0", "", {}, "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A=="],
"side-channel": ["side-channel@1.1.0", "", { "dependencies": { "es-errors": "^1.3.0", "object-inspect": "^1.13.3", "side-channel-list": "^1.0.0", "side-channel-map": "^1.0.1", "side-channel-weakmap": "^1.0.2" } }, "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw=="],
"side-channel-list": ["side-channel-list@1.0.0", "", { "dependencies": { "es-errors": "^1.3.0", "object-inspect": "^1.13.3" } }, "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA=="],
"side-channel-map": ["side-channel-map@1.0.1", "", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.5", "object-inspect": "^1.13.3" } }, "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA=="],
"side-channel-weakmap": ["side-channel-weakmap@1.0.2", "", { "dependencies": { "call-bound": "^1.0.2", "es-errors": "^1.3.0", "get-intrinsic": "^1.2.5", "object-inspect": "^1.13.3", "side-channel-map": "^1.0.1" } }, "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A=="],
"statuses": ["statuses@2.0.1", "", {}, "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ=="],
"toidentifier": ["toidentifier@1.0.1", "", {}, "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA=="],
"type-is": ["type-is@2.0.1", "", { "dependencies": { "content-type": "^1.0.5", "media-typer": "^1.1.0", "mime-types": "^3.0.0" } }, "sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw=="],
"unpipe": ["unpipe@1.0.0", "", {}, "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ=="],
"uri-js": ["uri-js@4.4.1", "", { "dependencies": { "punycode": "^2.1.0" } }, "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg=="],
"vary": ["vary@1.1.2", "", {}, "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg=="],
"which": ["which@2.0.2", "", { "dependencies": { "isexe": "^2.0.0" }, "bin": { "node-which": "./bin/node-which" } }, "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA=="],
"wrappy": ["wrappy@1.0.2", "", {}, "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="],
"zod": ["zod@3.25.32", "", {}, "sha512-OSm2xTIRfW8CV5/QKgngwmQW/8aPfGdaQFlrGoErlgg/Epm7cjb6K6VEyExfe65a3VybUOnu381edLb0dfJl0g=="],
"zod-to-json-schema": ["zod-to-json-schema@3.24.5", "", { "peerDependencies": { "zod": "^3.24.1" } }, "sha512-/AuWwMP+YqiPbsJx5D6TfgRTc4kTLjsh5SOcd4bLsfUg2RcEXrFMJl1DGgdHy2aCfsIA/cr/1JM0xcB2GZji8g=="],
}
}


@@ -0,0 +1,7 @@
{
"name": "mcp-test",
"version": "1.0.0",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.11.0"
}
}


@@ -0,0 +1,29 @@
#!/usr/bin/env bun
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
const server = new McpServer({
name: "test-server",
version: "1.0.0",
});
server.tool("test_tool", "A simple test tool", {}, async () => {
return {
content: [
{
type: "text",
text: "Test tool response",
},
],
};
});
async function runServer() {
const transport = new StdioServerTransport();
await server.connect(transport);
process.on("exit", () => {
server.close();
});
}
runServer().catch(console.error);

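For a manual check outside of Claude, the test server can be driven with the SDK's stdio client. This is only a sketch, assuming @modelcontextprotocol/sdk's Client, StdioClientTransport, and callTool are used as below; it is not part of the test suite:

import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

// Spawn the test server over stdio, the same way the MCP test config above does.
const transport = new StdioClientTransport({
  command: "bun",
  args: ["simple-mcp-server.ts"],
});

const client = new Client(
  { name: "demo-client", version: "1.0.0" },
  { capabilities: {} },
);
await client.connect(transport);

const result = await client.callTool({ name: "test_tool", arguments: {} });
console.log(result.content); // -> [{ type: "text", text: "Test tool response" }]

await client.close();
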

@@ -0,0 +1,114 @@
#!/usr/bin/env bun
import { describe, test, expect, beforeEach, afterEach } from "bun:test";
import { preparePrompt, type PreparePromptInput } from "../src/prepare-prompt";
import { unlink, writeFile, readFile, stat } from "fs/promises";
describe("preparePrompt integration tests", () => {
beforeEach(async () => {
try {
await unlink("/tmp/claude-action/prompt.txt");
} catch {
// Ignore if file doesn't exist
}
});
afterEach(async () => {
try {
await unlink("/tmp/claude-action/prompt.txt");
} catch {
// Ignore if file doesn't exist
}
});
test("should create temporary prompt file when only prompt is provided", async () => {
const input: PreparePromptInput = {
prompt: "This is a test prompt",
promptFile: "",
};
const config = await preparePrompt(input);
expect(config.path).toBe("/tmp/claude-action/prompt.txt");
expect(config.type).toBe("inline");
const fileContent = await readFile(config.path, "utf-8");
expect(fileContent).toBe("This is a test prompt");
const fileStat = await stat(config.path);
expect(fileStat.size).toBeGreaterThan(0);
});
test("should use existing file when promptFile is provided", async () => {
const testFilePath = "/tmp/test-prompt.txt";
await writeFile(testFilePath, "Prompt from file");
const input: PreparePromptInput = {
prompt: "",
promptFile: testFilePath,
};
const config = await preparePrompt(input);
expect(config.path).toBe(testFilePath);
expect(config.type).toBe("file");
await unlink(testFilePath);
});
test("should fail when neither prompt nor promptFile is provided", async () => {
const input: PreparePromptInput = {
prompt: "",
promptFile: "",
};
await expect(preparePrompt(input)).rejects.toThrow(
"Neither 'prompt' nor 'prompt_file' was provided",
);
});
test("should fail when promptFile points to non-existent file", async () => {
const input: PreparePromptInput = {
prompt: "",
promptFile: "/tmp/non-existent-file.txt",
};
await expect(preparePrompt(input)).rejects.toThrow(
"Prompt file '/tmp/non-existent-file.txt' does not exist.",
);
});
test("should fail when prompt is empty", async () => {
const emptyFilePath = "/tmp/empty-prompt.txt";
await writeFile(emptyFilePath, "");
const input: PreparePromptInput = {
prompt: "",
promptFile: emptyFilePath,
};
await expect(preparePrompt(input)).rejects.toThrow("Prompt file is empty");
try {
await unlink(emptyFilePath);
} catch {
// Ignore cleanup errors
}
});
test("should fail when both prompt and promptFile are provided", async () => {
const testFilePath = "/tmp/test-prompt.txt";
await writeFile(testFilePath, "Prompt from file");
const input: PreparePromptInput = {
prompt: "This should cause an error",
promptFile: testFilePath,
};
await expect(preparePrompt(input)).rejects.toThrow(
"Both 'prompt' and 'prompt_file' were provided. Please specify only one.",
);
await unlink(testFilePath);
});
});


@@ -0,0 +1,297 @@
#!/usr/bin/env bun
import { describe, test, expect } from "bun:test";
import { prepareRunConfig, type ClaudeOptions } from "../src/run-claude";
describe("prepareRunConfig", () => {
test("should prepare config with basic arguments", () => {
const options: ClaudeOptions = {};
const prepared = prepareRunConfig("/tmp/test-prompt.txt", options);
expect(prepared.claudeArgs.slice(0, 4)).toEqual([
"-p",
"--verbose",
"--output-format",
"stream-json",
]);
});
test("should include promptPath", () => {
const options: ClaudeOptions = {};
const prepared = prepareRunConfig("/tmp/test-prompt.txt", options);
expect(prepared.promptPath).toBe("/tmp/test-prompt.txt");
});
test("should include allowed tools in command arguments", () => {
const options: ClaudeOptions = {
allowedTools: "Bash,Read",
};
const prepared = prepareRunConfig("/tmp/test-prompt.txt", options);
expect(prepared.claudeArgs).toContain("--allowedTools");
expect(prepared.claudeArgs).toContain("Bash,Read");
});
test("should include disallowed tools in command arguments", () => {
const options: ClaudeOptions = {
disallowedTools: "Bash,Read",
};
const prepared = prepareRunConfig("/tmp/test-prompt.txt", options);
expect(prepared.claudeArgs).toContain("--disallowedTools");
expect(prepared.claudeArgs).toContain("Bash,Read");
});
test("should include max turns in command arguments", () => {
const options: ClaudeOptions = {
maxTurns: "5",
};
const prepared = prepareRunConfig("/tmp/test-prompt.txt", options);
expect(prepared.claudeArgs).toContain("--max-turns");
expect(prepared.claudeArgs).toContain("5");
});
test("should include mcp config in command arguments", () => {
const options: ClaudeOptions = {
mcpConfig: "/path/to/mcp-config.json",
};
const prepared = prepareRunConfig("/tmp/test-prompt.txt", options);
expect(prepared.claudeArgs).toContain("--mcp-config");
expect(prepared.claudeArgs).toContain("/path/to/mcp-config.json");
});
test("should include system prompt in command arguments", () => {
const options: ClaudeOptions = {
systemPrompt: "You are a senior backend engineer.",
};
const prepared = prepareRunConfig("/tmp/test-prompt.txt", options);
expect(prepared.claudeArgs).toContain("--system-prompt");
expect(prepared.claudeArgs).toContain("You are a senior backend engineer.");
});
test("should include append system prompt in command arguments", () => {
const options: ClaudeOptions = {
appendSystemPrompt:
"After writing code, be sure to code review yourself.",
};
const prepared = prepareRunConfig("/tmp/test-prompt.txt", options);
expect(prepared.claudeArgs).toContain("--append-system-prompt");
expect(prepared.claudeArgs).toContain(
"After writing code, be sure to code review yourself.",
);
});
test("should include fallback model in command arguments", () => {
const options: ClaudeOptions = {
fallbackModel: "claude-sonnet-4-20250514",
};
const prepared = prepareRunConfig("/tmp/test-prompt.txt", options);
expect(prepared.claudeArgs).toContain("--fallback-model");
expect(prepared.claudeArgs).toContain("claude-sonnet-4-20250514");
});
test("should use provided prompt path", () => {
const options: ClaudeOptions = {};
const prepared = prepareRunConfig("/custom/prompt/path.txt", options);
expect(prepared.promptPath).toBe("/custom/prompt/path.txt");
});
test("should not include optional arguments when not set", () => {
const options: ClaudeOptions = {};
const prepared = prepareRunConfig("/tmp/test-prompt.txt", options);
expect(prepared.claudeArgs).not.toContain("--allowedTools");
expect(prepared.claudeArgs).not.toContain("--disallowedTools");
expect(prepared.claudeArgs).not.toContain("--max-turns");
expect(prepared.claudeArgs).not.toContain("--mcp-config");
expect(prepared.claudeArgs).not.toContain("--system-prompt");
expect(prepared.claudeArgs).not.toContain("--append-system-prompt");
expect(prepared.claudeArgs).not.toContain("--fallback-model");
});
test("should preserve order of claude arguments", () => {
const options: ClaudeOptions = {
allowedTools: "Bash,Read",
maxTurns: "3",
};
const prepared = prepareRunConfig("/tmp/test-prompt.txt", options);
expect(prepared.claudeArgs).toEqual([
"-p",
"--verbose",
"--output-format",
"stream-json",
"--allowedTools",
"Bash,Read",
"--max-turns",
"3",
]);
});
test("should preserve order with all options including fallback model", () => {
const options: ClaudeOptions = {
allowedTools: "Bash,Read",
disallowedTools: "Write",
maxTurns: "3",
mcpConfig: "/path/to/config.json",
systemPrompt: "You are a helpful assistant",
appendSystemPrompt: "Be concise",
fallbackModel: "claude-sonnet-4-20250514",
};
const prepared = prepareRunConfig("/tmp/test-prompt.txt", options);
expect(prepared.claudeArgs).toEqual([
"-p",
"--verbose",
"--output-format",
"stream-json",
"--allowedTools",
"Bash,Read",
"--disallowedTools",
"Write",
"--max-turns",
"3",
"--mcp-config",
"/path/to/config.json",
"--system-prompt",
"You are a helpful assistant",
"--append-system-prompt",
"Be concise",
"--fallback-model",
"claude-sonnet-4-20250514",
]);
});
describe("maxTurns validation", () => {
test("should accept valid maxTurns value", () => {
const options: ClaudeOptions = { maxTurns: "5" };
const prepared = prepareRunConfig("/tmp/test-prompt.txt", options);
expect(prepared.claudeArgs).toContain("--max-turns");
expect(prepared.claudeArgs).toContain("5");
});
test("should throw error for non-numeric maxTurns", () => {
const options: ClaudeOptions = { maxTurns: "abc" };
expect(() => prepareRunConfig("/tmp/test-prompt.txt", options)).toThrow(
"maxTurns must be a positive number, got: abc",
);
});
test("should throw error for negative maxTurns", () => {
const options: ClaudeOptions = { maxTurns: "-1" };
expect(() => prepareRunConfig("/tmp/test-prompt.txt", options)).toThrow(
"maxTurns must be a positive number, got: -1",
);
});
test("should throw error for zero maxTurns", () => {
const options: ClaudeOptions = { maxTurns: "0" };
expect(() => prepareRunConfig("/tmp/test-prompt.txt", options)).toThrow(
"maxTurns must be a positive number, got: 0",
);
});
});
describe("timeoutMinutes validation", () => {
test("should accept valid timeoutMinutes value", () => {
const options: ClaudeOptions = { timeoutMinutes: "15" };
expect(() =>
prepareRunConfig("/tmp/test-prompt.txt", options),
).not.toThrow();
});
test("should throw error for non-numeric timeoutMinutes", () => {
const options: ClaudeOptions = { timeoutMinutes: "abc" };
expect(() => prepareRunConfig("/tmp/test-prompt.txt", options)).toThrow(
"timeoutMinutes must be a positive number, got: abc",
);
});
test("should throw error for negative timeoutMinutes", () => {
const options: ClaudeOptions = { timeoutMinutes: "-5" };
expect(() => prepareRunConfig("/tmp/test-prompt.txt", options)).toThrow(
"timeoutMinutes must be a positive number, got: -5",
);
});
test("should throw error for zero timeoutMinutes", () => {
const options: ClaudeOptions = { timeoutMinutes: "0" };
expect(() => prepareRunConfig("/tmp/test-prompt.txt", options)).toThrow(
"timeoutMinutes must be a positive number, got: 0",
);
});
});
describe("custom environment variables", () => {
test("should parse empty claudeEnv correctly", () => {
const options: ClaudeOptions = { claudeEnv: "" };
const prepared = prepareRunConfig("/tmp/test-prompt.txt", options);
expect(prepared.env).toEqual({});
});
test("should parse single environment variable", () => {
const options: ClaudeOptions = { claudeEnv: "API_KEY: secret123" };
const prepared = prepareRunConfig("/tmp/test-prompt.txt", options);
expect(prepared.env).toEqual({ API_KEY: "secret123" });
});
test("should parse multiple environment variables", () => {
const options: ClaudeOptions = {
claudeEnv: "API_KEY: secret123\nDEBUG: true\nUSER: testuser",
};
const prepared = prepareRunConfig("/tmp/test-prompt.txt", options);
expect(prepared.env).toEqual({
API_KEY: "secret123",
DEBUG: "true",
USER: "testuser",
});
});
test("should handle environment variables with spaces around values", () => {
const options: ClaudeOptions = {
claudeEnv: "API_KEY: secret123 \n DEBUG : true ",
};
const prepared = prepareRunConfig("/tmp/test-prompt.txt", options);
expect(prepared.env).toEqual({
API_KEY: "secret123",
DEBUG: "true",
});
});
test("should skip empty lines and comments", () => {
const options: ClaudeOptions = {
claudeEnv:
"API_KEY: secret123\n\n# This is a comment\nDEBUG: true\n# Another comment",
};
const prepared = prepareRunConfig("/tmp/test-prompt.txt", options);
expect(prepared.env).toEqual({
API_KEY: "secret123",
DEBUG: "true",
});
});
test("should skip lines without colons", () => {
const options: ClaudeOptions = {
claudeEnv: "API_KEY: secret123\nINVALID_LINE\nDEBUG: true",
};
const prepared = prepareRunConfig("/tmp/test-prompt.txt", options);
expect(prepared.env).toEqual({
API_KEY: "secret123",
DEBUG: "true",
});
});
test("should handle undefined claudeEnv", () => {
const options: ClaudeOptions = {};
const prepared = prepareRunConfig("/tmp/test-prompt.txt", options);
expect(prepared.env).toEqual({});
});
});
});


@@ -0,0 +1,150 @@
#!/usr/bin/env bun
import { describe, test, expect, beforeEach, afterEach } from "bun:test";
import { setupClaudeCodeSettings } from "../src/setup-claude-code-settings";
import { tmpdir } from "os";
import { mkdir, writeFile, readFile, rm } from "fs/promises";
import { join } from "path";
const testHomeDir = join(
tmpdir(),
"claude-code-test-home",
Date.now().toString(),
);
const settingsPath = join(testHomeDir, ".claude", "settings.json");
const testSettingsDir = join(testHomeDir, ".claude-test");
const testSettingsPath = join(testSettingsDir, "test-settings.json");
describe("setupClaudeCodeSettings", () => {
beforeEach(async () => {
// Create test home directory and test settings directory
await mkdir(testHomeDir, { recursive: true });
await mkdir(testSettingsDir, { recursive: true });
});
afterEach(async () => {
// Clean up test home directory
await rm(testHomeDir, { recursive: true, force: true });
});
test("should always set enableAllProjectMcpServers to true when no input", async () => {
await setupClaudeCodeSettings(undefined, testHomeDir);
const settingsContent = await readFile(settingsPath, "utf-8");
const settings = JSON.parse(settingsContent);
expect(settings.enableAllProjectMcpServers).toBe(true);
});
test("should merge settings from JSON string input", async () => {
const inputSettings = JSON.stringify({
model: "claude-sonnet-4-20250514",
env: { API_KEY: "test-key" },
});
await setupClaudeCodeSettings(inputSettings, testHomeDir);
const settingsContent = await readFile(settingsPath, "utf-8");
const settings = JSON.parse(settingsContent);
expect(settings.enableAllProjectMcpServers).toBe(true);
expect(settings.model).toBe("claude-sonnet-4-20250514");
expect(settings.env).toEqual({ API_KEY: "test-key" });
});
test("should merge settings from file path input", async () => {
const testSettings = {
hooks: {
PreToolUse: [
{
matcher: "Bash",
hooks: [{ type: "command", command: "echo test" }],
},
],
},
permissions: {
allow: ["Bash", "Read"],
},
};
await writeFile(testSettingsPath, JSON.stringify(testSettings, null, 2));
await setupClaudeCodeSettings(testSettingsPath, testHomeDir);
const settingsContent = await readFile(settingsPath, "utf-8");
const settings = JSON.parse(settingsContent);
expect(settings.enableAllProjectMcpServers).toBe(true);
expect(settings.hooks).toEqual(testSettings.hooks);
expect(settings.permissions).toEqual(testSettings.permissions);
});
test("should override enableAllProjectMcpServers even if false in input", async () => {
const inputSettings = JSON.stringify({
enableAllProjectMcpServers: false,
model: "test-model",
});
await setupClaudeCodeSettings(inputSettings, testHomeDir);
const settingsContent = await readFile(settingsPath, "utf-8");
const settings = JSON.parse(settingsContent);
expect(settings.enableAllProjectMcpServers).toBe(true);
expect(settings.model).toBe("test-model");
});
test("should throw error for invalid JSON string", async () => {
expect(() =>
setupClaudeCodeSettings("{ invalid json", testHomeDir),
).toThrow();
});
test("should throw error for non-existent file path", async () => {
expect(() =>
setupClaudeCodeSettings("/non/existent/file.json", testHomeDir),
).toThrow();
});
test("should handle empty string input", async () => {
await setupClaudeCodeSettings("", testHomeDir);
const settingsContent = await readFile(settingsPath, "utf-8");
const settings = JSON.parse(settingsContent);
expect(settings.enableAllProjectMcpServers).toBe(true);
});
test("should handle whitespace-only input", async () => {
await setupClaudeCodeSettings(" \n\t ", testHomeDir);
const settingsContent = await readFile(settingsPath, "utf-8");
const settings = JSON.parse(settingsContent);
expect(settings.enableAllProjectMcpServers).toBe(true);
});
test("should merge with existing settings", async () => {
// First, create some existing settings
await setupClaudeCodeSettings(
JSON.stringify({ existingKey: "existingValue" }),
testHomeDir,
);
// Then, add new settings
const newSettings = JSON.stringify({
newKey: "newValue",
model: "claude-opus-4-20250514",
});
await setupClaudeCodeSettings(newSettings, testHomeDir);
const settingsContent = await readFile(settingsPath, "utf-8");
const settings = JSON.parse(settingsContent);
expect(settings.enableAllProjectMcpServers).toBe(true);
expect(settings.existingKey).toBe("existingValue");
expect(settings.newKey).toBe("newValue");
expect(settings.model).toBe("claude-opus-4-20250514");
});
});
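For quick reference while reading these assertions, here is a minimal sketch of the behaviour they pin down. It is inferred only from the tests above; the shipped implementation in base-action/src is likely async and may differ in structure, and the name setupClaudeCodeSettingsSketch is purely illustrative.

import { existsSync, mkdirSync, readFileSync, writeFileSync } from "fs";
import { join } from "path";
import { homedir } from "os";

// Hypothetical sketch: accepts a JSON string, a file path, or nothing,
// merges into <home>/.claude/settings.json, and always forces
// enableAllProjectMcpServers to true.
export function setupClaudeCodeSettingsSketch(
  input?: string,
  homeDir: string = homedir(),
): void {
  const settingsDir = join(homeDir, ".claude");
  const settingsPath = join(settingsDir, "settings.json");
  mkdirSync(settingsDir, { recursive: true });

  // Start from existing settings so repeated calls merge rather than overwrite.
  let settings: Record<string, unknown> = {};
  if (existsSync(settingsPath)) {
    settings = JSON.parse(readFileSync(settingsPath, "utf-8"));
  }

  const trimmed = input?.trim();
  if (trimmed) {
    // Input that looks like JSON is parsed inline; anything else is treated as
    // a path to a settings file. Both raise on bad input, as the tests expect.
    const incoming: Record<string, unknown> = trimmed.startsWith("{")
      ? JSON.parse(trimmed)
      : JSON.parse(readFileSync(trimmed, "utf-8"));
    settings = { ...settings, ...incoming };
  }

  // Always forced on, even if the merged input tried to disable it.
  settings.enableAllProjectMcpServers = true;
  writeFileSync(settingsPath, JSON.stringify(settings, null, 2));
}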

View File

@@ -0,0 +1,214 @@
#!/usr/bin/env bun
import { describe, test, expect, beforeEach, afterEach } from "bun:test";
import { validateEnvironmentVariables } from "../src/validate-env";
describe("validateEnvironmentVariables", () => {
let originalEnv: NodeJS.ProcessEnv;
beforeEach(() => {
// Save the original environment
originalEnv = { ...process.env };
// Clear relevant environment variables
delete process.env.ANTHROPIC_API_KEY;
delete process.env.CLAUDE_CODE_USE_BEDROCK;
delete process.env.CLAUDE_CODE_USE_VERTEX;
delete process.env.AWS_REGION;
delete process.env.AWS_ACCESS_KEY_ID;
delete process.env.AWS_SECRET_ACCESS_KEY;
delete process.env.AWS_SESSION_TOKEN;
delete process.env.ANTHROPIC_BEDROCK_BASE_URL;
delete process.env.ANTHROPIC_VERTEX_PROJECT_ID;
delete process.env.CLOUD_ML_REGION;
delete process.env.GOOGLE_APPLICATION_CREDENTIALS;
delete process.env.ANTHROPIC_VERTEX_BASE_URL;
});
afterEach(() => {
// Restore the original environment
process.env = originalEnv;
});
describe("Direct Anthropic API", () => {
test("should pass when ANTHROPIC_API_KEY is provided", () => {
process.env.ANTHROPIC_API_KEY = "test-api-key";
expect(() => validateEnvironmentVariables()).not.toThrow();
});
test("should fail when ANTHROPIC_API_KEY is missing", () => {
expect(() => validateEnvironmentVariables()).toThrow(
"Either ANTHROPIC_API_KEY or CLAUDE_CODE_OAUTH_TOKEN is required when using direct Anthropic API.",
);
});
});
describe("AWS Bedrock", () => {
test("should pass when all required Bedrock variables are provided", () => {
process.env.CLAUDE_CODE_USE_BEDROCK = "1";
process.env.AWS_REGION = "us-east-1";
process.env.AWS_ACCESS_KEY_ID = "test-access-key";
process.env.AWS_SECRET_ACCESS_KEY = "test-secret-key";
expect(() => validateEnvironmentVariables()).not.toThrow();
});
test("should pass with optional Bedrock variables", () => {
process.env.CLAUDE_CODE_USE_BEDROCK = "1";
process.env.AWS_REGION = "us-east-1";
process.env.AWS_ACCESS_KEY_ID = "test-access-key";
process.env.AWS_SECRET_ACCESS_KEY = "test-secret-key";
process.env.AWS_SESSION_TOKEN = "test-session-token";
process.env.ANTHROPIC_BEDROCK_BASE_URL = "https://test.url";
expect(() => validateEnvironmentVariables()).not.toThrow();
});
test("should construct Bedrock base URL from AWS_REGION when ANTHROPIC_BEDROCK_BASE_URL is not provided", () => {
// This test documents our action.yml change, which constructs:
// ANTHROPIC_BEDROCK_BASE_URL: ${{ env.ANTHROPIC_BEDROCK_BASE_URL || (env.AWS_REGION && format('https://bedrock-runtime.{0}.amazonaws.com', env.AWS_REGION)) }}
process.env.CLAUDE_CODE_USE_BEDROCK = "1";
process.env.AWS_REGION = "us-west-2";
process.env.AWS_ACCESS_KEY_ID = "test-access-key";
process.env.AWS_SECRET_ACCESS_KEY = "test-secret-key";
// ANTHROPIC_BEDROCK_BASE_URL is intentionally not set
// The actual URL construction happens in the composite action in action.yml
// This test is a placeholder to document the behavior
expect(() => validateEnvironmentVariables()).not.toThrow();
// In the actual action, ANTHROPIC_BEDROCK_BASE_URL would be:
// https://bedrock-runtime.us-west-2.amazonaws.com
});
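// For readers following along, a minimal TypeScript mirror of that YAML
// fallback (illustrative only; the real construction stays in action.yml):
//
//   const bedrockBaseUrl =
//     process.env.ANTHROPIC_BEDROCK_BASE_URL ??
//     (process.env.AWS_REGION
//       ? `https://bedrock-runtime.${process.env.AWS_REGION}.amazonaws.com`
//       : undefined);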
test("should fail when AWS_REGION is missing", () => {
process.env.CLAUDE_CODE_USE_BEDROCK = "1";
process.env.AWS_ACCESS_KEY_ID = "test-access-key";
process.env.AWS_SECRET_ACCESS_KEY = "test-secret-key";
expect(() => validateEnvironmentVariables()).toThrow(
"AWS_REGION is required when using AWS Bedrock.",
);
});
test("should fail when AWS_ACCESS_KEY_ID is missing", () => {
process.env.CLAUDE_CODE_USE_BEDROCK = "1";
process.env.AWS_REGION = "us-east-1";
process.env.AWS_SECRET_ACCESS_KEY = "test-secret-key";
expect(() => validateEnvironmentVariables()).toThrow(
"AWS_ACCESS_KEY_ID is required when using AWS Bedrock.",
);
});
test("should fail when AWS_SECRET_ACCESS_KEY is missing", () => {
process.env.CLAUDE_CODE_USE_BEDROCK = "1";
process.env.AWS_REGION = "us-east-1";
process.env.AWS_ACCESS_KEY_ID = "test-access-key";
expect(() => validateEnvironmentVariables()).toThrow(
"AWS_SECRET_ACCESS_KEY is required when using AWS Bedrock.",
);
});
test("should report all missing Bedrock variables", () => {
process.env.CLAUDE_CODE_USE_BEDROCK = "1";
expect(() => validateEnvironmentVariables()).toThrow(
/AWS_REGION is required when using AWS Bedrock.*AWS_ACCESS_KEY_ID is required when using AWS Bedrock.*AWS_SECRET_ACCESS_KEY is required when using AWS Bedrock/s,
);
});
});
describe("Google Vertex AI", () => {
test("should pass when all required Vertex variables are provided", () => {
process.env.CLAUDE_CODE_USE_VERTEX = "1";
process.env.ANTHROPIC_VERTEX_PROJECT_ID = "test-project";
process.env.CLOUD_ML_REGION = "us-central1";
expect(() => validateEnvironmentVariables()).not.toThrow();
});
test("should pass with optional Vertex variables", () => {
process.env.CLAUDE_CODE_USE_VERTEX = "1";
process.env.ANTHROPIC_VERTEX_PROJECT_ID = "test-project";
process.env.CLOUD_ML_REGION = "us-central1";
process.env.GOOGLE_APPLICATION_CREDENTIALS = "/path/to/creds.json";
process.env.ANTHROPIC_VERTEX_BASE_URL = "https://test.url";
expect(() => validateEnvironmentVariables()).not.toThrow();
});
test("should fail when ANTHROPIC_VERTEX_PROJECT_ID is missing", () => {
process.env.CLAUDE_CODE_USE_VERTEX = "1";
process.env.CLOUD_ML_REGION = "us-central1";
expect(() => validateEnvironmentVariables()).toThrow(
"ANTHROPIC_VERTEX_PROJECT_ID is required when using Google Vertex AI.",
);
});
test("should fail when CLOUD_ML_REGION is missing", () => {
process.env.CLAUDE_CODE_USE_VERTEX = "1";
process.env.ANTHROPIC_VERTEX_PROJECT_ID = "test-project";
expect(() => validateEnvironmentVariables()).toThrow(
"CLOUD_ML_REGION is required when using Google Vertex AI.",
);
});
test("should report all missing Vertex variables", () => {
process.env.CLAUDE_CODE_USE_VERTEX = "1";
expect(() => validateEnvironmentVariables()).toThrow(
/ANTHROPIC_VERTEX_PROJECT_ID is required when using Google Vertex AI.*CLOUD_ML_REGION is required when using Google Vertex AI/s,
);
});
});
describe("Multiple providers", () => {
test("should fail when both Bedrock and Vertex are enabled", () => {
process.env.CLAUDE_CODE_USE_BEDROCK = "1";
process.env.CLAUDE_CODE_USE_VERTEX = "1";
// Provide all required vars to isolate the mutual exclusion error
process.env.AWS_REGION = "us-east-1";
process.env.AWS_ACCESS_KEY_ID = "test-access-key";
process.env.AWS_SECRET_ACCESS_KEY = "test-secret-key";
process.env.ANTHROPIC_VERTEX_PROJECT_ID = "test-project";
process.env.CLOUD_ML_REGION = "us-central1";
expect(() => validateEnvironmentVariables()).toThrow(
"Cannot use both Bedrock and Vertex AI simultaneously. Please set only one provider.",
);
});
});
describe("Error message formatting", () => {
test("should format error message properly with multiple errors", () => {
process.env.CLAUDE_CODE_USE_BEDROCK = "1";
// Missing all required Bedrock vars
let error: Error | undefined;
try {
validateEnvironmentVariables();
} catch (e) {
error = e as Error;
}
expect(error).toBeDefined();
expect(error!.message).toMatch(
/^Environment variable validation failed:/,
);
expect(error!.message).toContain(
" - AWS_REGION is required when using AWS Bedrock.",
);
expect(error!.message).toContain(
" - AWS_ACCESS_KEY_ID is required when using AWS Bedrock.",
);
expect(error!.message).toContain(
" - AWS_SECRET_ACCESS_KEY is required when using AWS Bedrock.",
);
});
});
});
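The validation rules these tests encode can be summarized in a short sketch. The name validateEnvironmentVariablesSketch is illustrative; the real implementation in base-action/src/validate-env.ts may be organized differently, but the messages below match the assertions above.

// Hedged sketch of the provider checks the tests describe.
export function validateEnvironmentVariablesSketch(): void {
  const errors: string[] = [];
  const useBedrock = process.env.CLAUDE_CODE_USE_BEDROCK === "1";
  const useVertex = process.env.CLAUDE_CODE_USE_VERTEX === "1";

  if (useBedrock && useVertex) {
    errors.push(
      "Cannot use both Bedrock and Vertex AI simultaneously. Please set only one provider.",
    );
  } else if (useBedrock) {
    // Bedrock needs a region plus static credentials; session token and base URL are optional.
    for (const name of ["AWS_REGION", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"]) {
      if (!process.env[name]) {
        errors.push(`${name} is required when using AWS Bedrock.`);
      }
    }
  } else if (useVertex) {
    // Vertex needs a project and region; credentials file and base URL are optional.
    for (const name of ["ANTHROPIC_VERTEX_PROJECT_ID", "CLOUD_ML_REGION"]) {
      if (!process.env[name]) {
        errors.push(`${name} is required when using Google Vertex AI.`);
      }
    }
  } else if (
    !process.env.ANTHROPIC_API_KEY &&
    !process.env.CLAUDE_CODE_OAUTH_TOKEN
  ) {
    errors.push(
      "Either ANTHROPIC_API_KEY or CLAUDE_CODE_OAUTH_TOKEN is required when using direct Anthropic API.",
    );
  }

  if (errors.length > 0) {
    // Collect everything so a single run reports all missing variables at once.
    throw new Error(
      "Environment variable validation failed:\n" +
        errors.map((e) => ` - ${e}`).join("\n"),
    );
  }
}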

30
base-action/tsconfig.json Normal file
View File

@@ -0,0 +1,30 @@
{
"compilerOptions": {
// Environment setup & latest features
"lib": ["ESNext"],
"target": "ESNext",
"module": "ESNext",
"moduleDetection": "force",
"jsx": "react-jsx",
"allowJs": true,
// Bundler mode (Bun-specific)
"moduleResolution": "bundler",
"allowImportingTsExtensions": true,
"verbatimModuleSyntax": true,
"noEmit": true,
// Best practices
"strict": true,
"skipLibCheck": true,
"noFallthroughCasesInSwitch": true,
"noUncheckedIndexedAccess": true,
// Some stricter flags
"noUnusedLocals": true,
"noUnusedParameters": true,
"noPropertyAccessFromIndexSignature": false
},
"include": ["src/**/*", "test/**/*"],
"exclude": ["node_modules", "test/mcp-test"]
}
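One consequence of "verbatimModuleSyntax" above that contributors hit most often: imports that exist only at the type level must be written as type-only imports. A tiny illustration follows (SpawnOptions is just a convenient example of a type-only export; the exact compiler message varies by TypeScript version):

import { spawn } from "node:child_process";
// Without the `type` keyword, the compiler rejects this import of an interface
// when verbatimModuleSyntax is enabled.
import type { SpawnOptions } from "node:child_process";

const opts: SpawnOptions = { stdio: "inherit" };
spawn("echo", ["hello"], opts);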

View File

@@ -25,6 +25,6 @@
"noUnusedParameters": true,
"noPropertyAccessFromIndexSignature": false
},
"include": ["src/**/*", "test/**/*"],
"include": ["src/**/*", "base-action/**/*", "test/**/*"],
"exclude": ["node_modules"]
}