mirror of https://github.com/anthropics/claude-code-action.git
synced 2026-01-23 06:54:13 +08:00

Compare commits: fix/instal...claude/sla

23 Commits

| Author | SHA1 | Date |
| --- | --- | --- |
|  | d544d7303b |  |
|  | 5c0f1e2273 |  |
|  | 5da7ba548c |  |
|  | 964b8355fb |  |
|  | c83d67a9b9 |  |
|  | c9ec2b02b4 |  |
|  | 63ea7e3174 |  |
|  | 653f9cd7a3 |  |
|  | b17b541bbc |  |
|  | 7e4bf87b1c |  |
|  | 154d0de144 |  |
|  | 3ba9f7c8c2 |  |
|  | e5b07416ea |  |
|  | b89827f8d1 |  |
|  | 7145c3e051 |  |
|  | db4548b597 |  |
|  | 0d19335299 |  |
|  | 95be46676d |  |
|  | f98c1a5aa8 |  |
|  | b0c32b65f9 |  |
|  | d7b6d50442 |  |
|  | f375cabfab |  |
|  | 9acae263e7 |  |
@@ -17,7 +17,6 @@ TASK OVERVIEW:

1. First, fetch the list of labels available in this repository by running: `gh label list`. Run exactly this command with nothing else.

2. Next, use gh commands to get context about the issue:

   - Use `gh issue view ${{ github.event.issue.number }}` to retrieve the current issue's details
   - Use `gh search issues` to find similar issues that might provide context for proper categorization
   - You have access to these Bash commands:

@@ -27,7 +26,6 @@ TASK OVERVIEW:

   - Bash(gh search:\*) - to search for similar issues

3. Analyze the issue content, considering:

   - The issue title and description
   - The type of issue (bug report, feature request, question, etc.)
   - Technical areas mentioned

@@ -36,7 +34,6 @@ TASK OVERVIEW:

   - Components affected

4. Select appropriate labels from the available labels list provided above:

   - Choose labels that accurately reflect the issue's nature
   - Be specific but comprehensive
   - IMPORTANT: Add a priority label (P1, P2, or P3) based on the label descriptions from gh label list
.github/workflows/bump-claude-code-version.yml (vendored, 132 changed lines)

@@ -1,132 +0,0 @@
name: Bump Claude Code Version

on:
  repository_dispatch:
    types: [bump_claude_code_version]
  workflow_dispatch:
    inputs:
      version:
        description: "Claude Code version to bump to"
        required: true
        type: string

permissions:
  contents: write

jobs:
  bump-version:
    name: Bump Claude Code Version
    runs-on: ubuntu-latest
    environment: release
    timeout-minutes: 5
    steps:
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4
        with:
          token: ${{ secrets.RELEASE_PAT }}
          fetch-depth: 0

      - name: Get version from event payload
        id: get_version
        run: |
          # Get version from either repository_dispatch or workflow_dispatch
          if [ "${{ github.event_name }}" = "repository_dispatch" ]; then
            NEW_VERSION="${CLIENT_PAYLOAD_VERSION}"
          else
            NEW_VERSION="${INPUT_VERSION}"
          fi

          # Sanitize the version to avoid issues enabled by problematic characters
          NEW_VERSION=$(echo "$NEW_VERSION" | tr -d '`;$(){}[]|&<>' | tr -s ' ' '-')

          if [ -z "$NEW_VERSION" ]; then
            echo "Error: version not provided"
            exit 1
          fi
          echo "NEW_VERSION=$NEW_VERSION" >> $GITHUB_ENV
          echo "new_version=$NEW_VERSION" >> $GITHUB_OUTPUT
        env:
          INPUT_VERSION: ${{ inputs.version }}
          CLIENT_PAYLOAD_VERSION: ${{ github.event.client_payload.version }}

      - name: Create branch and update base-action/action.yml
        run: |
          # Variables
          TIMESTAMP=$(date +'%Y%m%d-%H%M%S')
          BRANCH_NAME="bump-claude-code-${{ env.NEW_VERSION }}-$TIMESTAMP"

          echo "BRANCH_NAME=$BRANCH_NAME" >> $GITHUB_ENV

          # Get the default branch
          DEFAULT_BRANCH=$(gh api repos/${GITHUB_REPOSITORY} --jq '.default_branch')
          echo "DEFAULT_BRANCH=$DEFAULT_BRANCH" >> $GITHUB_ENV

          # Get the latest commit SHA from the default branch
          BASE_SHA=$(gh api repos/${GITHUB_REPOSITORY}/git/refs/heads/$DEFAULT_BRANCH --jq '.object.sha')

          # Create a new branch
          gh api \
            --method POST \
            repos/${GITHUB_REPOSITORY}/git/refs \
            -f ref="refs/heads/$BRANCH_NAME" \
            -f sha="$BASE_SHA"

          # Get the current base-action/action.yml content
          ACTION_CONTENT=$(gh api repos/${GITHUB_REPOSITORY}/contents/base-action/action.yml?ref=$DEFAULT_BRANCH --jq '.content' | base64 -d)

          # Update the Claude Code version in the npm install command
          UPDATED_CONTENT=$(echo "$ACTION_CONTENT" | sed -E "s/(npm install -g @anthropic-ai\/claude-code@)[0-9]+\.[0-9]+\.[0-9]+/\1${{ env.NEW_VERSION }}/")

          # Verify the change would be made
          if ! echo "$UPDATED_CONTENT" | grep -q "@anthropic-ai/claude-code@${{ env.NEW_VERSION }}"; then
            echo "Error: Failed to update Claude Code version in content"
            exit 1
          fi

          # Get the current SHA of base-action/action.yml for the update API call
          FILE_SHA=$(gh api repos/${GITHUB_REPOSITORY}/contents/base-action/action.yml?ref=$DEFAULT_BRANCH --jq '.sha')

          # Create the updated base-action/action.yml content in base64
          echo "$UPDATED_CONTENT" | base64 > action.yml.b64

          # Commit the updated base-action/action.yml via GitHub API
          gh api \
            --method PUT \
            repos/${GITHUB_REPOSITORY}/contents/base-action/action.yml \
            -f message="chore: bump Claude Code version to ${{ env.NEW_VERSION }}" \
            -F content=@action.yml.b64 \
            -f sha="$FILE_SHA" \
            -f branch="$BRANCH_NAME"

          echo "Successfully created branch and updated Claude Code version to ${{ env.NEW_VERSION }}"
        env:
          GH_TOKEN: ${{ secrets.RELEASE_PAT }}
          GITHUB_REPOSITORY: ${{ github.repository }}

      - name: Create Pull Request
        run: |
          # Determine trigger type for PR body
          if [ "${{ github.event_name }}" = "repository_dispatch" ]; then
            TRIGGER_INFO="repository dispatch event"
          else
            TRIGGER_INFO="manual workflow dispatch by @${GITHUB_ACTOR}"
          fi

          # Create PR body with proper YAML escape
          printf -v PR_BODY "## Bump Claude Code to ${{ env.NEW_VERSION }}\n\nThis PR updates the Claude Code version in base-action/action.yml to ${{ env.NEW_VERSION }}.\n\n### Changes\n- Updated Claude Code version from current to \`${{ env.NEW_VERSION }}\`\n\n### Triggered by\n- $TRIGGER_INFO\n\n🤖 This PR was automatically created by the bump-claude-code-version workflow."

          echo "Creating PR with gh pr create command"
          PR_URL=$(gh pr create \
            --repo "${GITHUB_REPOSITORY}" \
            --title "chore: bump Claude Code version to ${{ env.NEW_VERSION }}" \
            --body "$PR_BODY" \
            --base "${DEFAULT_BRANCH}" \
            --head "${BRANCH_NAME}")

          echo "PR created successfully: $PR_URL"
        env:
          GH_TOKEN: ${{ secrets.RELEASE_PAT }}
          GITHUB_REPOSITORY: ${{ github.repository }}
          GITHUB_ACTOR: ${{ github.actor }}
          DEFAULT_BRANCH: ${{ env.DEFAULT_BRANCH }}
          BRANCH_NAME: ${{ env.BRANCH_NAME }}
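The deleted workflow's version-bump step boils down to two operations: strip shell metacharacters from the incoming version string, then rewrite the pinned `npm install -g @anthropic-ai/claude-code@X.Y.Z` line and verify the substitution took effect. Below is a minimal TypeScript sketch of that same logic for reference outside a workflow; `sanitizeVersion` and `bumpClaudeCodeVersion` are illustrative names, not functions from this repository.

```ts
// Hypothetical helpers mirroring the workflow's bash steps above.
function sanitizeVersion(raw: string): string {
  // Drop shell metacharacters, collapse spaces to dashes (like `tr -d` / `tr -s`).
  const cleaned = raw.replace(/[`;$(){}\[\]|&<>]/g, "").replace(/ +/g, "-");
  if (!cleaned) {
    throw new Error("Error: version not provided");
  }
  return cleaned;
}

function bumpClaudeCodeVersion(actionYml: string, newVersion: string): string {
  // Same substitution the workflow performs with sed -E.
  const updated = actionYml.replace(
    /(npm install -g @anthropic-ai\/claude-code@)\d+\.\d+\.\d+/,
    `$1${newVersion}`,
  );
  // Mirror the workflow's verification: fail loudly if nothing changed.
  if (!updated.includes(`@anthropic-ai/claude-code@${newVersion}`)) {
    throw new Error("Failed to update Claude Code version in content");
  }
  return updated;
}

// Example: bumpClaudeCodeVersion(actionYmlContent, sanitizeVersion("2.0.76"))
```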
.github/workflows/claude.yml (vendored, 2 changed lines)

@@ -36,4 +36,4 @@ jobs:
          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
          claude_args: |
            --allowedTools "Bash(bun install),Bash(bun test:*),Bash(bun run format),Bash(bun typecheck)"
-           --model "claude-opus-4-1-20250805"
+           --model "claude-opus-4-5"
action.yml (23 changed lines)

@@ -81,6 +81,10 @@ inputs:
    description: "Enable commit signing using GitHub's commit signature verification. When false, Claude uses standard git commands"
    required: false
    default: "false"
+  ssh_signing_key:
+    description: "SSH private key for signing commits. When provided, git will be configured to use SSH signing. Takes precedence over use_commit_signing."
+    required: false
+    default: ""
  bot_id:
    description: "GitHub user ID to use for git operations (defaults to Claude's bot ID)"
    required: false

@@ -93,6 +97,10 @@ inputs:
    description: "Force tag mode with tracking comments for pull_request and issue events. Only applicable to pull_request (opened, synchronize, ready_for_review, reopened) and issue (opened, edited, labeled, assigned) events."
    required: false
    default: "false"
+  include_fix_links:
+    description: "Include 'Fix this' links in PR code review feedback that open Claude Code with context to fix the identified issue"
+    required: false
+    default: "true"
  path_to_claude_code_executable:
    description: "Optional path to a custom Claude Code executable. If provided, skips automatic installation and uses this executable instead. WARNING: Using an older version may cause problems if the action begins taking advantage of new Claude Code features. This input is typically not needed unless you're debugging something specific or have unique needs in your environment."
    required: false

@@ -177,9 +185,11 @@ runs:
        USE_STICKY_COMMENT: ${{ inputs.use_sticky_comment }}
        DEFAULT_WORKFLOW_TOKEN: ${{ github.token }}
        USE_COMMIT_SIGNING: ${{ inputs.use_commit_signing }}
+       SSH_SIGNING_KEY: ${{ inputs.ssh_signing_key }}
        BOT_ID: ${{ inputs.bot_id }}
        BOT_NAME: ${{ inputs.bot_name }}
        TRACK_PROGRESS: ${{ inputs.track_progress }}
+       INCLUDE_FIX_LINKS: ${{ inputs.include_fix_links }}
        ADDITIONAL_PERMISSIONS: ${{ inputs.additional_permissions }}
        CLAUDE_ARGS: ${{ inputs.claude_args }}
        ALL_INPUTS: ${{ toJson(inputs) }}

@@ -198,12 +208,13 @@ runs:

        # Install Claude Code if no custom executable is provided
        if [ -z "$PATH_TO_CLAUDE_CODE_EXECUTABLE" ]; then
-         CLAUDE_CODE_VERSION="2.0.69"
+         CLAUDE_CODE_VERSION="2.0.76"
          echo "Installing Claude Code v${CLAUDE_CODE_VERSION}..."
          for attempt in 1 2 3; do
            echo "Installation attempt $attempt..."
            if command -v timeout &> /dev/null; then
-             timeout 120 bash -c "curl -fsSL https://claude.ai/install.sh | bash -s -- $CLAUDE_CODE_VERSION" && break
+             # Use --foreground to kill entire process group on timeout, --kill-after to send SIGKILL if SIGTERM fails
+             timeout --foreground --kill-after=10 120 bash -c "curl -fsSL https://claude.ai/install.sh | bash -s -- $CLAUDE_CODE_VERSION" && break
            else
              curl -fsSL https://claude.ai/install.sh | bash -s -- "$CLAUDE_CODE_VERSION" && break
            fi

@@ -247,6 +258,7 @@ runs:

        # Model configuration
        GITHUB_TOKEN: ${{ steps.prepare.outputs.GITHUB_TOKEN }}
+       GH_TOKEN: ${{ steps.prepare.outputs.GITHUB_TOKEN }}
        NODE_VERSION: ${{ env.NODE_VERSION }}
        DETAILED_PERMISSION_MESSAGES: "1"

@@ -296,6 +308,7 @@ runs:
        CLAUDE_COMMENT_ID: ${{ steps.prepare.outputs.claude_comment_id }}
        GITHUB_RUN_ID: ${{ github.run_id }}
        GITHUB_TOKEN: ${{ steps.prepare.outputs.GITHUB_TOKEN }}
+       GH_TOKEN: ${{ steps.prepare.outputs.GITHUB_TOKEN }}
        GITHUB_EVENT_NAME: ${{ github.event_name }}
        TRIGGER_COMMENT_ID: ${{ github.event.comment.id }}
        CLAUDE_BRANCH: ${{ steps.prepare.outputs.CLAUDE_BRANCH }}

@@ -327,6 +340,12 @@ runs:
          echo '```' >> $GITHUB_STEP_SUMMARY
        fi

+     - name: Cleanup SSH signing key
+       if: always() && inputs.ssh_signing_key != ''
+       shell: bash
+       run: |
+         bun run ${GITHUB_ACTION_PATH}/src/entrypoints/cleanup-ssh-signing.ts
+
      - name: Revoke app token
        if: always() && inputs.github_token == '' && steps.prepare.outputs.skipped_due_to_workflow_validation_mismatch != 'true'
        shell: bash
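The install step above retries the installer up to three times and bounds each attempt to 120 seconds, force-killing stragglers when `timeout` is available. A rough TypeScript equivalent using Node's `spawnSync` timeout is sketched below; it approximates rather than reproduces the `--foreground --kill-after` semantics, and `installClaudeCode` is an illustrative name, not part of the action.

```ts
// Hedged sketch of the retry-with-timeout install loop shown above.
import { spawnSync } from "node:child_process";

function installClaudeCode(version: string, attempts = 3): void {
  for (let attempt = 1; attempt <= attempts; attempt++) {
    console.log(`Installation attempt ${attempt}...`);
    const result = spawnSync(
      "bash",
      ["-c", `curl -fsSL https://claude.ai/install.sh | bash -s -- ${version}`],
      { stdio: "inherit", timeout: 120_000, killSignal: "SIGKILL" }, // 120s cap per attempt
    );
    if (result.status === 0) return; // success: stop retrying
  }
  throw new Error(`Failed to install Claude Code v${version} after ${attempts} attempts`);
}
```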
@@ -57,7 +57,6 @@ Thank you for your interest in contributing to Claude Code Base Action! This doc
```

This script:

- Installs `act` if not present (requires Homebrew on macOS)
- Runs the GitHub Action workflow locally using Docker
- Requires your `ANTHROPIC_API_KEY` to be set
@@ -85,26 +85,26 @@ Add the following to your workflow file:
|
||||
|
||||
## Inputs
|
||||
|
||||
| Input | Description | Required | Default |
|
||||
| ------------------------- | ----------------------------------------------------------------------------------------------------------------------- | -------- | ---------------------------- |
|
||||
| `prompt` | The prompt to send to Claude Code | No\* | '' |
|
||||
| `prompt_file` | Path to a file containing the prompt to send to Claude Code | No\* | '' |
|
||||
| `allowed_tools` | Comma-separated list of allowed tools for Claude Code to use | No | '' |
|
||||
| `disallowed_tools` | Comma-separated list of disallowed tools that Claude Code cannot use | No | '' |
|
||||
| `max_turns` | Maximum number of conversation turns (default: no limit) | No | '' |
|
||||
| `mcp_config` | Path to the MCP configuration JSON file, or MCP configuration JSON string | No | '' |
|
||||
| `settings` | Path to Claude Code settings JSON file, or settings JSON string | No | '' |
|
||||
| `system_prompt` | Override system prompt | No | '' |
|
||||
| `append_system_prompt` | Append to system prompt | No | '' |
|
||||
| `claude_env` | Custom environment variables to pass to Claude Code execution (YAML multiline format) | No | '' |
|
||||
| `model` | Model to use (provider-specific format required for Bedrock/Vertex) | No | 'claude-4-0-sonnet-20250219' |
|
||||
| `anthropic_model` | DEPRECATED: Use 'model' instead | No | 'claude-4-0-sonnet-20250219' |
|
||||
| `fallback_model` | Enable automatic fallback to specified model when default model is overloaded | No | '' |
|
||||
| `anthropic_api_key` | Anthropic API key (required for direct Anthropic API) | No | '' |
|
||||
| `claude_code_oauth_token` | Claude Code OAuth token (alternative to anthropic_api_key) | No | '' |
|
||||
| `use_bedrock` | Use Amazon Bedrock with OIDC authentication instead of direct Anthropic API | No | 'false' |
|
||||
| `use_vertex` | Use Google Vertex AI with OIDC authentication instead of direct Anthropic API | No | 'false' |
|
||||
| `use_node_cache` | Whether to use Node.js dependency caching (set to true only for Node.js projects with lock files) | No | 'false' |
|
||||
| Input | Description | Required | Default |
|
||||
| ------------------------- | ---------------------------------------------------------------------------------------------------------------------- | -------- | ---------------------------- |
|
||||
| `prompt` | The prompt to send to Claude Code | No\* | '' |
|
||||
| `prompt_file` | Path to a file containing the prompt to send to Claude Code | No\* | '' |
|
||||
| `allowed_tools` | Comma-separated list of allowed tools for Claude Code to use | No | '' |
|
||||
| `disallowed_tools` | Comma-separated list of disallowed tools that Claude Code cannot use | No | '' |
|
||||
| `max_turns` | Maximum number of conversation turns (default: no limit) | No | '' |
|
||||
| `mcp_config` | Path to the MCP configuration JSON file, or MCP configuration JSON string | No | '' |
|
||||
| `settings` | Path to Claude Code settings JSON file, or settings JSON string | No | '' |
|
||||
| `system_prompt` | Override system prompt | No | '' |
|
||||
| `append_system_prompt` | Append to system prompt | No | '' |
|
||||
| `claude_env` | Custom environment variables to pass to Claude Code execution (YAML multiline format) | No | '' |
|
||||
| `model` | Model to use (provider-specific format required for Bedrock/Vertex) | No | 'claude-4-0-sonnet-20250219' |
|
||||
| `anthropic_model` | DEPRECATED: Use 'model' instead | No | 'claude-4-0-sonnet-20250219' |
|
||||
| `fallback_model` | Enable automatic fallback to specified model when default model is overloaded | No | '' |
|
||||
| `anthropic_api_key` | Anthropic API key (required for direct Anthropic API) | No | '' |
|
||||
| `claude_code_oauth_token` | Claude Code OAuth token (alternative to anthropic_api_key) | No | '' |
|
||||
| `use_bedrock` | Use Amazon Bedrock with OIDC authentication instead of direct Anthropic API | No | 'false' |
|
||||
| `use_vertex` | Use Google Vertex AI with OIDC authentication instead of direct Anthropic API | No | 'false' |
|
||||
| `use_node_cache` | Whether to use Node.js dependency caching (set to true only for Node.js projects with lock files) | No | 'false' |
|
||||
| `show_full_output` | Show full JSON output (⚠️ May expose secrets - see [security docs](../docs/security.md#️-full-output-security-warning)) | No | 'false'\*\* |
|
||||
|
||||
\*Either `prompt` or `prompt_file` must be provided, but not both.
|
||||
@@ -490,7 +490,6 @@ This example shows how to use OIDC authentication with GCP Vertex AI:
To securely use your Anthropic API key:

1. Add your API key as a repository secret:

   - Go to your repository's Settings
   - Navigate to "Secrets and variables" → "Actions"
   - Click "New repository secret"
@@ -124,12 +124,13 @@ runs:
        PATH_TO_CLAUDE_CODE_EXECUTABLE: ${{ inputs.path_to_claude_code_executable }}
      run: |
        if [ -z "$PATH_TO_CLAUDE_CODE_EXECUTABLE" ]; then
-         CLAUDE_CODE_VERSION="2.0.69"
+         CLAUDE_CODE_VERSION="2.0.76"
          echo "Installing Claude Code v${CLAUDE_CODE_VERSION}..."
          for attempt in 1 2 3; do
            echo "Installation attempt $attempt..."
            if command -v timeout &> /dev/null; then
-             timeout 120 bash -c "curl -fsSL https://claude.ai/install.sh | bash -s -- $CLAUDE_CODE_VERSION" && break
+             # Use --foreground to kill entire process group on timeout, --kill-after to send SIGKILL if SIGTERM fails
+             timeout --foreground --kill-after=10 120 bash -c "curl -fsSL https://claude.ai/install.sh | bash -s -- $CLAUDE_CODE_VERSION" && break
            else
              curl -fsSL https://claude.ai/install.sh | bash -s -- "$CLAUDE_CODE_VERSION" && break
            fi
@@ -1,11 +1,12 @@
|
||||
{
|
||||
"lockfileVersion": 1,
|
||||
"configVersion": 0,
|
||||
"workspaces": {
|
||||
"": {
|
||||
"name": "@anthropic-ai/claude-code-base-action",
|
||||
"dependencies": {
|
||||
"@actions/core": "^1.10.1",
|
||||
"@anthropic-ai/claude-agent-sdk": "^0.1.52",
|
||||
"@anthropic-ai/claude-agent-sdk": "^0.1.76",
|
||||
"shell-quote": "^1.8.3",
|
||||
},
|
||||
"devDependencies": {
|
||||
@@ -26,7 +27,7 @@
|
||||
|
||||
"@actions/io": ["@actions/io@1.1.3", "", {}, "sha512-wi9JjgKLYS7U/z8PPbco+PvTb/nRWjeoFlJ1Qer83k/3C5PHQi28hiVdeE2kHXmIL99mQFawx8qt/JPjZilJ8Q=="],
|
||||
|
||||
"@anthropic-ai/claude-agent-sdk": ["@anthropic-ai/claude-agent-sdk@0.1.52", "", { "optionalDependencies": { "@img/sharp-darwin-arm64": "^0.33.5", "@img/sharp-darwin-x64": "^0.33.5", "@img/sharp-linux-arm": "^0.33.5", "@img/sharp-linux-arm64": "^0.33.5", "@img/sharp-linux-x64": "^0.33.5", "@img/sharp-linuxmusl-arm64": "^0.33.5", "@img/sharp-linuxmusl-x64": "^0.33.5", "@img/sharp-win32-x64": "^0.33.5" }, "peerDependencies": { "zod": "^3.24.1" } }, "sha512-yF8N05+9NRbqYA/h39jQ726HTQFrdXXp7pEfDNKIJ2c4FdWvEjxBA/8ciZIebN6/PyvGDcbEp3yq2Co4rNpg6A=="],
|
||||
"@anthropic-ai/claude-agent-sdk": ["@anthropic-ai/claude-agent-sdk@0.1.76", "", { "optionalDependencies": { "@img/sharp-darwin-arm64": "^0.33.5", "@img/sharp-darwin-x64": "^0.33.5", "@img/sharp-linux-arm": "^0.33.5", "@img/sharp-linux-arm64": "^0.33.5", "@img/sharp-linux-x64": "^0.33.5", "@img/sharp-linuxmusl-arm64": "^0.33.5", "@img/sharp-linuxmusl-x64": "^0.33.5", "@img/sharp-win32-x64": "^0.33.5" }, "peerDependencies": { "zod": "^3.24.1 || ^4.0.0" } }, "sha512-s7RvpXoFaLXLG7A1cJBAPD8ilwOhhc/12fb5mJXRuD561o4FmPtQ+WRfuy9akMmrFRfLsKv8Ornw3ClGAPL2fw=="],
|
||||
|
||||
"@fastify/busboy": ["@fastify/busboy@2.1.1", "", {}, "sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA=="],
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@
  },
  "dependencies": {
    "@actions/core": "^1.10.1",
-   "@anthropic-ai/claude-agent-sdk": "^0.1.52",
+   "@anthropic-ai/claude-agent-sdk": "^0.1.76",
    "shell-quote": "^1.8.3"
  },
  "devDependencies": {
@@ -8,26 +8,47 @@ const MARKETPLACE_URL_REGEX =
|
||||
/^https:\/\/[a-zA-Z0-9\-._~:/?#[\]@!$&'()*+,;=%]+\.git$/;
|
||||
|
||||
/**
|
||||
* Validates a marketplace URL for security issues
|
||||
* @param url - The marketplace URL to validate
|
||||
* @throws {Error} If the URL is invalid
|
||||
* Checks if a marketplace input is a local path (not a URL)
|
||||
* @param input - The marketplace input to check
|
||||
* @returns true if the input is a local path, false if it's a URL
|
||||
*/
|
||||
function validateMarketplaceUrl(url: string): void {
|
||||
const normalized = url.trim();
|
||||
function isLocalPath(input: string): boolean {
|
||||
// Local paths start with ./, ../, /, or a drive letter (Windows)
|
||||
return (
|
||||
input.startsWith("./") ||
|
||||
input.startsWith("../") ||
|
||||
input.startsWith("/") ||
|
||||
/^[a-zA-Z]:[\\\/]/.test(input)
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates a marketplace URL or local path
|
||||
* @param input - The marketplace URL or local path to validate
|
||||
* @throws {Error} If the input is invalid
|
||||
*/
|
||||
function validateMarketplaceInput(input: string): void {
|
||||
const normalized = input.trim();
|
||||
|
||||
if (!normalized) {
|
||||
throw new Error("Marketplace URL cannot be empty");
|
||||
throw new Error("Marketplace URL or path cannot be empty");
|
||||
}
|
||||
|
||||
// Local paths are passed directly to Claude Code which handles them
|
||||
if (isLocalPath(normalized)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Validate as URL
|
||||
if (!MARKETPLACE_URL_REGEX.test(normalized)) {
|
||||
throw new Error(`Invalid marketplace URL format: ${url}`);
|
||||
throw new Error(`Invalid marketplace URL format: ${input}`);
|
||||
}
|
||||
|
||||
// Additional check for valid URL structure
|
||||
try {
|
||||
new URL(normalized);
|
||||
} catch {
|
||||
throw new Error(`Invalid marketplace URL: ${url}`);
|
||||
throw new Error(`Invalid marketplace URL: ${input}`);
|
||||
}
|
||||
}
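The new `isLocalPath` check above is what lets `validateMarketplaceInput` pass local directories straight through to Claude Code while still enforcing the `.git` URL regex for everything else. The following self-contained sketch, with the helper copied under the illustrative name `looksLikeLocalPath`, shows how the rule classifies typical inputs:

```ts
// Minimal, self-contained copy of the local-path rule used above:
// inputs beginning with ./, ../, /, or a Windows drive letter are treated
// as local marketplace paths; everything else is validated as a .git URL.
function looksLikeLocalPath(input: string): boolean {
  return (
    input.startsWith("./") ||
    input.startsWith("../") ||
    input.startsWith("/") ||
    /^[a-zA-Z]:[\\/]/.test(input)
  );
}

const samples = [
  "./my-local-marketplace",
  "../shared-plugins/marketplace",
  "/home/user/my-marketplace",
  "C:\\Users\\user\\marketplace",
  "https://github.com/user/remote.git",
];

for (const sample of samples) {
  console.log(`${sample} -> ${looksLikeLocalPath(sample) ? "local path" : "URL"}`);
}
```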
|
||||
|
||||
@@ -55,9 +76,9 @@ function validatePluginName(pluginName: string): void {
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse a newline-separated list of marketplace URLs and return an array of validated URLs
|
||||
* @param marketplaces - Newline-separated list of marketplace Git URLs
|
||||
* @returns Array of validated marketplace URLs (empty array if none provided)
|
||||
* Parse a newline-separated list of marketplace URLs or local paths and return an array of validated entries
|
||||
* @param marketplaces - Newline-separated list of marketplace Git URLs or local paths
|
||||
* @returns Array of validated marketplace URLs or paths (empty array if none provided)
|
||||
*/
|
||||
function parseMarketplaces(marketplaces?: string): string[] {
|
||||
const trimmed = marketplaces?.trim();
|
||||
@@ -66,14 +87,14 @@ function parseMarketplaces(marketplaces?: string): string[] {
|
||||
return [];
|
||||
}
|
||||
|
||||
// Split by newline and process each URL
|
||||
// Split by newline and process each entry
|
||||
return trimmed
|
||||
.split("\n")
|
||||
.map((url) => url.trim())
|
||||
.filter((url) => {
|
||||
if (url.length === 0) return false;
|
||||
.map((entry) => entry.trim())
|
||||
.filter((entry) => {
|
||||
if (entry.length === 0) return false;
|
||||
|
||||
validateMarketplaceUrl(url);
|
||||
validateMarketplaceInput(entry);
|
||||
return true;
|
||||
});
|
||||
}
|
||||
@@ -163,26 +184,26 @@ async function installPlugin(
|
||||
/**
|
||||
* Adds a Claude Code plugin marketplace
|
||||
* @param claudeExecutable - Path to the Claude executable
|
||||
* @param marketplaceUrl - The marketplace Git URL to add
|
||||
* @param marketplace - The marketplace Git URL or local path to add
|
||||
* @returns Promise that resolves when the marketplace add command completes
|
||||
* @throws {Error} If the command fails to execute
|
||||
*/
|
||||
async function addMarketplace(
|
||||
claudeExecutable: string,
|
||||
marketplaceUrl: string,
|
||||
marketplace: string,
|
||||
): Promise<void> {
|
||||
console.log(`Adding marketplace: ${marketplaceUrl}`);
|
||||
console.log(`Adding marketplace: ${marketplace}`);
|
||||
|
||||
return executeClaudeCommand(
|
||||
claudeExecutable,
|
||||
["plugin", "marketplace", "add", marketplaceUrl],
|
||||
`Failed to add marketplace '${marketplaceUrl}'`,
|
||||
["plugin", "marketplace", "add", marketplace],
|
||||
`Failed to add marketplace '${marketplace}'`,
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Installs Claude Code plugins from a newline-separated list
|
||||
* @param marketplacesInput - Newline-separated list of marketplace Git URLs
|
||||
* @param marketplacesInput - Newline-separated list of marketplace Git URLs or local paths
|
||||
* @param pluginsInput - Newline-separated list of plugin names
|
||||
* @param claudeExecutable - Path to the Claude executable (defaults to "claude")
|
||||
* @returns Promise that resolves when all plugins are installed
|
||||
|
||||
@@ -12,12 +12,79 @@ export type ParsedSdkOptions = {
|
||||
};
|
||||
|
||||
// Flags that should accumulate multiple values instead of overwriting
|
||||
const ACCUMULATING_FLAGS = new Set(["allowedTools", "disallowedTools"]);
|
||||
// Include both camelCase and hyphenated variants for CLI compatibility
|
||||
const ACCUMULATING_FLAGS = new Set([
|
||||
"allowedTools",
|
||||
"allowed-tools",
|
||||
"disallowedTools",
|
||||
"disallowed-tools",
|
||||
"mcp-config",
|
||||
]);
|
||||
|
||||
// Delimiter used to join accumulated flag values
|
||||
const ACCUMULATE_DELIMITER = "\x00";
|
||||
|
||||
type McpConfig = {
|
||||
mcpServers?: Record<string, unknown>;
|
||||
};
|
||||
|
||||
/**
|
||||
* Merge multiple MCP config values into a single config.
|
||||
* Each config can be a JSON string or a file path.
|
||||
* For JSON strings, mcpServers objects are merged.
|
||||
* For file paths, they are kept as-is (user's file takes precedence and is used last).
|
||||
*/
|
||||
function mergeMcpConfigs(configValues: string[]): string {
|
||||
const merged: McpConfig = { mcpServers: {} };
|
||||
let lastFilePath: string | null = null;
|
||||
|
||||
for (const config of configValues) {
|
||||
const trimmed = config.trim();
|
||||
if (!trimmed) continue;
|
||||
|
||||
// Check if it's a JSON string (starts with {) or a file path
|
||||
if (trimmed.startsWith("{")) {
|
||||
try {
|
||||
const parsed = JSON.parse(trimmed) as McpConfig;
|
||||
if (parsed.mcpServers) {
|
||||
Object.assign(merged.mcpServers!, parsed.mcpServers);
|
||||
}
|
||||
} catch {
|
||||
// If JSON parsing fails, treat as file path
|
||||
lastFilePath = trimmed;
|
||||
}
|
||||
} else {
|
||||
// It's a file path - store it to handle separately
|
||||
lastFilePath = trimmed;
|
||||
}
|
||||
}
|
||||
|
||||
// If we have file paths, we need to keep the merged JSON and let the file
|
||||
// be handled separately. Since we can only return one value, merge what we can.
|
||||
// If there's a file path, we need a different approach - read the file at runtime.
|
||||
// For now, if there's a file path, we'll stringify the merged config.
|
||||
// The action prepends its config as JSON, so we can safely merge inline JSON configs.
|
||||
|
||||
// If no inline configs were found (all file paths), return the last file path
|
||||
if (Object.keys(merged.mcpServers!).length === 0 && lastFilePath) {
|
||||
return lastFilePath;
|
||||
}
|
||||
|
||||
// Note: If user passes a file path, we cannot merge it at parse time since
|
||||
// we don't have access to the file system here. The action's built-in MCP
|
||||
// servers are always passed as inline JSON, so they will be merged.
|
||||
// If user also passes inline JSON, it will be merged.
|
||||
// If user passes a file path, they should ensure it includes all needed servers.
|
||||
|
||||
return JSON.stringify(merged);
|
||||
}
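The merge behavior described in the comments above can be exercised in isolation. This is a self-contained sketch of the inline-JSON branch only (file paths are skipped), matching the behavior the tests later in this page exercise; `mergeInlineMcpConfigs` is an illustrative name, not the function exported by the action:

```ts
// Each --mcp-config value that parses as JSON contributes its mcpServers
// entries; later values override earlier ones on key collisions.
type McpConfig = { mcpServers?: Record<string, unknown> };

function mergeInlineMcpConfigs(values: string[]): string {
  const merged: McpConfig = { mcpServers: {} };
  for (const value of values) {
    const trimmed = value.trim();
    if (!trimmed.startsWith("{")) continue; // skip file paths in this sketch
    try {
      const parsed = JSON.parse(trimmed) as McpConfig;
      Object.assign(merged.mcpServers!, parsed.mcpServers ?? {});
    } catch {
      // ignore unparseable values in this sketch
    }
  }
  return JSON.stringify(merged);
}

// Action config first, user config second: both servers survive the merge.
console.log(
  mergeInlineMcpConfigs([
    '{"mcpServers":{"github_comment":{"command":"node"}}}',
    '{"mcpServers":{"my_custom_server":{"command":"python"}}}',
  ]),
);
```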
|
||||
|
||||
/**
|
||||
* Parse claudeArgs string into extraArgs record for SDK pass-through
|
||||
* The SDK/CLI will handle --mcp-config, --json-schema, etc.
|
||||
* For allowedTools and disallowedTools, multiple occurrences are accumulated (comma-joined).
|
||||
* For allowedTools and disallowedTools, multiple occurrences are accumulated (null-char joined).
|
||||
* Accumulating flags also consume all consecutive non-flag values
|
||||
* (e.g., --allowed-tools "Tool1" "Tool2" "Tool3" captures all three).
|
||||
*/
|
||||
function parseClaudeArgsToExtraArgs(
|
||||
claudeArgs?: string,
|
||||
@@ -37,13 +104,25 @@ function parseClaudeArgsToExtraArgs(
|
||||
|
||||
// Check if next arg is a value (not another flag)
|
||||
if (nextArg && !nextArg.startsWith("--")) {
|
||||
// For accumulating flags, join multiple values with commas
|
||||
if (ACCUMULATING_FLAGS.has(flag) && result[flag]) {
|
||||
result[flag] = `${result[flag]},${nextArg}`;
|
||||
// For accumulating flags, consume all consecutive non-flag values
|
||||
// This handles: --allowed-tools "Tool1" "Tool2" "Tool3"
|
||||
if (ACCUMULATING_FLAGS.has(flag)) {
|
||||
const values: string[] = [];
|
||||
while (i + 1 < args.length && !args[i + 1]?.startsWith("--")) {
|
||||
i++;
|
||||
values.push(args[i]!);
|
||||
}
|
||||
const joinedValues = values.join(ACCUMULATE_DELIMITER);
|
||||
if (result[flag]) {
|
||||
result[flag] =
|
||||
`${result[flag]}${ACCUMULATE_DELIMITER}${joinedValues}`;
|
||||
} else {
|
||||
result[flag] = joinedValues;
|
||||
}
|
||||
} else {
|
||||
result[flag] = nextArg;
|
||||
i++; // Skip the value
|
||||
}
|
||||
i++; // Skip the value
|
||||
} else {
|
||||
result[flag] = null; // Boolean flag
|
||||
}
|
||||
@@ -68,12 +147,23 @@ export function parseSdkOptions(options: ClaudeOptions): ParsedSdkOptions {
|
||||
// Detect if --json-schema is present (for hasJsonSchema flag)
|
||||
const hasJsonSchema = "json-schema" in extraArgs;
|
||||
|
||||
// Extract and merge allowedTools from both sources:
|
||||
// Extract and merge allowedTools from all sources:
|
||||
// 1. From extraArgs (parsed from claudeArgs - contains tag mode's tools)
|
||||
// - Check both camelCase (--allowedTools) and hyphenated (--allowed-tools) variants
|
||||
// 2. From options.allowedTools (direct input - may be undefined)
|
||||
// This prevents duplicate flags being overwritten when claudeArgs contains --allowedTools
|
||||
const extraArgsAllowedTools = extraArgs["allowedTools"]
|
||||
? extraArgs["allowedTools"].split(",").map((t) => t.trim())
|
||||
const allowedToolsValues = [
|
||||
extraArgs["allowedTools"],
|
||||
extraArgs["allowed-tools"],
|
||||
]
|
||||
.filter(Boolean)
|
||||
.join(ACCUMULATE_DELIMITER);
|
||||
const extraArgsAllowedTools = allowedToolsValues
|
||||
? allowedToolsValues
|
||||
.split(ACCUMULATE_DELIMITER)
|
||||
.flatMap((v) => v.split(","))
|
||||
.map((t) => t.trim())
|
||||
.filter(Boolean)
|
||||
: [];
|
||||
const directAllowedTools = options.allowedTools
|
||||
? options.allowedTools.split(",").map((t) => t.trim())
|
||||
@@ -82,10 +172,21 @@ export function parseSdkOptions(options: ClaudeOptions): ParsedSdkOptions {
|
||||
...new Set([...extraArgsAllowedTools, ...directAllowedTools]),
|
||||
];
|
||||
delete extraArgs["allowedTools"];
|
||||
delete extraArgs["allowed-tools"];
|
||||
|
||||
// Same for disallowedTools
|
||||
const extraArgsDisallowedTools = extraArgs["disallowedTools"]
|
||||
? extraArgs["disallowedTools"].split(",").map((t) => t.trim())
|
||||
// Same for disallowedTools - check both camelCase and hyphenated variants
|
||||
const disallowedToolsValues = [
|
||||
extraArgs["disallowedTools"],
|
||||
extraArgs["disallowed-tools"],
|
||||
]
|
||||
.filter(Boolean)
|
||||
.join(ACCUMULATE_DELIMITER);
|
||||
const extraArgsDisallowedTools = disallowedToolsValues
|
||||
? disallowedToolsValues
|
||||
.split(ACCUMULATE_DELIMITER)
|
||||
.flatMap((v) => v.split(","))
|
||||
.map((t) => t.trim())
|
||||
.filter(Boolean)
|
||||
: [];
|
||||
const directDisallowedTools = options.disallowedTools
|
||||
? options.disallowedTools.split(",").map((t) => t.trim())
|
||||
@@ -94,12 +195,25 @@ export function parseSdkOptions(options: ClaudeOptions): ParsedSdkOptions {
|
||||
...new Set([...extraArgsDisallowedTools, ...directDisallowedTools]),
|
||||
];
|
||||
delete extraArgs["disallowedTools"];
|
||||
delete extraArgs["disallowed-tools"];
|
||||
|
||||
// Merge multiple --mcp-config values by combining their mcpServers objects
|
||||
// The action prepends its config (github_comment, github_ci, etc.) as inline JSON,
|
||||
// and users may provide their own config as inline JSON or file path
|
||||
if (extraArgs["mcp-config"]) {
|
||||
const mcpConfigValues = extraArgs["mcp-config"].split(ACCUMULATE_DELIMITER);
|
||||
if (mcpConfigValues.length > 1) {
|
||||
extraArgs["mcp-config"] = mergeMcpConfigs(mcpConfigValues);
|
||||
}
|
||||
}
|
||||
|
||||
// Build custom environment
|
||||
const env: Record<string, string | undefined> = { ...process.env };
|
||||
if (process.env.INPUT_ACTION_INPUTS_PRESENT) {
|
||||
env.GITHUB_ACTION_INPUTS = process.env.INPUT_ACTION_INPUTS_PRESENT;
|
||||
}
|
||||
// Ensure SDK path uses the same entrypoint as the CLI path
|
||||
env.CLAUDE_CODE_ENTRYPOINT = "claude-code-github-action";
|
||||
|
||||
// Build system prompt option - default to claude_code preset
|
||||
let systemPrompt: SdkOptions["systemPrompt"];
|
||||
@@ -137,10 +251,18 @@ export function parseSdkOptions(options: ClaudeOptions): ParsedSdkOptions {
|
||||
extraArgs,
|
||||
env,
|
||||
|
||||
// Load settings from all sources to pick up CLI-installed plugins, CLAUDE.md, etc.
|
||||
settingSources: ["user", "project", "local"],
|
||||
// Load settings from sources - prefer user's --setting-sources if provided, otherwise use all sources
|
||||
// This ensures users can override the default behavior (e.g., --setting-sources user to avoid in-repo configs)
|
||||
settingSources: extraArgs["setting-sources"]
|
||||
? (extraArgs["setting-sources"].split(
|
||||
",",
|
||||
) as SdkOptions["settingSources"])
|
||||
: ["user", "project", "local"],
|
||||
};
|
||||
|
||||
// Remove setting-sources from extraArgs to avoid passing it twice
|
||||
delete extraArgs["setting-sources"];
|
||||
|
||||
return {
|
||||
sdkOptions,
|
||||
showFullOutput,
|
||||
|
||||
@@ -1,14 +1,81 @@
|
||||
import * as core from "@actions/core";
|
||||
import { readFile, writeFile } from "fs/promises";
|
||||
import { readFile, writeFile, access } from "fs/promises";
|
||||
import { dirname, join } from "path";
|
||||
import { query } from "@anthropic-ai/claude-agent-sdk";
|
||||
import type {
|
||||
SDKMessage,
|
||||
SDKResultMessage,
|
||||
SDKUserMessage,
|
||||
} from "@anthropic-ai/claude-agent-sdk";
|
||||
import type { ParsedSdkOptions } from "./parse-sdk-options";
|
||||
|
||||
const EXECUTION_FILE = `${process.env.RUNNER_TEMP}/claude-execution-output.json`;
|
||||
|
||||
/** Filename for the user request file, written by prompt generation */
|
||||
const USER_REQUEST_FILENAME = "claude-user-request.txt";
|
||||
|
||||
/**
|
||||
* Check if a file exists
|
||||
*/
|
||||
async function fileExists(path: string): Promise<boolean> {
|
||||
try {
|
||||
await access(path);
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a prompt configuration for the SDK.
|
||||
* If a user request file exists alongside the prompt file, returns a multi-block
|
||||
* SDKUserMessage that enables slash command processing in the CLI.
|
||||
* Otherwise, returns the prompt as a simple string.
|
||||
*/
|
||||
async function createPromptConfig(
|
||||
promptPath: string,
|
||||
showFullOutput: boolean,
|
||||
): Promise<string | AsyncIterable<SDKUserMessage>> {
|
||||
const promptContent = await readFile(promptPath, "utf-8");
|
||||
|
||||
// Check for user request file in the same directory
|
||||
const userRequestPath = join(dirname(promptPath), USER_REQUEST_FILENAME);
|
||||
const hasUserRequest = await fileExists(userRequestPath);
|
||||
|
||||
if (!hasUserRequest) {
|
||||
// No user request file - use simple string prompt
|
||||
return promptContent;
|
||||
}
|
||||
|
||||
// User request file exists - create multi-block message
|
||||
const userRequest = await readFile(userRequestPath, "utf-8");
|
||||
if (showFullOutput) {
|
||||
console.log("Using multi-block message with user request:", userRequest);
|
||||
} else {
|
||||
console.log("Using multi-block message with user request (content hidden)");
|
||||
}
|
||||
|
||||
// Create an async generator that yields a single multi-block message
|
||||
// The context/instructions go first, then the user's actual request last
|
||||
// This allows the CLI to detect and process slash commands in the user request
|
||||
async function* createMultiBlockMessage(): AsyncGenerator<SDKUserMessage> {
|
||||
yield {
|
||||
type: "user",
|
||||
session_id: "",
|
||||
message: {
|
||||
role: "user",
|
||||
content: [
|
||||
{ type: "text", text: promptContent }, // Instructions + GitHub context
|
||||
{ type: "text", text: userRequest }, // User's request (may be a slash command)
|
||||
],
|
||||
},
|
||||
parent_tool_use_id: null,
|
||||
};
|
||||
}
|
||||
|
||||
return createMultiBlockMessage();
|
||||
}
|
||||
|
||||
/**
|
||||
* Sanitizes SDK output to match CLI sanitization behavior
|
||||
*/
|
||||
@@ -63,7 +130,8 @@ export async function runClaudeWithSdk(
|
||||
promptPath: string,
|
||||
{ sdkOptions, showFullOutput, hasJsonSchema }: ParsedSdkOptions,
|
||||
): Promise<void> {
|
||||
const prompt = await readFile(promptPath, "utf-8");
|
||||
// Create prompt configuration - may be a string or multi-block message
|
||||
const prompt = await createPromptConfig(promptPath, showFullOutput);
|
||||
|
||||
if (!showFullOutput) {
|
||||
console.log(
|
||||
|
||||
@@ -596,4 +596,111 @@ describe("installPlugins", () => {
|
||||
{ stdio: "inherit" },
|
||||
);
|
||||
});
|
||||
|
||||
// Local marketplace path tests
|
||||
test("should accept local marketplace path with ./", async () => {
|
||||
const spy = createMockSpawn();
|
||||
await installPlugins("./my-local-marketplace", "test-plugin");
|
||||
|
||||
expect(spy).toHaveBeenCalledTimes(2);
|
||||
expect(spy).toHaveBeenNthCalledWith(
|
||||
1,
|
||||
"claude",
|
||||
["plugin", "marketplace", "add", "./my-local-marketplace"],
|
||||
{ stdio: "inherit" },
|
||||
);
|
||||
expect(spy).toHaveBeenNthCalledWith(
|
||||
2,
|
||||
"claude",
|
||||
["plugin", "install", "test-plugin"],
|
||||
{ stdio: "inherit" },
|
||||
);
|
||||
});
|
||||
|
||||
test("should accept local marketplace path with absolute Unix path", async () => {
|
||||
const spy = createMockSpawn();
|
||||
await installPlugins("/home/user/my-marketplace", "test-plugin");
|
||||
|
||||
expect(spy).toHaveBeenCalledTimes(2);
|
||||
expect(spy).toHaveBeenNthCalledWith(
|
||||
1,
|
||||
"claude",
|
||||
["plugin", "marketplace", "add", "/home/user/my-marketplace"],
|
||||
{ stdio: "inherit" },
|
||||
);
|
||||
});
|
||||
|
||||
test("should accept local marketplace path with Windows absolute path", async () => {
|
||||
const spy = createMockSpawn();
|
||||
await installPlugins("C:\\Users\\user\\marketplace", "test-plugin");
|
||||
|
||||
expect(spy).toHaveBeenCalledTimes(2);
|
||||
expect(spy).toHaveBeenNthCalledWith(
|
||||
1,
|
||||
"claude",
|
||||
["plugin", "marketplace", "add", "C:\\Users\\user\\marketplace"],
|
||||
{ stdio: "inherit" },
|
||||
);
|
||||
});
|
||||
|
||||
test("should accept mixed local and remote marketplaces", async () => {
|
||||
const spy = createMockSpawn();
|
||||
await installPlugins(
|
||||
"./local-marketplace\nhttps://github.com/user/remote.git",
|
||||
"test-plugin",
|
||||
);
|
||||
|
||||
expect(spy).toHaveBeenCalledTimes(3);
|
||||
expect(spy).toHaveBeenNthCalledWith(
|
||||
1,
|
||||
"claude",
|
||||
["plugin", "marketplace", "add", "./local-marketplace"],
|
||||
{ stdio: "inherit" },
|
||||
);
|
||||
expect(spy).toHaveBeenNthCalledWith(
|
||||
2,
|
||||
"claude",
|
||||
["plugin", "marketplace", "add", "https://github.com/user/remote.git"],
|
||||
{ stdio: "inherit" },
|
||||
);
|
||||
});
|
||||
|
||||
test("should accept local path with ../ (parent directory)", async () => {
|
||||
const spy = createMockSpawn();
|
||||
await installPlugins("../shared-plugins/marketplace", "test-plugin");
|
||||
|
||||
expect(spy).toHaveBeenCalledTimes(2);
|
||||
expect(spy).toHaveBeenNthCalledWith(
|
||||
1,
|
||||
"claude",
|
||||
["plugin", "marketplace", "add", "../shared-plugins/marketplace"],
|
||||
{ stdio: "inherit" },
|
||||
);
|
||||
});
|
||||
|
||||
test("should accept local path with nested directories", async () => {
|
||||
const spy = createMockSpawn();
|
||||
await installPlugins("./plugins/my-org/my-marketplace", "test-plugin");
|
||||
|
||||
expect(spy).toHaveBeenCalledTimes(2);
|
||||
expect(spy).toHaveBeenNthCalledWith(
|
||||
1,
|
||||
"claude",
|
||||
["plugin", "marketplace", "add", "./plugins/my-org/my-marketplace"],
|
||||
{ stdio: "inherit" },
|
||||
);
|
||||
});
|
||||
|
||||
test("should accept local path with dots in directory name", async () => {
|
||||
const spy = createMockSpawn();
|
||||
await installPlugins("./my.plugin.marketplace", "test-plugin");
|
||||
|
||||
expect(spy).toHaveBeenCalledTimes(2);
|
||||
expect(spy).toHaveBeenNthCalledWith(
|
||||
1,
|
||||
"claude",
|
||||
["plugin", "marketplace", "add", "./my.plugin.marketplace"],
|
||||
{ stdio: "inherit" },
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -2,6 +2,6 @@
  "name": "mcp-test",
  "version": "1.0.0",
  "dependencies": {
-   "@modelcontextprotocol/sdk": "^1.11.0"
+   "@modelcontextprotocol/sdk": "^1.24.0"
  }
}
@@ -108,6 +108,48 @@ describe("parseSdkOptions", () => {
|
||||
expect(result.sdkOptions.extraArgs?.["allowedTools"]).toBeUndefined();
|
||||
expect(result.sdkOptions.extraArgs?.["model"]).toBe("claude-3-5-sonnet");
|
||||
});
|
||||
|
||||
test("should handle hyphenated --allowed-tools flag", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: '--allowed-tools "Edit,Read,Write"',
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
expect(result.sdkOptions.allowedTools).toEqual(["Edit", "Read", "Write"]);
|
||||
expect(result.sdkOptions.extraArgs?.["allowed-tools"]).toBeUndefined();
|
||||
});
|
||||
|
||||
test("should accumulate multiple --allowed-tools flags (hyphenated)", () => {
|
||||
// This is the exact scenario from issue #746
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs:
|
||||
'--allowed-tools "Bash(git log:*)" "Bash(git diff:*)" "Bash(git fetch:*)" "Bash(gh pr:*)"',
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
expect(result.sdkOptions.allowedTools).toEqual([
|
||||
"Bash(git log:*)",
|
||||
"Bash(git diff:*)",
|
||||
"Bash(git fetch:*)",
|
||||
"Bash(gh pr:*)",
|
||||
]);
|
||||
});
|
||||
|
||||
test("should handle mixed camelCase and hyphenated allowedTools flags", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: '--allowedTools "Edit,Read" --allowed-tools "Write,Glob"',
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
// Both should be merged - note: order depends on which key is found first
|
||||
expect(result.sdkOptions.allowedTools).toContain("Edit");
|
||||
expect(result.sdkOptions.allowedTools).toContain("Read");
|
||||
expect(result.sdkOptions.allowedTools).toContain("Write");
|
||||
expect(result.sdkOptions.allowedTools).toContain("Glob");
|
||||
});
|
||||
});
|
||||
|
||||
describe("disallowedTools merging", () => {
|
||||
@@ -134,19 +176,129 @@ describe("parseSdkOptions", () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe("other extraArgs passthrough", () => {
|
||||
test("should pass through mcp-config in extraArgs", () => {
|
||||
describe("mcp-config merging", () => {
|
||||
test("should pass through single mcp-config in extraArgs", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: `--mcp-config '{"mcpServers":{}}' --allowedTools "Edit"`,
|
||||
claudeArgs: `--mcp-config '{"mcpServers":{"server1":{"command":"cmd1"}}}'`,
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
expect(result.sdkOptions.extraArgs?.["mcp-config"]).toBe(
|
||||
'{"mcpServers":{}}',
|
||||
'{"mcpServers":{"server1":{"command":"cmd1"}}}',
|
||||
);
|
||||
});
|
||||
|
||||
test("should merge multiple mcp-config flags with inline JSON", () => {
|
||||
// Simulates action prepending its config, then user providing their own
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: `--mcp-config '{"mcpServers":{"github_comment":{"command":"node","args":["server.js"]}}}' --mcp-config '{"mcpServers":{"user_server":{"command":"custom","args":["run"]}}}'`,
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
const mcpConfig = JSON.parse(
|
||||
result.sdkOptions.extraArgs?.["mcp-config"] as string,
|
||||
);
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("github_comment");
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("user_server");
|
||||
expect(mcpConfig.mcpServers.github_comment.command).toBe("node");
|
||||
expect(mcpConfig.mcpServers.user_server.command).toBe("custom");
|
||||
});
|
||||
|
||||
test("should merge three mcp-config flags", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: `--mcp-config '{"mcpServers":{"server1":{"command":"cmd1"}}}' --mcp-config '{"mcpServers":{"server2":{"command":"cmd2"}}}' --mcp-config '{"mcpServers":{"server3":{"command":"cmd3"}}}'`,
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
const mcpConfig = JSON.parse(
|
||||
result.sdkOptions.extraArgs?.["mcp-config"] as string,
|
||||
);
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("server1");
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("server2");
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("server3");
|
||||
});
|
||||
|
||||
test("should handle mcp-config file path when no inline JSON exists", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: `--mcp-config /tmp/user-mcp-config.json`,
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
expect(result.sdkOptions.extraArgs?.["mcp-config"]).toBe(
|
||||
"/tmp/user-mcp-config.json",
|
||||
);
|
||||
});
|
||||
|
||||
test("should merge inline JSON configs when file path is also present", () => {
|
||||
// When action provides inline JSON and user provides a file path,
|
||||
// the inline JSON configs should be merged (file paths cannot be merged at parse time)
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: `--mcp-config '{"mcpServers":{"github_comment":{"command":"node"}}}' --mcp-config '{"mcpServers":{"github_ci":{"command":"node"}}}' --mcp-config /tmp/user-config.json`,
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
// The inline JSON configs should be merged
|
||||
const mcpConfig = JSON.parse(
|
||||
result.sdkOptions.extraArgs?.["mcp-config"] as string,
|
||||
);
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("github_comment");
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("github_ci");
|
||||
});
|
||||
|
||||
test("should handle mcp-config with other flags", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: `--mcp-config '{"mcpServers":{"server1":{}}}' --model claude-3-5-sonnet --mcp-config '{"mcpServers":{"server2":{}}}'`,
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
const mcpConfig = JSON.parse(
|
||||
result.sdkOptions.extraArgs?.["mcp-config"] as string,
|
||||
);
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("server1");
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("server2");
|
||||
expect(result.sdkOptions.extraArgs?.["model"]).toBe("claude-3-5-sonnet");
|
||||
});
|
||||
|
||||
test("should handle real-world scenario: action config + user config", () => {
|
||||
// This is the exact scenario from the bug report
|
||||
const actionConfig = JSON.stringify({
|
||||
mcpServers: {
|
||||
github_comment: {
|
||||
command: "node",
|
||||
args: ["github-comment-server.js"],
|
||||
},
|
||||
github_ci: { command: "node", args: ["github-ci-server.js"] },
|
||||
},
|
||||
});
|
||||
const userConfig = JSON.stringify({
|
||||
mcpServers: {
|
||||
my_custom_server: { command: "python", args: ["server.py"] },
|
||||
},
|
||||
});
|
||||
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: `--mcp-config '${actionConfig}' --mcp-config '${userConfig}'`,
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
const mcpConfig = JSON.parse(
|
||||
result.sdkOptions.extraArgs?.["mcp-config"] as string,
|
||||
);
|
||||
// All servers should be present
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("github_comment");
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("github_ci");
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("my_custom_server");
|
||||
});
|
||||
});
|
||||
|
||||
describe("other extraArgs passthrough", () => {
|
||||
test("should pass through json-schema in extraArgs", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: `--json-schema '{"type":"object"}'`,
|
||||
|
||||
5
bun.lock
5
bun.lock
@@ -1,12 +1,13 @@
|
||||
{
|
||||
"lockfileVersion": 1,
|
||||
"configVersion": 0,
|
||||
"workspaces": {
|
||||
"": {
|
||||
"name": "@anthropic-ai/claude-code-action",
|
||||
"dependencies": {
|
||||
"@actions/core": "^1.10.1",
|
||||
"@actions/github": "^6.0.1",
|
||||
"@anthropic-ai/claude-agent-sdk": "^0.1.52",
|
||||
"@anthropic-ai/claude-agent-sdk": "^0.1.76",
|
||||
"@modelcontextprotocol/sdk": "^1.11.0",
|
||||
"@octokit/graphql": "^8.2.2",
|
||||
"@octokit/rest": "^21.1.1",
|
||||
@@ -36,7 +37,7 @@
|
||||
|
||||
"@actions/io": ["@actions/io@1.1.3", "", {}, "sha512-wi9JjgKLYS7U/z8PPbco+PvTb/nRWjeoFlJ1Qer83k/3C5PHQi28hiVdeE2kHXmIL99mQFawx8qt/JPjZilJ8Q=="],
|
||||
|
||||
"@anthropic-ai/claude-agent-sdk": ["@anthropic-ai/claude-agent-sdk@0.1.52", "", { "optionalDependencies": { "@img/sharp-darwin-arm64": "^0.33.5", "@img/sharp-darwin-x64": "^0.33.5", "@img/sharp-linux-arm": "^0.33.5", "@img/sharp-linux-arm64": "^0.33.5", "@img/sharp-linux-x64": "^0.33.5", "@img/sharp-linuxmusl-arm64": "^0.33.5", "@img/sharp-linuxmusl-x64": "^0.33.5", "@img/sharp-win32-x64": "^0.33.5" }, "peerDependencies": { "zod": "^3.24.1" } }, "sha512-yF8N05+9NRbqYA/h39jQ726HTQFrdXXp7pEfDNKIJ2c4FdWvEjxBA/8ciZIebN6/PyvGDcbEp3yq2Co4rNpg6A=="],
|
||||
"@anthropic-ai/claude-agent-sdk": ["@anthropic-ai/claude-agent-sdk@0.1.76", "", { "optionalDependencies": { "@img/sharp-darwin-arm64": "^0.33.5", "@img/sharp-darwin-x64": "^0.33.5", "@img/sharp-linux-arm": "^0.33.5", "@img/sharp-linux-arm64": "^0.33.5", "@img/sharp-linux-x64": "^0.33.5", "@img/sharp-linuxmusl-arm64": "^0.33.5", "@img/sharp-linuxmusl-x64": "^0.33.5", "@img/sharp-win32-x64": "^0.33.5" }, "peerDependencies": { "zod": "^3.24.1 || ^4.0.0" } }, "sha512-s7RvpXoFaLXLG7A1cJBAPD8ilwOhhc/12fb5mJXRuD561o4FmPtQ+WRfuy9akMmrFRfLsKv8Ornw3ClGAPL2fw=="],
|
||||
|
||||
"@fastify/busboy": ["@fastify/busboy@2.1.1", "", {}, "sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA=="],
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ You can authenticate with Claude using any of these four methods:
3. Google Vertex AI with OIDC authentication
4. Microsoft Foundry with OIDC authentication

- For detailed setup instructions for AWS Bedrock and Google Vertex AI, see the [official documentation](https://docs.anthropic.com/en/docs/claude-code/github-actions#using-with-aws-bedrock-%26-google-vertex-ai).
+ For detailed setup instructions for AWS Bedrock and Google Vertex AI, see the [official documentation](https://code.claude.com/docs/en/github-actions#for-aws-bedrock:).

**Note**:
@@ -116,7 +116,6 @@ The `additional_permissions` input allows Claude to access GitHub Actions workfl
To allow Claude to view workflow run results, job logs, and CI status:

1. **Grant the necessary permission to your GitHub token**:

   - When using the default `GITHUB_TOKEN`, add the `actions: read` permission to your workflow:

   ```yaml
@@ -228,12 +228,10 @@ jobs:
The action now automatically detects the appropriate mode:

1. **If `prompt` is provided** → Runs in **automation mode**

   - Executes immediately without waiting for @claude mentions
   - Perfect for scheduled tasks, PR automation, etc.

2. **If no `prompt` but @claude is mentioned** → Runs in **interactive mode**

   - Waits for and responds to @claude mentions
   - Creates tracking comments with progress
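For readers who want the selection rule as code rather than prose, here is a hedged sketch assuming only the two conditions listed above (an explicit `prompt` input, or an `@claude` mention in the triggering comment); the real action inspects the full event payload, and `detectMode` is an illustrative name:

```ts
// Hedged sketch of the mode-selection rule described above.
type Mode = "automation" | "interactive" | "none";

function detectMode(prompt: string | undefined, commentBody: string): Mode {
  if (prompt && prompt.trim().length > 0) return "automation"; // explicit prompt wins
  if (commentBody.includes("@claude")) return "interactive";   // wait for mentions
  return "none"; // neither condition met in this simplified sketch
}

console.log(detectMode("Update the dependencies", ""));   // "automation"
console.log(detectMode(undefined, "@claude please fix")); // "interactive"
```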
@@ -38,7 +38,62 @@ The following permissions are requested but not yet actively used. These will en

## Commit Signing

- Commits made by Claude through this action are no longer automatically signed with commit signatures. To enable commit signing set `use_commit_signing: True` in the workflow(s). This ensures the authenticity and integrity of commits, providing a verifiable trail of changes made by the action.
+ By default, commits made by Claude are unsigned. You can enable commit signing using one of two methods:

### Option 1: GitHub API Commit Signing (use_commit_signing)

This uses GitHub's API to create commits, which automatically signs them as verified from the GitHub App:

```yaml
- uses: anthropics/claude-code-action@main
  with:
    use_commit_signing: true
```

This is the simplest option and requires no additional setup. However, because it uses the GitHub API instead of git CLI, it cannot perform complex git operations like rebasing, cherry-picking, or interactive history manipulation.

### Option 2: SSH Signing Key (ssh_signing_key)

This uses an SSH key to sign commits via git CLI. Use this option when you need both signed commits AND standard git operations (rebasing, cherry-picking, etc.):

```yaml
- uses: anthropics/claude-code-action@main
  with:
    ssh_signing_key: ${{ secrets.SSH_SIGNING_KEY }}
    bot_id: "YOUR_GITHUB_USER_ID"
    bot_name: "YOUR_GITHUB_USERNAME"
```

Commits will show as verified and attributed to the GitHub account that owns the signing key.

**Setup steps:**

1. Generate an SSH key pair for signing:

   ```bash
   ssh-keygen -t ed25519 -f ~/.ssh/signing_key -N "" -C "commit signing key"
   ```

2. Add the **public key** to your GitHub account:
   - Go to GitHub → Settings → SSH and GPG keys
   - Click "New SSH key"
   - Select **Key type: Signing Key** (important)
   - Paste the contents of `~/.ssh/signing_key.pub`

3. Add the **private key** to your repository secrets:
   - Go to your repo → Settings → Secrets and variables → Actions
   - Create a new secret named `SSH_SIGNING_KEY`
   - Paste the contents of `~/.ssh/signing_key`

4. Get your GitHub user ID:

   ```bash
   gh api users/YOUR_USERNAME --jq '.id'
   ```

5. Update your workflow with `bot_id` and `bot_name` matching the account where you added the signing key.

**Note:** If both `ssh_signing_key` and `use_commit_signing` are provided, `ssh_signing_key` takes precedence.

## ⚠️ Authentication Protection
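The precedence note above maps to a one-line decision. A tiny TypeScript sketch, with an illustrative function name, makes the ordering explicit:

```ts
// Hedged sketch of the precedence rule stated above: a non-empty
// ssh_signing_key wins over use_commit_signing; neither means unsigned commits.
type SigningStrategy = "ssh" | "github-api" | "none";

function pickSigningStrategy(sshSigningKey: string, useCommitSigning: boolean): SigningStrategy {
  if (sshSigningKey !== "") return "ssh";    // ssh_signing_key takes precedence
  if (useCommitSigning) return "github-api"; // GitHub API-signed commits
  return "none";                             // default: unsigned commits
}
```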
@@ -31,27 +31,23 @@ The fastest way to create a custom GitHub App is using our pre-configured manife

**🚀 [Download the Quick Setup Tool](./create-app.html)** (Right-click → "Save Link As" or "Download Linked File")

After downloading, open `create-app.html` in your web browser:

- **For Personal Accounts:** Click the "Create App for Personal Account" button
- **For Organizations:** Enter your organization name and click "Create App for Organization"

The tool will automatically configure all required permissions and submit the manifest.

Alternatively, you can use the manifest file directly:

- Use the [`github-app-manifest.json`](../github-app-manifest.json) file from this repository
- Visit https://github.com/settings/apps/new (for personal) or your organization's app settings
- Look for the "Create from manifest" option and paste the JSON content

2. **Complete the creation flow:**

   - GitHub will show you a preview of the app configuration
   - Confirm the app name (you can customize it)
   - Click "Create GitHub App"
   - The app will be created with all required permissions automatically configured

3. **Generate and download a private key:**

   - After creating the app, you'll be redirected to the app settings
   - Scroll down to "Private keys"
   - Click "Generate a private key"
@@ -64,7 +60,6 @@ The fastest way to create a custom GitHub App is using our pre-configured manife

If you prefer to configure the app manually or need custom permissions:

1. **Create a new GitHub App:**

   - Go to https://github.com/settings/apps (for personal apps) or your organization's settings
   - Click "New GitHub App"
   - Configure the app with these minimum permissions:

@@ -77,19 +72,16 @@ If you prefer to configure the app manually or need custom permissions:

   - Create the app

2. **Generate and download a private key:**

   - After creating the app, scroll down to "Private keys"
   - Click "Generate a private key"
   - Download the `.pem` file (keep this secure!)

3. **Install the app on your repository:**

   - Go to the app's settings page
   - Click "Install App"
   - Select the repositories where you want to use Claude

4. **Add the app credentials to your repository secrets:**

   - Go to your repository's Settings → Secrets and variables → Actions
   - Add these secrets:
     - `APP_ID`: Your GitHub App's ID (found in the app settings)
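For illustration, credentials stored this way are typically exchanged for an installation token at runtime and passed to the action. This is only a sketch — the `APP_PRIVATE_KEY` secret name and the use of `actions/create-github-app-token` are assumptions, not requirements of this guide:

```yaml
steps:
  - name: Generate a GitHub App token
    id: app-token
    uses: actions/create-github-app-token@v1
    with:
      app-id: ${{ secrets.APP_ID }}
      private-key: ${{ secrets.APP_PRIVATE_KEY }} # assumed name for the .pem contents

  - uses: anthropics/claude-code-action@main
    with:
      github_token: ${{ steps.app-token.outputs.token }}
      # ...plus your usual model provider credentials
```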
@@ -138,7 +130,6 @@ For more information on creating GitHub Apps, see the [GitHub documentation](htt

To securely use your Anthropic API key:

1. Add your API key as a repository secret:

   - Go to your repository's Settings
   - Navigate to "Secrets and variables" → "Actions"
   - Click "New repository secret"
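Once the secret is in place, reference it from the workflow instead of hard-coding the key. A minimal sketch, assuming the secret is named `ANTHROPIC_API_KEY`:

```yaml
- uses: anthropics/claude-code-action@main
  with:
    anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
```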
@@ -58,6 +58,7 @@ jobs:

| `claude_code_oauth_token` | Claude Code OAuth token (alternative to anthropic_api_key) | No\* | - |
| `prompt` | Instructions for Claude. Can be a direct prompt or custom template for automation workflows | No | - |
| `track_progress` | Force tag mode with tracking comments. Only works with specific PR/issue events. Preserves GitHub context | No | `false` |
| `include_fix_links` | Include 'Fix this' links in PR code review feedback that open Claude Code with context to fix the identified issue | No | `true` |
| `claude_args` | Additional [arguments to pass directly to Claude CLI](https://docs.claude.com/en/docs/claude-code/cli-reference#cli-flags) (e.g., `--max-turns 10 --model claude-4-0-sonnet-20250805`) | No | "" |
| `base_branch` | The base branch to use for creating new branches (e.g., 'main', 'develop') | No | - |
| `use_sticky_comment` | Use just one comment to deliver PR comments (only applies for pull_request event workflows) | No | `false` |

@@ -70,9 +71,10 @@ jobs:

| `branch_prefix` | The prefix to use for Claude branches (defaults to 'claude/', use 'claude-' for dash format) | No | `claude/` |
| `settings` | Claude Code settings as JSON string or path to settings JSON file | No | "" |
| `additional_permissions` | Additional permissions to enable. Currently supports 'actions: read' for viewing workflow results | No | "" |
| `use_commit_signing` | Enable commit signing using GitHub's commit signature verification. When false, Claude uses standard git commands | No | `false` |
| `bot_id` | GitHub user ID to use for git operations (defaults to Claude's bot ID) | No | `41898282` |
| `bot_name` | GitHub username to use for git operations (defaults to Claude's bot name) | No | `claude[bot]` |
| `use_commit_signing` | Enable commit signing using GitHub's API. Simple but cannot perform complex git operations like rebasing. See [Security](./security.md#commit-signing) | No | `false` |
| `ssh_signing_key` | SSH private key for signing commits. Enables signed commits with full git CLI support (rebasing, etc.). See [Security](./security.md#commit-signing) | No | "" |
| `bot_id` | GitHub user ID to use for git operations (defaults to Claude's bot ID). Required with `ssh_signing_key` for verified commits | No | `41898282` |
| `bot_name` | GitHub username to use for git operations (defaults to Claude's bot name). Required with `ssh_signing_key` for verified commits | No | `claude[bot]` |
| `allowed_bots` | Comma-separated list of allowed bot usernames, or '\*' to allow all bots. Empty string (default) allows no bots | No | "" |
| `allowed_non_write_users` | **⚠️ RISKY**: Comma-separated list of usernames to allow without write permissions, or '\*' for all users. Only works with `github_token` input. See [Security](./security.md) | No | "" |
| `path_to_claude_code_executable` | Optional path to a custom Claude Code executable. Skips automatic installation. Useful for Nix, custom containers, or specialized environments | No | "" |
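To make the table above concrete, here is a hedged sketch combining a few of these inputs in one step; the values are placeholders rather than recommendations:

```yaml
- uses: anthropics/claude-code-action@main
  with:
    anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
    prompt: "Review this PR and flag any security issues"
    track_progress: true
    use_sticky_comment: true
    base_branch: "main"
    claude_args: "--max-turns 10"
```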
@@ -21,7 +21,26 @@ jobs:
|
||||
!startsWith(github.event.workflow_run.head_branch, 'claude-auto-fix-ci-')
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check for clawd-stop label
|
||||
id: check_label
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const prNumber = ${{ github.event.workflow_run.pull_requests[0].number }};
|
||||
const { data: pr } = await github.rest.pulls.get({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
pull_number: prNumber
|
||||
});
|
||||
const hasClawdStop = pr.labels.some(label => label.name === 'clawd-stop');
|
||||
if (hasClawdStop) {
|
||||
console.log('PR has clawd-stop label, skipping auto-fix');
|
||||
}
|
||||
return hasClawdStop;
|
||||
result-encoding: string
|
||||
|
||||
- name: Checkout code
|
||||
if: steps.check_label.outputs.result != 'true'
|
||||
uses: actions/checkout@v5
|
||||
with:
|
||||
ref: ${{ github.event.workflow_run.head_branch }}
|
||||
@@ -29,11 +48,13 @@ jobs:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Setup git identity
|
||||
if: steps.check_label.outputs.result != 'true'
|
||||
run: |
|
||||
git config --global user.email "claude[bot]@users.noreply.github.com"
|
||||
git config --global user.name "claude[bot]"
|
||||
|
||||
- name: Create fix branch
|
||||
if: steps.check_label.outputs.result != 'true'
|
||||
id: branch
|
||||
run: |
|
||||
BRANCH_NAME="claude-auto-fix-ci-${{ github.event.workflow_run.head_branch }}-${{ github.run_id }}"
|
||||
@@ -41,6 +62,7 @@ jobs:
|
||||
echo "branch_name=$BRANCH_NAME" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Get CI failure details
|
||||
if: steps.check_label.outputs.result != 'true'
|
||||
id: failure_details
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
@@ -79,6 +101,7 @@ jobs:
|
||||
};
|
||||
|
||||
- name: Fix CI failures with Claude
|
||||
if: steps.check_label.outputs.result != 'true'
|
||||
id: claude
|
||||
uses: anthropics/claude-code-action@v1
|
||||
with:
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
"dependencies": {
|
||||
"@actions/core": "^1.10.1",
|
||||
"@actions/github": "^6.0.1",
|
||||
"@anthropic-ai/claude-agent-sdk": "^0.1.52",
|
||||
"@anthropic-ai/claude-agent-sdk": "^0.1.76",
|
||||
"@modelcontextprotocol/sdk": "^1.11.0",
|
||||
"@octokit/graphql": "^8.2.2",
|
||||
"@octokit/rest": "^21.1.1",
|
||||
|
||||
@@ -21,8 +21,12 @@ import type { ParsedGitHubContext } from "../github/context";
|
||||
import type { CommonFields, PreparedContext, EventData } from "./types";
|
||||
import { GITHUB_SERVER_URL } from "../github/api/config";
|
||||
import type { Mode, ModeContext } from "../modes/types";
|
||||
import { extractUserRequest } from "../utils/extract-user-request";
|
||||
export type { CommonFields, PreparedContext } from "./types";
|
||||
|
||||
/** Filename for the user request file, read by the SDK runner */
|
||||
const USER_REQUEST_FILENAME = "claude-user-request.txt";
|
||||
|
||||
// Tag mode defaults - these tools are needed for tag mode to function
|
||||
const BASE_ALLOWED_TOOLS = [
|
||||
"Edit",
|
||||
@@ -734,7 +738,13 @@ ${eventData.eventName === "issue_comment" || eventData.eventName === "pull_reque
|
||||
- Reference specific code sections with file paths and line numbers${eventData.isPR ? `\n - AFTER reading files and analyzing code, you MUST call mcp__github_comment__update_claude_comment to post your review` : ""}
|
||||
- Formulate a concise, technical, and helpful response based on the context.
|
||||
- Reference specific code with inline formatting or code blocks.
|
||||
- Include relevant file paths and line numbers when applicable.
|
||||
- Include relevant file paths and line numbers when applicable.${
|
||||
eventData.isPR && context.githubContext?.inputs.includeFixLinks
|
||||
? `
|
||||
- When identifying issues that could be fixed, include an inline link: [Fix this →](https://claude.ai/code?q=<URI_ENCODED_INSTRUCTIONS>&repo=${context.repository})
|
||||
The query should be URI-encoded and include enough context for Claude Code to understand and fix the issue (file path, line numbers, branch name, what needs to change).`
|
||||
: ""
|
||||
}
|
||||
- ${eventData.isPR ? `IMPORTANT: Submit your review feedback by updating the Claude comment using mcp__github_comment__update_claude_comment. This will be displayed as your PR review.` : `Remember that this feedback must be posted to the GitHub comment using mcp__github_comment__update_claude_comment.`}
|
||||
|
||||
B. For Straightforward Changes:
|
||||
@@ -841,6 +851,55 @@ f. If you are unable to complete certain steps, such as running a linter or test
|
||||
return promptContent;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts the user's request from the prepared context and GitHub data.
|
||||
*
|
||||
* This is used to send the user's actual command/request as a separate
|
||||
* content block, enabling slash command processing in the CLI.
|
||||
*
|
||||
* @param context - The prepared context containing event data and trigger phrase
|
||||
* @param githubData - The fetched GitHub data containing issue/PR body content
|
||||
* @returns The extracted user request text (e.g., "/review-pr" or "fix this bug"),
|
||||
* or null for assigned/labeled events without an explicit trigger in the body
|
||||
*
|
||||
* @example
|
||||
* // Comment event: "@claude /review-pr" -> returns "/review-pr"
|
||||
* // Issue body with "@claude fix this" -> returns "fix this"
|
||||
* // Issue assigned without @claude in body -> returns null
|
||||
*/
|
||||
function extractUserRequestFromContext(
|
||||
context: PreparedContext,
|
||||
githubData: FetchDataResult,
|
||||
): string | null {
|
||||
const { eventData, triggerPhrase } = context;
|
||||
|
||||
// For comment events, extract from comment body
|
||||
if (
|
||||
"commentBody" in eventData &&
|
||||
eventData.commentBody &&
|
||||
(eventData.eventName === "issue_comment" ||
|
||||
eventData.eventName === "pull_request_review_comment" ||
|
||||
eventData.eventName === "pull_request_review")
|
||||
) {
|
||||
return extractUserRequest(eventData.commentBody, triggerPhrase);
|
||||
}
|
||||
|
||||
// For issue/PR events triggered by body content, extract from the body
|
||||
if (githubData.contextData?.body) {
|
||||
const request = extractUserRequest(
|
||||
githubData.contextData.body,
|
||||
triggerPhrase,
|
||||
);
|
||||
if (request) {
|
||||
return request;
|
||||
}
|
||||
}
|
||||
|
||||
// For assigned/labeled events without explicit trigger in body,
|
||||
// return null to indicate the full context should be used
|
||||
return null;
|
||||
}
|
||||
|
||||
export async function createPrompt(
|
||||
mode: Mode,
|
||||
modeContext: ModeContext,
|
||||
@@ -889,6 +948,22 @@ export async function createPrompt(
|
||||
promptContent,
|
||||
);
|
||||
|
||||
// Extract and write the user request separately for SDK multi-block messaging
|
||||
// This allows the CLI to process slash commands (e.g., "@claude /review-pr")
|
||||
const userRequest = extractUserRequestFromContext(
|
||||
preparedContext,
|
||||
githubData,
|
||||
);
|
||||
if (userRequest) {
|
||||
await writeFile(
|
||||
`${process.env.RUNNER_TEMP || "/tmp"}/claude-prompts/${USER_REQUEST_FILENAME}`,
|
||||
userRequest,
|
||||
);
|
||||
console.log("===== USER REQUEST =====");
|
||||
console.log(userRequest);
|
||||
console.log("========================");
|
||||
}
|
||||
|
||||
// Set allowed tools
|
||||
const hasActionsReadPermission = false;
|
||||
|
||||
|
||||
21
src/entrypoints/cleanup-ssh-signing.ts
Normal file
@@ -0,0 +1,21 @@
|
||||
#!/usr/bin/env bun
|
||||
|
||||
/**
|
||||
* Cleanup SSH signing key after action completes
|
||||
* This is run as a post step for security purposes
|
||||
*/
|
||||
|
||||
import { cleanupSshSigning } from "../github/operations/git-config";
|
||||
|
||||
async function run() {
|
||||
try {
|
||||
await cleanupSshSigning();
|
||||
} catch (error) {
|
||||
// Don't fail the action if cleanup fails, just log it
|
||||
console.error("Failed to cleanup SSH signing key:", error);
|
||||
}
|
||||
}
|
||||
|
||||
if (import.meta.main) {
|
||||
run();
|
||||
}
|
||||
@@ -26,6 +26,7 @@ export function collectActionInputsPresence(): void {
|
||||
max_turns: "",
|
||||
use_sticky_comment: "false",
|
||||
use_commit_signing: "false",
|
||||
ssh_signing_key: "",
|
||||
};
|
||||
|
||||
const allInputsJson = process.env.ALL_INPUTS;
|
||||
|
||||
@@ -90,11 +90,13 @@ type BaseContext = {
|
||||
branchPrefix: string;
|
||||
useStickyComment: boolean;
|
||||
useCommitSigning: boolean;
|
||||
sshSigningKey: string;
|
||||
botId: string;
|
||||
botName: string;
|
||||
allowedBots: string;
|
||||
allowedNonWriteUsers: string;
|
||||
trackProgress: boolean;
|
||||
includeFixLinks: boolean;
|
||||
};
|
||||
};
|
||||
|
||||
@@ -145,11 +147,13 @@ export function parseGitHubContext(): GitHubContext {
|
||||
branchPrefix: process.env.BRANCH_PREFIX ?? "claude/",
|
||||
useStickyComment: process.env.USE_STICKY_COMMENT === "true",
|
||||
useCommitSigning: process.env.USE_COMMIT_SIGNING === "true",
|
||||
sshSigningKey: process.env.SSH_SIGNING_KEY || "",
|
||||
botId: process.env.BOT_ID ?? String(CLAUDE_APP_BOT_ID),
|
||||
botName: process.env.BOT_NAME ?? CLAUDE_BOT_LOGIN,
|
||||
allowedBots: process.env.ALLOWED_BOTS ?? "",
|
||||
allowedNonWriteUsers: process.env.ALLOWED_NON_WRITE_USERS ?? "",
|
||||
trackProgress: process.env.TRACK_PROGRESS === "true",
|
||||
includeFixLinks: process.env.INCLUDE_FIX_LINKS === "true",
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
@@ -3,6 +3,8 @@ import type { Octokits } from "../api/client";
|
||||
import { ISSUE_QUERY, PR_QUERY, USER_QUERY } from "../api/queries/github";
|
||||
import {
|
||||
isIssueCommentEvent,
|
||||
isIssuesEvent,
|
||||
isPullRequestEvent,
|
||||
isPullRequestReviewEvent,
|
||||
isPullRequestReviewCommentEvent,
|
||||
type ParsedGitHubContext,
|
||||
@@ -40,6 +42,31 @@ export function extractTriggerTimestamp(
|
||||
return undefined;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts the original title from the GitHub webhook payload.
|
||||
* This is the title as it existed when the trigger event occurred.
|
||||
*
|
||||
* @param context - Parsed GitHub context from webhook
|
||||
* @returns The original title string or undefined if not available
|
||||
*/
|
||||
export function extractOriginalTitle(
|
||||
context: ParsedGitHubContext,
|
||||
): string | undefined {
|
||||
if (isIssueCommentEvent(context)) {
|
||||
return context.payload.issue?.title;
|
||||
} else if (isPullRequestEvent(context)) {
|
||||
return context.payload.pull_request?.title;
|
||||
} else if (isPullRequestReviewEvent(context)) {
|
||||
return context.payload.pull_request?.title;
|
||||
} else if (isPullRequestReviewCommentEvent(context)) {
|
||||
return context.payload.pull_request?.title;
|
||||
} else if (isIssuesEvent(context)) {
|
||||
return context.payload.issue?.title;
|
||||
}
|
||||
|
||||
return undefined;
|
||||
}
|
||||
|
||||
/**
|
||||
* Filters comments to only include those that existed in their final state before the trigger time.
|
||||
* This prevents malicious actors from editing comments after the trigger to inject harmful content.
|
||||
@@ -146,6 +173,7 @@ type FetchDataParams = {
|
||||
isPR: boolean;
|
||||
triggerUsername?: string;
|
||||
triggerTime?: string;
|
||||
originalTitle?: string;
|
||||
};
|
||||
|
||||
export type GitHubFileWithSHA = GitHubFile & {
|
||||
@@ -169,6 +197,7 @@ export async function fetchGitHubData({
|
||||
isPR,
|
||||
triggerUsername,
|
||||
triggerTime,
|
||||
originalTitle,
|
||||
}: FetchDataParams): Promise<FetchDataResult> {
|
||||
const [owner, repo] = repository.split("/");
|
||||
if (!owner || !repo) {
|
||||
@@ -354,6 +383,11 @@ export async function fetchGitHubData({
|
||||
triggerDisplayName = await fetchUserDisplayName(octokits, triggerUsername);
|
||||
}
|
||||
|
||||
// Use the original title from the webhook payload if provided
|
||||
if (originalTitle !== undefined) {
|
||||
contextData.title = originalTitle;
|
||||
}
|
||||
|
||||
return {
|
||||
contextData,
|
||||
comments,
|
||||
|
||||
@@ -14,7 +14,8 @@ export function formatContext(
|
||||
): string {
|
||||
if (isPR) {
|
||||
const prData = contextData as GitHubPullRequest;
|
||||
return `PR Title: ${prData.title}
|
||||
const sanitizedTitle = sanitizeContent(prData.title);
|
||||
return `PR Title: ${sanitizedTitle}
|
||||
PR Author: ${prData.author.login}
|
||||
PR Branch: ${prData.headRefName} -> ${prData.baseRefName}
|
||||
PR State: ${prData.state}
|
||||
@@ -24,7 +25,8 @@ Total Commits: ${prData.commits.totalCount}
|
||||
Changed Files: ${prData.files.nodes.length} files`;
|
||||
} else {
|
||||
const issueData = contextData as GitHubIssue;
|
||||
return `Issue Title: ${issueData.title}
|
||||
const sanitizedTitle = sanitizeContent(issueData.title);
|
||||
return `Issue Title: ${sanitizedTitle}
|
||||
Issue Author: ${issueData.author.login}
|
||||
Issue State: ${issueData.state}`;
|
||||
}
|
||||
|
||||
@@ -6,9 +6,14 @@
|
||||
*/
|
||||
|
||||
import { $ } from "bun";
|
||||
import { mkdir, writeFile, rm } from "fs/promises";
|
||||
import { join } from "path";
|
||||
import { homedir } from "os";
|
||||
import type { GitHubContext } from "../context";
|
||||
import { GITHUB_SERVER_URL } from "../api/config";
|
||||
|
||||
const SSH_SIGNING_KEY_PATH = join(homedir(), ".ssh", "claude_signing_key");
|
||||
|
||||
type GitUser = {
|
||||
login: string;
|
||||
id: number;
|
||||
@@ -54,3 +59,50 @@ export async function configureGitAuth(
|
||||
|
||||
console.log("Git authentication configured successfully");
|
||||
}
|
||||
|
||||
/**
|
||||
* Configure git to use SSH signing for commits
|
||||
* This is an alternative to GitHub API-based commit signing (use_commit_signing)
|
||||
*/
|
||||
export async function setupSshSigning(sshSigningKey: string): Promise<void> {
|
||||
console.log("Configuring SSH signing for commits...");
|
||||
|
||||
// Validate SSH key format
|
||||
if (!sshSigningKey.trim()) {
|
||||
throw new Error("SSH signing key cannot be empty");
|
||||
}
|
||||
if (
|
||||
!sshSigningKey.includes("BEGIN") ||
|
||||
!sshSigningKey.includes("PRIVATE KEY")
|
||||
) {
|
||||
throw new Error("Invalid SSH private key format");
|
||||
}
|
||||
|
||||
// Create .ssh directory with secure permissions (700)
|
||||
const sshDir = join(homedir(), ".ssh");
|
||||
await mkdir(sshDir, { recursive: true, mode: 0o700 });
|
||||
|
||||
// Write the signing key atomically with secure permissions (600)
|
||||
await writeFile(SSH_SIGNING_KEY_PATH, sshSigningKey, { mode: 0o600 });
|
||||
console.log(`✓ SSH signing key written to ${SSH_SIGNING_KEY_PATH}`);
|
||||
|
||||
// Configure git to use SSH signing
|
||||
await $`git config gpg.format ssh`;
|
||||
await $`git config user.signingkey ${SSH_SIGNING_KEY_PATH}`;
|
||||
await $`git config commit.gpgsign true`;
|
||||
|
||||
console.log("✓ Git configured to use SSH signing for commits");
|
||||
}
|
||||
|
||||
/**
|
||||
* Clean up the SSH signing key file
|
||||
* Should be called in the post step for security
|
||||
*/
|
||||
export async function cleanupSshSigning(): Promise<void> {
|
||||
try {
|
||||
await rm(SSH_SIGNING_KEY_PATH, { force: true });
|
||||
console.log("✓ SSH signing key cleaned up");
|
||||
} catch (error) {
|
||||
console.log("No SSH signing key to clean up");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,11 +4,12 @@ import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
|
||||
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
|
||||
import { z } from "zod";
|
||||
import { readFile, stat } from "fs/promises";
|
||||
import { join } from "path";
|
||||
import { resolve } from "path";
|
||||
import { constants } from "fs";
|
||||
import fetch from "node-fetch";
|
||||
import { GITHUB_API_URL } from "../github/api/config";
|
||||
import { retryWithBackoff } from "../utils/retry";
|
||||
import { validatePathWithinRepo } from "./path-validation";
|
||||
|
||||
type GitHubRef = {
|
||||
object: {
|
||||
@@ -213,12 +214,18 @@ server.tool(
|
||||
throw new Error("GITHUB_TOKEN environment variable is required");
|
||||
}
|
||||
|
||||
const processedFiles = files.map((filePath) => {
|
||||
if (filePath.startsWith("/")) {
|
||||
return filePath.slice(1);
|
||||
}
|
||||
return filePath;
|
||||
});
|
||||
// Validate all paths are within repository root and get full/relative paths
|
||||
const resolvedRepoDir = resolve(REPO_DIR);
|
||||
const validatedFiles = await Promise.all(
|
||||
files.map(async (filePath) => {
|
||||
const fullPath = await validatePathWithinRepo(filePath, REPO_DIR);
|
||||
// Calculate the relative path for the git tree entry
|
||||
// Use the original filePath (normalized) for the git path, not the symlink-resolved path
|
||||
const normalizedPath = resolve(resolvedRepoDir, filePath);
|
||||
const relativePath = normalizedPath.slice(resolvedRepoDir.length + 1);
|
||||
return { fullPath, relativePath };
|
||||
}),
|
||||
);
|
||||
|
||||
// 1. Get the branch reference (create if doesn't exist)
|
||||
const baseSha = await getOrCreateBranchRef(
|
||||
@@ -247,18 +254,14 @@ server.tool(
|
||||
|
||||
// 3. Create tree entries for all files
|
||||
const treeEntries = await Promise.all(
|
||||
processedFiles.map(async (filePath) => {
|
||||
const fullPath = filePath.startsWith("/")
|
||||
? filePath
|
||||
: join(REPO_DIR, filePath);
|
||||
|
||||
validatedFiles.map(async ({ fullPath, relativePath }) => {
|
||||
// Get the proper file mode based on file permissions
|
||||
const fileMode = await getFileMode(fullPath);
|
||||
|
||||
// Check if file is binary (images, etc.)
|
||||
const isBinaryFile =
|
||||
/\.(png|jpg|jpeg|gif|webp|ico|pdf|zip|tar|gz|exe|bin|woff|woff2|ttf|eot)$/i.test(
|
||||
filePath,
|
||||
relativePath,
|
||||
);
|
||||
|
||||
if (isBinaryFile) {
|
||||
@@ -284,7 +287,7 @@ server.tool(
|
||||
if (!blobResponse.ok) {
|
||||
const errorText = await blobResponse.text();
|
||||
throw new Error(
|
||||
`Failed to create blob for ${filePath}: ${blobResponse.status} - ${errorText}`,
|
||||
`Failed to create blob for ${relativePath}: ${blobResponse.status} - ${errorText}`,
|
||||
);
|
||||
}
|
||||
|
||||
@@ -292,7 +295,7 @@ server.tool(
|
||||
|
||||
// Return tree entry with blob SHA
|
||||
return {
|
||||
path: filePath,
|
||||
path: relativePath,
|
||||
mode: fileMode,
|
||||
type: "blob",
|
||||
sha: blobData.sha,
|
||||
@@ -301,7 +304,7 @@ server.tool(
|
||||
// For text files, include content directly in tree
|
||||
const content = await readFile(fullPath, "utf-8");
|
||||
return {
|
||||
path: filePath,
|
||||
path: relativePath,
|
||||
mode: fileMode,
|
||||
type: "blob",
|
||||
content: content,
|
||||
@@ -421,7 +424,9 @@ server.tool(
|
||||
author: newCommitData.author.name,
|
||||
date: newCommitData.author.date,
|
||||
},
|
||||
files: processedFiles.map((path) => ({ path })),
|
||||
files: validatedFiles.map(({ relativePath }) => ({
|
||||
path: relativePath,
|
||||
})),
|
||||
tree: {
|
||||
sha: treeData.sha,
|
||||
},
|
||||
|
||||
64
src/mcp/path-validation.ts
Normal file
@@ -0,0 +1,64 @@
|
||||
import { realpath } from "fs/promises";
|
||||
import { resolve, sep } from "path";
|
||||
|
||||
/**
|
||||
* Validates that a file path resolves within the repository root.
|
||||
* Prevents path traversal attacks via "../" sequences and symlinks.
|
||||
* @param filePath - The file path to validate (can be relative or absolute)
|
||||
* @param repoRoot - The repository root directory
|
||||
* @returns The resolved absolute path (with symlinks resolved) if valid
|
||||
* @throws Error if the path resolves outside the repository root
|
||||
*/
|
||||
export async function validatePathWithinRepo(
|
||||
filePath: string,
|
||||
repoRoot: string,
|
||||
): Promise<string> {
|
||||
// First resolve the path string (handles .. and . segments)
|
||||
const initialPath = resolve(repoRoot, filePath);
|
||||
|
||||
// Resolve symlinks to get the real path
|
||||
// This prevents symlink attacks where a link inside the repo points outside
|
||||
let resolvedRoot: string;
|
||||
let resolvedPath: string;
|
||||
|
||||
try {
|
||||
resolvedRoot = await realpath(repoRoot);
|
||||
} catch {
|
||||
throw new Error(`Repository root '${repoRoot}' does not exist`);
|
||||
}
|
||||
|
||||
try {
|
||||
resolvedPath = await realpath(initialPath);
|
||||
} catch {
|
||||
// File doesn't exist yet - fall back to checking the parent directory
|
||||
// This handles the case where we're creating a new file
|
||||
const parentDir = resolve(initialPath, "..");
|
||||
try {
|
||||
const resolvedParent = await realpath(parentDir);
|
||||
if (
|
||||
resolvedParent !== resolvedRoot &&
|
||||
!resolvedParent.startsWith(resolvedRoot + sep)
|
||||
) {
|
||||
throw new Error(
|
||||
`Path '${filePath}' resolves outside the repository root`,
|
||||
);
|
||||
}
|
||||
// Parent is valid, return the initial path since file doesn't exist yet
|
||||
return initialPath;
|
||||
} catch {
|
||||
throw new Error(
|
||||
`Path '${filePath}' resolves outside the repository root`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Path must be within repo root (or be the root itself)
|
||||
if (
|
||||
resolvedPath !== resolvedRoot &&
|
||||
!resolvedPath.startsWith(resolvedRoot + sep)
|
||||
) {
|
||||
throw new Error(`Path '${filePath}' resolves outside the repository root`);
|
||||
}
|
||||
|
||||
return resolvedPath;
|
||||
}
|
||||
@@ -4,7 +4,10 @@ import type { Mode, ModeOptions, ModeResult } from "../types";
|
||||
import type { PreparedContext } from "../../create-prompt/types";
|
||||
import { prepareMcpConfig } from "../../mcp/install-mcp-server";
|
||||
import { parseAllowedTools } from "./parse-tools";
|
||||
import { configureGitAuth } from "../../github/operations/git-config";
|
||||
import {
|
||||
configureGitAuth,
|
||||
setupSshSigning,
|
||||
} from "../../github/operations/git-config";
|
||||
import type { GitHubContext } from "../../github/context";
|
||||
import { isEntityContext } from "../../github/context";
|
||||
|
||||
@@ -79,7 +82,27 @@ export const agentMode: Mode = {
|
||||
|
||||
async prepare({ context, githubToken }: ModeOptions): Promise<ModeResult> {
|
||||
// Configure git authentication for agent mode (same as tag mode)
|
||||
if (!context.inputs.useCommitSigning) {
|
||||
// SSH signing takes precedence if provided
|
||||
const useSshSigning = !!context.inputs.sshSigningKey;
|
||||
const useApiCommitSigning =
|
||||
context.inputs.useCommitSigning && !useSshSigning;
|
||||
|
||||
if (useSshSigning) {
|
||||
// Setup SSH signing for commits
|
||||
await setupSshSigning(context.inputs.sshSigningKey);
|
||||
|
||||
// Still configure git auth for push operations (user/email and remote URL)
|
||||
const user = {
|
||||
login: context.inputs.botName,
|
||||
id: parseInt(context.inputs.botId),
|
||||
};
|
||||
try {
|
||||
await configureGitAuth(githubToken, context, user);
|
||||
} catch (error) {
|
||||
console.error("Failed to configure git authentication:", error);
|
||||
// Continue anyway - git operations may still work with default config
|
||||
}
|
||||
} else if (!useApiCommitSigning) {
|
||||
// Use bot_id and bot_name from inputs directly
|
||||
const user = {
|
||||
login: context.inputs.botName,
|
||||
|
||||
@@ -4,11 +4,15 @@ import { checkContainsTrigger } from "../../github/validation/trigger";
|
||||
import { checkHumanActor } from "../../github/validation/actor";
|
||||
import { createInitialComment } from "../../github/operations/comments/create-initial";
|
||||
import { setupBranch } from "../../github/operations/branch";
|
||||
import { configureGitAuth } from "../../github/operations/git-config";
|
||||
import {
|
||||
configureGitAuth,
|
||||
setupSshSigning,
|
||||
} from "../../github/operations/git-config";
|
||||
import { prepareMcpConfig } from "../../mcp/install-mcp-server";
|
||||
import {
|
||||
fetchGitHubData,
|
||||
extractTriggerTimestamp,
|
||||
extractOriginalTitle,
|
||||
} from "../../github/data/fetcher";
|
||||
import { createPrompt, generateDefaultPrompt } from "../../create-prompt";
|
||||
import { isEntityContext } from "../../github/context";
|
||||
@@ -75,6 +79,7 @@ export const tagMode: Mode = {
|
||||
const commentId = commentData.id;
|
||||
|
||||
const triggerTime = extractTriggerTimestamp(context);
|
||||
const originalTitle = extractOriginalTitle(context);
|
||||
|
||||
const githubData = await fetchGitHubData({
|
||||
octokits: octokit,
|
||||
@@ -83,13 +88,34 @@ export const tagMode: Mode = {
|
||||
isPR: context.isPR,
|
||||
triggerUsername: context.actor,
|
||||
triggerTime,
|
||||
originalTitle,
|
||||
});
|
||||
|
||||
// Setup branch
|
||||
const branchInfo = await setupBranch(octokit, githubData, context);
|
||||
|
||||
// Configure git authentication if not using commit signing
|
||||
if (!context.inputs.useCommitSigning) {
|
||||
// Configure git authentication
|
||||
// SSH signing takes precedence if provided
|
||||
const useSshSigning = !!context.inputs.sshSigningKey;
|
||||
const useApiCommitSigning =
|
||||
context.inputs.useCommitSigning && !useSshSigning;
|
||||
|
||||
if (useSshSigning) {
|
||||
// Setup SSH signing for commits
|
||||
await setupSshSigning(context.inputs.sshSigningKey);
|
||||
|
||||
// Still configure git auth for push operations (user/email and remote URL)
|
||||
const user = {
|
||||
login: context.inputs.botName,
|
||||
id: parseInt(context.inputs.botId),
|
||||
};
|
||||
try {
|
||||
await configureGitAuth(githubToken, context, user);
|
||||
} catch (error) {
|
||||
console.error("Failed to configure git authentication:", error);
|
||||
throw error;
|
||||
}
|
||||
} else if (!useApiCommitSigning) {
|
||||
// Use bot_id and bot_name from inputs directly
|
||||
const user = {
|
||||
login: context.inputs.botName,
|
||||
@@ -135,8 +161,9 @@ export const tagMode: Mode = {
|
||||
...userAllowedMCPTools,
|
||||
];
|
||||
|
||||
// Add git commands when not using commit signing
|
||||
if (!context.inputs.useCommitSigning) {
|
||||
// Add git commands when using git CLI (no API commit signing, or SSH signing)
|
||||
// SSH signing still uses git CLI, just with signing enabled
|
||||
if (!useApiCommitSigning) {
|
||||
tagModeTools.push(
|
||||
"Bash(git add:*)",
|
||||
"Bash(git commit:*)",
|
||||
@@ -147,7 +174,7 @@ export const tagMode: Mode = {
|
||||
"Bash(git rm:*)",
|
||||
);
|
||||
} else {
|
||||
// When using commit signing, use MCP file ops tools
|
||||
// When using API commit signing, use MCP file ops tools
|
||||
tagModeTools.push(
|
||||
"mcp__github_file_ops__commit_files",
|
||||
"mcp__github_file_ops__delete_files",
|
||||
|
||||
32
src/utils/extract-user-request.ts
Normal file
@@ -0,0 +1,32 @@
|
||||
/**
|
||||
* Extracts the user's request from a trigger comment.
|
||||
*
|
||||
* Given a comment like "@claude /review-pr please check the auth module",
|
||||
* this extracts "/review-pr please check the auth module".
|
||||
*
|
||||
* @param commentBody - The full comment body containing the trigger phrase
|
||||
* @param triggerPhrase - The trigger phrase (e.g., "@claude")
|
||||
* @returns The user's request (text after the trigger phrase), or null if not found
|
||||
*/
|
||||
export function extractUserRequest(
|
||||
commentBody: string | undefined,
|
||||
triggerPhrase: string,
|
||||
): string | null {
|
||||
if (!commentBody) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Use string operations instead of regex for better performance and security
|
||||
// (avoids potential ReDoS with large comment bodies)
|
||||
const triggerIndex = commentBody
|
||||
.toLowerCase()
|
||||
.indexOf(triggerPhrase.toLowerCase());
|
||||
if (triggerIndex === -1) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const afterTrigger = commentBody
|
||||
.substring(triggerIndex + triggerPhrase.length)
|
||||
.trim();
|
||||
return afterTrigger || null;
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
import { describe, expect, it, jest } from "bun:test";
|
||||
import {
|
||||
extractTriggerTimestamp,
|
||||
extractOriginalTitle,
|
||||
fetchGitHubData,
|
||||
filterCommentsToTriggerTime,
|
||||
filterReviewsToTriggerTime,
|
||||
@@ -9,6 +10,7 @@ import {
|
||||
import {
|
||||
createMockContext,
|
||||
mockIssueCommentContext,
|
||||
mockPullRequestCommentContext,
|
||||
mockPullRequestReviewContext,
|
||||
mockPullRequestReviewCommentContext,
|
||||
mockPullRequestOpenedContext,
|
||||
@@ -63,6 +65,47 @@ describe("extractTriggerTimestamp", () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe("extractOriginalTitle", () => {
|
||||
it("should extract title from IssueCommentEvent on PR", () => {
|
||||
const title = extractOriginalTitle(mockPullRequestCommentContext);
|
||||
expect(title).toBe("Fix: Memory leak in user service");
|
||||
});
|
||||
|
||||
it("should extract title from PullRequestReviewEvent", () => {
|
||||
const title = extractOriginalTitle(mockPullRequestReviewContext);
|
||||
expect(title).toBe("Refactor: Improve error handling in API layer");
|
||||
});
|
||||
|
||||
it("should extract title from PullRequestReviewCommentEvent", () => {
|
||||
const title = extractOriginalTitle(mockPullRequestReviewCommentContext);
|
||||
expect(title).toBe("Performance: Optimize search algorithm");
|
||||
});
|
||||
|
||||
it("should extract title from pull_request event", () => {
|
||||
const title = extractOriginalTitle(mockPullRequestOpenedContext);
|
||||
expect(title).toBe("Feature: Add user authentication");
|
||||
});
|
||||
|
||||
it("should extract title from issues event", () => {
|
||||
const title = extractOriginalTitle(mockIssueOpenedContext);
|
||||
expect(title).toBe("Bug: Application crashes on startup");
|
||||
});
|
||||
|
||||
it("should return undefined for event without title", () => {
|
||||
const context = createMockContext({
|
||||
eventName: "issue_comment",
|
||||
payload: {
|
||||
comment: {
|
||||
id: 123,
|
||||
body: "test",
|
||||
},
|
||||
} as any,
|
||||
});
|
||||
const title = extractOriginalTitle(context);
|
||||
expect(title).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe("filterCommentsToTriggerTime", () => {
|
||||
const createMockComment = (
|
||||
createdAt: string,
|
||||
@@ -945,4 +988,115 @@ describe("fetchGitHubData integration with time filtering", () => {
|
||||
);
|
||||
expect(hasPrBodyInMap).toBe(false);
|
||||
});
|
||||
|
||||
it("should use originalTitle when provided instead of fetched title", async () => {
|
||||
const mockOctokits = {
|
||||
graphql: jest.fn().mockResolvedValue({
|
||||
repository: {
|
||||
pullRequest: {
|
||||
number: 123,
|
||||
title: "Fetched Title From GraphQL",
|
||||
body: "PR body",
|
||||
author: { login: "author" },
|
||||
createdAt: "2024-01-15T10:00:00Z",
|
||||
additions: 10,
|
||||
deletions: 5,
|
||||
state: "OPEN",
|
||||
commits: { totalCount: 1, nodes: [] },
|
||||
files: { nodes: [] },
|
||||
comments: { nodes: [] },
|
||||
reviews: { nodes: [] },
|
||||
},
|
||||
},
|
||||
user: { login: "trigger-user" },
|
||||
}),
|
||||
rest: jest.fn() as any,
|
||||
};
|
||||
|
||||
const result = await fetchGitHubData({
|
||||
octokits: mockOctokits as any,
|
||||
repository: "test-owner/test-repo",
|
||||
prNumber: "123",
|
||||
isPR: true,
|
||||
triggerUsername: "trigger-user",
|
||||
originalTitle: "Original Title From Webhook",
|
||||
});
|
||||
|
||||
expect(result.contextData.title).toBe("Original Title From Webhook");
|
||||
});
|
||||
|
||||
it("should use fetched title when originalTitle is not provided", async () => {
|
||||
const mockOctokits = {
|
||||
graphql: jest.fn().mockResolvedValue({
|
||||
repository: {
|
||||
pullRequest: {
|
||||
number: 123,
|
||||
title: "Fetched Title From GraphQL",
|
||||
body: "PR body",
|
||||
author: { login: "author" },
|
||||
createdAt: "2024-01-15T10:00:00Z",
|
||||
additions: 10,
|
||||
deletions: 5,
|
||||
state: "OPEN",
|
||||
commits: { totalCount: 1, nodes: [] },
|
||||
files: { nodes: [] },
|
||||
comments: { nodes: [] },
|
||||
reviews: { nodes: [] },
|
||||
},
|
||||
},
|
||||
user: { login: "trigger-user" },
|
||||
}),
|
||||
rest: jest.fn() as any,
|
||||
};
|
||||
|
||||
const result = await fetchGitHubData({
|
||||
octokits: mockOctokits as any,
|
||||
repository: "test-owner/test-repo",
|
||||
prNumber: "123",
|
||||
isPR: true,
|
||||
triggerUsername: "trigger-user",
|
||||
});
|
||||
|
||||
expect(result.contextData.title).toBe("Fetched Title From GraphQL");
|
||||
});
|
||||
|
||||
it("should use original title from webhook even if title was edited after trigger", async () => {
|
||||
const mockOctokits = {
|
||||
graphql: jest.fn().mockResolvedValue({
|
||||
repository: {
|
||||
pullRequest: {
|
||||
number: 123,
|
||||
title: "Edited Title (from GraphQL)",
|
||||
body: "PR body",
|
||||
author: { login: "author" },
|
||||
createdAt: "2024-01-15T10:00:00Z",
|
||||
lastEditedAt: "2024-01-15T12:30:00Z", // Edited after trigger
|
||||
additions: 10,
|
||||
deletions: 5,
|
||||
state: "OPEN",
|
||||
commits: { totalCount: 1, nodes: [] },
|
||||
files: { nodes: [] },
|
||||
comments: { nodes: [] },
|
||||
reviews: { nodes: [] },
|
||||
},
|
||||
},
|
||||
user: { login: "trigger-user" },
|
||||
}),
|
||||
rest: jest.fn() as any,
|
||||
};
|
||||
|
||||
const result = await fetchGitHubData({
|
||||
octokits: mockOctokits as any,
|
||||
repository: "test-owner/test-repo",
|
||||
prNumber: "123",
|
||||
isPR: true,
|
||||
triggerUsername: "trigger-user",
|
||||
triggerTime: "2024-01-15T12:00:00Z",
|
||||
originalTitle: "Original Title (from webhook at trigger time)",
|
||||
});
|
||||
|
||||
expect(result.contextData.title).toBe(
|
||||
"Original Title (from webhook at trigger time)",
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
77
test/extract-user-request.test.ts
Normal file
@@ -0,0 +1,77 @@
|
||||
import { describe, test, expect } from "bun:test";
|
||||
import { extractUserRequest } from "../src/utils/extract-user-request";
|
||||
|
||||
describe("extractUserRequest", () => {
|
||||
test("extracts text after @claude trigger", () => {
|
||||
expect(extractUserRequest("@claude /review-pr", "@claude")).toBe(
|
||||
"/review-pr",
|
||||
);
|
||||
});
|
||||
|
||||
test("extracts slash command with arguments", () => {
|
||||
expect(
|
||||
extractUserRequest(
|
||||
"@claude /review-pr please check the auth module",
|
||||
"@claude",
|
||||
),
|
||||
).toBe("/review-pr please check the auth module");
|
||||
});
|
||||
|
||||
test("handles trigger phrase with extra whitespace", () => {
|
||||
expect(extractUserRequest("@claude /review-pr", "@claude")).toBe(
|
||||
"/review-pr",
|
||||
);
|
||||
});
|
||||
|
||||
test("handles trigger phrase at start of multiline comment", () => {
|
||||
const comment = `@claude /review-pr
|
||||
Please review this PR carefully.
|
||||
Focus on security issues.`;
|
||||
expect(extractUserRequest(comment, "@claude")).toBe(
|
||||
`/review-pr
|
||||
Please review this PR carefully.
|
||||
Focus on security issues.`,
|
||||
);
|
||||
});
|
||||
|
||||
test("handles trigger phrase in middle of text", () => {
|
||||
expect(
|
||||
extractUserRequest("Hey team, @claude can you review this?", "@claude"),
|
||||
).toBe("can you review this?");
|
||||
});
|
||||
|
||||
test("returns null for empty comment body", () => {
|
||||
expect(extractUserRequest("", "@claude")).toBeNull();
|
||||
});
|
||||
|
||||
test("returns null for undefined comment body", () => {
|
||||
expect(extractUserRequest(undefined, "@claude")).toBeNull();
|
||||
});
|
||||
|
||||
test("returns null when trigger phrase not found", () => {
|
||||
expect(extractUserRequest("Please review this PR", "@claude")).toBeNull();
|
||||
});
|
||||
|
||||
test("returns null when only trigger phrase with no request", () => {
|
||||
expect(extractUserRequest("@claude", "@claude")).toBeNull();
|
||||
});
|
||||
|
||||
test("handles custom trigger phrase", () => {
|
||||
expect(extractUserRequest("/claude help me", "/claude")).toBe("help me");
|
||||
});
|
||||
|
||||
test("handles trigger phrase with special regex characters", () => {
|
||||
expect(
|
||||
extractUserRequest("@claude[bot] do something", "@claude[bot]"),
|
||||
).toBe("do something");
|
||||
});
|
||||
|
||||
test("is case insensitive", () => {
|
||||
expect(extractUserRequest("@CLAUDE /review-pr", "@claude")).toBe(
|
||||
"/review-pr",
|
||||
);
|
||||
expect(extractUserRequest("@Claude /review-pr", "@claude")).toBe(
|
||||
"/review-pr",
|
||||
);
|
||||
});
|
||||
});
|
||||
214
test/github-file-ops-path-validation.test.ts
Normal file
@@ -0,0 +1,214 @@
|
||||
import { describe, expect, it, beforeAll, afterAll } from "bun:test";
|
||||
import { validatePathWithinRepo } from "../src/mcp/path-validation";
|
||||
import { resolve } from "path";
|
||||
import { mkdir, writeFile, symlink, rm, realpath } from "fs/promises";
|
||||
import { tmpdir } from "os";
|
||||
|
||||
describe("validatePathWithinRepo", () => {
|
||||
// Use a real temp directory for tests that need filesystem access
|
||||
let testDir: string;
|
||||
let repoRoot: string;
|
||||
let outsideDir: string;
|
||||
// Real paths after symlink resolution (e.g., /tmp -> /private/tmp on macOS)
|
||||
let realRepoRoot: string;
|
||||
|
||||
beforeAll(async () => {
|
||||
// Create test directory structure
|
||||
testDir = resolve(tmpdir(), `path-validation-test-${Date.now()}`);
|
||||
repoRoot = resolve(testDir, "repo");
|
||||
outsideDir = resolve(testDir, "outside");
|
||||
|
||||
await mkdir(repoRoot, { recursive: true });
|
||||
await mkdir(resolve(repoRoot, "src"), { recursive: true });
|
||||
await mkdir(outsideDir, { recursive: true });
|
||||
|
||||
// Create test files
|
||||
await writeFile(resolve(repoRoot, "file.txt"), "inside repo");
|
||||
await writeFile(resolve(repoRoot, "src", "main.js"), "console.log('hi')");
|
||||
await writeFile(resolve(outsideDir, "secret.txt"), "sensitive data");
|
||||
|
||||
// Get real paths after symlink resolution
|
||||
realRepoRoot = await realpath(repoRoot);
|
||||
});
|
||||
|
||||
afterAll(async () => {
|
||||
// Cleanup
|
||||
await rm(testDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
describe("valid paths", () => {
|
||||
it("should accept simple relative paths", async () => {
|
||||
const result = await validatePathWithinRepo("file.txt", repoRoot);
|
||||
expect(result).toBe(resolve(realRepoRoot, "file.txt"));
|
||||
});
|
||||
|
||||
it("should accept nested relative paths", async () => {
|
||||
const result = await validatePathWithinRepo("src/main.js", repoRoot);
|
||||
expect(result).toBe(resolve(realRepoRoot, "src/main.js"));
|
||||
});
|
||||
|
||||
it("should accept paths with single dot segments", async () => {
|
||||
const result = await validatePathWithinRepo("./src/main.js", repoRoot);
|
||||
expect(result).toBe(resolve(realRepoRoot, "src/main.js"));
|
||||
});
|
||||
|
||||
it("should accept paths that use .. but resolve inside repo", async () => {
|
||||
// src/../file.txt resolves to file.txt which is still inside repo
|
||||
const result = await validatePathWithinRepo("src/../file.txt", repoRoot);
|
||||
expect(result).toBe(resolve(realRepoRoot, "file.txt"));
|
||||
});
|
||||
|
||||
it("should accept absolute paths within the repo root", async () => {
|
||||
const absolutePath = resolve(repoRoot, "file.txt");
|
||||
const result = await validatePathWithinRepo(absolutePath, repoRoot);
|
||||
expect(result).toBe(resolve(realRepoRoot, "file.txt"));
|
||||
});
|
||||
|
||||
it("should accept the repo root itself", async () => {
|
||||
const result = await validatePathWithinRepo(".", repoRoot);
|
||||
expect(result).toBe(realRepoRoot);
|
||||
});
|
||||
|
||||
it("should handle new files (non-existent) in valid directories", async () => {
|
||||
const result = await validatePathWithinRepo("src/newfile.js", repoRoot);
|
||||
// For non-existent files, we validate the parent but return the initial path
|
||||
// (can't realpath a file that doesn't exist yet)
|
||||
expect(result).toBe(resolve(repoRoot, "src/newfile.js"));
|
||||
});
|
||||
});
|
||||
|
||||
describe("path traversal attacks", () => {
|
||||
it("should reject simple parent directory traversal", async () => {
|
||||
await expect(
|
||||
validatePathWithinRepo("../outside/secret.txt", repoRoot),
|
||||
).rejects.toThrow(/resolves outside the repository root/);
|
||||
});
|
||||
|
||||
it("should reject deeply nested parent directory traversal", async () => {
|
||||
await expect(
|
||||
validatePathWithinRepo("../../../etc/passwd", repoRoot),
|
||||
).rejects.toThrow(/resolves outside the repository root/);
|
||||
});
|
||||
|
||||
it("should reject traversal hidden within path", async () => {
|
||||
await expect(
|
||||
validatePathWithinRepo("src/../../outside/secret.txt", repoRoot),
|
||||
).rejects.toThrow(/resolves outside the repository root/);
|
||||
});
|
||||
|
||||
it("should reject traversal at the end of path", async () => {
|
||||
await expect(
|
||||
validatePathWithinRepo("src/../..", repoRoot),
|
||||
).rejects.toThrow(/resolves outside the repository root/);
|
||||
});
|
||||
|
||||
it("should reject absolute paths outside the repo root", async () => {
|
||||
await expect(
|
||||
validatePathWithinRepo("/etc/passwd", repoRoot),
|
||||
).rejects.toThrow(/resolves outside the repository root/);
|
||||
});
|
||||
|
||||
it("should reject absolute paths to sibling directories", async () => {
|
||||
await expect(
|
||||
validatePathWithinRepo(resolve(outsideDir, "secret.txt"), repoRoot),
|
||||
).rejects.toThrow(/resolves outside the repository root/);
|
||||
});
|
||||
});
|
||||
|
||||
describe("symlink attacks", () => {
|
||||
it("should reject symlinks pointing outside the repo", async () => {
|
||||
// Create a symlink inside the repo that points to a file outside
|
||||
const symlinkPath = resolve(repoRoot, "evil-link");
|
||||
await symlink(resolve(outsideDir, "secret.txt"), symlinkPath);
|
||||
|
||||
try {
|
||||
// The symlink path looks like it's inside the repo, but points outside
|
||||
await expect(
|
||||
validatePathWithinRepo("evil-link", repoRoot),
|
||||
).rejects.toThrow(/resolves outside the repository root/);
|
||||
} finally {
|
||||
await rm(symlinkPath, { force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should reject symlinks to parent directories", async () => {
|
||||
// Create a symlink to the parent directory
|
||||
const symlinkPath = resolve(repoRoot, "parent-link");
|
||||
await symlink(testDir, symlinkPath);
|
||||
|
||||
try {
|
||||
await expect(
|
||||
validatePathWithinRepo("parent-link/outside/secret.txt", repoRoot),
|
||||
).rejects.toThrow(/resolves outside the repository root/);
|
||||
} finally {
|
||||
await rm(symlinkPath, { force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should accept symlinks that resolve within the repo", async () => {
|
||||
// Create a symlink inside the repo that points to another file inside
|
||||
const symlinkPath = resolve(repoRoot, "good-link");
|
||||
await symlink(resolve(repoRoot, "file.txt"), symlinkPath);
|
||||
|
||||
try {
|
||||
const result = await validatePathWithinRepo("good-link", repoRoot);
|
||||
// Should resolve to the actual file location
|
||||
expect(result).toBe(resolve(realRepoRoot, "file.txt"));
|
||||
} finally {
|
||||
await rm(symlinkPath, { force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should reject directory symlinks that escape the repo", async () => {
|
||||
// Create a symlink to outside directory
|
||||
const symlinkPath = resolve(repoRoot, "escape-dir");
|
||||
await symlink(outsideDir, symlinkPath);
|
||||
|
||||
try {
|
||||
await expect(
|
||||
validatePathWithinRepo("escape-dir/secret.txt", repoRoot),
|
||||
).rejects.toThrow(/resolves outside the repository root/);
|
||||
} finally {
|
||||
await rm(symlinkPath, { force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
describe("edge cases", () => {
|
||||
it("should handle empty path (current directory)", async () => {
|
||||
const result = await validatePathWithinRepo("", repoRoot);
|
||||
expect(result).toBe(realRepoRoot);
|
||||
});
|
||||
|
||||
it("should handle paths with multiple consecutive slashes", async () => {
|
||||
const result = await validatePathWithinRepo("src//main.js", repoRoot);
|
||||
expect(result).toBe(resolve(realRepoRoot, "src/main.js"));
|
||||
});
|
||||
|
||||
it("should handle paths with trailing slashes", async () => {
|
||||
const result = await validatePathWithinRepo("src/", repoRoot);
|
||||
expect(result).toBe(resolve(realRepoRoot, "src"));
|
||||
});
|
||||
|
||||
it("should reject prefix attack (repo root as prefix but not parent)", async () => {
|
||||
// Create a sibling directory with repo name as prefix
|
||||
const evilDir = repoRoot + "-evil";
|
||||
await mkdir(evilDir, { recursive: true });
|
||||
await writeFile(resolve(evilDir, "file.txt"), "evil");
|
||||
|
||||
try {
|
||||
await expect(
|
||||
validatePathWithinRepo(resolve(evilDir, "file.txt"), repoRoot),
|
||||
).rejects.toThrow(/resolves outside the repository root/);
|
||||
} finally {
|
||||
await rm(evilDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should throw error for non-existent repo root", async () => {
|
||||
await expect(
|
||||
validatePathWithinRepo("file.txt", "/nonexistent/repo"),
|
||||
).rejects.toThrow(/does not exist/);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -32,11 +32,13 @@ describe("prepareMcpConfig", () => {
|
||||
branchPrefix: "",
|
||||
useStickyComment: false,
|
||||
useCommitSigning: false,
|
||||
sshSigningKey: "",
|
||||
botId: String(CLAUDE_APP_BOT_ID),
|
||||
botName: CLAUDE_BOT_LOGIN,
|
||||
allowedBots: "",
|
||||
allowedNonWriteUsers: "",
|
||||
trackProgress: false,
|
||||
includeFixLinks: true,
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
@@ -20,11 +20,13 @@ const defaultInputs = {
|
||||
branchPrefix: "claude/",
|
||||
useStickyComment: false,
|
||||
useCommitSigning: false,
|
||||
sshSigningKey: "",
|
||||
botId: String(CLAUDE_APP_BOT_ID),
|
||||
botName: CLAUDE_BOT_LOGIN,
|
||||
allowedBots: "",
|
||||
allowedNonWriteUsers: "",
|
||||
trackProgress: false,
|
||||
includeFixLinks: true,
|
||||
};
|
||||
|
||||
const defaultRepository = {
|
||||
|
||||
@@ -20,11 +20,13 @@ describe("detectMode with enhanced routing", () => {
|
||||
branchPrefix: "claude/",
|
||||
useStickyComment: false,
|
||||
useCommitSigning: false,
|
||||
sshSigningKey: "",
|
||||
botId: "123456",
|
||||
botName: "claude-bot",
|
||||
allowedBots: "",
|
||||
allowedNonWriteUsers: "",
|
||||
trackProgress: false,
|
||||
includeFixLinks: true,
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
@@ -68,11 +68,13 @@ describe("checkWritePermissions", () => {
|
||||
branchPrefix: "claude/",
|
||||
useStickyComment: false,
|
||||
useCommitSigning: false,
|
||||
sshSigningKey: "",
|
||||
botId: String(CLAUDE_APP_BOT_ID),
|
||||
botName: CLAUDE_BOT_LOGIN,
|
||||
allowedBots: "",
|
||||
allowedNonWriteUsers: "",
|
||||
trackProgress: false,
|
||||
includeFixLinks: true,
|
||||
},
|
||||
});
|
||||
|
||||
|
||||
250
test/ssh-signing.test.ts
Normal file
@@ -0,0 +1,250 @@
|
||||
#!/usr/bin/env bun
|
||||
|
||||
import {
|
||||
describe,
|
||||
test,
|
||||
expect,
|
||||
afterEach,
|
||||
beforeAll,
|
||||
afterAll,
|
||||
} from "bun:test";
|
||||
import { mkdir, writeFile, rm, readFile, stat } from "fs/promises";
|
||||
import { join } from "path";
|
||||
import { tmpdir } from "os";
|
||||
|
||||
describe("SSH Signing", () => {
|
||||
// Use a temp directory for tests
|
||||
const testTmpDir = join(tmpdir(), "claude-ssh-signing-test");
|
||||
const testSshDir = join(testTmpDir, ".ssh");
|
||||
const testKeyPath = join(testSshDir, "claude_signing_key");
|
||||
const testKey =
|
||||
"-----BEGIN OPENSSH PRIVATE KEY-----\ntest-key-content\n-----END OPENSSH PRIVATE KEY-----";
|
||||
|
||||
beforeAll(async () => {
|
||||
await mkdir(testTmpDir, { recursive: true });
|
||||
});
|
||||
|
||||
afterAll(async () => {
|
||||
await rm(testTmpDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
// Clean up test key if it exists
|
||||
try {
|
||||
await rm(testKeyPath, { force: true });
|
||||
} catch {
|
||||
// Ignore cleanup errors
|
||||
}
|
||||
});
|
||||
|
||||
describe("setupSshSigning file operations", () => {
|
||||
test("should write key file atomically with correct permissions", async () => {
|
||||
// Create the directory with secure permissions (same as setupSshSigning does)
|
||||
await mkdir(testSshDir, { recursive: true, mode: 0o700 });
|
||||
|
||||
// Write key atomically with proper permissions (same as setupSshSigning does)
|
||||
await writeFile(testKeyPath, testKey, { mode: 0o600 });
|
||||
|
||||
// Verify key was written
|
||||
const keyContent = await readFile(testKeyPath, "utf-8");
|
||||
expect(keyContent).toBe(testKey);
|
||||
|
||||
// Verify permissions (0o600 = 384 in decimal for permission bits only)
|
||||
const stats = await stat(testKeyPath);
|
||||
const permissions = stats.mode & 0o777; // Get only permission bits
|
||||
expect(permissions).toBe(0o600);
|
||||
});
|
||||
|
||||
test("should create .ssh directory with secure permissions", async () => {
|
||||
// Clean up first
|
||||
await rm(testSshDir, { recursive: true, force: true });
|
||||
|
||||
// Create directory with secure permissions (same as setupSshSigning does)
|
||||
await mkdir(testSshDir, { recursive: true, mode: 0o700 });
|
||||
|
||||
// Verify directory exists
|
||||
const dirStats = await stat(testSshDir);
|
||||
expect(dirStats.isDirectory()).toBe(true);
|
||||
|
||||
// Verify directory permissions
|
||||
const dirPermissions = dirStats.mode & 0o777;
|
||||
expect(dirPermissions).toBe(0o700);
|
||||
});
|
||||
});
|
||||
|
||||
describe("setupSshSigning validation", () => {
|
||||
test("should reject empty SSH key", () => {
|
||||
const emptyKey = "";
|
||||
expect(() => {
|
||||
if (!emptyKey.trim()) {
|
||||
throw new Error("SSH signing key cannot be empty");
|
||||
}
|
||||
}).toThrow("SSH signing key cannot be empty");
|
||||
});
|
||||
|
||||
test("should reject whitespace-only SSH key", () => {
|
||||
const whitespaceKey = " \n\t ";
|
||||
expect(() => {
|
||||
if (!whitespaceKey.trim()) {
|
||||
throw new Error("SSH signing key cannot be empty");
|
||||
}
|
||||
}).toThrow("SSH signing key cannot be empty");
|
||||
});
|
||||
|
||||
test("should reject invalid SSH key format", () => {
|
||||
const invalidKey = "not a valid key";
|
||||
expect(() => {
|
||||
if (
|
||||
!invalidKey.includes("BEGIN") ||
|
||||
!invalidKey.includes("PRIVATE KEY")
|
||||
) {
|
||||
throw new Error("Invalid SSH private key format");
|
||||
}
|
||||
}).toThrow("Invalid SSH private key format");
|
||||
});
|
||||
|
||||
test("should accept valid SSH key format", () => {
|
||||
const validKey =
|
||||
"-----BEGIN OPENSSH PRIVATE KEY-----\nkey-content\n-----END OPENSSH PRIVATE KEY-----";
|
||||
expect(() => {
|
||||
if (!validKey.trim()) {
|
||||
throw new Error("SSH signing key cannot be empty");
|
||||
}
|
||||
if (!validKey.includes("BEGIN") || !validKey.includes("PRIVATE KEY")) {
|
||||
throw new Error("Invalid SSH private key format");
|
||||
}
|
||||
}).not.toThrow();
|
||||
});
|
||||
});
|
||||
|
||||
describe("cleanupSshSigning file operations", () => {
|
||||
test("should remove the signing key file", async () => {
|
||||
// Create the key file first
|
||||
await mkdir(testSshDir, { recursive: true });
|
||||
await writeFile(testKeyPath, testKey, { mode: 0o600 });
|
||||
|
||||
// Verify it exists
|
||||
const existsBefore = await stat(testKeyPath)
|
||||
.then(() => true)
|
||||
.catch(() => false);
|
||||
expect(existsBefore).toBe(true);
|
||||
|
||||
// Clean up (same operation as cleanupSshSigning)
|
||||
await rm(testKeyPath, { force: true });
|
||||
|
||||
// Verify it's gone
|
||||
const existsAfter = await stat(testKeyPath)
|
||||
.then(() => true)
|
||||
.catch(() => false);
|
||||
expect(existsAfter).toBe(false);
|
||||
});
|
||||
|
||||
test("should not throw if key file does not exist", async () => {
|
||||
// Make sure file doesn't exist
|
||||
await rm(testKeyPath, { force: true });
|
||||
|
||||
// Should not throw (rm with force: true doesn't throw on missing files)
|
||||
await expect(rm(testKeyPath, { force: true })).resolves.toBeUndefined();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("SSH Signing Mode Detection", () => {
|
||||
test("sshSigningKey should take precedence over useCommitSigning", () => {
|
||||
// When both are set, SSH signing takes precedence
|
||||
const sshSigningKey = "test-key";
|
||||
const useCommitSigning = true;
|
||||
|
||||
const useSshSigning = !!sshSigningKey;
|
||||
const useApiCommitSigning = useCommitSigning && !useSshSigning;
|
||||
|
||||
expect(useSshSigning).toBe(true);
|
||||
expect(useApiCommitSigning).toBe(false);
|
||||
});
|
||||
|
||||
test("useCommitSigning should work when sshSigningKey is not set", () => {
|
||||
const sshSigningKey = "";
|
||||
const useCommitSigning = true;
|
||||
|
||||
const useSshSigning = !!sshSigningKey;
|
||||
const useApiCommitSigning = useCommitSigning && !useSshSigning;
|
||||
|
||||
expect(useSshSigning).toBe(false);
|
||||
expect(useApiCommitSigning).toBe(true);
|
||||
});
|
||||
|
||||
test("neither signing method when both are false/empty", () => {
|
||||
const sshSigningKey = "";
|
||||
const useCommitSigning = false;
|
||||
|
||||
const useSshSigning = !!sshSigningKey;
|
||||
const useApiCommitSigning = useCommitSigning && !useSshSigning;
|
||||
|
||||
expect(useSshSigning).toBe(false);
|
||||
expect(useApiCommitSigning).toBe(false);
|
||||
});
|
||||
|
||||
test("git CLI tools should be used when sshSigningKey is set", () => {
|
||||
// This tests the logic in tag mode for tool selection
|
||||
const sshSigningKey = "test-key";
|
||||
const useCommitSigning = true; // Even if this is true
|
||||
|
||||
const useSshSigning = !!sshSigningKey;
|
||||
const useApiCommitSigning = useCommitSigning && !useSshSigning;
|
||||
|
||||
// When SSH signing is used, we should use git CLI (not API)
|
||||
const shouldUseGitCli = !useApiCommitSigning;
|
||||
expect(shouldUseGitCli).toBe(true);
|
||||
});
|
||||
|
||||
test("MCP file ops should only be used with API commit signing", () => {
|
||||
// Case 1: API commit signing
|
||||
{
|
||||
const sshSigningKey = "";
|
||||
const useCommitSigning = true;
|
||||
|
||||
const useSshSigning = !!sshSigningKey;
|
||||
const useApiCommitSigning = useCommitSigning && !useSshSigning;
|
||||
|
||||
expect(useApiCommitSigning).toBe(true);
|
||||
}
|
||||
|
||||
// Case 2: SSH signing (should NOT use API)
|
||||
{
|
||||
const sshSigningKey = "test-key";
|
||||
const useCommitSigning = true;
|
||||
|
||||
const useSshSigning = !!sshSigningKey;
|
||||
const useApiCommitSigning = useCommitSigning && !useSshSigning;
|
||||
|
||||
expect(useApiCommitSigning).toBe(false);
|
||||
}
|
||||
|
||||
// Case 3: No signing (should NOT use API)
|
||||
{
|
||||
const sshSigningKey = "";
|
||||
const useCommitSigning = false;
|
||||
|
||||
const useSshSigning = !!sshSigningKey;
|
||||
const useApiCommitSigning = useCommitSigning && !useSshSigning;
|
||||
|
||||
expect(useApiCommitSigning).toBe(false);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
describe("Context parsing", () => {
|
||||
test("sshSigningKey should be parsed from environment", () => {
|
||||
// Test that context.ts parses SSH_SIGNING_KEY correctly
|
||||
const testCases = [
|
||||
{ env: "test-key", expected: "test-key" },
|
||||
{ env: "", expected: "" },
|
||||
{ env: undefined, expected: "" },
|
||||
];
|
||||
|
||||
for (const { env, expected } of testCases) {
|
||||
const result = env || "";
|
||||
expect(result).toBe(expected);
|
||||
}
|
||||
});
|
||||
});
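For reference, below is a minimal sketch of a setupSshSigning / cleanupSshSigning pair consistent with the behaviour these tests mirror (directory created with mode 0o700, key written with mode 0o600, idempotent cleanup). The function signatures, the "signing_key" file name, and the homeDir parameter are assumptions for illustration only; the action's actual implementation may differ.

import { mkdir, rm, writeFile } from "fs/promises";
import { join } from "path";

// Illustrative sketch only; not the repository's actual implementation.
export async function setupSshSigning(
  sshSigningKey: string,
  homeDir: string,
): Promise<string> {
  if (!sshSigningKey.trim()) {
    throw new Error("SSH signing key cannot be empty");
  }
  if (
    !sshSigningKey.includes("BEGIN") ||
    !sshSigningKey.includes("PRIVATE KEY")
  ) {
    throw new Error("Invalid SSH private key format");
  }

  // .ssh directory accessible only by the owner (0o700), as the tests expect
  const sshDir = join(homeDir, ".ssh");
  await mkdir(sshDir, { recursive: true, mode: 0o700 });

  // Key file restricted to owner read/write (0o600); file name is hypothetical
  const keyPath = join(sshDir, "signing_key");
  await writeFile(keyPath, sshSigningKey, { mode: 0o600 });
  return keyPath;
}

// Cleanup is idempotent: force: true means a missing key file is not an error.
export async function cleanupSshSigning(keyPath: string): Promise<void> {
  await rm(keyPath, { force: true });
}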