Mirror of https://github.com/anthropics/claude-code-action.git, synced 2026-01-23 15:04:13 +08:00
Compare commits
23 Commits
| SHA1 |
|---|
| d7b6d50442 |
| f375cabfab |
| 9acae263e7 |
| 67bf0594ce |
| b58533dbe0 |
| bda9bf08de |
| 79b343c094 |
| 609c388361 |
| f0c8eb2980 |
| 68a0348c20 |
| dc06a34646 |
| a3bb51dac1 |
| 6610520549 |
| e2eb96f51d |
| 05c95aed79 |
| bb4a3f68f7 |
| 2acd1f7011 |
| 469fc9c1a4 |
| 90da6b6e15 |
| 752ba96ea1 |
| 66bf95c07f |
| 6337623ebb |
| 6d79044f1d |
.github/workflows/claude.yml (vendored, 2 changes)
@@ -36,4 +36,4 @@ jobs:
           anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
           claude_args: |
             --allowedTools "Bash(bun install),Bash(bun test:*),Bash(bun run format),Bash(bun typecheck)"
-            --model "claude-opus-4-1-20250805"
+            --model "claude-opus-4-5"
.github/workflows/sync-base-action.yml (vendored, 4 changes)
@@ -94,5 +94,5 @@ jobs:
           echo "✅ Successfully synced \`base-action\` directory to [anthropics/claude-code-base-action](https://github.com/anthropics/claude-code-base-action)" >> $GITHUB_STEP_SUMMARY
           echo "" >> $GITHUB_STEP_SUMMARY
           echo "- **Source commit**: [\`${GITHUB_SHA:0:7}\`](https://github.com/anthropics/claude-code-action/commit/${GITHUB_SHA})" >> $GITHUB_STEP_SUMMARY
-          echo "- **Triggered by**: ${{ github.event_name }}" >> $GITHUB_STEP_SUMMARY
-          echo "- **Actor**: @${{ github.actor }}" >> $GITHUB_STEP_SUMMARY
+          echo "- **Triggered by**: $GITHUB_EVENT_NAME" >> $GITHUB_STEP_SUMMARY
+          echo "- **Actor**: @$GITHUB_ACTOR" >> $GITHUB_STEP_SUMMARY
.github/workflows/test-base-action.yml (vendored, 58 changes)
@@ -118,3 +118,61 @@ jobs:
             echo "❌ Execution log file not found"
             exit 1
           fi
+
+  test-agent-sdk:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
+
+      - name: Test with Agent SDK
+        id: sdk-test
+        uses: ./base-action
+        env:
+          USE_AGENT_SDK: "true"
+        with:
+          prompt: ${{ github.event.inputs.test_prompt || 'List the files in the current directory starting with "package"' }}
+          anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
+          allowed_tools: "LS,Read"
+
+      - name: Verify SDK output
+        run: |
+          OUTPUT_FILE="${{ steps.sdk-test.outputs.execution_file }}"
+          CONCLUSION="${{ steps.sdk-test.outputs.conclusion }}"
+
+          echo "Conclusion: $CONCLUSION"
+          echo "Output file: $OUTPUT_FILE"
+
+          if [ "$CONCLUSION" = "success" ]; then
+            echo "✅ Action completed successfully with Agent SDK"
+          else
+            echo "❌ Action failed with Agent SDK"
+            exit 1
+          fi
+
+          if [ -f "$OUTPUT_FILE" ]; then
+            if [ -s "$OUTPUT_FILE" ]; then
+              echo "✅ Execution log file created successfully with content"
+              echo "Validating JSON format:"
+              if jq . "$OUTPUT_FILE" > /dev/null 2>&1; then
+                echo "✅ Output is valid JSON"
+                # Verify SDK output contains total_cost_usd (SDK field name)
+                if jq -e '.[] | select(.type == "result") | .total_cost_usd' "$OUTPUT_FILE" > /dev/null 2>&1; then
+                  echo "✅ SDK output contains total_cost_usd field"
+                else
+                  echo "❌ SDK output missing total_cost_usd field"
+                  exit 1
+                fi
+                echo "Content preview:"
+                head -c 500 "$OUTPUT_FILE"
+              else
+                echo "❌ Output is not valid JSON"
+                exit 1
+              fi
+            else
+              echo "❌ Execution log file is empty"
+              exit 1
+            fi
+          else
+            echo "❌ Execution log file not found"
+            exit 1
+          fi
action.yml (19 changes)
@@ -127,6 +127,9 @@ outputs:
   structured_output:
     description: "JSON string containing all structured output fields when --json-schema is provided in claude_args. Use fromJSON() to parse: fromJSON(steps.id.outputs.structured_output).field_name"
     value: ${{ steps.claude-code.outputs.structured_output }}
+  session_id:
+    description: "The Claude Code session ID that can be used with --resume to continue this conversation"
+    value: ${{ steps.claude-code.outputs.session_id }}

 runs:
   using: "composite"
@@ -140,10 +143,12 @@ runs:
     - name: Setup Custom Bun Path
       if: inputs.path_to_bun_executable != ''
       shell: bash
+      env:
+        PATH_TO_BUN_EXECUTABLE: ${{ inputs.path_to_bun_executable }}
       run: |
-        echo "Using custom Bun executable: ${{ inputs.path_to_bun_executable }}"
+        echo "Using custom Bun executable: $PATH_TO_BUN_EXECUTABLE"
         # Add the directory containing the custom executable to PATH
-        BUN_DIR=$(dirname "${{ inputs.path_to_bun_executable }}")
+        BUN_DIR=$(dirname "$PATH_TO_BUN_EXECUTABLE")
         echo "$BUN_DIR" >> "$GITHUB_PATH"

     - name: Install Dependencies
@@ -182,6 +187,8 @@ runs:
     - name: Install Base Action Dependencies
       if: steps.prepare.outputs.contains_trigger == 'true'
       shell: bash
+      env:
+        PATH_TO_CLAUDE_CODE_EXECUTABLE: ${{ inputs.path_to_claude_code_executable }}
       run: |
         echo "Installing base-action dependencies..."
         cd ${GITHUB_ACTION_PATH}/base-action
@@ -190,8 +197,8 @@ runs:
         cd -

         # Install Claude Code if no custom executable is provided
-        if [ -z "${{ inputs.path_to_claude_code_executable }}" ]; then
-          CLAUDE_CODE_VERSION="2.0.50"
+        if [ -z "$PATH_TO_CLAUDE_CODE_EXECUTABLE" ]; then
+          CLAUDE_CODE_VERSION="2.0.70"
           echo "Installing Claude Code v${CLAUDE_CODE_VERSION}..."
           for attempt in 1 2 3; do
             echo "Installation attempt $attempt..."
@@ -210,9 +217,9 @@ runs:
           echo "Claude Code installed successfully"
           echo "$HOME/.local/bin" >> "$GITHUB_PATH"
         else
-          echo "Using custom Claude Code executable: ${{ inputs.path_to_claude_code_executable }}"
+          echo "Using custom Claude Code executable: $PATH_TO_CLAUDE_CODE_EXECUTABLE"
           # Add the directory containing the custom executable to PATH
-          CLAUDE_DIR=$(dirname "${{ inputs.path_to_claude_code_executable }}")
+          CLAUDE_DIR=$(dirname "$PATH_TO_CLAUDE_CODE_EXECUTABLE")
           echo "$CLAUDE_DIR" >> "$GITHUB_PATH"
         fi
@@ -82,6 +82,9 @@ outputs:
   structured_output:
     description: "JSON string containing all structured output fields when --json-schema is provided in claude_args (use fromJSON() or jq to parse)"
     value: ${{ steps.run_claude.outputs.structured_output }}
+  session_id:
+    description: "The Claude Code session ID that can be used with --resume to continue this conversation"
+    value: ${{ steps.run_claude.outputs.session_id }}

 runs:
   using: "composite"
@@ -101,10 +104,12 @@ runs:
     - name: Setup Custom Bun Path
       if: inputs.path_to_bun_executable != ''
       shell: bash
+      env:
+        PATH_TO_BUN_EXECUTABLE: ${{ inputs.path_to_bun_executable }}
       run: |
-        echo "Using custom Bun executable: ${{ inputs.path_to_bun_executable }}"
+        echo "Using custom Bun executable: $PATH_TO_BUN_EXECUTABLE"
         # Add the directory containing the custom executable to PATH
-        BUN_DIR=$(dirname "${{ inputs.path_to_bun_executable }}")
+        BUN_DIR=$(dirname "$PATH_TO_BUN_EXECUTABLE")
         echo "$BUN_DIR" >> "$GITHUB_PATH"

     - name: Install Dependencies
@@ -115,9 +120,11 @@ runs:

     - name: Install Claude Code
       shell: bash
+      env:
+        PATH_TO_CLAUDE_CODE_EXECUTABLE: ${{ inputs.path_to_claude_code_executable }}
       run: |
-        if [ -z "${{ inputs.path_to_claude_code_executable }}" ]; then
-          CLAUDE_CODE_VERSION="2.0.50"
+        if [ -z "$PATH_TO_CLAUDE_CODE_EXECUTABLE" ]; then
+          CLAUDE_CODE_VERSION="2.0.70"
           echo "Installing Claude Code v${CLAUDE_CODE_VERSION}..."
           for attempt in 1 2 3; do
             echo "Installation attempt $attempt..."
@@ -135,9 +142,9 @@ runs:
           done
           echo "Claude Code installed successfully"
         else
-          echo "Using custom Claude Code executable: ${{ inputs.path_to_claude_code_executable }}"
+          echo "Using custom Claude Code executable: $PATH_TO_CLAUDE_CODE_EXECUTABLE"
           # Add the directory containing the custom executable to PATH
-          CLAUDE_DIR=$(dirname "${{ inputs.path_to_claude_code_executable }}")
+          CLAUDE_DIR=$(dirname "$PATH_TO_CLAUDE_CODE_EXECUTABLE")
           echo "$CLAUDE_DIR" >> "$GITHUB_PATH"
         fi
@@ -1,10 +1,12 @@
|
||||
{
|
||||
"lockfileVersion": 1,
|
||||
"configVersion": 0,
|
||||
"workspaces": {
|
||||
"": {
|
||||
"name": "@anthropic-ai/claude-code-base-action",
|
||||
"dependencies": {
|
||||
"@actions/core": "^1.10.1",
|
||||
"@anthropic-ai/claude-agent-sdk": "^0.1.70",
|
||||
"shell-quote": "^1.8.3",
|
||||
},
|
||||
"devDependencies": {
|
||||
@@ -25,8 +27,40 @@
|
||||
|
||||
"@actions/io": ["@actions/io@1.1.3", "", {}, "sha512-wi9JjgKLYS7U/z8PPbco+PvTb/nRWjeoFlJ1Qer83k/3C5PHQi28hiVdeE2kHXmIL99mQFawx8qt/JPjZilJ8Q=="],
|
||||
|
||||
"@anthropic-ai/claude-agent-sdk": ["@anthropic-ai/claude-agent-sdk@0.1.70", "", { "optionalDependencies": { "@img/sharp-darwin-arm64": "^0.33.5", "@img/sharp-darwin-x64": "^0.33.5", "@img/sharp-linux-arm": "^0.33.5", "@img/sharp-linux-arm64": "^0.33.5", "@img/sharp-linux-x64": "^0.33.5", "@img/sharp-linuxmusl-arm64": "^0.33.5", "@img/sharp-linuxmusl-x64": "^0.33.5", "@img/sharp-win32-x64": "^0.33.5" }, "peerDependencies": { "zod": "^3.24.1" } }, "sha512-4jpFPDX8asys6skO1r3Pzh0Fe9nbND2ASYTWuyFB5iN9bWEL6WScTFyGokjql3M2TkEp9ZGuB2YYpTCdaqT9Sw=="],
|
||||
|
||||
"@fastify/busboy": ["@fastify/busboy@2.1.1", "", {}, "sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA=="],
|
||||
|
||||
"@img/sharp-darwin-arm64": ["@img/sharp-darwin-arm64@0.33.5", "", { "optionalDependencies": { "@img/sharp-libvips-darwin-arm64": "1.0.4" }, "os": "darwin", "cpu": "arm64" }, "sha512-UT4p+iz/2H4twwAoLCqfA9UH5pI6DggwKEGuaPy7nCVQ8ZsiY5PIcrRvD1DzuY3qYL07NtIQcWnBSY/heikIFQ=="],
|
||||
|
||||
"@img/sharp-darwin-x64": ["@img/sharp-darwin-x64@0.33.5", "", { "optionalDependencies": { "@img/sharp-libvips-darwin-x64": "1.0.4" }, "os": "darwin", "cpu": "x64" }, "sha512-fyHac4jIc1ANYGRDxtiqelIbdWkIuQaI84Mv45KvGRRxSAa7o7d1ZKAOBaYbnepLC1WqxfpimdeWfvqqSGwR2Q=="],
|
||||
|
||||
"@img/sharp-libvips-darwin-arm64": ["@img/sharp-libvips-darwin-arm64@1.0.4", "", { "os": "darwin", "cpu": "arm64" }, "sha512-XblONe153h0O2zuFfTAbQYAX2JhYmDHeWikp1LM9Hul9gVPjFY427k6dFEcOL72O01QxQsWi761svJ/ev9xEDg=="],
|
||||
|
||||
"@img/sharp-libvips-darwin-x64": ["@img/sharp-libvips-darwin-x64@1.0.4", "", { "os": "darwin", "cpu": "x64" }, "sha512-xnGR8YuZYfJGmWPvmlunFaWJsb9T/AO2ykoP3Fz/0X5XV2aoYBPkX6xqCQvUTKKiLddarLaxpzNe+b1hjeWHAQ=="],
|
||||
|
||||
"@img/sharp-libvips-linux-arm": ["@img/sharp-libvips-linux-arm@1.0.5", "", { "os": "linux", "cpu": "arm" }, "sha512-gvcC4ACAOPRNATg/ov8/MnbxFDJqf/pDePbBnuBDcjsI8PssmjoKMAz4LtLaVi+OnSb5FK/yIOamqDwGmXW32g=="],
|
||||
|
||||
"@img/sharp-libvips-linux-arm64": ["@img/sharp-libvips-linux-arm64@1.0.4", "", { "os": "linux", "cpu": "arm64" }, "sha512-9B+taZ8DlyyqzZQnoeIvDVR/2F4EbMepXMc/NdVbkzsJbzkUjhXv/70GQJ7tdLA4YJgNP25zukcxpX2/SueNrA=="],
|
||||
|
||||
"@img/sharp-libvips-linux-x64": ["@img/sharp-libvips-linux-x64@1.0.4", "", { "os": "linux", "cpu": "x64" }, "sha512-MmWmQ3iPFZr0Iev+BAgVMb3ZyC4KeFc3jFxnNbEPas60e1cIfevbtuyf9nDGIzOaW9PdnDciJm+wFFaTlj5xYw=="],
|
||||
|
||||
"@img/sharp-libvips-linuxmusl-arm64": ["@img/sharp-libvips-linuxmusl-arm64@1.0.4", "", { "os": "linux", "cpu": "arm64" }, "sha512-9Ti+BbTYDcsbp4wfYib8Ctm1ilkugkA/uscUn6UXK1ldpC1JjiXbLfFZtRlBhjPZ5o1NCLiDbg8fhUPKStHoTA=="],
|
||||
|
||||
"@img/sharp-libvips-linuxmusl-x64": ["@img/sharp-libvips-linuxmusl-x64@1.0.4", "", { "os": "linux", "cpu": "x64" }, "sha512-viYN1KX9m+/hGkJtvYYp+CCLgnJXwiQB39damAO7WMdKWlIhmYTfHjwSbQeUK/20vY154mwezd9HflVFM1wVSw=="],
|
||||
|
||||
"@img/sharp-linux-arm": ["@img/sharp-linux-arm@0.33.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-arm": "1.0.5" }, "os": "linux", "cpu": "arm" }, "sha512-JTS1eldqZbJxjvKaAkxhZmBqPRGmxgu+qFKSInv8moZ2AmT5Yib3EQ1c6gp493HvrvV8QgdOXdyaIBrhvFhBMQ=="],
|
||||
|
||||
"@img/sharp-linux-arm64": ["@img/sharp-linux-arm64@0.33.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-arm64": "1.0.4" }, "os": "linux", "cpu": "arm64" }, "sha512-JMVv+AMRyGOHtO1RFBiJy/MBsgz0x4AWrT6QoEVVTyh1E39TrCUpTRI7mx9VksGX4awWASxqCYLCV4wBZHAYxA=="],
|
||||
|
||||
"@img/sharp-linux-x64": ["@img/sharp-linux-x64@0.33.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-x64": "1.0.4" }, "os": "linux", "cpu": "x64" }, "sha512-opC+Ok5pRNAzuvq1AG0ar+1owsu842/Ab+4qvU879ippJBHvyY5n2mxF1izXqkPYlGuP/M556uh53jRLJmzTWA=="],
|
||||
|
||||
"@img/sharp-linuxmusl-arm64": ["@img/sharp-linuxmusl-arm64@0.33.5", "", { "optionalDependencies": { "@img/sharp-libvips-linuxmusl-arm64": "1.0.4" }, "os": "linux", "cpu": "arm64" }, "sha512-XrHMZwGQGvJg2V/oRSUfSAfjfPxO+4DkiRh6p2AFjLQztWUuY/o8Mq0eMQVIY7HJ1CDQUJlxGGZRw1a5bqmd1g=="],
|
||||
|
||||
"@img/sharp-linuxmusl-x64": ["@img/sharp-linuxmusl-x64@0.33.5", "", { "optionalDependencies": { "@img/sharp-libvips-linuxmusl-x64": "1.0.4" }, "os": "linux", "cpu": "x64" }, "sha512-WT+d/cgqKkkKySYmqoZ8y3pxx7lx9vVejxW/W4DOFMYVSkErR+w7mf2u8m/y4+xHe7yY9DAXQMWQhpnMuFfScw=="],
|
||||
|
||||
"@img/sharp-win32-x64": ["@img/sharp-win32-x64@0.33.5", "", { "os": "win32", "cpu": "x64" }, "sha512-MpY/o8/8kj+EcnxwvrP4aTJSWw/aZ7JIGR4aBeZkZw5B7/Jn+tY9/VNwtcoGmdT7GfggGIU4kygOMSbYnOrAbg=="],
|
||||
|
||||
"@types/bun": ["@types/bun@1.2.19", "", { "dependencies": { "bun-types": "1.2.19" } }, "sha512-d9ZCmrH3CJ2uYKXQIUuZ/pUnTqIvLDS0SK7pFmbx8ma+ziH/FRMoAq5bYpRG7y+w1gl+HgyNZbtqgMq4W4e2Lg=="],
|
||||
|
||||
"@types/node": ["@types/node@20.19.9", "", { "dependencies": { "undici-types": "~6.21.0" } }, "sha512-cuVNgarYWZqxRJDQHEB58GEONhOK79QVR/qYx4S7kcUObQvUwvFnYxJuuHUKm2aieN9X3yZB4LZsuYNU1Qphsw=="],
|
||||
@@ -50,5 +84,7 @@
|
||||
"undici": ["undici@5.29.0", "", { "dependencies": { "@fastify/busboy": "^2.0.0" } }, "sha512-raqeBD6NQK4SkWhQzeYKd1KmIG6dllBOTt55Rmkt4HtI9mwdWtJljnrXjAFUBLTSN67HWrOIZ3EPF4kjUw80Bg=="],
|
||||
|
||||
"undici-types": ["undici-types@6.21.0", "", {}, "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ=="],
|
||||
|
||||
"zod": ["zod@3.25.76", "", {}, "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ=="],
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@
   },
   "dependencies": {
     "@actions/core": "^1.10.1",
+    "@anthropic-ai/claude-agent-sdk": "^0.1.70",
     "shell-quote": "^1.8.3"
   },
   "devDependencies": {
base-action/src/parse-sdk-options.ts (new file, 261 lines)
@@ -0,0 +1,261 @@
|
||||
import { parse as parseShellArgs } from "shell-quote";
|
||||
import type { ClaudeOptions } from "./run-claude";
|
||||
import type { Options as SdkOptions } from "@anthropic-ai/claude-agent-sdk";
|
||||
|
||||
/**
|
||||
* Result of parsing ClaudeOptions for SDK usage
|
||||
*/
|
||||
export type ParsedSdkOptions = {
|
||||
sdkOptions: SdkOptions;
|
||||
showFullOutput: boolean;
|
||||
hasJsonSchema: boolean;
|
||||
};
|
||||
|
||||
// Flags that should accumulate multiple values instead of overwriting
|
||||
// Include both camelCase and hyphenated variants for CLI compatibility
|
||||
const ACCUMULATING_FLAGS = new Set([
|
||||
"allowedTools",
|
||||
"allowed-tools",
|
||||
"disallowedTools",
|
||||
"disallowed-tools",
|
||||
"mcp-config",
|
||||
]);
|
||||
|
||||
// Delimiter used to join accumulated flag values
|
||||
const ACCUMULATE_DELIMITER = "\x00";
|
||||
|
||||
type McpConfig = {
|
||||
mcpServers?: Record<string, unknown>;
|
||||
};
|
||||
|
||||
/**
|
||||
* Merge multiple MCP config values into a single config.
|
||||
* Each config can be a JSON string or a file path.
|
||||
* For JSON strings, mcpServers objects are merged.
|
||||
* For file paths, they are kept as-is (user's file takes precedence and is used last).
|
||||
*/
|
||||
function mergeMcpConfigs(configValues: string[]): string {
|
||||
const merged: McpConfig = { mcpServers: {} };
|
||||
let lastFilePath: string | null = null;
|
||||
|
||||
for (const config of configValues) {
|
||||
const trimmed = config.trim();
|
||||
if (!trimmed) continue;
|
||||
|
||||
// Check if it's a JSON string (starts with {) or a file path
|
||||
if (trimmed.startsWith("{")) {
|
||||
try {
|
||||
const parsed = JSON.parse(trimmed) as McpConfig;
|
||||
if (parsed.mcpServers) {
|
||||
Object.assign(merged.mcpServers!, parsed.mcpServers);
|
||||
}
|
||||
} catch {
|
||||
// If JSON parsing fails, treat as file path
|
||||
lastFilePath = trimmed;
|
||||
}
|
||||
} else {
|
||||
// It's a file path - store it to handle separately
|
||||
lastFilePath = trimmed;
|
||||
}
|
||||
}
|
||||
|
||||
// If we have file paths, we need to keep the merged JSON and let the file
|
||||
// be handled separately. Since we can only return one value, merge what we can.
|
||||
// If there's a file path, we need a different approach - read the file at runtime.
|
||||
// For now, if there's a file path, we'll stringify the merged config.
|
||||
// The action prepends its config as JSON, so we can safely merge inline JSON configs.
|
||||
|
||||
// If no inline configs were found (all file paths), return the last file path
|
||||
if (Object.keys(merged.mcpServers!).length === 0 && lastFilePath) {
|
||||
return lastFilePath;
|
||||
}
|
||||
|
||||
// Note: If user passes a file path, we cannot merge it at parse time since
|
||||
// we don't have access to the file system here. The action's built-in MCP
|
||||
// servers are always passed as inline JSON, so they will be merged.
|
||||
// If user also passes inline JSON, it will be merged.
|
||||
// If user passes a file path, they should ensure it includes all needed servers.
|
||||
|
||||
return JSON.stringify(merged);
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse claudeArgs string into extraArgs record for SDK pass-through
|
||||
* The SDK/CLI will handle --mcp-config, --json-schema, etc.
|
||||
* For allowedTools and disallowedTools, multiple occurrences are accumulated (null-char joined).
|
||||
* Accumulating flags also consume all consecutive non-flag values
|
||||
* (e.g., --allowed-tools "Tool1" "Tool2" "Tool3" captures all three).
|
||||
*/
|
||||
function parseClaudeArgsToExtraArgs(
|
||||
claudeArgs?: string,
|
||||
): Record<string, string | null> {
|
||||
if (!claudeArgs?.trim()) return {};
|
||||
|
||||
const result: Record<string, string | null> = {};
|
||||
const args = parseShellArgs(claudeArgs).filter(
|
||||
(arg): arg is string => typeof arg === "string",
|
||||
);
|
||||
|
||||
for (let i = 0; i < args.length; i++) {
|
||||
const arg = args[i];
|
||||
if (arg?.startsWith("--")) {
|
||||
const flag = arg.slice(2);
|
||||
const nextArg = args[i + 1];
|
||||
|
||||
// Check if next arg is a value (not another flag)
|
||||
if (nextArg && !nextArg.startsWith("--")) {
|
||||
// For accumulating flags, consume all consecutive non-flag values
|
||||
// This handles: --allowed-tools "Tool1" "Tool2" "Tool3"
|
||||
if (ACCUMULATING_FLAGS.has(flag)) {
|
||||
const values: string[] = [];
|
||||
while (i + 1 < args.length && !args[i + 1]?.startsWith("--")) {
|
||||
i++;
|
||||
values.push(args[i]!);
|
||||
}
|
||||
const joinedValues = values.join(ACCUMULATE_DELIMITER);
|
||||
if (result[flag]) {
|
||||
result[flag] =
|
||||
`${result[flag]}${ACCUMULATE_DELIMITER}${joinedValues}`;
|
||||
} else {
|
||||
result[flag] = joinedValues;
|
||||
}
|
||||
} else {
|
||||
result[flag] = nextArg;
|
||||
i++; // Skip the value
|
||||
}
|
||||
} else {
|
||||
result[flag] = null; // Boolean flag
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse ClaudeOptions into SDK-compatible options
|
||||
* Uses extraArgs for CLI pass-through instead of duplicating option parsing
|
||||
*/
|
||||
export function parseSdkOptions(options: ClaudeOptions): ParsedSdkOptions {
|
||||
// Determine output verbosity
|
||||
const isDebugMode = process.env.ACTIONS_STEP_DEBUG === "true";
|
||||
const showFullOutput = options.showFullOutput === "true" || isDebugMode;
|
||||
|
||||
// Parse claudeArgs into extraArgs for CLI pass-through
|
||||
const extraArgs = parseClaudeArgsToExtraArgs(options.claudeArgs);
|
||||
|
||||
// Detect if --json-schema is present (for hasJsonSchema flag)
|
||||
const hasJsonSchema = "json-schema" in extraArgs;
|
||||
|
||||
// Extract and merge allowedTools from all sources:
|
||||
// 1. From extraArgs (parsed from claudeArgs - contains tag mode's tools)
|
||||
// - Check both camelCase (--allowedTools) and hyphenated (--allowed-tools) variants
|
||||
// 2. From options.allowedTools (direct input - may be undefined)
|
||||
// This prevents duplicate flags being overwritten when claudeArgs contains --allowedTools
|
||||
const allowedToolsValues = [
|
||||
extraArgs["allowedTools"],
|
||||
extraArgs["allowed-tools"],
|
||||
]
|
||||
.filter(Boolean)
|
||||
.join(ACCUMULATE_DELIMITER);
|
||||
const extraArgsAllowedTools = allowedToolsValues
|
||||
? allowedToolsValues
|
||||
.split(ACCUMULATE_DELIMITER)
|
||||
.flatMap((v) => v.split(","))
|
||||
.map((t) => t.trim())
|
||||
.filter(Boolean)
|
||||
: [];
|
||||
const directAllowedTools = options.allowedTools
|
||||
? options.allowedTools.split(",").map((t) => t.trim())
|
||||
: [];
|
||||
const mergedAllowedTools = [
|
||||
...new Set([...extraArgsAllowedTools, ...directAllowedTools]),
|
||||
];
|
||||
delete extraArgs["allowedTools"];
|
||||
delete extraArgs["allowed-tools"];
|
||||
|
||||
// Same for disallowedTools - check both camelCase and hyphenated variants
|
||||
const disallowedToolsValues = [
|
||||
extraArgs["disallowedTools"],
|
||||
extraArgs["disallowed-tools"],
|
||||
]
|
||||
.filter(Boolean)
|
||||
.join(ACCUMULATE_DELIMITER);
|
||||
const extraArgsDisallowedTools = disallowedToolsValues
|
||||
? disallowedToolsValues
|
||||
.split(ACCUMULATE_DELIMITER)
|
||||
.flatMap((v) => v.split(","))
|
||||
.map((t) => t.trim())
|
||||
.filter(Boolean)
|
||||
: [];
|
||||
const directDisallowedTools = options.disallowedTools
|
||||
? options.disallowedTools.split(",").map((t) => t.trim())
|
||||
: [];
|
||||
const mergedDisallowedTools = [
|
||||
...new Set([...extraArgsDisallowedTools, ...directDisallowedTools]),
|
||||
];
|
||||
delete extraArgs["disallowedTools"];
|
||||
delete extraArgs["disallowed-tools"];
|
||||
|
||||
// Merge multiple --mcp-config values by combining their mcpServers objects
|
||||
// The action prepends its config (github_comment, github_ci, etc.) as inline JSON,
|
||||
// and users may provide their own config as inline JSON or file path
|
||||
if (extraArgs["mcp-config"]) {
|
||||
const mcpConfigValues = extraArgs["mcp-config"].split(ACCUMULATE_DELIMITER);
|
||||
if (mcpConfigValues.length > 1) {
|
||||
extraArgs["mcp-config"] = mergeMcpConfigs(mcpConfigValues);
|
||||
}
|
||||
}
|
||||
|
||||
// Build custom environment
|
||||
const env: Record<string, string | undefined> = { ...process.env };
|
||||
if (process.env.INPUT_ACTION_INPUTS_PRESENT) {
|
||||
env.GITHUB_ACTION_INPUTS = process.env.INPUT_ACTION_INPUTS_PRESENT;
|
||||
}
|
||||
|
||||
// Build system prompt option - default to claude_code preset
|
||||
let systemPrompt: SdkOptions["systemPrompt"];
|
||||
if (options.systemPrompt) {
|
||||
systemPrompt = options.systemPrompt;
|
||||
} else if (options.appendSystemPrompt) {
|
||||
systemPrompt = {
|
||||
type: "preset",
|
||||
preset: "claude_code",
|
||||
append: options.appendSystemPrompt,
|
||||
};
|
||||
} else {
|
||||
// Default to claude_code preset when no custom prompt is specified
|
||||
systemPrompt = {
|
||||
type: "preset",
|
||||
preset: "claude_code",
|
||||
};
|
||||
}
|
||||
|
||||
// Build SDK options - use merged tools from both direct options and claudeArgs
|
||||
const sdkOptions: SdkOptions = {
|
||||
// Direct options from ClaudeOptions inputs
|
||||
model: options.model,
|
||||
maxTurns: options.maxTurns ? parseInt(options.maxTurns, 10) : undefined,
|
||||
allowedTools:
|
||||
mergedAllowedTools.length > 0 ? mergedAllowedTools : undefined,
|
||||
disallowedTools:
|
||||
mergedDisallowedTools.length > 0 ? mergedDisallowedTools : undefined,
|
||||
systemPrompt,
|
||||
fallbackModel: options.fallbackModel,
|
||||
pathToClaudeCodeExecutable: options.pathToClaudeCodeExecutable,
|
||||
|
||||
// Pass through claudeArgs as extraArgs - CLI handles --mcp-config, --json-schema, etc.
|
||||
// Note: allowedTools and disallowedTools have been removed from extraArgs to prevent duplicates
|
||||
extraArgs,
|
||||
env,
|
||||
|
||||
// Load settings from all sources to pick up CLI-installed plugins, CLAUDE.md, etc.
|
||||
settingSources: ["user", "project", "local"],
|
||||
};
|
||||
|
||||
return {
|
||||
sdkOptions,
|
||||
showFullOutput,
|
||||
hasJsonSchema,
|
||||
};
|
||||
}
|
||||
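As an aside, a minimal usage sketch of the new parser (illustrative values only, mirroring the tests further below; not part of the commits):

// Hypothetical caller of parseSdkOptions (values are illustrative)
import { parseSdkOptions } from "./parse-sdk-options";
import type { ClaudeOptions } from "./run-claude";

const options: ClaudeOptions = {
  claudeArgs: `--allowedTools "Edit,Read" --mcp-config '{"mcpServers":{"server1":{"command":"cmd1"}}}' --mcp-config '{"mcpServers":{"server2":{"command":"cmd2"}}}'`,
  allowedTools: "Write",
};

const { sdkOptions, hasJsonSchema } = parseSdkOptions(options);
// sdkOptions.allowedTools              -> ["Edit", "Read", "Write"] (accumulated and deduplicated)
// sdkOptions.extraArgs?.["mcp-config"] -> one JSON string with server1 and server2 merged under mcpServers
// hasJsonSchema                        -> false, since no --json-schema flag was passed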
base-action/src/run-claude-sdk.ts (new file, 151 lines)
@@ -0,0 +1,151 @@
|
||||
import * as core from "@actions/core";
|
||||
import { readFile, writeFile } from "fs/promises";
|
||||
import { query } from "@anthropic-ai/claude-agent-sdk";
|
||||
import type {
|
||||
SDKMessage,
|
||||
SDKResultMessage,
|
||||
} from "@anthropic-ai/claude-agent-sdk";
|
||||
import type { ParsedSdkOptions } from "./parse-sdk-options";
|
||||
|
||||
const EXECUTION_FILE = `${process.env.RUNNER_TEMP}/claude-execution-output.json`;
|
||||
|
||||
/**
|
||||
* Sanitizes SDK output to match CLI sanitization behavior
|
||||
*/
|
||||
function sanitizeSdkOutput(
|
||||
message: SDKMessage,
|
||||
showFullOutput: boolean,
|
||||
): string | null {
|
||||
if (showFullOutput) {
|
||||
return JSON.stringify(message, null, 2);
|
||||
}
|
||||
|
||||
// System initialization - safe to show
|
||||
if (message.type === "system" && message.subtype === "init") {
|
||||
return JSON.stringify(
|
||||
{
|
||||
type: "system",
|
||||
subtype: "init",
|
||||
message: "Claude Code initialized",
|
||||
model: "model" in message ? message.model : "unknown",
|
||||
},
|
||||
null,
|
||||
2,
|
||||
);
|
||||
}
|
||||
|
||||
// Result messages - show sanitized summary
|
||||
if (message.type === "result") {
|
||||
const resultMsg = message as SDKResultMessage;
|
||||
return JSON.stringify(
|
||||
{
|
||||
type: "result",
|
||||
subtype: resultMsg.subtype,
|
||||
is_error: resultMsg.is_error,
|
||||
duration_ms: resultMsg.duration_ms,
|
||||
num_turns: resultMsg.num_turns,
|
||||
total_cost_usd: resultMsg.total_cost_usd,
|
||||
permission_denials: resultMsg.permission_denials,
|
||||
},
|
||||
null,
|
||||
2,
|
||||
);
|
||||
}
|
||||
|
||||
// Suppress other message types in non-full-output mode
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Run Claude using the Agent SDK
|
||||
*/
|
||||
export async function runClaudeWithSdk(
|
||||
promptPath: string,
|
||||
{ sdkOptions, showFullOutput, hasJsonSchema }: ParsedSdkOptions,
|
||||
): Promise<void> {
|
||||
const prompt = await readFile(promptPath, "utf-8");
|
||||
|
||||
if (!showFullOutput) {
|
||||
console.log(
|
||||
"Running Claude Code via SDK (full output hidden for security)...",
|
||||
);
|
||||
console.log(
|
||||
"Rerun in debug mode or enable `show_full_output: true` in your workflow file for full output.",
|
||||
);
|
||||
}
|
||||
|
||||
console.log(`Running Claude with prompt from file: ${promptPath}`);
|
||||
// Log SDK options without env (which could contain sensitive data)
|
||||
const { env, ...optionsToLog } = sdkOptions;
|
||||
console.log("SDK options:", JSON.stringify(optionsToLog, null, 2));
|
||||
|
||||
const messages: SDKMessage[] = [];
|
||||
let resultMessage: SDKResultMessage | undefined;
|
||||
|
||||
try {
|
||||
for await (const message of query({ prompt, options: sdkOptions })) {
|
||||
messages.push(message);
|
||||
|
||||
const sanitized = sanitizeSdkOutput(message, showFullOutput);
|
||||
if (sanitized) {
|
||||
console.log(sanitized);
|
||||
}
|
||||
|
||||
if (message.type === "result") {
|
||||
resultMessage = message as SDKResultMessage;
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
console.error("SDK execution error:", error);
|
||||
core.setOutput("conclusion", "failure");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Write execution file
|
||||
try {
|
||||
await writeFile(EXECUTION_FILE, JSON.stringify(messages, null, 2));
|
||||
console.log(`Log saved to ${EXECUTION_FILE}`);
|
||||
core.setOutput("execution_file", EXECUTION_FILE);
|
||||
} catch (error) {
|
||||
core.warning(`Failed to write execution file: ${error}`);
|
||||
}
|
||||
|
||||
if (!resultMessage) {
|
||||
core.setOutput("conclusion", "failure");
|
||||
core.error("No result message received from Claude");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const isSuccess = resultMessage.subtype === "success";
|
||||
core.setOutput("conclusion", isSuccess ? "success" : "failure");
|
||||
|
||||
// Handle structured output
|
||||
if (hasJsonSchema) {
|
||||
if (
|
||||
isSuccess &&
|
||||
"structured_output" in resultMessage &&
|
||||
resultMessage.structured_output
|
||||
) {
|
||||
const structuredOutputJson = JSON.stringify(
|
||||
resultMessage.structured_output,
|
||||
);
|
||||
core.setOutput("structured_output", structuredOutputJson);
|
||||
core.info(
|
||||
`Set structured_output with ${Object.keys(resultMessage.structured_output as object).length} field(s)`,
|
||||
);
|
||||
} else {
|
||||
core.setFailed(
|
||||
`--json-schema was provided but Claude did not return structured_output. Result subtype: ${resultMessage.subtype}`,
|
||||
);
|
||||
core.setOutput("conclusion", "failure");
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
if (!isSuccess) {
|
||||
if ("errors" in resultMessage && resultMessage.errors) {
|
||||
core.error(`Execution failed: ${resultMessage.errors.join(", ")}`);
|
||||
}
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
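A sketch of how the two new modules fit together (assumed wiring; the actual dispatch is the USE_AGENT_SDK branch added to run-claude.ts below):

// Illustrative wiring of parse-sdk-options and run-claude-sdk
import { parseSdkOptions } from "./parse-sdk-options";
import { runClaudeWithSdk } from "./run-claude-sdk";
import type { ClaudeOptions } from "./run-claude";

async function runViaSdk(promptPath: string, options: ClaudeOptions): Promise<void> {
  const parsed = parseSdkOptions(options); // { sdkOptions, showFullOutput, hasJsonSchema }
  // Streams SDK messages, writes $RUNNER_TEMP/claude-execution-output.json,
  // and sets the conclusion, execution_file, and structured_output outputs.
  await runClaudeWithSdk(promptPath, parsed);
}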
@@ -5,6 +5,8 @@ import { unlink, writeFile, stat, readFile } from "fs/promises";
 import { createWriteStream } from "fs";
 import { spawn } from "child_process";
 import { parse as parseShellArgs } from "shell-quote";
+import { runClaudeWithSdk } from "./run-claude-sdk";
+import { parseSdkOptions } from "./parse-sdk-options";

 const execAsync = promisify(exec);

@@ -122,6 +124,36 @@ export function prepareRunConfig(
   };
 }

+/**
+ * Parses session_id from execution file and sets GitHub Action output
+ * Exported for testing
+ */
+export async function parseAndSetSessionId(
+  executionFile: string,
+): Promise<void> {
+  try {
+    const content = await readFile(executionFile, "utf-8");
+    const messages = JSON.parse(content) as {
+      type: string;
+      subtype?: string;
+      session_id?: string;
+    }[];
+
+    // Find the system.init message which contains session_id
+    const initMessage = messages.find(
+      (m) => m.type === "system" && m.subtype === "init",
+    );
+
+    if (initMessage?.session_id) {
+      core.setOutput("session_id", initMessage.session_id);
+      core.info(`Set session_id: ${initMessage.session_id}`);
+    }
+  } catch (error) {
+    // Don't fail the action if session_id extraction fails
+    core.warning(`Failed to extract session_id: ${error}`);
+  }
+}
+
 /**
  * Parses structured_output from execution file and sets GitHub Action outputs
  * Only runs if --json-schema was explicitly provided in claude_args
@@ -165,6 +197,17 @@ export async function parseAndSetStructuredOutputs(
 }

 export async function runClaude(promptPath: string, options: ClaudeOptions) {
+  // Feature flag: use SDK path by default, set USE_AGENT_SDK=false to use CLI
+  const useAgentSdk = process.env.USE_AGENT_SDK !== "false";
+  console.log(
+    `Using ${useAgentSdk ? "Agent SDK" : "CLI"} path (USE_AGENT_SDK=${process.env.USE_AGENT_SDK ?? "unset"})`,
+  );
+
+  if (useAgentSdk) {
+    const parsedOptions = parseSdkOptions(options);
+    return runClaudeWithSdk(promptPath, parsedOptions);
+  }
+
   const config = prepareRunConfig(promptPath, options);

   // Detect if --json-schema is present in claude args
@@ -355,6 +398,9 @@ export async function runClaude(promptPath: string, options: ClaudeOptions) {

   core.setOutput("execution_file", EXECUTION_FILE);

+  // Extract and set session_id
+  await parseAndSetSessionId(EXECUTION_FILE);
+
   // Parse and set structured outputs only if user provided --json-schema in claude_args
   if (hasJsonSchema) {
     try {
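For context, a minimal sketch of the execution-log shape the new parseAndSetSessionId helper expects (field values are illustrative and match the tests below):

// Hypothetical execution-log contents read by parseAndSetSessionId
const exampleExecutionLog = [
  { type: "system", subtype: "init", session_id: "test-session-123" },
  { type: "result", cost_usd: 0.01 },
];
// The helper finds the system/init entry and sets its session_id as the action's
// `session_id` output (usable later with --resume); missing data only logs a warning.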
base-action/test/parse-sdk-options.test.ts (new file, 315 lines)
@@ -0,0 +1,315 @@
|
||||
#!/usr/bin/env bun
|
||||
|
||||
import { describe, test, expect } from "bun:test";
|
||||
import { parseSdkOptions } from "../src/parse-sdk-options";
|
||||
import type { ClaudeOptions } from "../src/run-claude";
|
||||
|
||||
describe("parseSdkOptions", () => {
|
||||
describe("allowedTools merging", () => {
|
||||
test("should extract allowedTools from claudeArgs", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: '--allowedTools "Edit,Read,Write"',
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
expect(result.sdkOptions.allowedTools).toEqual(["Edit", "Read", "Write"]);
|
||||
expect(result.sdkOptions.extraArgs?.["allowedTools"]).toBeUndefined();
|
||||
});
|
||||
|
||||
test("should extract allowedTools from claudeArgs with MCP tools", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs:
|
||||
'--allowedTools "Edit,Read,mcp__github_comment__update_claude_comment"',
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
expect(result.sdkOptions.allowedTools).toEqual([
|
||||
"Edit",
|
||||
"Read",
|
||||
"mcp__github_comment__update_claude_comment",
|
||||
]);
|
||||
});
|
||||
|
||||
test("should accumulate multiple --allowedTools flags from claudeArgs", () => {
|
||||
// This simulates tag mode adding its tools, then user adding their own
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs:
|
||||
'--allowedTools "Edit,Read,mcp__github_comment__update_claude_comment" --model "claude-3" --allowedTools "Bash(npm install),mcp__github__get_issue"',
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
expect(result.sdkOptions.allowedTools).toEqual([
|
||||
"Edit",
|
||||
"Read",
|
||||
"mcp__github_comment__update_claude_comment",
|
||||
"Bash(npm install)",
|
||||
"mcp__github__get_issue",
|
||||
]);
|
||||
});
|
||||
|
||||
test("should merge allowedTools from both claudeArgs and direct options", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: '--allowedTools "Edit,Read"',
|
||||
allowedTools: "Write,Glob",
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
expect(result.sdkOptions.allowedTools).toEqual([
|
||||
"Edit",
|
||||
"Read",
|
||||
"Write",
|
||||
"Glob",
|
||||
]);
|
||||
});
|
||||
|
||||
test("should deduplicate allowedTools when merging", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: '--allowedTools "Edit,Read"',
|
||||
allowedTools: "Edit,Write",
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
expect(result.sdkOptions.allowedTools).toEqual(["Edit", "Read", "Write"]);
|
||||
});
|
||||
|
||||
test("should use only direct options when claudeArgs has no allowedTools", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: '--model "claude-3-5-sonnet"',
|
||||
allowedTools: "Edit,Read",
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
expect(result.sdkOptions.allowedTools).toEqual(["Edit", "Read"]);
|
||||
});
|
||||
|
||||
test("should return undefined allowedTools when neither source has it", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: '--model "claude-3-5-sonnet"',
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
expect(result.sdkOptions.allowedTools).toBeUndefined();
|
||||
});
|
||||
|
||||
test("should remove allowedTools from extraArgs after extraction", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: '--allowedTools "Edit,Read" --model "claude-3-5-sonnet"',
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
expect(result.sdkOptions.extraArgs?.["allowedTools"]).toBeUndefined();
|
||||
expect(result.sdkOptions.extraArgs?.["model"]).toBe("claude-3-5-sonnet");
|
||||
});
|
||||
|
||||
test("should handle hyphenated --allowed-tools flag", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: '--allowed-tools "Edit,Read,Write"',
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
expect(result.sdkOptions.allowedTools).toEqual(["Edit", "Read", "Write"]);
|
||||
expect(result.sdkOptions.extraArgs?.["allowed-tools"]).toBeUndefined();
|
||||
});
|
||||
|
||||
test("should accumulate multiple --allowed-tools flags (hyphenated)", () => {
|
||||
// This is the exact scenario from issue #746
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs:
|
||||
'--allowed-tools "Bash(git log:*)" "Bash(git diff:*)" "Bash(git fetch:*)" "Bash(gh pr:*)"',
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
expect(result.sdkOptions.allowedTools).toEqual([
|
||||
"Bash(git log:*)",
|
||||
"Bash(git diff:*)",
|
||||
"Bash(git fetch:*)",
|
||||
"Bash(gh pr:*)",
|
||||
]);
|
||||
});
|
||||
|
||||
test("should handle mixed camelCase and hyphenated allowedTools flags", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: '--allowedTools "Edit,Read" --allowed-tools "Write,Glob"',
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
// Both should be merged - note: order depends on which key is found first
|
||||
expect(result.sdkOptions.allowedTools).toContain("Edit");
|
||||
expect(result.sdkOptions.allowedTools).toContain("Read");
|
||||
expect(result.sdkOptions.allowedTools).toContain("Write");
|
||||
expect(result.sdkOptions.allowedTools).toContain("Glob");
|
||||
});
|
||||
});
|
||||
|
||||
describe("disallowedTools merging", () => {
|
||||
test("should extract disallowedTools from claudeArgs", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: '--disallowedTools "Bash,Write"',
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
expect(result.sdkOptions.disallowedTools).toEqual(["Bash", "Write"]);
|
||||
expect(result.sdkOptions.extraArgs?.["disallowedTools"]).toBeUndefined();
|
||||
});
|
||||
|
||||
test("should merge disallowedTools from both sources", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: '--disallowedTools "Bash"',
|
||||
disallowedTools: "Write",
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
expect(result.sdkOptions.disallowedTools).toEqual(["Bash", "Write"]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("mcp-config merging", () => {
|
||||
test("should pass through single mcp-config in extraArgs", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: `--mcp-config '{"mcpServers":{"server1":{"command":"cmd1"}}}'`,
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
expect(result.sdkOptions.extraArgs?.["mcp-config"]).toBe(
|
||||
'{"mcpServers":{"server1":{"command":"cmd1"}}}',
|
||||
);
|
||||
});
|
||||
|
||||
test("should merge multiple mcp-config flags with inline JSON", () => {
|
||||
// Simulates action prepending its config, then user providing their own
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: `--mcp-config '{"mcpServers":{"github_comment":{"command":"node","args":["server.js"]}}}' --mcp-config '{"mcpServers":{"user_server":{"command":"custom","args":["run"]}}}'`,
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
const mcpConfig = JSON.parse(
|
||||
result.sdkOptions.extraArgs?.["mcp-config"] as string,
|
||||
);
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("github_comment");
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("user_server");
|
||||
expect(mcpConfig.mcpServers.github_comment.command).toBe("node");
|
||||
expect(mcpConfig.mcpServers.user_server.command).toBe("custom");
|
||||
});
|
||||
|
||||
test("should merge three mcp-config flags", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: `--mcp-config '{"mcpServers":{"server1":{"command":"cmd1"}}}' --mcp-config '{"mcpServers":{"server2":{"command":"cmd2"}}}' --mcp-config '{"mcpServers":{"server3":{"command":"cmd3"}}}'`,
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
const mcpConfig = JSON.parse(
|
||||
result.sdkOptions.extraArgs?.["mcp-config"] as string,
|
||||
);
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("server1");
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("server2");
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("server3");
|
||||
});
|
||||
|
||||
test("should handle mcp-config file path when no inline JSON exists", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: `--mcp-config /tmp/user-mcp-config.json`,
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
expect(result.sdkOptions.extraArgs?.["mcp-config"]).toBe(
|
||||
"/tmp/user-mcp-config.json",
|
||||
);
|
||||
});
|
||||
|
||||
test("should merge inline JSON configs when file path is also present", () => {
|
||||
// When action provides inline JSON and user provides a file path,
|
||||
// the inline JSON configs should be merged (file paths cannot be merged at parse time)
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: `--mcp-config '{"mcpServers":{"github_comment":{"command":"node"}}}' --mcp-config '{"mcpServers":{"github_ci":{"command":"node"}}}' --mcp-config /tmp/user-config.json`,
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
// The inline JSON configs should be merged
|
||||
const mcpConfig = JSON.parse(
|
||||
result.sdkOptions.extraArgs?.["mcp-config"] as string,
|
||||
);
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("github_comment");
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("github_ci");
|
||||
});
|
||||
|
||||
test("should handle mcp-config with other flags", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: `--mcp-config '{"mcpServers":{"server1":{}}}' --model claude-3-5-sonnet --mcp-config '{"mcpServers":{"server2":{}}}'`,
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
const mcpConfig = JSON.parse(
|
||||
result.sdkOptions.extraArgs?.["mcp-config"] as string,
|
||||
);
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("server1");
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("server2");
|
||||
expect(result.sdkOptions.extraArgs?.["model"]).toBe("claude-3-5-sonnet");
|
||||
});
|
||||
|
||||
test("should handle real-world scenario: action config + user config", () => {
|
||||
// This is the exact scenario from the bug report
|
||||
const actionConfig = JSON.stringify({
|
||||
mcpServers: {
|
||||
github_comment: {
|
||||
command: "node",
|
||||
args: ["github-comment-server.js"],
|
||||
},
|
||||
github_ci: { command: "node", args: ["github-ci-server.js"] },
|
||||
},
|
||||
});
|
||||
const userConfig = JSON.stringify({
|
||||
mcpServers: {
|
||||
my_custom_server: { command: "python", args: ["server.py"] },
|
||||
},
|
||||
});
|
||||
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: `--mcp-config '${actionConfig}' --mcp-config '${userConfig}'`,
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
const mcpConfig = JSON.parse(
|
||||
result.sdkOptions.extraArgs?.["mcp-config"] as string,
|
||||
);
|
||||
// All servers should be present
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("github_comment");
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("github_ci");
|
||||
expect(mcpConfig.mcpServers).toHaveProperty("my_custom_server");
|
||||
});
|
||||
});
|
||||
|
||||
describe("other extraArgs passthrough", () => {
|
||||
test("should pass through json-schema in extraArgs", () => {
|
||||
const options: ClaudeOptions = {
|
||||
claudeArgs: `--json-schema '{"type":"object"}'`,
|
||||
};
|
||||
|
||||
const result = parseSdkOptions(options);
|
||||
|
||||
expect(result.sdkOptions.extraArgs?.["json-schema"]).toBe(
|
||||
'{"type":"object"}',
|
||||
);
|
||||
expect(result.hasJsonSchema).toBe(true);
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -4,7 +4,10 @@ import { describe, test, expect, afterEach, beforeEach, spyOn } from "bun:test";
|
||||
import { writeFile, unlink } from "fs/promises";
|
||||
import { tmpdir } from "os";
|
||||
import { join } from "path";
|
||||
import { parseAndSetStructuredOutputs } from "../src/run-claude";
|
||||
import {
|
||||
parseAndSetStructuredOutputs,
|
||||
parseAndSetSessionId,
|
||||
} from "../src/run-claude";
|
||||
import * as core from "@actions/core";
|
||||
|
||||
// Mock execution file path
|
||||
@@ -35,16 +38,19 @@ async function createMockExecutionFile(
|
||||
// Spy on core functions
|
||||
let setOutputSpy: any;
|
||||
let infoSpy: any;
|
||||
let warningSpy: any;
|
||||
|
||||
beforeEach(() => {
|
||||
setOutputSpy = spyOn(core, "setOutput").mockImplementation(() => {});
|
||||
infoSpy = spyOn(core, "info").mockImplementation(() => {});
|
||||
warningSpy = spyOn(core, "warning").mockImplementation(() => {});
|
||||
});
|
||||
|
||||
describe("parseAndSetStructuredOutputs", () => {
|
||||
afterEach(async () => {
|
||||
setOutputSpy?.mockRestore();
|
||||
infoSpy?.mockRestore();
|
||||
warningSpy?.mockRestore();
|
||||
try {
|
||||
await unlink(TEST_EXECUTION_FILE);
|
||||
} catch {
|
||||
@@ -156,3 +162,66 @@ describe("parseAndSetStructuredOutputs", () => {
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("parseAndSetSessionId", () => {
|
||||
afterEach(async () => {
|
||||
setOutputSpy?.mockRestore();
|
||||
infoSpy?.mockRestore();
|
||||
warningSpy?.mockRestore();
|
||||
try {
|
||||
await unlink(TEST_EXECUTION_FILE);
|
||||
} catch {
|
||||
// Ignore if file doesn't exist
|
||||
}
|
||||
});
|
||||
|
||||
test("should extract session_id from system.init message", async () => {
|
||||
const messages = [
|
||||
{ type: "system", subtype: "init", session_id: "test-session-123" },
|
||||
{ type: "result", cost_usd: 0.01 },
|
||||
];
|
||||
await writeFile(TEST_EXECUTION_FILE, JSON.stringify(messages));
|
||||
|
||||
await parseAndSetSessionId(TEST_EXECUTION_FILE);
|
||||
|
||||
expect(setOutputSpy).toHaveBeenCalledWith("session_id", "test-session-123");
|
||||
expect(infoSpy).toHaveBeenCalledWith("Set session_id: test-session-123");
|
||||
});
|
||||
|
||||
test("should handle missing session_id gracefully", async () => {
|
||||
const messages = [
|
||||
{ type: "system", subtype: "init" },
|
||||
{ type: "result", cost_usd: 0.01 },
|
||||
];
|
||||
await writeFile(TEST_EXECUTION_FILE, JSON.stringify(messages));
|
||||
|
||||
await parseAndSetSessionId(TEST_EXECUTION_FILE);
|
||||
|
||||
expect(setOutputSpy).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test("should handle missing system.init message gracefully", async () => {
|
||||
const messages = [{ type: "result", cost_usd: 0.01 }];
|
||||
await writeFile(TEST_EXECUTION_FILE, JSON.stringify(messages));
|
||||
|
||||
await parseAndSetSessionId(TEST_EXECUTION_FILE);
|
||||
|
||||
expect(setOutputSpy).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test("should handle malformed JSON gracefully with warning", async () => {
|
||||
await writeFile(TEST_EXECUTION_FILE, "{ invalid json");
|
||||
|
||||
await parseAndSetSessionId(TEST_EXECUTION_FILE);
|
||||
|
||||
expect(setOutputSpy).not.toHaveBeenCalled();
|
||||
expect(warningSpy).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test("should handle non-existent file gracefully with warning", async () => {
|
||||
await parseAndSetSessionId("/nonexistent/file.json");
|
||||
|
||||
expect(setOutputSpy).not.toHaveBeenCalled();
|
||||
expect(warningSpy).toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
bun.lock (34 changes)
@@ -1,11 +1,13 @@
|
||||
{
|
||||
"lockfileVersion": 1,
|
||||
"configVersion": 0,
|
||||
"workspaces": {
|
||||
"": {
|
||||
"name": "@anthropic-ai/claude-code-action",
|
||||
"dependencies": {
|
||||
"@actions/core": "^1.10.1",
|
||||
"@actions/github": "^6.0.1",
|
||||
"@anthropic-ai/claude-agent-sdk": "^0.1.70",
|
||||
"@modelcontextprotocol/sdk": "^1.11.0",
|
||||
"@octokit/graphql": "^8.2.2",
|
||||
"@octokit/rest": "^21.1.1",
|
||||
@@ -35,8 +37,40 @@
|
||||
|
||||
"@actions/io": ["@actions/io@1.1.3", "", {}, "sha512-wi9JjgKLYS7U/z8PPbco+PvTb/nRWjeoFlJ1Qer83k/3C5PHQi28hiVdeE2kHXmIL99mQFawx8qt/JPjZilJ8Q=="],
|
||||
|
||||
"@anthropic-ai/claude-agent-sdk": ["@anthropic-ai/claude-agent-sdk@0.1.70", "", { "optionalDependencies": { "@img/sharp-darwin-arm64": "^0.33.5", "@img/sharp-darwin-x64": "^0.33.5", "@img/sharp-linux-arm": "^0.33.5", "@img/sharp-linux-arm64": "^0.33.5", "@img/sharp-linux-x64": "^0.33.5", "@img/sharp-linuxmusl-arm64": "^0.33.5", "@img/sharp-linuxmusl-x64": "^0.33.5", "@img/sharp-win32-x64": "^0.33.5" }, "peerDependencies": { "zod": "^3.24.1" } }, "sha512-4jpFPDX8asys6skO1r3Pzh0Fe9nbND2ASYTWuyFB5iN9bWEL6WScTFyGokjql3M2TkEp9ZGuB2YYpTCdaqT9Sw=="],
|
||||
|
||||
"@fastify/busboy": ["@fastify/busboy@2.1.1", "", {}, "sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA=="],
|
||||
|
||||
"@img/sharp-darwin-arm64": ["@img/sharp-darwin-arm64@0.33.5", "", { "optionalDependencies": { "@img/sharp-libvips-darwin-arm64": "1.0.4" }, "os": "darwin", "cpu": "arm64" }, "sha512-UT4p+iz/2H4twwAoLCqfA9UH5pI6DggwKEGuaPy7nCVQ8ZsiY5PIcrRvD1DzuY3qYL07NtIQcWnBSY/heikIFQ=="],
|
||||
|
||||
"@img/sharp-darwin-x64": ["@img/sharp-darwin-x64@0.33.5", "", { "optionalDependencies": { "@img/sharp-libvips-darwin-x64": "1.0.4" }, "os": "darwin", "cpu": "x64" }, "sha512-fyHac4jIc1ANYGRDxtiqelIbdWkIuQaI84Mv45KvGRRxSAa7o7d1ZKAOBaYbnepLC1WqxfpimdeWfvqqSGwR2Q=="],
|
||||
|
||||
"@img/sharp-libvips-darwin-arm64": ["@img/sharp-libvips-darwin-arm64@1.0.4", "", { "os": "darwin", "cpu": "arm64" }, "sha512-XblONe153h0O2zuFfTAbQYAX2JhYmDHeWikp1LM9Hul9gVPjFY427k6dFEcOL72O01QxQsWi761svJ/ev9xEDg=="],
|
||||
|
||||
"@img/sharp-libvips-darwin-x64": ["@img/sharp-libvips-darwin-x64@1.0.4", "", { "os": "darwin", "cpu": "x64" }, "sha512-xnGR8YuZYfJGmWPvmlunFaWJsb9T/AO2ykoP3Fz/0X5XV2aoYBPkX6xqCQvUTKKiLddarLaxpzNe+b1hjeWHAQ=="],
|
||||
|
||||
"@img/sharp-libvips-linux-arm": ["@img/sharp-libvips-linux-arm@1.0.5", "", { "os": "linux", "cpu": "arm" }, "sha512-gvcC4ACAOPRNATg/ov8/MnbxFDJqf/pDePbBnuBDcjsI8PssmjoKMAz4LtLaVi+OnSb5FK/yIOamqDwGmXW32g=="],
|
||||
|
||||
"@img/sharp-libvips-linux-arm64": ["@img/sharp-libvips-linux-arm64@1.0.4", "", { "os": "linux", "cpu": "arm64" }, "sha512-9B+taZ8DlyyqzZQnoeIvDVR/2F4EbMepXMc/NdVbkzsJbzkUjhXv/70GQJ7tdLA4YJgNP25zukcxpX2/SueNrA=="],
|
||||
|
||||
"@img/sharp-libvips-linux-x64": ["@img/sharp-libvips-linux-x64@1.0.4", "", { "os": "linux", "cpu": "x64" }, "sha512-MmWmQ3iPFZr0Iev+BAgVMb3ZyC4KeFc3jFxnNbEPas60e1cIfevbtuyf9nDGIzOaW9PdnDciJm+wFFaTlj5xYw=="],
|
||||
|
||||
"@img/sharp-libvips-linuxmusl-arm64": ["@img/sharp-libvips-linuxmusl-arm64@1.0.4", "", { "os": "linux", "cpu": "arm64" }, "sha512-9Ti+BbTYDcsbp4wfYib8Ctm1ilkugkA/uscUn6UXK1ldpC1JjiXbLfFZtRlBhjPZ5o1NCLiDbg8fhUPKStHoTA=="],
|
||||
|
||||
"@img/sharp-libvips-linuxmusl-x64": ["@img/sharp-libvips-linuxmusl-x64@1.0.4", "", { "os": "linux", "cpu": "x64" }, "sha512-viYN1KX9m+/hGkJtvYYp+CCLgnJXwiQB39damAO7WMdKWlIhmYTfHjwSbQeUK/20vY154mwezd9HflVFM1wVSw=="],
|
||||
|
||||
"@img/sharp-linux-arm": ["@img/sharp-linux-arm@0.33.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-arm": "1.0.5" }, "os": "linux", "cpu": "arm" }, "sha512-JTS1eldqZbJxjvKaAkxhZmBqPRGmxgu+qFKSInv8moZ2AmT5Yib3EQ1c6gp493HvrvV8QgdOXdyaIBrhvFhBMQ=="],
|
||||
|
||||
"@img/sharp-linux-arm64": ["@img/sharp-linux-arm64@0.33.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-arm64": "1.0.4" }, "os": "linux", "cpu": "arm64" }, "sha512-JMVv+AMRyGOHtO1RFBiJy/MBsgz0x4AWrT6QoEVVTyh1E39TrCUpTRI7mx9VksGX4awWASxqCYLCV4wBZHAYxA=="],
|
||||
|
||||
"@img/sharp-linux-x64": ["@img/sharp-linux-x64@0.33.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-x64": "1.0.4" }, "os": "linux", "cpu": "x64" }, "sha512-opC+Ok5pRNAzuvq1AG0ar+1owsu842/Ab+4qvU879ippJBHvyY5n2mxF1izXqkPYlGuP/M556uh53jRLJmzTWA=="],
|
||||
|
||||
"@img/sharp-linuxmusl-arm64": ["@img/sharp-linuxmusl-arm64@0.33.5", "", { "optionalDependencies": { "@img/sharp-libvips-linuxmusl-arm64": "1.0.4" }, "os": "linux", "cpu": "arm64" }, "sha512-XrHMZwGQGvJg2V/oRSUfSAfjfPxO+4DkiRh6p2AFjLQztWUuY/o8Mq0eMQVIY7HJ1CDQUJlxGGZRw1a5bqmd1g=="],
|
||||
|
||||
"@img/sharp-linuxmusl-x64": ["@img/sharp-linuxmusl-x64@0.33.5", "", { "optionalDependencies": { "@img/sharp-libvips-linuxmusl-x64": "1.0.4" }, "os": "linux", "cpu": "x64" }, "sha512-WT+d/cgqKkkKySYmqoZ8y3pxx7lx9vVejxW/W4DOFMYVSkErR+w7mf2u8m/y4+xHe7yY9DAXQMWQhpnMuFfScw=="],
|
||||
|
||||
"@img/sharp-win32-x64": ["@img/sharp-win32-x64@0.33.5", "", { "os": "win32", "cpu": "x64" }, "sha512-MpY/o8/8kj+EcnxwvrP4aTJSWw/aZ7JIGR4aBeZkZw5B7/Jn+tY9/VNwtcoGmdT7GfggGIU4kygOMSbYnOrAbg=="],
|
||||
|
||||
"@modelcontextprotocol/sdk": ["@modelcontextprotocol/sdk@1.16.0", "", { "dependencies": { "ajv": "^6.12.6", "content-type": "^1.0.5", "cors": "^2.8.5", "cross-spawn": "^7.0.5", "eventsource": "^3.0.2", "eventsource-parser": "^3.0.0", "express": "^5.0.1", "express-rate-limit": "^7.5.0", "pkce-challenge": "^5.0.0", "raw-body": "^3.0.0", "zod": "^3.23.8", "zod-to-json-schema": "^3.24.1" } }, "sha512-8ofX7gkZcLj9H9rSd50mCgm3SSF8C7XoclxJuLoV0Cz3rEQ1tv9MZRYYvJtm9n1BiEQQMzSmE/w2AEkNacLYfg=="],
|
||||
|
||||
"@octokit/auth-token": ["@octokit/auth-token@4.0.0", "", {}, "sha512-tY/msAuJo6ARbK6SPIxZrPBms3xPbfwBrulZe0Wtr/DIY9lje2HeV1uoebShn6mx7SjCHif6EjMvoREj+gZ+SA=="],
|
||||
|
||||
@@ -12,6 +12,7 @@
     "dependencies": {
       "@actions/core": "^1.10.1",
       "@actions/github": "^6.0.1",
+      "@anthropic-ai/claude-agent-sdk": "^0.1.70",
       "@modelcontextprotocol/sdk": "^1.11.0",
       "@octokit/graphql": "^8.2.2",
       "@octokit/rest": "^21.1.1",
@@ -192,11 +192,6 @@ export function prepareContext(
     if (!isPR) {
       throw new Error("IS_PR must be true for pull_request_review event");
     }
-    if (!commentBody) {
-      throw new Error(
-        "COMMENT_BODY is required for pull_request_review event",
-      );
-    }
     eventData = {
       eventName: "pull_request_review",
       isPR: true,
@@ -464,6 +459,123 @@ export function generatePrompt(
return mode.generatePrompt(context, githubData, useCommitSigning);
}

/**
* Generates a simplified prompt for tag mode (opt-in via USE_SIMPLE_PROMPT env var)
* @internal
*/
function generateSimplePrompt(
context: PreparedContext,
githubData: FetchDataResult,
useCommitSigning: boolean = false,
): string {
const {
contextData,
comments,
changedFilesWithSHA,
reviewData,
imageUrlMap,
} = githubData;
const { eventData } = context;

const { triggerContext } = getEventTypeAndContext(context);

const formattedContext = formatContext(contextData, eventData.isPR);
const formattedComments = formatComments(comments, imageUrlMap);
const formattedReviewComments = eventData.isPR
? formatReviewComments(reviewData, imageUrlMap)
: "";
const formattedChangedFiles = eventData.isPR
? formatChangedFilesWithSHA(changedFilesWithSHA)
: "";

const hasImages = imageUrlMap && imageUrlMap.size > 0;
const imagesInfo = hasImages
? `\n\n<images_info>
Images from comments have been saved to disk. Paths are in the formatted content above. Use Read tool to view them.
</images_info>`
: "";

const formattedBody = contextData?.body
? formatBody(contextData.body, imageUrlMap)
: "No description provided";

const entityType = eventData.isPR ? "pull request" : "issue";
const jobUrl = `${GITHUB_SERVER_URL}/${context.repository}/actions/runs/${process.env.GITHUB_RUN_ID}`;

let promptContent = `You were tagged on a GitHub ${entityType} via "${context.triggerPhrase}". Read the request and decide how to help.

<context>
${formattedContext}
</context>

<${eventData.isPR ? "pr" : "issue"}_body>
${formattedBody}
</${eventData.isPR ? "pr" : "issue"}_body>

<comments>
${formattedComments || "No comments"}
</comments>
${
eventData.isPR
? `
<review_comments>
${formattedReviewComments || "No review comments"}
</review_comments>

<changed_files>
${formattedChangedFiles || "No files changed"}
</changed_files>`
: ""
}${imagesInfo}

<metadata>
repository: ${context.repository}
${eventData.isPR && eventData.prNumber ? `pr_number: ${eventData.prNumber}` : ""}
${!eventData.isPR && eventData.issueNumber ? `issue_number: ${eventData.issueNumber}` : ""}
trigger: ${triggerContext}
triggered_by: ${context.triggerUsername ?? "Unknown"}
claude_comment_id: ${context.claudeCommentId}
</metadata>
${
(eventData.eventName === "issue_comment" ||
eventData.eventName === "pull_request_review_comment" ||
eventData.eventName === "pull_request_review") &&
eventData.commentBody
? `
<trigger_comment>
${sanitizeContent(eventData.commentBody)}
</trigger_comment>`
: ""
}

Your request is in <trigger_comment> above${eventData.eventName === "issues" ? ` (or the ${entityType} body for assigned/labeled events)` : ""}.

Decide what's being asked:
1. **Question or code review** - Answer directly or provide feedback
2. **Code change** - Implement the change, commit, and push

Communication:
- Your ONLY visible output is your GitHub comment - update it with progress and results
- Use mcp__github_comment__update_claude_comment to update (only "body" param needed)
- Use checklist format for tasks: - [ ] incomplete, - [x] complete
- Use ### headers (not #)
${getCommitInstructions(eventData, githubData, context, useCommitSigning)}
${
eventData.claudeBranch
? `
When done with changes, provide a PR link:
[Create a PR](${GITHUB_SERVER_URL}/${context.repository}/compare/${eventData.baseBranch}...${eventData.claudeBranch}?quick_pull=1&title=<url-encoded-title>&body=<url-encoded-body>)
Use THREE dots (...) between branches. URL-encode all parameters.`
: ""
}

Always include at the bottom:
- Job link: [View job run](${jobUrl})
- Follow the repo's CLAUDE.md file for project-specific guidelines`;

return promptContent;
}

/**
* Generates the default prompt for tag mode
* @internal
@@ -473,6 +585,10 @@ export function generateDefaultPrompt(
githubData: FetchDataResult,
useCommitSigning: boolean = false,
): string {
// Use simplified prompt if opted in
if (process.env.USE_SIMPLE_PROMPT === "true") {
return generateSimplePrompt(context, githubData, useCommitSigning);
}
const {
contextData,
comments,

@@ -23,7 +23,7 @@ type PullRequestReviewEvent = {
eventName: "pull_request_review";
isPR: true;
prNumber: string;
commentBody: string;
commentBody?: string; // May be absent for approvals without comments
claudeBranch?: string;
baseBranch?: string;
};

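An illustrative TypeScript sketch, not part of the diff: with commentBody now optional, consumers of this event have to tolerate reviews submitted without a body (the context test further down expects an empty string in that case). The payload shape here is an assumption made only for the example.

// Hypothetical sketch: a review approval may carry no body at all.
type ReviewPayload = { body?: string | null };

function reviewCommentBody(review: ReviewPayload): string {
  // Fall back to "" so downstream prompt generation never sees undefined.
  return review.body ?? "";
}

// reviewCommentBody({ body: null }) === ""
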
@@ -152,7 +152,7 @@ async function run() {

// Check if action failed and read output file for execution details
let executionDetails: {
cost_usd?: number;
total_cost_usd?: number;
duration_ms?: number;
duration_api_ms?: number;
} | null = null;
@@ -179,11 +179,11 @@ async function run() {
const lastElement = outputData[outputData.length - 1];
if (
lastElement.type === "result" &&
"cost_usd" in lastElement &&
"total_cost_usd" in lastElement &&
"duration_ms" in lastElement
) {
executionDetails = {
cost_usd: lastElement.cost_usd,
total_cost_usd: lastElement.total_cost_usd,
duration_ms: lastElement.duration_ms,
duration_api_ms: lastElement.duration_api_ms,
};

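An illustrative TypeScript sketch, not part of the diff: it shows how the renamed total_cost_usd field can be read back out of an execution log, assuming the log is a JSON array whose last element is the result entry; the helper name and file handling are assumptions made for the example.

import { readFileSync } from "fs";

// Hypothetical helper: return total_cost_usd from the final "result" entry, if present.
function readTotalCost(executionFile: string): number | undefined {
  const entries = JSON.parse(readFileSync(executionFile, "utf-8")) as Array<
    Record<string, unknown>
  >;
  const last = entries[entries.length - 1];
  const cost = last?.["total_cost_usd"];
  if (last?.["type"] === "result" && typeof cost === "number") {
    return cost;
  }
  return undefined;
}
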
@@ -13,6 +13,8 @@ export const PR_QUERY = `
headRefName
headRefOid
createdAt
updatedAt
lastEditedAt
additions
deletions
state
@@ -96,6 +98,8 @@ export const ISSUE_QUERY = `
login
}
createdAt
updatedAt
lastEditedAt
state
comments(first: 100) {
nodes {

@@ -107,6 +107,38 @@ export function filterReviewsToTriggerTime<
});
}

/**
* Checks if the issue/PR body was edited after the trigger time.
* This prevents a race condition where an attacker could edit the issue/PR body
* between when an authorized user triggered Claude and when Claude processes the request.
*
* @param contextData - The PR or issue data containing body and edit timestamps
* @param triggerTime - ISO timestamp of when the trigger event occurred
* @returns true if the body is safe to use, false if it was edited after trigger
*/
export function isBodySafeToUse(
contextData: { createdAt: string; updatedAt?: string; lastEditedAt?: string },
triggerTime: string | undefined,
): boolean {
// If no trigger time is available, we can't validate - allow the body
// This maintains backwards compatibility for triggers that don't have timestamps
if (!triggerTime) return true;

const triggerTimestamp = new Date(triggerTime).getTime();

// Check if the body was edited after the trigger
// Use lastEditedAt if available (more accurate for body edits), otherwise fall back to updatedAt
const lastEditTime = contextData.lastEditedAt || contextData.updatedAt;
if (lastEditTime) {
const lastEditTimestamp = new Date(lastEditTime).getTime();
if (lastEditTimestamp >= triggerTimestamp) {
return false;
}
}

return true;
}

type FetchDataParams = {
octokits: Octokits;
repository: string;
@@ -273,9 +305,13 @@ export async function fetchGitHubData({
body: c.body,
}));

// Add the main issue/PR body if it has content
const mainBody: CommentWithImages[] = contextData.body
? [
// Add the main issue/PR body if it has content and wasn't edited after trigger
// This prevents a TOCTOU race condition where an attacker could edit the body
// between when an authorized user triggered Claude and when Claude processes the request
let mainBody: CommentWithImages[] = [];
if (contextData.body) {
if (isBodySafeToUse(contextData, triggerTime)) {
mainBody = [
{
...(isPR
? {
@@ -289,8 +325,14 @@ export async function fetchGitHubData({
body: contextData.body,
}),
},
]
: [];
];
} else {
console.warn(
`Security: ${isPR ? "PR" : "Issue"} #${prNumber} body was edited after the trigger event. ` +
`Excluding body content to prevent potential injection attacks.`,
);
}
}

const allComments = [
...mainBody,

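An illustrative TypeScript sketch, not part of the diff: a minimal usage example of the isBodySafeToUse guard shown above, with made-up timestamps; the import path mirrors the one used by the tests further down.

import { isBodySafeToUse } from "../src/github/data/fetcher";

// Hypothetical example: the body was last edited before the trigger fired,
// so it is safe to include; an edit at or after triggerTime would return false.
const contextData = {
  createdAt: "2024-01-15T10:00:00Z",
  lastEditedAt: "2024-01-15T11:00:00Z",
};
const triggerTime = "2024-01-15T12:00:00Z";

if (isBodySafeToUse(contextData, triggerTime)) {
  // safe to include the issue/PR body in the prompt
} else {
  console.warn("Body edited after trigger; excluding it from the prompt.");
}
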
@@ -6,13 +6,112 @@
* - For Issues: Create a new branch
*/

import { $ } from "bun";
import { execFileSync } from "child_process";
import * as core from "@actions/core";
import type { ParsedGitHubContext } from "../context";
import type { GitHubPullRequest } from "../types";
import type { Octokits } from "../api/client";
import type { FetchDataResult } from "../data/fetcher";

/**
* Validates a git branch name against a strict whitelist pattern.
* This prevents command injection by ensuring only safe characters are used.
*
* Valid branch names:
* - Start with alphanumeric character (not dash, to prevent option injection)
* - Contain only alphanumeric, forward slash, hyphen, underscore, or period
* - Do not start or end with a period
* - Do not end with a slash
* - Do not contain '..' (path traversal)
* - Do not contain '//' (consecutive slashes)
* - Do not end with '.lock'
* - Do not contain '@{'
* - Do not contain control characters or special git characters (~^:?*[\])
*/
export function validateBranchName(branchName: string): void {
// Check for empty or whitespace-only names
if (!branchName || branchName.trim().length === 0) {
throw new Error("Branch name cannot be empty");
}

// Check for leading dash (prevents option injection like --help, -x)
if (branchName.startsWith("-")) {
throw new Error(
`Invalid branch name: "${branchName}". Branch names cannot start with a dash.`,
);
}

// Check for control characters and special git characters (~^:?*[\])
// eslint-disable-next-line no-control-regex
if (/[\x00-\x1F\x7F ~^:?*[\]\\]/.test(branchName)) {
throw new Error(
`Invalid branch name: "${branchName}". Branch names cannot contain control characters, spaces, or special git characters (~^:?*[\\]).`,
);
}

// Strict whitelist pattern: alphanumeric start, then alphanumeric/slash/hyphen/underscore/period
const validPattern = /^[a-zA-Z0-9][a-zA-Z0-9/_.-]*$/;

if (!validPattern.test(branchName)) {
throw new Error(
`Invalid branch name: "${branchName}". Branch names must start with an alphanumeric character and contain only alphanumeric characters, forward slashes, hyphens, underscores, or periods.`,
);
}

// Check for leading/trailing periods
if (branchName.startsWith(".") || branchName.endsWith(".")) {
throw new Error(
`Invalid branch name: "${branchName}". Branch names cannot start or end with a period.`,
);
}

// Check for trailing slash
if (branchName.endsWith("/")) {
throw new Error(
`Invalid branch name: "${branchName}". Branch names cannot end with a slash.`,
);
}

// Check for consecutive slashes
if (branchName.includes("//")) {
throw new Error(
`Invalid branch name: "${branchName}". Branch names cannot contain consecutive slashes.`,
);
}

// Additional git-specific validations
if (branchName.includes("..")) {
throw new Error(
`Invalid branch name: "${branchName}". Branch names cannot contain '..'`,
);
}

if (branchName.endsWith(".lock")) {
throw new Error(
`Invalid branch name: "${branchName}". Branch names cannot end with '.lock'`,
);
}

if (branchName.includes("@{")) {
throw new Error(
`Invalid branch name: "${branchName}". Branch names cannot contain '@{'`,
);
}
}

/**
* Executes a git command safely using execFileSync to avoid shell interpolation.
*
* Security: execFileSync passes arguments directly to the git binary without
* invoking a shell, preventing command injection attacks where malicious input
* could be interpreted as shell commands (e.g., branch names containing `;`, `|`, `&&`).
*
* @param args - Git command arguments (e.g., ["checkout", "branch-name"])
*/
function execGit(args: string[]): void {
execFileSync("git", args, { stdio: "inherit" });
}

export type BranchInfo = {
baseBranch: string;
claudeBranch?: string;
@@ -53,14 +152,19 @@ export async function setupBranch(
`PR #${entityNumber}: ${commitCount} commits, using fetch depth ${fetchDepth}`,
);

// Validate branch names before use to prevent command injection
validateBranchName(branchName);

// Execute git commands to checkout PR branch (dynamic depth based on PR size)
await $`git fetch origin --depth=${fetchDepth} ${branchName}`;
await $`git checkout ${branchName} --`;
// Using execFileSync instead of shell template literals for security
execGit(["fetch", "origin", `--depth=${fetchDepth}`, branchName]);
execGit(["checkout", branchName, "--"]);

console.log(`Successfully checked out PR branch for PR #${entityNumber}`);

// For open PRs, we need to get the base branch of the PR
const baseBranch = prData.baseRefName;
validateBranchName(baseBranch);

return {
baseBranch,
@@ -118,8 +222,9 @@ export async function setupBranch(

// Ensure we're on the source branch
console.log(`Fetching and checking out source branch: ${sourceBranch}`);
await $`git fetch origin ${sourceBranch} --depth=1`;
await $`git checkout ${sourceBranch}`;
validateBranchName(sourceBranch);
execGit(["fetch", "origin", sourceBranch, "--depth=1"]);
execGit(["checkout", sourceBranch, "--"]);

// Set outputs for GitHub Actions
core.setOutput("CLAUDE_BRANCH", newBranch);
@@ -138,11 +243,13 @@ export async function setupBranch(

// Fetch and checkout the source branch first to ensure we branch from the correct base
console.log(`Fetching and checking out source branch: ${sourceBranch}`);
await $`git fetch origin ${sourceBranch} --depth=1`;
await $`git checkout ${sourceBranch}`;
validateBranchName(sourceBranch);
validateBranchName(newBranch);
execGit(["fetch", "origin", sourceBranch, "--depth=1"]);
execGit(["checkout", sourceBranch, "--"]);

// Create and checkout the new branch from the source branch
await $`git checkout -b ${newBranch}`;
execGit(["checkout", "-b", newBranch]);

console.log(
`Successfully created and checked out local branch: ${newBranch}`,

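An illustrative TypeScript sketch, not part of the diff: a minimal example of the validate-then-execFileSync pattern the branch setup now follows; the branch name is made up, and the import path mirrors the test file below.

import { execFileSync } from "child_process";
import { validateBranchName } from "../src/github/operations/branch";

// Hypothetical example: reject unsafe names up front, then pass the name as a
// discrete argv entry so no shell ever gets a chance to interpret it.
const branchName = "claude/issue-123-20250101-1234";
validateBranchName(branchName); // throws on names like "-rf", "a;b" or "a..b"
execFileSync("git", ["checkout", "-b", branchName], { stdio: "inherit" });
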
@@ -1,7 +1,7 @@
import { GITHUB_SERVER_URL } from "../api/config";

export type ExecutionDetails = {
cost_usd?: number;
total_cost_usd?: number;
duration_ms?: number;
duration_api_ms?: number;
};

@@ -58,6 +58,8 @@ export type GitHubPullRequest = {
headRefName: string;
headRefOid: string;
createdAt: string;
updatedAt?: string;
lastEditedAt?: string;
additions: number;
deletions: number;
state: string;
@@ -83,6 +85,8 @@ export type GitHubIssue = {
body: string;
author: GitHubAuthor;
createdAt: string;
updatedAt?: string;
lastEditedAt?: string;
state: string;
comments: {
nodes: GitHubComment[];

@@ -258,7 +258,7 @@ describe("updateCommentBody", () => {
const input = {
...baseInput,
executionDetails: {
cost_usd: 0.13382595,
total_cost_usd: 0.13382595,
duration_ms: 31033,
duration_api_ms: 31034,
},
@@ -301,7 +301,7 @@ describe("updateCommentBody", () => {
const input = {
...baseInput,
executionDetails: {
cost_usd: 0.25,
total_cost_usd: 0.25,
},
triggerUsername: "testuser",
};
@@ -322,7 +322,7 @@ describe("updateCommentBody", () => {
branchName: "claude-branch-123",
prLink: "\n[Create a PR](https://github.com/owner/repo/pr-url)",
executionDetails: {
cost_usd: 0.01,
total_cost_usd: 0.01,
duration_ms: 65000, // 1 minute 5 seconds
},
triggerUsername: "trigger-user",

@@ -4,6 +4,7 @@ import {
fetchGitHubData,
filterCommentsToTriggerTime,
filterReviewsToTriggerTime,
isBodySafeToUse,
} from "../src/github/data/fetcher";
import {
createMockContext,
@@ -371,6 +372,139 @@ describe("filterReviewsToTriggerTime", () => {
});
});

describe("isBodySafeToUse", () => {
const triggerTime = "2024-01-15T12:00:00Z";

const createMockContextData = (
createdAt: string,
updatedAt?: string,
lastEditedAt?: string,
) => ({
createdAt,
updatedAt,
lastEditedAt,
});

describe("body edit time validation", () => {
it("should return true when body was never edited", () => {
const contextData = createMockContextData("2024-01-15T10:00:00Z");
expect(isBodySafeToUse(contextData, triggerTime)).toBe(true);
});

it("should return true when body was edited before trigger time", () => {
const contextData = createMockContextData(
"2024-01-15T10:00:00Z",
"2024-01-15T11:00:00Z",
"2024-01-15T11:30:00Z",
);
expect(isBodySafeToUse(contextData, triggerTime)).toBe(true);
});

it("should return false when body was edited after trigger time (using updatedAt)", () => {
const contextData = createMockContextData(
"2024-01-15T10:00:00Z",
"2024-01-15T13:00:00Z",
);
expect(isBodySafeToUse(contextData, triggerTime)).toBe(false);
});

it("should return false when body was edited after trigger time (using lastEditedAt)", () => {
const contextData = createMockContextData(
"2024-01-15T10:00:00Z",
undefined,
"2024-01-15T13:00:00Z",
);
expect(isBodySafeToUse(contextData, triggerTime)).toBe(false);
});

it("should return false when body was edited exactly at trigger time", () => {
const contextData = createMockContextData(
"2024-01-15T10:00:00Z",
"2024-01-15T12:00:00Z",
);
expect(isBodySafeToUse(contextData, triggerTime)).toBe(false);
});

it("should prioritize lastEditedAt over updatedAt", () => {
// updatedAt is after trigger, but lastEditedAt is before - should be safe
const contextData = createMockContextData(
"2024-01-15T10:00:00Z",
"2024-01-15T13:00:00Z", // updatedAt after trigger
"2024-01-15T11:00:00Z", // lastEditedAt before trigger
);
expect(isBodySafeToUse(contextData, triggerTime)).toBe(true);
});
});

describe("edge cases", () => {
it("should return true when no trigger time is provided (backward compatibility)", () => {
const contextData = createMockContextData(
"2024-01-15T10:00:00Z",
"2024-01-15T13:00:00Z", // Would normally fail
"2024-01-15T14:00:00Z", // Would normally fail
);
expect(isBodySafeToUse(contextData, undefined)).toBe(true);
});

it("should handle millisecond precision correctly", () => {
// Edit 1ms after trigger - should be unsafe
const contextData = createMockContextData(
"2024-01-15T10:00:00Z",
"2024-01-15T12:00:00.001Z",
);
expect(isBodySafeToUse(contextData, triggerTime)).toBe(false);
});

it("should handle edit 1ms before trigger - should be safe", () => {
const contextData = createMockContextData(
"2024-01-15T10:00:00Z",
"2024-01-15T11:59:59.999Z",
);
expect(isBodySafeToUse(contextData, triggerTime)).toBe(true);
});

it("should handle various ISO timestamp formats", () => {
const contextData1 = createMockContextData(
"2024-01-15T10:00:00Z",
"2024-01-15T11:00:00Z",
);
const contextData2 = createMockContextData(
"2024-01-15T10:00:00+00:00",
"2024-01-15T11:00:00+00:00",
);
const contextData3 = createMockContextData(
"2024-01-15T10:00:00.000Z",
"2024-01-15T11:00:00.000Z",
);

expect(isBodySafeToUse(contextData1, triggerTime)).toBe(true);
expect(isBodySafeToUse(contextData2, triggerTime)).toBe(true);
expect(isBodySafeToUse(contextData3, triggerTime)).toBe(true);
});
});

describe("security scenarios", () => {
it("should detect race condition attack - body edited between trigger and processing", () => {
// Simulates: Owner triggers @claude at 12:00, attacker edits body at 12:00:30
const contextData = createMockContextData(
"2024-01-15T10:00:00Z", // Issue created
"2024-01-15T12:00:30Z", // Body edited after trigger
);
expect(isBodySafeToUse(contextData, "2024-01-15T12:00:00Z")).toBe(false);
});

it("should allow body that was stable at trigger time", () => {
// Body was last edited well before the trigger
const contextData = createMockContextData(
"2024-01-15T10:00:00Z",
"2024-01-15T10:30:00Z",
"2024-01-15T10:30:00Z",
);
expect(isBodySafeToUse(contextData, "2024-01-15T12:00:00Z")).toBe(true);
});
});
});

describe("fetchGitHubData integration with time filtering", () => {
it("should filter comments based on trigger time when provided", async () => {
const mockOctokits = {
@@ -696,4 +830,119 @@ describe("fetchGitHubData integration with time filtering", () => {
// All three comments should be included as they're all before trigger time
expect(result.comments.length).toBe(3);
});

it("should exclude issue body when edited after trigger time (TOCTOU protection)", async () => {
const mockOctokits = {
graphql: jest.fn().mockResolvedValue({
repository: {
issue: {
number: 555,
title: "Test Issue",
body: "Malicious body edited after trigger",
author: { login: "attacker" },
createdAt: "2024-01-15T10:00:00Z",
updatedAt: "2024-01-15T12:30:00Z", // Edited after trigger
lastEditedAt: "2024-01-15T12:30:00Z", // Edited after trigger
comments: { nodes: [] },
},
},
user: { login: "trigger-user" },
}),
rest: jest.fn() as any,
};

const result = await fetchGitHubData({
octokits: mockOctokits as any,
repository: "test-owner/test-repo",
prNumber: "555",
isPR: false,
triggerUsername: "trigger-user",
triggerTime: "2024-01-15T12:00:00Z",
});

// The body should be excluded from image processing due to TOCTOU protection
// We can verify this by checking that issue_body is NOT in the imageUrlMap keys
const hasIssueBodyInMap = Array.from(result.imageUrlMap.keys()).some(
(key) => key.includes("issue_body"),
);
expect(hasIssueBodyInMap).toBe(false);
});

it("should include issue body when not edited after trigger time", async () => {
const mockOctokits = {
graphql: jest.fn().mockResolvedValue({
repository: {
issue: {
number: 666,
title: "Test Issue",
body: "Safe body not edited after trigger",
author: { login: "author" },
createdAt: "2024-01-15T10:00:00Z",
updatedAt: "2024-01-15T11:00:00Z", // Edited before trigger
lastEditedAt: "2024-01-15T11:00:00Z", // Edited before trigger
comments: { nodes: [] },
},
},
user: { login: "trigger-user" },
}),
rest: jest.fn() as any,
};

const result = await fetchGitHubData({
octokits: mockOctokits as any,
repository: "test-owner/test-repo",
prNumber: "666",
isPR: false,
triggerUsername: "trigger-user",
triggerTime: "2024-01-15T12:00:00Z",
});

// The contextData should still contain the body
expect(result.contextData.body).toBe("Safe body not edited after trigger");
});

it("should exclude PR body when edited after trigger time (TOCTOU protection)", async () => {
const mockOctokits = {
graphql: jest.fn().mockResolvedValue({
repository: {
pullRequest: {
number: 777,
title: "Test PR",
body: "Malicious PR body edited after trigger",
author: { login: "attacker" },
baseRefName: "main",
headRefName: "feature",
headRefOid: "abc123",
createdAt: "2024-01-15T10:00:00Z",
updatedAt: "2024-01-15T12:30:00Z", // Edited after trigger
lastEditedAt: "2024-01-15T12:30:00Z", // Edited after trigger
additions: 10,
deletions: 5,
state: "OPEN",
commits: { totalCount: 1, nodes: [] },
files: { nodes: [] },
comments: { nodes: [] },
reviews: { nodes: [] },
},
},
user: { login: "trigger-user" },
}),
rest: jest.fn() as any,
};

const result = await fetchGitHubData({
octokits: mockOctokits as any,
repository: "test-owner/test-repo",
prNumber: "777",
isPR: true,
triggerUsername: "trigger-user",
triggerTime: "2024-01-15T12:00:00Z",
});

// The body should be excluded from image processing due to TOCTOU protection
const hasPrBodyInMap = Array.from(result.imageUrlMap.keys()).some((key) =>
key.includes("pr_body"),
);
expect(hasPrBodyInMap).toBe(false);
});
});

2
test/fixtures/sample-turns.json
vendored
@@ -189,7 +189,7 @@
},
{
"type": "result",
"cost_usd": 0.0347,
"total_cost_usd": 0.0347,
"duration_ms": 18750,
"result": "Successfully removed debug print statement from file and added review comment to document the change."
}

@@ -401,6 +401,53 @@ export const mockPullRequestReviewContext: ParsedGitHubContext = {
inputs: { ...defaultInputs, triggerPhrase: "@claude" },
};

export const mockPullRequestReviewWithoutCommentContext: ParsedGitHubContext = {
runId: "1234567890",
eventName: "pull_request_review",
eventAction: "dismissed",
repository: defaultRepository,
actor: "senior-developer",
payload: {
action: "submitted",
review: {
id: 11122233,
body: null, // Simulating approval without comment
user: {
login: "senior-developer",
id: 44444,
avatar_url: "https://avatars.githubusercontent.com/u/44444",
html_url: "https://github.com/senior-developer",
},
state: "approved",
html_url:
"https://github.com/test-owner/test-repo/pull/321#pullrequestreview-11122233",
submitted_at: "2024-01-15T15:30:00Z",
},
pull_request: {
number: 321,
title: "Refactor: Improve error handling in API layer",
body: "This PR improves error handling across all API endpoints",
user: {
login: "backend-developer",
id: 33333,
avatar_url: "https://avatars.githubusercontent.com/u/33333",
html_url: "https://github.com/backend-developer",
},
},
repository: {
name: "test-repo",
full_name: "test-owner/test-repo",
private: false,
owner: {
login: "test-owner",
},
},
} as PullRequestReviewEvent,
entityNumber: 321,
isPR: true,
inputs: { ...defaultInputs, triggerPhrase: "@claude" },
};

export const mockPullRequestReviewCommentContext: ParsedGitHubContext = {
runId: "1234567890",
eventName: "pull_request_review_comment",

@@ -10,6 +10,7 @@ import {
mockPullRequestCommentContext,
mockPullRequestReviewContext,
mockPullRequestReviewCommentContext,
mockPullRequestReviewWithoutCommentContext,
} from "./mockContext";

const BASE_ENV = {
@@ -126,6 +127,24 @@ describe("parseEnvVarsWithContext", () => {
});
});

describe("pull_request_review event without comment", () => {
test("should parse pull_request_review event correctly", () => {
process.env = BASE_ENV;
const result = prepareContext(
mockPullRequestReviewWithoutCommentContext,
"12345",
);

expect(result.eventData.eventName).toBe("pull_request_review");
expect(result.eventData.isPR).toBe(true);
expect(result.triggerUsername).toBe("senior-developer");
if (result.eventData.eventName === "pull_request_review") {
expect(result.eventData.prNumber).toBe("321");
expect(result.eventData.commentBody).toBe("");
}
});
});

describe("pull_request_review_comment event", () => {
test("should parse pull_request_review_comment event correctly", () => {
process.env = BASE_ENV;

201
test/validate-branch-name.test.ts
Normal file
@@ -0,0 +1,201 @@
import { describe, expect, it } from "bun:test";
import { validateBranchName } from "../src/github/operations/branch";

describe("validateBranchName", () => {
describe("valid branch names", () => {
it("should accept simple alphanumeric names", () => {
expect(() => validateBranchName("main")).not.toThrow();
expect(() => validateBranchName("feature123")).not.toThrow();
expect(() => validateBranchName("Branch1")).not.toThrow();
});

it("should accept names with hyphens", () => {
expect(() => validateBranchName("feature-branch")).not.toThrow();
expect(() => validateBranchName("fix-bug-123")).not.toThrow();
});

it("should accept names with underscores", () => {
expect(() => validateBranchName("feature_branch")).not.toThrow();
expect(() => validateBranchName("fix_bug_123")).not.toThrow();
});

it("should accept names with forward slashes", () => {
expect(() => validateBranchName("feature/new-thing")).not.toThrow();
expect(() => validateBranchName("user/feature/branch")).not.toThrow();
});

it("should accept names with periods", () => {
expect(() => validateBranchName("v1.0.0")).not.toThrow();
expect(() => validateBranchName("release.1.2.3")).not.toThrow();
});

it("should accept typical branch name formats", () => {
expect(() =>
validateBranchName("claude/issue-123-20250101-1234"),
).not.toThrow();
expect(() => validateBranchName("refs/heads/main")).not.toThrow();
expect(() => validateBranchName("bugfix/JIRA-1234")).not.toThrow();
});
});

describe("command injection attempts", () => {
it("should reject shell command substitution with $()", () => {
expect(() => validateBranchName("$(whoami)")).toThrow();
expect(() => validateBranchName("branch-$(rm -rf /)")).toThrow();
expect(() => validateBranchName("test$(cat /etc/passwd)")).toThrow();
});

it("should reject shell command substitution with backticks", () => {
expect(() => validateBranchName("`whoami`")).toThrow();
expect(() => validateBranchName("branch-`rm -rf /`")).toThrow();
});

it("should reject command chaining with semicolons", () => {
expect(() => validateBranchName("branch; rm -rf /")).toThrow();
expect(() => validateBranchName("test;whoami")).toThrow();
});

it("should reject command chaining with &&", () => {
expect(() => validateBranchName("branch && rm -rf /")).toThrow();
expect(() => validateBranchName("test&&whoami")).toThrow();
});

it("should reject command chaining with ||", () => {
expect(() => validateBranchName("branch || rm -rf /")).toThrow();
expect(() => validateBranchName("test||whoami")).toThrow();
});

it("should reject pipe characters", () => {
expect(() => validateBranchName("branch | cat")).toThrow();
expect(() => validateBranchName("test|grep password")).toThrow();
});

it("should reject redirection operators", () => {
expect(() => validateBranchName("branch > /etc/passwd")).toThrow();
expect(() => validateBranchName("branch < input")).toThrow();
expect(() => validateBranchName("branch >> file")).toThrow();
});
});

describe("option injection attempts", () => {
it("should reject branch names starting with dash", () => {
expect(() => validateBranchName("-x")).toThrow(
/cannot start with a dash/,
);
expect(() => validateBranchName("--help")).toThrow(
/cannot start with a dash/,
);
expect(() => validateBranchName("-")).toThrow(/cannot start with a dash/);
expect(() => validateBranchName("--version")).toThrow(
/cannot start with a dash/,
);
expect(() => validateBranchName("-rf")).toThrow(
/cannot start with a dash/,
);
});
});

describe("path traversal attempts", () => {
it("should reject double dot sequences", () => {
expect(() => validateBranchName("../../../etc")).toThrow();
expect(() => validateBranchName("branch/../secret")).toThrow(/'\.\.'$/);
expect(() => validateBranchName("a..b")).toThrow(/'\.\.'$/);
});
});

describe("git-specific invalid patterns", () => {
it("should reject @{ sequence", () => {
expect(() => validateBranchName("branch@{1}")).toThrow(/@{/);
expect(() => validateBranchName("HEAD@{yesterday}")).toThrow(/@{/);
});

it("should reject .lock suffix", () => {
expect(() => validateBranchName("branch.lock")).toThrow(/\.lock/);
expect(() => validateBranchName("feature.lock")).toThrow(/\.lock/);
});

it("should reject consecutive slashes", () => {
expect(() => validateBranchName("feature//branch")).toThrow(
/consecutive slashes/,
);
expect(() => validateBranchName("a//b//c")).toThrow(
/consecutive slashes/,
);
});

it("should reject trailing slashes", () => {
expect(() => validateBranchName("feature/")).toThrow(
/cannot end with a slash/,
);
expect(() => validateBranchName("branch/")).toThrow(
/cannot end with a slash/,
);
});

it("should reject leading periods", () => {
expect(() => validateBranchName(".hidden")).toThrow();
});

it("should reject trailing periods", () => {
expect(() => validateBranchName("branch.")).toThrow(
/cannot start or end with a period/,
);
});

it("should reject special git refspec characters", () => {
expect(() => validateBranchName("branch~1")).toThrow();
expect(() => validateBranchName("branch^2")).toThrow();
expect(() => validateBranchName("branch:ref")).toThrow();
expect(() => validateBranchName("branch?")).toThrow();
expect(() => validateBranchName("branch*")).toThrow();
expect(() => validateBranchName("branch[0]")).toThrow();
expect(() => validateBranchName("branch\\path")).toThrow();
});
});

describe("control characters and special characters", () => {
it("should reject null bytes", () => {
expect(() => validateBranchName("branch\x00name")).toThrow();
});

it("should reject other control characters", () => {
expect(() => validateBranchName("branch\x01name")).toThrow();
expect(() => validateBranchName("branch\x1Fname")).toThrow();
expect(() => validateBranchName("branch\x7Fname")).toThrow();
});

it("should reject spaces", () => {
expect(() => validateBranchName("branch name")).toThrow();
expect(() => validateBranchName("feature branch")).toThrow();
});

it("should reject newlines and tabs", () => {
expect(() => validateBranchName("branch\nname")).toThrow();
expect(() => validateBranchName("branch\tname")).toThrow();
});
});

describe("empty and whitespace", () => {
it("should reject empty strings", () => {
expect(() => validateBranchName("")).toThrow(/cannot be empty/);
});

it("should reject whitespace-only strings", () => {
expect(() => validateBranchName(" ")).toThrow();
expect(() => validateBranchName("\t\n")).toThrow();
});
});

describe("edge cases", () => {
it("should accept single alphanumeric character", () => {
expect(() => validateBranchName("a")).not.toThrow();
expect(() => validateBranchName("1")).not.toThrow();
});

it("should reject single special characters", () => {
expect(() => validateBranchName(".")).toThrow();
expect(() => validateBranchName("/")).toThrow();
expect(() => validateBranchName("-")).toThrow();
});
});
});