mirror of
https://github.com/anthropics/claude-code-action.git
synced 2026-01-23 23:14:13 +08:00
Compare commits
10 Commits
eap
...
ashwin/cla
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a3c96305c1 | ||
|
|
e3a4ac69fe | ||
|
|
923d1d0592 | ||
|
|
15db2b3c79 | ||
|
|
188d526721 | ||
|
|
a519840051 | ||
|
|
85287e957d | ||
|
|
c6a07895d7 | ||
|
|
0c5d54472f | ||
|
|
2845685880 |
2
.npmrc
Normal file
2
.npmrc
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
engine-strict=true
|
||||||
|
registry=https://registry.npmjs.org/
|
||||||
133
action.yml
133
action.yml
@@ -118,10 +118,10 @@ inputs:
|
|||||||
outputs:
|
outputs:
|
||||||
execution_file:
|
execution_file:
|
||||||
description: "Path to the Claude Code execution output file"
|
description: "Path to the Claude Code execution output file"
|
||||||
value: ${{ steps.claude-code.outputs.execution_file }}
|
value: ${{ steps.claude.outputs.execution_file }}
|
||||||
branch_name:
|
branch_name:
|
||||||
description: "The branch created by Claude Code for this execution"
|
description: "The branch created by Claude Code for this execution"
|
||||||
value: ${{ steps.prepare.outputs.CLAUDE_BRANCH }}
|
value: ${{ steps.claude.outputs.CLAUDE_BRANCH }}
|
||||||
|
|
||||||
runs:
|
runs:
|
||||||
using: "composite"
|
using: "composite"
|
||||||
@@ -137,20 +137,36 @@ runs:
|
|||||||
cd ${GITHUB_ACTION_PATH}
|
cd ${GITHUB_ACTION_PATH}
|
||||||
bun install
|
bun install
|
||||||
|
|
||||||
- name: Prepare action
|
- name: Run Claude
|
||||||
id: prepare
|
id: claude
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
bun run ${GITHUB_ACTION_PATH}/src/entrypoints/prepare.ts
|
# Install base-action dependencies
|
||||||
|
echo "Installing base-action dependencies..."
|
||||||
|
cd ${GITHUB_ACTION_PATH}/base-action
|
||||||
|
bun install
|
||||||
|
echo "Base-action dependencies installed"
|
||||||
|
cd -
|
||||||
|
|
||||||
|
# Install Claude Code globally
|
||||||
|
bun install -g @anthropic-ai/claude-code@1.0.67
|
||||||
|
|
||||||
|
# Setup network restrictions if needed
|
||||||
|
if [[ "${{ inputs.experimental_allowed_domains }}" != "" ]]; then
|
||||||
|
chmod +x ${GITHUB_ACTION_PATH}/scripts/setup-network-restrictions.sh
|
||||||
|
${GITHUB_ACTION_PATH}/scripts/setup-network-restrictions.sh
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Run the unified entrypoint
|
||||||
|
bun run ${GITHUB_ACTION_PATH}/src/entrypoints/run.ts
|
||||||
env:
|
env:
|
||||||
|
# Mode and trigger configuration
|
||||||
MODE: ${{ inputs.mode }}
|
MODE: ${{ inputs.mode }}
|
||||||
TRIGGER_PHRASE: ${{ inputs.trigger_phrase }}
|
TRIGGER_PHRASE: ${{ inputs.trigger_phrase }}
|
||||||
ASSIGNEE_TRIGGER: ${{ inputs.assignee_trigger }}
|
ASSIGNEE_TRIGGER: ${{ inputs.assignee_trigger }}
|
||||||
LABEL_TRIGGER: ${{ inputs.label_trigger }}
|
LABEL_TRIGGER: ${{ inputs.label_trigger }}
|
||||||
BASE_BRANCH: ${{ inputs.base_branch }}
|
BASE_BRANCH: ${{ inputs.base_branch }}
|
||||||
BRANCH_PREFIX: ${{ inputs.branch_prefix }}
|
BRANCH_PREFIX: ${{ inputs.branch_prefix }}
|
||||||
ALLOWED_TOOLS: ${{ inputs.allowed_tools }}
|
|
||||||
DISALLOWED_TOOLS: ${{ inputs.disallowed_tools }}
|
|
||||||
CUSTOM_INSTRUCTIONS: ${{ inputs.custom_instructions }}
|
CUSTOM_INSTRUCTIONS: ${{ inputs.custom_instructions }}
|
||||||
DIRECT_PROMPT: ${{ inputs.direct_prompt }}
|
DIRECT_PROMPT: ${{ inputs.direct_prompt }}
|
||||||
OVERRIDE_PROMPT: ${{ inputs.override_prompt }}
|
OVERRIDE_PROMPT: ${{ inputs.override_prompt }}
|
||||||
@@ -161,65 +177,25 @@ runs:
|
|||||||
DEFAULT_WORKFLOW_TOKEN: ${{ github.token }}
|
DEFAULT_WORKFLOW_TOKEN: ${{ github.token }}
|
||||||
ADDITIONAL_PERMISSIONS: ${{ inputs.additional_permissions }}
|
ADDITIONAL_PERMISSIONS: ${{ inputs.additional_permissions }}
|
||||||
USE_COMMIT_SIGNING: ${{ inputs.use_commit_signing }}
|
USE_COMMIT_SIGNING: ${{ inputs.use_commit_signing }}
|
||||||
|
|
||||||
# Authentication for remote-agent mode
|
|
||||||
ANTHROPIC_API_KEY: ${{ inputs.anthropic_api_key }}
|
|
||||||
CLAUDE_CODE_OAUTH_TOKEN: ${{ inputs.claude_code_oauth_token }}
|
|
||||||
|
|
||||||
- name: Install Base Action Dependencies
|
|
||||||
if: steps.prepare.outputs.contains_trigger == 'true'
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
echo "Installing base-action dependencies..."
|
|
||||||
cd ${GITHUB_ACTION_PATH}/base-action
|
|
||||||
bun install
|
|
||||||
echo "Base-action dependencies installed"
|
|
||||||
cd -
|
|
||||||
# Install Claude Code globally
|
|
||||||
bun install -g @anthropic-ai/claude-code
|
|
||||||
|
|
||||||
- name: Setup Network Restrictions
|
|
||||||
if: steps.prepare.outputs.contains_trigger == 'true' && inputs.experimental_allowed_domains != ''
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
chmod +x ${GITHUB_ACTION_PATH}/scripts/setup-network-restrictions.sh
|
|
||||||
${GITHUB_ACTION_PATH}/scripts/setup-network-restrictions.sh
|
|
||||||
env:
|
|
||||||
EXPERIMENTAL_ALLOWED_DOMAINS: ${{ inputs.experimental_allowed_domains }}
|
EXPERIMENTAL_ALLOWED_DOMAINS: ${{ inputs.experimental_allowed_domains }}
|
||||||
|
|
||||||
- name: Run Claude Code
|
# Claude configuration
|
||||||
id: claude-code
|
ALLOWED_TOOLS: ${{ inputs.allowed_tools }}
|
||||||
if: steps.prepare.outputs.contains_trigger == 'true'
|
DISALLOWED_TOOLS: ${{ inputs.disallowed_tools }}
|
||||||
shell: bash
|
MAX_TURNS: ${{ inputs.max_turns }}
|
||||||
run: |
|
SETTINGS: ${{ inputs.settings }}
|
||||||
# Run the base-action
|
TIMEOUT_MINUTES: ${{ inputs.timeout_minutes }}
|
||||||
bun run ${GITHUB_ACTION_PATH}/base-action/src/index.ts
|
CLAUDE_ENV: ${{ inputs.claude_env }}
|
||||||
env:
|
FALLBACK_MODEL: ${{ inputs.fallback_model }}
|
||||||
# Base-action inputs
|
|
||||||
CLAUDE_CODE_ACTION: "1"
|
|
||||||
INPUT_PROMPT_FILE: ${{ runner.temp }}/claude-prompts/claude-prompt.txt
|
|
||||||
INPUT_ALLOWED_TOOLS: ${{ env.ALLOWED_TOOLS }}
|
|
||||||
INPUT_DISALLOWED_TOOLS: ${{ env.DISALLOWED_TOOLS }}
|
|
||||||
INPUT_MAX_TURNS: ${{ inputs.max_turns }}
|
|
||||||
INPUT_MCP_CONFIG: ${{ steps.prepare.outputs.mcp_config }}
|
|
||||||
INPUT_SETTINGS: ${{ inputs.settings }}
|
|
||||||
INPUT_SYSTEM_PROMPT: ""
|
|
||||||
INPUT_APPEND_SYSTEM_PROMPT: ${{ env.APPEND_SYSTEM_PROMPT }}
|
|
||||||
INPUT_TIMEOUT_MINUTES: ${{ inputs.timeout_minutes }}
|
|
||||||
INPUT_CLAUDE_ENV: ${{ inputs.claude_env }}
|
|
||||||
INPUT_FALLBACK_MODEL: ${{ inputs.fallback_model }}
|
|
||||||
INPUT_EXPERIMENTAL_SLASH_COMMANDS_DIR: ${{ github.action_path }}/slash-commands
|
INPUT_EXPERIMENTAL_SLASH_COMMANDS_DIR: ${{ github.action_path }}/slash-commands
|
||||||
INPUT_STREAM_CONFIG: ${{ steps.prepare.outputs.stream_config }}
|
|
||||||
|
|
||||||
# Model configuration
|
# Model configuration
|
||||||
ANTHROPIC_MODEL: ${{ steps.prepare.outputs.anthropic_model || inputs.model || inputs.anthropic_model }}
|
MODEL: ${{ inputs.model }}
|
||||||
GITHUB_TOKEN: ${{ steps.prepare.outputs.GITHUB_TOKEN }}
|
ANTHROPIC_MODEL: ${{ inputs.model || inputs.anthropic_model }}
|
||||||
NODE_VERSION: ${{ env.NODE_VERSION }}
|
|
||||||
DETAILED_PERMISSION_MESSAGES: "1"
|
|
||||||
|
|
||||||
# Provider configuration
|
# Provider configuration
|
||||||
ANTHROPIC_API_KEY: ${{ inputs.anthropic_api_key }}
|
ANTHROPIC_API_KEY: ${{ inputs.anthropic_api_key }}
|
||||||
CLAUDE_CODE_OAUTH_TOKEN: ${{ steps.prepare.outputs.claude_code_oauth_token || inputs.claude_code_oauth_token }}
|
CLAUDE_CODE_OAUTH_TOKEN: ${{ inputs.claude_code_oauth_token }}
|
||||||
ANTHROPIC_BASE_URL: ${{ env.ANTHROPIC_BASE_URL }}
|
ANTHROPIC_BASE_URL: ${{ env.ANTHROPIC_BASE_URL }}
|
||||||
CLAUDE_CODE_USE_BEDROCK: ${{ inputs.use_bedrock == 'true' && '1' || '' }}
|
CLAUDE_CODE_USE_BEDROCK: ${{ inputs.use_bedrock == 'true' && '1' || '' }}
|
||||||
CLAUDE_CODE_USE_VERTEX: ${{ inputs.use_vertex == 'true' && '1' || '' }}
|
CLAUDE_CODE_USE_VERTEX: ${{ inputs.use_vertex == 'true' && '1' || '' }}
|
||||||
@@ -242,51 +218,36 @@ runs:
|
|||||||
VERTEX_REGION_CLAUDE_3_5_SONNET: ${{ env.VERTEX_REGION_CLAUDE_3_5_SONNET }}
|
VERTEX_REGION_CLAUDE_3_5_SONNET: ${{ env.VERTEX_REGION_CLAUDE_3_5_SONNET }}
|
||||||
VERTEX_REGION_CLAUDE_3_7_SONNET: ${{ env.VERTEX_REGION_CLAUDE_3_7_SONNET }}
|
VERTEX_REGION_CLAUDE_3_7_SONNET: ${{ env.VERTEX_REGION_CLAUDE_3_7_SONNET }}
|
||||||
|
|
||||||
- name: Report Claude completion
|
|
||||||
if: steps.prepare.outputs.contains_trigger == 'true' && always()
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
bun run ${GITHUB_ACTION_PATH}/src/entrypoints/report-claude-complete.ts
|
|
||||||
env:
|
|
||||||
MODE: ${{ inputs.mode }}
|
|
||||||
STREAM_CONFIG: ${{ steps.prepare.outputs.stream_config }}
|
|
||||||
CLAUDE_CONCLUSION: ${{ steps.claude-code.outputs.conclusion }}
|
|
||||||
CLAUDE_START_TIME: ${{ steps.prepare.outputs.claude_start_time }}
|
|
||||||
CLAUDE_BRANCH: ${{ steps.prepare.outputs.CLAUDE_BRANCH }}
|
|
||||||
USE_COMMIT_SIGNING: ${{ inputs.use_commit_signing }}
|
|
||||||
GITHUB_TOKEN: ${{ steps.prepare.outputs.GITHUB_TOKEN }}
|
|
||||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
|
||||||
|
|
||||||
- name: Update comment with job link
|
- name: Update comment with job link
|
||||||
if: steps.prepare.outputs.contains_trigger == 'true' && steps.prepare.outputs.claude_comment_id && always()
|
if: steps.claude.outputs.contains_trigger == 'true' && steps.claude.outputs.claude_comment_id && always()
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
bun run ${GITHUB_ACTION_PATH}/src/entrypoints/update-comment-link.ts
|
bun run ${GITHUB_ACTION_PATH}/src/entrypoints/update-comment-link.ts
|
||||||
env:
|
env:
|
||||||
REPOSITORY: ${{ github.repository }}
|
REPOSITORY: ${{ github.repository }}
|
||||||
PR_NUMBER: ${{ github.event.issue.number || github.event.pull_request.number }}
|
PR_NUMBER: ${{ github.event.issue.number || github.event.pull_request.number }}
|
||||||
CLAUDE_COMMENT_ID: ${{ steps.prepare.outputs.claude_comment_id }}
|
CLAUDE_COMMENT_ID: ${{ steps.claude.outputs.claude_comment_id }}
|
||||||
GITHUB_RUN_ID: ${{ github.run_id }}
|
GITHUB_RUN_ID: ${{ github.run_id }}
|
||||||
GITHUB_TOKEN: ${{ steps.prepare.outputs.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ steps.claude.outputs.GITHUB_TOKEN }}
|
||||||
GITHUB_EVENT_NAME: ${{ github.event_name }}
|
GITHUB_EVENT_NAME: ${{ github.event_name }}
|
||||||
TRIGGER_COMMENT_ID: ${{ github.event.comment.id }}
|
TRIGGER_COMMENT_ID: ${{ github.event.comment.id }}
|
||||||
CLAUDE_BRANCH: ${{ steps.prepare.outputs.CLAUDE_BRANCH }}
|
CLAUDE_BRANCH: ${{ steps.claude.outputs.CLAUDE_BRANCH }}
|
||||||
IS_PR: ${{ github.event.issue.pull_request != null || github.event_name == 'pull_request_review_comment' }}
|
IS_PR: ${{ github.event.issue.pull_request != null || github.event_name == 'pull_request_review_comment' }}
|
||||||
BASE_BRANCH: ${{ steps.prepare.outputs.BASE_BRANCH }}
|
BASE_BRANCH: ${{ steps.claude.outputs.BASE_BRANCH }}
|
||||||
CLAUDE_SUCCESS: ${{ steps.claude-code.outputs.conclusion == 'success' }}
|
CLAUDE_SUCCESS: ${{ steps.claude.outputs.conclusion == 'success' }}
|
||||||
OUTPUT_FILE: ${{ steps.claude-code.outputs.execution_file || '' }}
|
OUTPUT_FILE: ${{ steps.claude.outputs.execution_file || '' }}
|
||||||
TRIGGER_USERNAME: ${{ github.event.comment.user.login || github.event.issue.user.login || github.event.pull_request.user.login || github.event.sender.login || github.triggering_actor || github.actor || '' }}
|
TRIGGER_USERNAME: ${{ github.event.comment.user.login || github.event.issue.user.login || github.event.pull_request.user.login || github.event.sender.login || github.triggering_actor || github.actor || '' }}
|
||||||
PREPARE_SUCCESS: ${{ steps.prepare.outcome == 'success' }}
|
PREPARE_SUCCESS: ${{ steps.claude.outcome == 'success' }}
|
||||||
PREPARE_ERROR: ${{ steps.prepare.outputs.prepare_error || '' }}
|
PREPARE_ERROR: ${{ steps.claude.outputs.prepare_error || '' }}
|
||||||
USE_STICKY_COMMENT: ${{ inputs.use_sticky_comment }}
|
USE_STICKY_COMMENT: ${{ inputs.use_sticky_comment }}
|
||||||
USE_COMMIT_SIGNING: ${{ inputs.use_commit_signing }}
|
USE_COMMIT_SIGNING: ${{ inputs.use_commit_signing }}
|
||||||
|
|
||||||
- name: Display Claude Code Report
|
- name: Display Claude Code Report
|
||||||
if: steps.prepare.outputs.contains_trigger == 'true' && steps.claude-code.outputs.execution_file != ''
|
if: steps.claude.outputs.contains_trigger == 'true' && steps.claude.outputs.execution_file != ''
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
# Try to format the turns, but if it fails, dump the raw JSON
|
# Try to format the turns, but if it fails, dump the raw JSON
|
||||||
if bun run ${{ github.action_path }}/src/entrypoints/format-turns.ts "${{ steps.claude-code.outputs.execution_file }}" >> $GITHUB_STEP_SUMMARY 2>/dev/null; then
|
if bun run ${{ github.action_path }}/src/entrypoints/format-turns.ts "${{ steps.claude.outputs.execution_file }}" >> $GITHUB_STEP_SUMMARY 2>/dev/null; then
|
||||||
echo "Successfully formatted Claude Code report"
|
echo "Successfully formatted Claude Code report"
|
||||||
else
|
else
|
||||||
echo "## Claude Code Report (Raw Output)" >> $GITHUB_STEP_SUMMARY
|
echo "## Claude Code Report (Raw Output)" >> $GITHUB_STEP_SUMMARY
|
||||||
@@ -294,7 +255,7 @@ runs:
|
|||||||
echo "Failed to format output (please report). Here's the raw JSON:" >> $GITHUB_STEP_SUMMARY
|
echo "Failed to format output (please report). Here's the raw JSON:" >> $GITHUB_STEP_SUMMARY
|
||||||
echo "" >> $GITHUB_STEP_SUMMARY
|
echo "" >> $GITHUB_STEP_SUMMARY
|
||||||
echo '```json' >> $GITHUB_STEP_SUMMARY
|
echo '```json' >> $GITHUB_STEP_SUMMARY
|
||||||
cat "${{ steps.claude-code.outputs.execution_file }}" >> $GITHUB_STEP_SUMMARY
|
cat "${{ steps.claude.outputs.execution_file }}" >> $GITHUB_STEP_SUMMARY
|
||||||
echo '```' >> $GITHUB_STEP_SUMMARY
|
echo '```' >> $GITHUB_STEP_SUMMARY
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -305,6 +266,6 @@ runs:
|
|||||||
curl -L \
|
curl -L \
|
||||||
-X DELETE \
|
-X DELETE \
|
||||||
-H "Accept: application/vnd.github+json" \
|
-H "Accept: application/vnd.github+json" \
|
||||||
-H "Authorization: Bearer ${{ steps.prepare.outputs.GITHUB_TOKEN }}" \
|
-H "Authorization: Bearer ${{ steps.claude.outputs.GITHUB_TOKEN }}" \
|
||||||
-H "X-GitHub-Api-Version: 2022-11-28" \
|
-H "X-GitHub-Api-Version: 2022-11-28" \
|
||||||
${GITHUB_API_URL:-https://api.github.com}/installation/token
|
${GITHUB_API_URL:-https://api.github.com}/installation/token
|
||||||
|
|||||||
@@ -102,7 +102,7 @@ runs:
|
|||||||
- name: Setup Node.js
|
- name: Setup Node.js
|
||||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # https://github.com/actions/setup-node/releases/tag/v4.4.0
|
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # https://github.com/actions/setup-node/releases/tag/v4.4.0
|
||||||
with:
|
with:
|
||||||
node-version: ${{ env.NODE_VERSION || '22.x' }}
|
node-version: ${{ env.NODE_VERSION || '18.x' }}
|
||||||
cache: ${{ inputs.use_node_cache == 'true' && 'npm' || '' }}
|
cache: ${{ inputs.use_node_cache == 'true' && 'npm' || '' }}
|
||||||
|
|
||||||
- name: Install Bun
|
- name: Install Bun
|
||||||
@@ -118,9 +118,7 @@ runs:
|
|||||||
|
|
||||||
- name: Install Claude Code
|
- name: Install Claude Code
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: bun install -g @anthropic-ai/claude-code@1.0.69
|
||||||
# Install Claude Code
|
|
||||||
bun install -g @anthropic-ai/claude-code
|
|
||||||
|
|
||||||
- name: Run Claude Code Action
|
- name: Run Claude Code Action
|
||||||
shell: bash
|
shell: bash
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
import * as core from "@actions/core";
|
import * as core from "@actions/core";
|
||||||
import { preparePrompt } from "./prepare-prompt";
|
import { preparePrompt } from "./prepare-prompt";
|
||||||
import { runClaude } from "./run-claude";
|
import { runClaudeCore } from "./run-claude-core";
|
||||||
import { setupClaudeCodeSettings } from "./setup-claude-code-settings";
|
import { setupClaudeCodeSettings } from "./setup-claude-code-settings";
|
||||||
import { validateEnvironmentVariables } from "./validate-env";
|
import { validateEnvironmentVariables } from "./validate-env";
|
||||||
|
|
||||||
@@ -21,7 +21,9 @@ async function run() {
|
|||||||
promptFile: process.env.INPUT_PROMPT_FILE || "",
|
promptFile: process.env.INPUT_PROMPT_FILE || "",
|
||||||
});
|
});
|
||||||
|
|
||||||
await runClaude(promptConfig.path, {
|
await runClaudeCore({
|
||||||
|
promptFile: promptConfig.path,
|
||||||
|
settings: process.env.INPUT_SETTINGS,
|
||||||
allowedTools: process.env.INPUT_ALLOWED_TOOLS,
|
allowedTools: process.env.INPUT_ALLOWED_TOOLS,
|
||||||
disallowedTools: process.env.INPUT_DISALLOWED_TOOLS,
|
disallowedTools: process.env.INPUT_DISALLOWED_TOOLS,
|
||||||
maxTurns: process.env.INPUT_MAX_TURNS,
|
maxTurns: process.env.INPUT_MAX_TURNS,
|
||||||
@@ -30,7 +32,8 @@ async function run() {
|
|||||||
appendSystemPrompt: process.env.INPUT_APPEND_SYSTEM_PROMPT,
|
appendSystemPrompt: process.env.INPUT_APPEND_SYSTEM_PROMPT,
|
||||||
claudeEnv: process.env.INPUT_CLAUDE_ENV,
|
claudeEnv: process.env.INPUT_CLAUDE_ENV,
|
||||||
fallbackModel: process.env.INPUT_FALLBACK_MODEL,
|
fallbackModel: process.env.INPUT_FALLBACK_MODEL,
|
||||||
streamConfig: process.env.INPUT_STREAM_CONFIG,
|
model: process.env.ANTHROPIC_MODEL,
|
||||||
|
timeoutMinutes: process.env.INPUT_TIMEOUT_MINUTES,
|
||||||
});
|
});
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
core.setFailed(`Action failed with error: ${error}`);
|
core.setFailed(`Action failed with error: ${error}`);
|
||||||
|
|||||||
366
base-action/src/run-claude-core.ts
Normal file
366
base-action/src/run-claude-core.ts
Normal file
@@ -0,0 +1,366 @@
|
|||||||
|
#!/usr/bin/env bun
|
||||||
|
|
||||||
|
import * as core from "@actions/core";
|
||||||
|
import { exec } from "child_process";
|
||||||
|
import { promisify } from "util";
|
||||||
|
import { unlink, writeFile, stat } from "fs/promises";
|
||||||
|
import { createWriteStream } from "fs";
|
||||||
|
import { spawn } from "child_process";
|
||||||
|
|
||||||
|
const execAsync = promisify(exec);
|
||||||
|
|
||||||
|
const PIPE_PATH = `${process.env.RUNNER_TEMP}/claude_prompt_pipe`;
|
||||||
|
const EXECUTION_FILE = `${process.env.RUNNER_TEMP}/claude-execution-output.json`;
|
||||||
|
const BASE_ARGS = ["-p", "--verbose", "--output-format", "stream-json"];
|
||||||
|
|
||||||
|
export type ClaudeOptions = {
|
||||||
|
allowedTools?: string;
|
||||||
|
disallowedTools?: string;
|
||||||
|
maxTurns?: string;
|
||||||
|
mcpConfig?: string;
|
||||||
|
systemPrompt?: string;
|
||||||
|
appendSystemPrompt?: string;
|
||||||
|
claudeEnv?: string;
|
||||||
|
fallbackModel?: string;
|
||||||
|
model?: string;
|
||||||
|
timeoutMinutes?: string;
|
||||||
|
};
|
||||||
|
|
||||||
|
export type RunClaudeConfig = {
|
||||||
|
promptFile: string;
|
||||||
|
settings?: string;
|
||||||
|
allowedTools?: string;
|
||||||
|
disallowedTools?: string;
|
||||||
|
maxTurns?: string;
|
||||||
|
mcpConfig?: string;
|
||||||
|
systemPrompt?: string;
|
||||||
|
appendSystemPrompt?: string;
|
||||||
|
claudeEnv?: string;
|
||||||
|
fallbackModel?: string;
|
||||||
|
model?: string;
|
||||||
|
timeoutMinutes?: string;
|
||||||
|
env?: Record<string, string>;
|
||||||
|
};
|
||||||
|
|
||||||
|
function parseCustomEnvVars(claudeEnv?: string): Record<string, string> {
|
||||||
|
if (!claudeEnv || claudeEnv.trim() === "") {
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
|
const customEnv: Record<string, string> = {};
|
||||||
|
|
||||||
|
// Split by lines and parse each line as KEY: VALUE
|
||||||
|
const lines = claudeEnv.split("\n");
|
||||||
|
|
||||||
|
for (const line of lines) {
|
||||||
|
const trimmedLine = line.trim();
|
||||||
|
if (trimmedLine === "" || trimmedLine.startsWith("#")) {
|
||||||
|
continue; // Skip empty lines and comments
|
||||||
|
}
|
||||||
|
|
||||||
|
const colonIndex = trimmedLine.indexOf(":");
|
||||||
|
if (colonIndex === -1) {
|
||||||
|
continue; // Skip lines without colons
|
||||||
|
}
|
||||||
|
|
||||||
|
const key = trimmedLine.substring(0, colonIndex).trim();
|
||||||
|
const value = trimmedLine.substring(colonIndex + 1).trim();
|
||||||
|
|
||||||
|
if (key) {
|
||||||
|
customEnv[key] = value;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return customEnv;
|
||||||
|
}
|
||||||
|
|
||||||
|
function prepareClaudeArgs(config: RunClaudeConfig): string[] {
|
||||||
|
const claudeArgs = [...BASE_ARGS];
|
||||||
|
|
||||||
|
if (config.allowedTools) {
|
||||||
|
claudeArgs.push("--allowedTools", config.allowedTools);
|
||||||
|
}
|
||||||
|
if (config.disallowedTools) {
|
||||||
|
claudeArgs.push("--disallowedTools", config.disallowedTools);
|
||||||
|
}
|
||||||
|
if (config.maxTurns) {
|
||||||
|
const maxTurnsNum = parseInt(config.maxTurns, 10);
|
||||||
|
if (isNaN(maxTurnsNum) || maxTurnsNum <= 0) {
|
||||||
|
throw new Error(
|
||||||
|
`maxTurns must be a positive number, got: ${config.maxTurns}`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
claudeArgs.push("--max-turns", config.maxTurns);
|
||||||
|
}
|
||||||
|
if (config.mcpConfig) {
|
||||||
|
claudeArgs.push("--mcp-config", config.mcpConfig);
|
||||||
|
}
|
||||||
|
if (config.systemPrompt) {
|
||||||
|
claudeArgs.push("--system-prompt", config.systemPrompt);
|
||||||
|
}
|
||||||
|
if (config.appendSystemPrompt) {
|
||||||
|
claudeArgs.push("--append-system-prompt", config.appendSystemPrompt);
|
||||||
|
}
|
||||||
|
if (config.fallbackModel) {
|
||||||
|
claudeArgs.push("--fallback-model", config.fallbackModel);
|
||||||
|
}
|
||||||
|
if (config.model) {
|
||||||
|
claudeArgs.push("--model", config.model);
|
||||||
|
}
|
||||||
|
if (config.timeoutMinutes) {
|
||||||
|
const timeoutMinutesNum = parseInt(config.timeoutMinutes, 10);
|
||||||
|
if (isNaN(timeoutMinutesNum) || timeoutMinutesNum <= 0) {
|
||||||
|
throw new Error(
|
||||||
|
`timeoutMinutes must be a positive number, got: ${config.timeoutMinutes}`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return claudeArgs;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function prepareRunConfig(
|
||||||
|
promptPath: string,
|
||||||
|
options: ClaudeOptions,
|
||||||
|
): { claudeArgs: string[]; promptPath: string; env: Record<string, string> } {
|
||||||
|
const config: RunClaudeConfig = {
|
||||||
|
promptFile: promptPath,
|
||||||
|
...options,
|
||||||
|
};
|
||||||
|
|
||||||
|
const claudeArgs = prepareClaudeArgs(config);
|
||||||
|
const customEnv = parseCustomEnvVars(config.claudeEnv);
|
||||||
|
const mergedEnv = {
|
||||||
|
...customEnv,
|
||||||
|
...(config.env || {}),
|
||||||
|
};
|
||||||
|
|
||||||
|
return {
|
||||||
|
claudeArgs,
|
||||||
|
promptPath,
|
||||||
|
env: mergedEnv,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
export async function runClaudeCore(config: RunClaudeConfig) {
|
||||||
|
const claudeArgs = prepareClaudeArgs(config);
|
||||||
|
|
||||||
|
// Parse custom environment variables from claudeEnv
|
||||||
|
const customEnv = parseCustomEnvVars(config.claudeEnv);
|
||||||
|
|
||||||
|
// Merge with additional env vars passed in config
|
||||||
|
const mergedEnv = {
|
||||||
|
...customEnv,
|
||||||
|
...(config.env || {}),
|
||||||
|
};
|
||||||
|
|
||||||
|
// Create a named pipe
|
||||||
|
try {
|
||||||
|
await unlink(PIPE_PATH);
|
||||||
|
} catch (e) {
|
||||||
|
// Ignore if file doesn't exist
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create the named pipe
|
||||||
|
await execAsync(`mkfifo "${PIPE_PATH}"`);
|
||||||
|
|
||||||
|
// Log prompt file size
|
||||||
|
let promptSize = "unknown";
|
||||||
|
try {
|
||||||
|
const stats = await stat(config.promptFile);
|
||||||
|
promptSize = stats.size.toString();
|
||||||
|
} catch (e) {
|
||||||
|
// Ignore error
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log(`Prompt file size: ${promptSize} bytes`);
|
||||||
|
|
||||||
|
// Log custom environment variables if any
|
||||||
|
const totalEnvVars = Object.keys(mergedEnv).length;
|
||||||
|
if (totalEnvVars > 0) {
|
||||||
|
const envKeys = Object.keys(mergedEnv).join(", ");
|
||||||
|
console.log(`Custom environment variables (${totalEnvVars}): ${envKeys}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Output to console
|
||||||
|
console.log(`Running Claude with prompt from file: ${config.promptFile}`);
|
||||||
|
|
||||||
|
// Start sending prompt to pipe in background
|
||||||
|
const catProcess = spawn("cat", [config.promptFile], {
|
||||||
|
stdio: ["ignore", "pipe", "inherit"],
|
||||||
|
});
|
||||||
|
const pipeStream = createWriteStream(PIPE_PATH);
|
||||||
|
catProcess.stdout.pipe(pipeStream);
|
||||||
|
|
||||||
|
catProcess.on("error", (error) => {
|
||||||
|
console.error("Error reading prompt file:", error);
|
||||||
|
pipeStream.destroy();
|
||||||
|
});
|
||||||
|
|
||||||
|
const claudeProcess = spawn("claude", claudeArgs, {
|
||||||
|
stdio: ["pipe", "pipe", "inherit"],
|
||||||
|
env: {
|
||||||
|
...process.env,
|
||||||
|
...mergedEnv,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
// Handle Claude process errors
|
||||||
|
claudeProcess.on("error", (error) => {
|
||||||
|
console.error("Error spawning Claude process:", error);
|
||||||
|
pipeStream.destroy();
|
||||||
|
});
|
||||||
|
|
||||||
|
// Capture output for parsing execution metrics
|
||||||
|
let output = "";
|
||||||
|
claudeProcess.stdout.on("data", (data) => {
|
||||||
|
const text = data.toString();
|
||||||
|
|
||||||
|
// Try to parse as JSON and pretty print if it's on a single line
|
||||||
|
const lines = text.split("\n");
|
||||||
|
lines.forEach((line: string, index: number) => {
|
||||||
|
if (line.trim() === "") return;
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Check if this line is a JSON object
|
||||||
|
const parsed = JSON.parse(line);
|
||||||
|
const prettyJson = JSON.stringify(parsed, null, 2);
|
||||||
|
process.stdout.write(prettyJson);
|
||||||
|
if (index < lines.length - 1 || text.endsWith("\n")) {
|
||||||
|
process.stdout.write("\n");
|
||||||
|
}
|
||||||
|
} catch (e) {
|
||||||
|
// Not a JSON object, print as is
|
||||||
|
process.stdout.write(line);
|
||||||
|
if (index < lines.length - 1 || text.endsWith("\n")) {
|
||||||
|
process.stdout.write("\n");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
output += text;
|
||||||
|
});
|
||||||
|
|
||||||
|
// Handle stdout errors
|
||||||
|
claudeProcess.stdout.on("error", (error) => {
|
||||||
|
console.error("Error reading Claude stdout:", error);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Pipe from named pipe to Claude
|
||||||
|
const pipeProcess = spawn("cat", [PIPE_PATH]);
|
||||||
|
pipeProcess.stdout.pipe(claudeProcess.stdin);
|
||||||
|
|
||||||
|
// Handle pipe process errors
|
||||||
|
pipeProcess.on("error", (error) => {
|
||||||
|
console.error("Error reading from named pipe:", error);
|
||||||
|
claudeProcess.kill("SIGTERM");
|
||||||
|
});
|
||||||
|
|
||||||
|
// Wait for Claude to finish with timeout
|
||||||
|
let timeoutMs = 10 * 60 * 1000; // Default 10 minutes
|
||||||
|
if (config.timeoutMinutes) {
|
||||||
|
timeoutMs = parseInt(config.timeoutMinutes, 10) * 60 * 1000;
|
||||||
|
} else if (process.env.INPUT_TIMEOUT_MINUTES) {
|
||||||
|
const envTimeout = parseInt(process.env.INPUT_TIMEOUT_MINUTES, 10);
|
||||||
|
if (isNaN(envTimeout) || envTimeout <= 0) {
|
||||||
|
throw new Error(
|
||||||
|
`INPUT_TIMEOUT_MINUTES must be a positive number, got: ${process.env.INPUT_TIMEOUT_MINUTES}`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
timeoutMs = envTimeout * 60 * 1000;
|
||||||
|
}
|
||||||
|
const exitCode = await new Promise<number>((resolve) => {
|
||||||
|
let resolved = false;
|
||||||
|
|
||||||
|
// Set a timeout for the process
|
||||||
|
const timeoutId = setTimeout(() => {
|
||||||
|
if (!resolved) {
|
||||||
|
console.error(
|
||||||
|
`Claude process timed out after ${timeoutMs / 1000} seconds`,
|
||||||
|
);
|
||||||
|
claudeProcess.kill("SIGTERM");
|
||||||
|
// Give it 5 seconds to terminate gracefully, then force kill
|
||||||
|
setTimeout(() => {
|
||||||
|
try {
|
||||||
|
claudeProcess.kill("SIGKILL");
|
||||||
|
} catch (e) {
|
||||||
|
// Process may already be dead
|
||||||
|
}
|
||||||
|
}, 5000);
|
||||||
|
resolved = true;
|
||||||
|
resolve(124); // Standard timeout exit code
|
||||||
|
}
|
||||||
|
}, timeoutMs);
|
||||||
|
|
||||||
|
claudeProcess.on("close", (code) => {
|
||||||
|
if (!resolved) {
|
||||||
|
clearTimeout(timeoutId);
|
||||||
|
resolved = true;
|
||||||
|
resolve(code || 0);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
claudeProcess.on("error", (error) => {
|
||||||
|
if (!resolved) {
|
||||||
|
console.error("Claude process error:", error);
|
||||||
|
clearTimeout(timeoutId);
|
||||||
|
resolved = true;
|
||||||
|
resolve(1);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
// Clean up processes
|
||||||
|
try {
|
||||||
|
catProcess.kill("SIGTERM");
|
||||||
|
} catch (e) {
|
||||||
|
// Process may already be dead
|
||||||
|
}
|
||||||
|
try {
|
||||||
|
pipeProcess.kill("SIGTERM");
|
||||||
|
} catch (e) {
|
||||||
|
// Process may already be dead
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clean up pipe file
|
||||||
|
try {
|
||||||
|
await unlink(PIPE_PATH);
|
||||||
|
} catch (e) {
|
||||||
|
// Ignore errors during cleanup
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set conclusion based on exit code
|
||||||
|
if (exitCode === 0) {
|
||||||
|
// Try to process the output and save execution metrics
|
||||||
|
try {
|
||||||
|
await writeFile("output.txt", output);
|
||||||
|
|
||||||
|
// Process output.txt into JSON and save to execution file
|
||||||
|
const { stdout: jsonOutput } = await execAsync("jq -s '.' output.txt");
|
||||||
|
await writeFile(EXECUTION_FILE, jsonOutput);
|
||||||
|
|
||||||
|
console.log(`Log saved to ${EXECUTION_FILE}`);
|
||||||
|
} catch (e) {
|
||||||
|
core.warning(`Failed to process output for execution metrics: ${e}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
core.setOutput("conclusion", "success");
|
||||||
|
core.setOutput("execution_file", EXECUTION_FILE);
|
||||||
|
} else {
|
||||||
|
core.setOutput("conclusion", "failure");
|
||||||
|
|
||||||
|
// Still try to save execution file if we have output
|
||||||
|
if (output) {
|
||||||
|
try {
|
||||||
|
await writeFile("output.txt", output);
|
||||||
|
const { stdout: jsonOutput } = await execAsync("jq -s '.' output.txt");
|
||||||
|
await writeFile(EXECUTION_FILE, jsonOutput);
|
||||||
|
core.setOutput("execution_file", EXECUTION_FILE);
|
||||||
|
} catch (e) {
|
||||||
|
// Ignore errors when processing output during failure
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
process.exit(exitCode);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,452 +1,44 @@
|
|||||||
|
#!/usr/bin/env bun
|
||||||
|
|
||||||
import * as core from "@actions/core";
|
import * as core from "@actions/core";
|
||||||
import { exec } from "child_process";
|
import { preparePrompt } from "./prepare-prompt";
|
||||||
import { promisify } from "util";
|
import { runClaudeCore } from "./run-claude-core";
|
||||||
import { unlink, writeFile, stat } from "fs/promises";
|
export { prepareRunConfig, type ClaudeOptions } from "./run-claude-core";
|
||||||
import { createWriteStream } from "fs";
|
import { setupClaudeCodeSettings } from "./setup-claude-code-settings";
|
||||||
import { spawn } from "child_process";
|
import { validateEnvironmentVariables } from "./validate-env";
|
||||||
import { StreamHandler } from "./stream-handler";
|
|
||||||
|
|
||||||
const execAsync = promisify(exec);
|
async function run() {
|
||||||
|
|
||||||
const PIPE_PATH = `${process.env.RUNNER_TEMP}/claude_prompt_pipe`;
|
|
||||||
const EXECUTION_FILE = `${process.env.RUNNER_TEMP}/claude-execution-output.json`;
|
|
||||||
const BASE_ARGS = ["-p", "--verbose", "--output-format", "stream-json"];
|
|
||||||
|
|
||||||
export type ClaudeOptions = {
|
|
||||||
allowedTools?: string;
|
|
||||||
disallowedTools?: string;
|
|
||||||
maxTurns?: string;
|
|
||||||
mcpConfig?: string;
|
|
||||||
systemPrompt?: string;
|
|
||||||
appendSystemPrompt?: string;
|
|
||||||
claudeEnv?: string;
|
|
||||||
fallbackModel?: string;
|
|
||||||
timeoutMinutes?: string;
|
|
||||||
streamConfig?: string;
|
|
||||||
};
|
|
||||||
|
|
||||||
export type StreamConfig = {
|
|
||||||
progress_endpoint?: string;
|
|
||||||
headers?: Record<string, string>;
|
|
||||||
resume_endpoint?: string;
|
|
||||||
session_id?: string;
|
|
||||||
};
|
|
||||||
|
|
||||||
type PreparedConfig = {
|
|
||||||
claudeArgs: string[];
|
|
||||||
promptPath: string;
|
|
||||||
env: Record<string, string>;
|
|
||||||
};
|
|
||||||
|
|
||||||
function parseCustomEnvVars(claudeEnv?: string): Record<string, string> {
|
|
||||||
if (!claudeEnv || claudeEnv.trim() === "") {
|
|
||||||
return {};
|
|
||||||
}
|
|
||||||
|
|
||||||
const customEnv: Record<string, string> = {};
|
|
||||||
|
|
||||||
// Split by lines and parse each line as KEY: VALUE
|
|
||||||
const lines = claudeEnv.split("\n");
|
|
||||||
|
|
||||||
for (const line of lines) {
|
|
||||||
const trimmedLine = line.trim();
|
|
||||||
if (trimmedLine === "" || trimmedLine.startsWith("#")) {
|
|
||||||
continue; // Skip empty lines and comments
|
|
||||||
}
|
|
||||||
|
|
||||||
const colonIndex = trimmedLine.indexOf(":");
|
|
||||||
if (colonIndex === -1) {
|
|
||||||
continue; // Skip lines without colons
|
|
||||||
}
|
|
||||||
|
|
||||||
const key = trimmedLine.substring(0, colonIndex).trim();
|
|
||||||
const value = trimmedLine.substring(colonIndex + 1).trim();
|
|
||||||
|
|
||||||
if (key) {
|
|
||||||
customEnv[key] = value;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return customEnv;
|
|
||||||
}
|
|
||||||
|
|
||||||
export function prepareRunConfig(
|
|
||||||
promptPath: string,
|
|
||||||
options: ClaudeOptions,
|
|
||||||
): PreparedConfig {
|
|
||||||
const claudeArgs = [...BASE_ARGS];
|
|
||||||
|
|
||||||
if (options.allowedTools) {
|
|
||||||
claudeArgs.push("--allowedTools", options.allowedTools);
|
|
||||||
}
|
|
||||||
if (options.disallowedTools) {
|
|
||||||
claudeArgs.push("--disallowedTools", options.disallowedTools);
|
|
||||||
}
|
|
||||||
if (options.maxTurns) {
|
|
||||||
const maxTurnsNum = parseInt(options.maxTurns, 10);
|
|
||||||
if (isNaN(maxTurnsNum) || maxTurnsNum <= 0) {
|
|
||||||
throw new Error(
|
|
||||||
`maxTurns must be a positive number, got: ${options.maxTurns}`,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
claudeArgs.push("--max-turns", options.maxTurns);
|
|
||||||
}
|
|
||||||
if (options.mcpConfig) {
|
|
||||||
claudeArgs.push("--mcp-config", options.mcpConfig);
|
|
||||||
}
|
|
||||||
if (options.systemPrompt) {
|
|
||||||
claudeArgs.push("--system-prompt", options.systemPrompt);
|
|
||||||
}
|
|
||||||
if (options.appendSystemPrompt) {
|
|
||||||
claudeArgs.push("--append-system-prompt", options.appendSystemPrompt);
|
|
||||||
}
|
|
||||||
if (options.fallbackModel) {
|
|
||||||
claudeArgs.push("--fallback-model", options.fallbackModel);
|
|
||||||
}
|
|
||||||
if (options.timeoutMinutes) {
|
|
||||||
const timeoutMinutesNum = parseInt(options.timeoutMinutes, 10);
|
|
||||||
if (isNaN(timeoutMinutesNum) || timeoutMinutesNum <= 0) {
|
|
||||||
throw new Error(
|
|
||||||
`timeoutMinutes must be a positive number, got: ${options.timeoutMinutes}`,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Parse stream config for session_id and resume_endpoint
|
|
||||||
if (options.streamConfig) {
|
|
||||||
try {
|
|
||||||
const streamConfig: StreamConfig = JSON.parse(options.streamConfig);
|
|
||||||
// Add --session-id if session_id is provided
|
|
||||||
if (streamConfig.session_id) {
|
|
||||||
claudeArgs.push("--session-id", streamConfig.session_id);
|
|
||||||
}
|
|
||||||
// Only add --teleport if we have both session_id AND resume_endpoint
|
|
||||||
if (streamConfig.session_id && streamConfig.resume_endpoint) {
|
|
||||||
claudeArgs.push("--teleport", streamConfig.session_id);
|
|
||||||
}
|
|
||||||
} catch (e) {
|
|
||||||
console.error("Failed to parse stream_config JSON:", e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse custom environment variables
|
|
||||||
const customEnv = parseCustomEnvVars(options.claudeEnv);
|
|
||||||
|
|
||||||
return {
|
|
||||||
claudeArgs,
|
|
||||||
promptPath,
|
|
||||||
env: customEnv,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
export async function runClaude(promptPath: string, options: ClaudeOptions) {
|
|
||||||
const config = prepareRunConfig(promptPath, options);
|
|
||||||
|
|
||||||
// Set up streaming if endpoint is provided in stream config
|
|
||||||
let streamHandler: StreamHandler | null = null;
|
|
||||||
let streamConfig: StreamConfig | null = null;
|
|
||||||
if (options.streamConfig) {
|
|
||||||
try {
|
|
||||||
streamConfig = JSON.parse(options.streamConfig);
|
|
||||||
if (streamConfig?.progress_endpoint) {
|
|
||||||
const customHeaders = streamConfig.headers || {};
|
|
||||||
console.log("parsed headers", customHeaders);
|
|
||||||
Object.keys(customHeaders).forEach((key) => {
|
|
||||||
console.log(`Custom header: ${key} = ${customHeaders[key]}`);
|
|
||||||
});
|
|
||||||
streamHandler = new StreamHandler(
|
|
||||||
streamConfig.progress_endpoint,
|
|
||||||
customHeaders,
|
|
||||||
);
|
|
||||||
console.log(`Streaming output to: ${streamConfig.progress_endpoint}`);
|
|
||||||
if (Object.keys(customHeaders).length > 0) {
|
|
||||||
console.log(
|
|
||||||
`Custom streaming headers: ${Object.keys(customHeaders).join(", ")}`,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} catch (e) {
|
|
||||||
console.error("Failed to parse stream_config JSON:", e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a named pipe
|
|
||||||
try {
|
try {
|
||||||
await unlink(PIPE_PATH);
|
validateEnvironmentVariables();
|
||||||
} catch (e) {
|
|
||||||
// Ignore if file doesn't exist
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create the named pipe
|
await setupClaudeCodeSettings(process.env.INPUT_SETTINGS);
|
||||||
await execAsync(`mkfifo "${PIPE_PATH}"`);
|
|
||||||
|
|
||||||
// Log prompt file size
|
const promptConfig = await preparePrompt({
|
||||||
let promptSize = "unknown";
|
prompt: process.env.INPUT_PROMPT || "",
|
||||||
try {
|
promptFile: process.env.INPUT_PROMPT_FILE || "",
|
||||||
const stats = await stat(config.promptPath);
|
|
||||||
promptSize = stats.size.toString();
|
|
||||||
} catch (e) {
|
|
||||||
// Ignore error
|
|
||||||
}
|
|
||||||
|
|
||||||
console.log(`Prompt file size: ${promptSize} bytes`);
|
|
||||||
|
|
||||||
// Log custom environment variables if any
|
|
||||||
if (Object.keys(config.env).length > 0) {
|
|
||||||
const envKeys = Object.keys(config.env).join(", ");
|
|
||||||
console.log(`Custom environment variables: ${envKeys}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Output to console
|
|
||||||
console.log(`Running Claude with prompt from file: ${config.promptPath}`);
|
|
||||||
|
|
||||||
// Start sending prompt to pipe in background
|
|
||||||
const catProcess = spawn("cat", [config.promptPath], {
|
|
||||||
stdio: ["ignore", "pipe", "inherit"],
|
|
||||||
});
|
|
||||||
const pipeStream = createWriteStream(PIPE_PATH);
|
|
||||||
catProcess.stdout.pipe(pipeStream);
|
|
||||||
|
|
||||||
catProcess.on("error", (error) => {
|
|
||||||
console.error("Error reading prompt file:", error);
|
|
||||||
pipeStream.destroy();
|
|
||||||
});
|
|
||||||
|
|
||||||
// Prepare environment variables
|
|
||||||
const processEnv = {
|
|
||||||
...process.env,
|
|
||||||
...config.env,
|
|
||||||
};
|
|
||||||
|
|
||||||
// If both session_id and resume_endpoint are provided, set environment variables
|
|
||||||
if (streamConfig?.session_id && streamConfig?.resume_endpoint) {
|
|
||||||
processEnv.TELEPORT_RESUME_URL = streamConfig.resume_endpoint;
|
|
||||||
console.log(
|
|
||||||
`Setting TELEPORT_RESUME_URL to: ${streamConfig.resume_endpoint}`,
|
|
||||||
);
|
|
||||||
|
|
||||||
if (streamConfig.headers && Object.keys(streamConfig.headers).length > 0) {
|
|
||||||
processEnv.TELEPORT_HEADERS = JSON.stringify(streamConfig.headers);
|
|
||||||
console.log(`Setting TELEPORT_HEADERS for resume endpoint`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Log the full Claude command being executed
|
|
||||||
console.log(`Running Claude with args: ${config.claudeArgs.join(" ")}`);
|
|
||||||
|
|
||||||
const claudeProcess = spawn("claude", config.claudeArgs, {
|
|
||||||
stdio: ["pipe", "pipe", "inherit"],
|
|
||||||
env: processEnv,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Handle Claude process errors
|
|
||||||
claudeProcess.on("error", (error) => {
|
|
||||||
console.error("Error spawning Claude process:", error);
|
|
||||||
pipeStream.destroy();
|
|
||||||
});
|
|
||||||
|
|
||||||
// Capture output for parsing execution metrics
|
|
||||||
let output = "";
|
|
||||||
let lineBuffer = ""; // Buffer for incomplete lines
|
|
||||||
|
|
||||||
claudeProcess.stdout.on("data", async (data) => {
|
|
||||||
const text = data.toString();
|
|
||||||
output += text;
|
|
||||||
|
|
||||||
// Add new data to line buffer
|
|
||||||
lineBuffer += text;
|
|
||||||
|
|
||||||
// Split into lines - the last element might be incomplete
|
|
||||||
const lines = lineBuffer.split("\n");
|
|
||||||
|
|
||||||
// The last element is either empty (if text ended with \n) or incomplete
|
|
||||||
lineBuffer = lines.pop() || "";
|
|
||||||
|
|
||||||
// Process complete lines
|
|
||||||
for (let index = 0; index < lines.length; index++) {
|
|
||||||
const line = lines[index];
|
|
||||||
if (!line || line.trim() === "") continue;
|
|
||||||
|
|
||||||
// Try to parse as JSON and pretty print if it's on a single line
|
|
||||||
try {
|
|
||||||
// Check if this line is a JSON object
|
|
||||||
const parsed = JSON.parse(line);
|
|
||||||
const prettyJson = JSON.stringify(parsed, null, 2);
|
|
||||||
process.stdout.write(prettyJson);
|
|
||||||
process.stdout.write("\n");
|
|
||||||
|
|
||||||
// Send valid JSON to stream handler if available
|
|
||||||
if (streamHandler) {
|
|
||||||
try {
|
|
||||||
// Send the original line (which is valid JSON) with newline for proper splitting
|
|
||||||
const dataToSend = line + "\n";
|
|
||||||
await streamHandler.addOutput(dataToSend);
|
|
||||||
} catch (error) {
|
|
||||||
core.warning(`Failed to stream output: ${error}`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} catch (e) {
|
|
||||||
// Not a JSON object, print as is
|
|
||||||
process.stdout.write(line);
|
|
||||||
process.stdout.write("\n");
|
|
||||||
// Don't send non-JSON lines to stream handler
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
// Handle stdout errors
|
|
||||||
claudeProcess.stdout.on("error", (error) => {
|
|
||||||
console.error("Error reading Claude stdout:", error);
|
|
||||||
});
|
|
||||||
|
|
||||||
// Pipe from named pipe to Claude
|
|
||||||
const pipeProcess = spawn("cat", [PIPE_PATH]);
|
|
||||||
pipeProcess.stdout.pipe(claudeProcess.stdin);
|
|
||||||
|
|
||||||
// Handle pipe process errors
|
|
||||||
pipeProcess.on("error", (error) => {
|
|
||||||
console.error("Error reading from named pipe:", error);
|
|
||||||
claudeProcess.kill("SIGTERM");
|
|
||||||
});
|
|
||||||
|
|
||||||
// Wait for Claude to finish with timeout
|
|
||||||
let timeoutMs = 10 * 60 * 1000; // Default 10 minutes
|
|
||||||
if (options.timeoutMinutes) {
|
|
||||||
timeoutMs = parseInt(options.timeoutMinutes, 10) * 60 * 1000;
|
|
||||||
} else if (process.env.INPUT_TIMEOUT_MINUTES) {
|
|
||||||
const envTimeout = parseInt(process.env.INPUT_TIMEOUT_MINUTES, 10);
|
|
||||||
if (isNaN(envTimeout) || envTimeout <= 0) {
|
|
||||||
throw new Error(
|
|
||||||
`INPUT_TIMEOUT_MINUTES must be a positive number, got: ${process.env.INPUT_TIMEOUT_MINUTES}`,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
timeoutMs = envTimeout * 60 * 1000;
|
|
||||||
}
|
|
||||||
const exitCode = await new Promise<number>((resolve) => {
|
|
||||||
let resolved = false;
|
|
||||||
|
|
||||||
// Set a timeout for the process
|
|
||||||
const timeoutId = setTimeout(() => {
|
|
||||||
if (!resolved) {
|
|
||||||
console.error(
|
|
||||||
`Claude process timed out after ${timeoutMs / 1000} seconds`,
|
|
||||||
);
|
|
||||||
claudeProcess.kill("SIGTERM");
|
|
||||||
// Give it 5 seconds to terminate gracefully, then force kill
|
|
||||||
setTimeout(() => {
|
|
||||||
try {
|
|
||||||
claudeProcess.kill("SIGKILL");
|
|
||||||
} catch (e) {
|
|
||||||
// Process may already be dead
|
|
||||||
}
|
|
||||||
}, 5000);
|
|
||||||
resolved = true;
|
|
||||||
resolve(124); // Standard timeout exit code
|
|
||||||
}
|
|
||||||
}, timeoutMs);
|
|
||||||
|
|
||||||
claudeProcess.on("close", async (code) => {
|
|
||||||
if (!resolved) {
|
|
||||||
// Process any remaining data in the line buffer
|
|
||||||
if (lineBuffer.trim()) {
|
|
||||||
// Try to parse and print the remaining line
|
|
||||||
try {
|
|
||||||
const parsed = JSON.parse(lineBuffer);
|
|
||||||
const prettyJson = JSON.stringify(parsed, null, 2);
|
|
||||||
process.stdout.write(prettyJson);
|
|
||||||
process.stdout.write("\n");
|
|
||||||
|
|
||||||
// Send valid JSON to stream handler if available
|
|
||||||
if (streamHandler) {
|
|
||||||
try {
|
|
||||||
const dataToSend = lineBuffer + "\n";
|
|
||||||
await streamHandler.addOutput(dataToSend);
|
|
||||||
} catch (error) {
|
|
||||||
core.warning(`Failed to stream final output: ${error}`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} catch (e) {
|
|
||||||
process.stdout.write(lineBuffer);
|
|
||||||
process.stdout.write("\n");
|
|
||||||
// Don't send non-JSON lines to stream handler
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
clearTimeout(timeoutId);
|
|
||||||
resolved = true;
|
|
||||||
resolve(code || 0);
|
|
||||||
}
|
|
||||||
});
|
});
|
||||||
|
|
||||||
claudeProcess.on("error", (error) => {
|
await runClaudeCore({
|
||||||
if (!resolved) {
|
promptFile: promptConfig.path,
|
||||||
console.error("Claude process error:", error);
|
settings: process.env.INPUT_SETTINGS,
|
||||||
clearTimeout(timeoutId);
|
allowedTools: process.env.INPUT_ALLOWED_TOOLS,
|
||||||
resolved = true;
|
disallowedTools: process.env.INPUT_DISALLOWED_TOOLS,
|
||||||
resolve(1);
|
maxTurns: process.env.INPUT_MAX_TURNS,
|
||||||
}
|
mcpConfig: process.env.INPUT_MCP_CONFIG,
|
||||||
|
systemPrompt: process.env.INPUT_SYSTEM_PROMPT,
|
||||||
|
appendSystemPrompt: process.env.INPUT_APPEND_SYSTEM_PROMPT,
|
||||||
|
claudeEnv: process.env.INPUT_CLAUDE_ENV,
|
||||||
|
fallbackModel: process.env.INPUT_FALLBACK_MODEL,
|
||||||
|
model: process.env.ANTHROPIC_MODEL,
|
||||||
|
timeoutMinutes: process.env.INPUT_TIMEOUT_MINUTES,
|
||||||
});
|
});
|
||||||
});
|
} catch (error) {
|
||||||
|
core.setFailed(`Action failed with error: ${error}`);
|
||||||
// Clean up streaming
|
|
||||||
if (streamHandler) {
|
|
||||||
try {
|
|
||||||
await streamHandler.close();
|
|
||||||
} catch (error) {
|
|
||||||
core.warning(`Failed to close stream handler: ${error}`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clean up processes
|
|
||||||
try {
|
|
||||||
catProcess.kill("SIGTERM");
|
|
||||||
} catch (e) {
|
|
||||||
// Process may already be dead
|
|
||||||
}
|
|
||||||
try {
|
|
||||||
pipeProcess.kill("SIGTERM");
|
|
||||||
} catch (e) {
|
|
||||||
// Process may already be dead
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clean up pipe file
|
|
||||||
try {
|
|
||||||
await unlink(PIPE_PATH);
|
|
||||||
} catch (e) {
|
|
||||||
// Ignore errors during cleanup
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set conclusion based on exit code
|
|
||||||
if (exitCode === 0) {
|
|
||||||
// Try to process the output and save execution metrics
|
|
||||||
try {
|
|
||||||
await writeFile("output.txt", output);
|
|
||||||
|
|
||||||
// Process output.txt into JSON and save to execution file
|
|
||||||
const { stdout: jsonOutput } = await execAsync("jq -s '.' output.txt");
|
|
||||||
await writeFile(EXECUTION_FILE, jsonOutput);
|
|
||||||
|
|
||||||
console.log(`Log saved to ${EXECUTION_FILE}`);
|
|
||||||
} catch (e) {
|
|
||||||
core.warning(`Failed to process output for execution metrics: ${e}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
core.setOutput("conclusion", "success");
|
|
||||||
core.setOutput("execution_file", EXECUTION_FILE);
|
|
||||||
} else {
|
|
||||||
core.setOutput("conclusion", "failure");
|
core.setOutput("conclusion", "failure");
|
||||||
|
process.exit(1);
|
||||||
// Still try to save execution file if we have output
|
|
||||||
if (output) {
|
|
||||||
try {
|
|
||||||
await writeFile("output.txt", output);
|
|
||||||
const { stdout: jsonOutput } = await execAsync("jq -s '.' output.txt");
|
|
||||||
await writeFile(EXECUTION_FILE, jsonOutput);
|
|
||||||
core.setOutput("execution_file", EXECUTION_FILE);
|
|
||||||
} catch (e) {
|
|
||||||
// Ignore errors when processing output during failure
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
process.exit(exitCode);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (import.meta.main) {
|
||||||
|
run();
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,152 +0,0 @@
|
|||||||
import * as core from "@actions/core";
|
|
||||||
|
|
||||||
export function parseStreamHeaders(
|
|
||||||
headersInput?: string,
|
|
||||||
): Record<string, string> {
|
|
||||||
if (!headersInput || headersInput.trim() === "") {
|
|
||||||
return {};
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
|
||||||
return JSON.parse(headersInput);
|
|
||||||
} catch (e) {
|
|
||||||
console.error("Failed to parse stream headers as JSON:", e);
|
|
||||||
return {};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
export type TokenGetter = (audience: string) => Promise<string>;
|
|
||||||
|
|
||||||
export class StreamHandler {
|
|
||||||
private endpoint: string;
|
|
||||||
private customHeaders: Record<string, string>;
|
|
||||||
private tokenGetter: TokenGetter;
|
|
||||||
private token: string | null = null;
|
|
||||||
private tokenFetchTime: number = 0;
|
|
||||||
private buffer: string[] = [];
|
|
||||||
private flushTimer: NodeJS.Timeout | null = null;
|
|
||||||
private isClosed = false;
|
|
||||||
|
|
||||||
private readonly TOKEN_LIFETIME_MS = 4 * 60 * 1000; // 4 minutes
|
|
||||||
private readonly BATCH_SIZE = 10;
|
|
||||||
private readonly BATCH_TIMEOUT_MS = 1000;
|
|
||||||
private readonly REQUEST_TIMEOUT_MS = 5000;
|
|
||||||
|
|
||||||
constructor(
|
|
||||||
endpoint: string,
|
|
||||||
customHeaders: Record<string, string> = {},
|
|
||||||
tokenGetter?: TokenGetter,
|
|
||||||
) {
|
|
||||||
this.endpoint = endpoint;
|
|
||||||
this.customHeaders = customHeaders;
|
|
||||||
this.tokenGetter = tokenGetter || ((audience) => core.getIDToken(audience));
|
|
||||||
}
|
|
||||||
|
|
||||||
async addOutput(data: string): Promise<void> {
|
|
||||||
if (this.isClosed) return;
|
|
||||||
|
|
||||||
// Split by newlines and add to buffer
|
|
||||||
const lines = data.split("\n").filter((line) => line.length > 0);
|
|
||||||
this.buffer.push(...lines);
|
|
||||||
|
|
||||||
// Check if we should flush
|
|
||||||
if (this.buffer.length >= this.BATCH_SIZE) {
|
|
||||||
await this.flush();
|
|
||||||
} else {
|
|
||||||
// Set or reset the timer
|
|
||||||
this.resetFlushTimer();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private resetFlushTimer(): void {
|
|
||||||
if (this.flushTimer) {
|
|
||||||
clearTimeout(this.flushTimer);
|
|
||||||
}
|
|
||||||
this.flushTimer = setTimeout(() => {
|
|
||||||
this.flush().catch((err) => {
|
|
||||||
core.warning(`Failed to flush stream buffer: ${err}`);
|
|
||||||
});
|
|
||||||
}, this.BATCH_TIMEOUT_MS);
|
|
||||||
}
|
|
||||||
|
|
||||||
private async getToken(): Promise<string> {
|
|
||||||
const now = Date.now();
|
|
||||||
|
|
||||||
// Check if we need a new token
|
|
||||||
if (!this.token || now - this.tokenFetchTime >= this.TOKEN_LIFETIME_MS) {
|
|
||||||
try {
|
|
||||||
this.token = await this.tokenGetter("claude-code-github-action");
|
|
||||||
this.tokenFetchTime = now;
|
|
||||||
core.debug("Fetched new OIDC token for streaming");
|
|
||||||
} catch (error) {
|
|
||||||
throw new Error(`Failed to get OIDC token: ${error}`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return this.token;
|
|
||||||
}
|
|
||||||
|
|
||||||
private async flush(): Promise<void> {
|
|
||||||
if (this.buffer.length === 0) return;
|
|
||||||
|
|
||||||
// Clear the flush timer
|
|
||||||
if (this.flushTimer) {
|
|
||||||
clearTimeout(this.flushTimer);
|
|
||||||
this.flushTimer = null;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the current buffer and clear it
|
|
||||||
const output = [...this.buffer];
|
|
||||||
this.buffer = [];
|
|
||||||
|
|
||||||
try {
|
|
||||||
const token = await this.getToken();
|
|
||||||
|
|
||||||
const payload = {
|
|
||||||
timestamp: new Date().toISOString(),
|
|
||||||
output: output,
|
|
||||||
};
|
|
||||||
|
|
||||||
// Create an AbortController for timeout
|
|
||||||
const controller = new AbortController();
|
|
||||||
const timeoutId = setTimeout(
|
|
||||||
() => controller.abort(),
|
|
||||||
this.REQUEST_TIMEOUT_MS,
|
|
||||||
);
|
|
||||||
|
|
||||||
try {
|
|
||||||
await fetch(this.endpoint, {
|
|
||||||
method: "POST",
|
|
||||||
headers: {
|
|
||||||
"Content-Type": "application/json",
|
|
||||||
Authorization: `Bearer ${token}`,
|
|
||||||
...this.customHeaders,
|
|
||||||
},
|
|
||||||
body: JSON.stringify(payload),
|
|
||||||
signal: controller.signal,
|
|
||||||
});
|
|
||||||
} finally {
|
|
||||||
clearTimeout(timeoutId);
|
|
||||||
}
|
|
||||||
} catch (error) {
|
|
||||||
// Log but don't throw - we don't want to interrupt Claude's execution
|
|
||||||
core.warning(`Failed to stream output: ${error}`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async close(): Promise<void> {
|
|
||||||
// Clear any pending timer
|
|
||||||
if (this.flushTimer) {
|
|
||||||
clearTimeout(this.flushTimer);
|
|
||||||
this.flushTimer = null;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Flush any remaining output
|
|
||||||
if (this.buffer.length > 0) {
|
|
||||||
await this.flush();
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mark as closed after flushing
|
|
||||||
this.isClosed = true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,97 +0,0 @@
import { describe, it, expect } from "bun:test";
import { prepareRunConfig } from "../src/run-claude";

describe("resume endpoint functionality", () => {
  it("should add --teleport flag when both session_id and resume_endpoint are provided", () => {
    const streamConfig = JSON.stringify({
      session_id: "12345",
      resume_endpoint: "https://example.com/resume/12345",
    });
    const config = prepareRunConfig("/path/to/prompt", {
      streamConfig,
    });

    expect(config.claudeArgs).toContain("--teleport");
    expect(config.claudeArgs).toContain("12345");
  });

  it("should not add --teleport flag when no streamConfig is provided", () => {
    const config = prepareRunConfig("/path/to/prompt", {
      allowedTools: "Edit",
    });

    expect(config.claudeArgs).not.toContain("--teleport");
  });

  it("should not add --teleport flag when only session_id is provided without resume_endpoint", () => {
    const streamConfig = JSON.stringify({
      session_id: "12345",
      // No resume_endpoint
    });
    const config = prepareRunConfig("/path/to/prompt", {
      streamConfig,
    });

    expect(config.claudeArgs).not.toContain("--teleport");
  });

  it("should not add --teleport flag when only resume_endpoint is provided without session_id", () => {
    const streamConfig = JSON.stringify({
      resume_endpoint: "https://example.com/resume/12345",
      // No session_id
    });
    const config = prepareRunConfig("/path/to/prompt", {
      streamConfig,
    });

    expect(config.claudeArgs).not.toContain("--teleport");
  });

  it("should maintain order of arguments with session_id", () => {
    const streamConfig = JSON.stringify({
      session_id: "12345",
      resume_endpoint: "https://example.com/resume/12345",
    });
    const config = prepareRunConfig("/path/to/prompt", {
      allowedTools: "Edit",
      streamConfig,
      maxTurns: "5",
    });

    const teleportIndex = config.claudeArgs.indexOf("--teleport");
    const maxTurnsIndex = config.claudeArgs.indexOf("--max-turns");

    expect(teleportIndex).toBeGreaterThan(-1);
    expect(maxTurnsIndex).toBeGreaterThan(-1);
  });

  it("should handle progress_endpoint and headers in streamConfig", () => {
    const streamConfig = JSON.stringify({
      progress_endpoint: "https://example.com/progress",
      headers: { "X-Test": "value" },
    });
    const config = prepareRunConfig("/path/to/prompt", {
      streamConfig,
    });

    // This test just verifies parsing doesn't fail - actual streaming logic
    // is tested elsewhere as it requires environment setup
    expect(config.claudeArgs).toBeDefined();
  });

  it("should handle session_id with resume_endpoint and headers", () => {
    const streamConfig = JSON.stringify({
      session_id: "abc123",
      resume_endpoint: "https://example.com/resume/abc123",
      headers: { Authorization: "Bearer token" },
      progress_endpoint: "https://example.com/progress",
    });
    const config = prepareRunConfig("/path/to/prompt", {
      streamConfig,
    });

    expect(config.claudeArgs).toContain("--teleport");
    expect(config.claudeArgs).toContain("abc123");
    // Note: Environment variable setup (TELEPORT_RESUME_URL, TELEPORT_HEADERS) is tested in integration tests
  });
});
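For orientation, the rule these deleted tests pinned down is narrow: `--teleport <session_id>` is only appended when the parsed stream config carries both `session_id` and `resume_endpoint`. The following is a minimal sketch of that rule only, not the repository's actual prepareRunConfig implementation; the option shape is assumed from the tests above.

// Hypothetical, simplified sketch of the argument rule covered by the deleted
// tests; the real prepareRunConfig builds many more flags (tools, max turns, ...).
type SketchOptions = { streamConfig?: string; maxTurns?: string };

function buildTeleportArgs(options: SketchOptions): string[] {
  const args: string[] = [];
  if (options.streamConfig) {
    const parsed = JSON.parse(options.streamConfig) as {
      session_id?: string;
      resume_endpoint?: string;
    };
    // Both fields must be present before resuming via --teleport makes sense.
    if (parsed.session_id && parsed.resume_endpoint) {
      args.push("--teleport", parsed.session_id);
    }
  }
  if (options.maxTurns) {
    args.push("--max-turns", options.maxTurns);
  }
  return args;
}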
@@ -1,364 +0,0 @@
import { describe, it, expect, beforeEach, mock } from "bun:test";
import {
  StreamHandler,
  parseStreamHeaders,
  type TokenGetter,
} from "../src/stream-handler";

describe("parseStreamHeaders", () => {
  it("should return empty object for empty input", () => {
    expect(parseStreamHeaders("")).toEqual({});
    expect(parseStreamHeaders(undefined)).toEqual({});
    expect(parseStreamHeaders(" ")).toEqual({});
  });

  it("should parse single header", () => {
    const result = parseStreamHeaders('{"X-Correlation-Id": "12345"}');
    expect(result).toEqual({ "X-Correlation-Id": "12345" });
  });

  it("should parse multiple headers", () => {
    const headers = JSON.stringify({
      "X-Correlation-Id": "12345",
      "X-Custom-Header": "custom-value",
      Authorization: "Bearer token123",
    });

    const result = parseStreamHeaders(headers);
    expect(result).toEqual({
      "X-Correlation-Id": "12345",
      "X-Custom-Header": "custom-value",
      Authorization: "Bearer token123",
    });
  });

  it("should handle headers with spaces", () => {
    const headers = JSON.stringify({
      "X-Header-One": "value with spaces",
      "X-Header-Two": "another value",
    });

    const result = parseStreamHeaders(headers);
    expect(result).toEqual({
      "X-Header-One": "value with spaces",
      "X-Header-Two": "another value",
    });
  });

  it("should skip empty lines and comments", () => {
    const headers = JSON.stringify({
      "X-Header-One": "value1",
      "X-Header-Two": "value2",
      "X-Header-Three": "value3",
    });

    const result = parseStreamHeaders(headers);
    expect(result).toEqual({
      "X-Header-One": "value1",
      "X-Header-Two": "value2",
      "X-Header-Three": "value3",
    });
  });

  it("should skip lines without colons", () => {
    const headers = JSON.stringify({
      "X-Header-One": "value1",
      "X-Header-Two": "value2",
    });

    const result = parseStreamHeaders(headers);
    expect(result).toEqual({
      "X-Header-One": "value1",
      "X-Header-Two": "value2",
    });
  });

  it("should handle headers with colons in values", () => {
    const headers = JSON.stringify({
      "X-URL": "https://example.com:8080/path",
      "X-Time": "10:30:45",
    });

    const result = parseStreamHeaders(headers);
    expect(result).toEqual({
      "X-URL": "https://example.com:8080/path",
      "X-Time": "10:30:45",
    });
  });
});

describe("StreamHandler", () => {
  let handler: StreamHandler;
  let mockFetch: ReturnType<typeof mock>;
  let mockTokenGetter: TokenGetter;
  const mockEndpoint = "https://test.example.com/stream";
  const mockToken = "mock-oidc-token";

  beforeEach(() => {
    // Mock fetch
    mockFetch = mock(() => Promise.resolve({ ok: true }));
    global.fetch = mockFetch as any;

    // Mock token getter
    mockTokenGetter = mock(() => Promise.resolve(mockToken));
  });

  describe("basic functionality", () => {
    it("should batch lines up to BATCH_SIZE", async () => {
      handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);

      // Add 9 lines (less than batch size of 10)
      for (let i = 1; i <= 9; i++) {
        await handler.addOutput(`line ${i}\n`);
      }

      // Should not have sent anything yet
      expect(mockFetch).not.toHaveBeenCalled();

      // Add the 10th line to trigger flush
      await handler.addOutput("line 10\n");

      // Should have sent the batch
      expect(mockFetch).toHaveBeenCalledTimes(1);
      expect(mockFetch).toHaveBeenCalledWith(mockEndpoint, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          Authorization: `Bearer ${mockToken}`,
        },
        body: expect.stringContaining(
          '"output":["line 1","line 2","line 3","line 4","line 5","line 6","line 7","line 8","line 9","line 10"]',
        ),
        signal: expect.any(AbortSignal),
      });
    });

    it("should flush on timeout", async () => {
      handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);

      // Add a few lines
      await handler.addOutput("line 1\n");
      await handler.addOutput("line 2\n");

      // Should not have sent anything yet
      expect(mockFetch).not.toHaveBeenCalled();

      // Wait for the timeout to trigger
      await new Promise((resolve) => setTimeout(resolve, 1100));

      // Should have sent the batch
      expect(mockFetch).toHaveBeenCalledTimes(1);
      const call = mockFetch.mock.calls[0];
      expect(call).toBeDefined();
      const body = JSON.parse(call![1].body);
      expect(body.output).toEqual(["line 1", "line 2"]);
    });

    it("should include custom headers", async () => {
      const customHeaders = {
        "X-Correlation-Id": "12345",
        "X-Custom": "value",
      };
      handler = new StreamHandler(mockEndpoint, customHeaders, mockTokenGetter);

      // Trigger a batch
      for (let i = 1; i <= 10; i++) {
        await handler.addOutput(`line ${i}\n`);
      }

      expect(mockFetch).toHaveBeenCalledWith(mockEndpoint, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          Authorization: `Bearer ${mockToken}`,
          "X-Correlation-Id": "12345",
          "X-Custom": "value",
        },
        body: expect.any(String),
        signal: expect.any(AbortSignal),
      });
    });

    it("should include timestamp in payload", async () => {
      handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);

      const beforeTime = new Date().toISOString();

      // Trigger a batch
      for (let i = 1; i <= 10; i++) {
        await handler.addOutput(`line ${i}\n`);
      }

      const afterTime = new Date().toISOString();

      const call = mockFetch.mock.calls[0];
      expect(call).toBeDefined();
      const body = JSON.parse(call![1].body);

      expect(body).toHaveProperty("timestamp");
      expect(new Date(body.timestamp).toISOString()).toBe(body.timestamp);
      expect(body.timestamp >= beforeTime).toBe(true);
      expect(body.timestamp <= afterTime).toBe(true);
    });
  });

  describe("token management", () => {
    it("should fetch token on first request", async () => {
      handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);

      // Trigger a flush
      for (let i = 1; i <= 10; i++) {
        await handler.addOutput(`line ${i}\n`);
      }

      expect(mockTokenGetter).toHaveBeenCalledWith("claude-code-github-action");
      expect(mockTokenGetter).toHaveBeenCalledTimes(1);
    });

    it("should reuse token within 4 minutes", async () => {
      handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);

      // First batch
      for (let i = 1; i <= 10; i++) {
        await handler.addOutput(`line ${i}\n`);
      }

      // Second batch immediately (within 4 minutes)
      for (let i = 11; i <= 20; i++) {
        await handler.addOutput(`line ${i}\n`);
      }

      // Should have only fetched token once
      expect(mockTokenGetter).toHaveBeenCalledTimes(1);
    });

    it("should handle token fetch errors", async () => {
      const errorTokenGetter = mock(() =>
        Promise.reject(new Error("Token fetch failed")),
      );
      handler = new StreamHandler(mockEndpoint, {}, errorTokenGetter);

      // Try to send data
      for (let i = 1; i <= 10; i++) {
        await handler.addOutput(`line ${i}\n`);
      }

      // Should not have made fetch request
      expect(mockFetch).not.toHaveBeenCalled();
    });
  });

  describe("error handling", () => {
    it("should handle fetch errors gracefully", async () => {
      mockFetch.mockImplementation(() =>
        Promise.reject(new Error("Network error")),
      );
      handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);

      // Send data - should not throw
      for (let i = 1; i <= 10; i++) {
        await handler.addOutput(`line ${i}\n`);
      }

      // Should have attempted to fetch
      expect(mockFetch).toHaveBeenCalledTimes(1);
    });

    it("should continue processing after errors", async () => {
      handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);

      // First batch - make it fail
      let callCount = 0;
      mockFetch.mockImplementation(() => {
        callCount++;
        if (callCount === 1) {
          return Promise.reject(new Error("First batch failed"));
        }
        return Promise.resolve({ ok: true });
      });

      for (let i = 1; i <= 10; i++) {
        await handler.addOutput(`line ${i}\n`);
      }

      // Second batch - should work
      for (let i = 11; i <= 20; i++) {
        await handler.addOutput(`line ${i}\n`);
      }

      // Should have attempted both batches
      expect(mockFetch).toHaveBeenCalledTimes(2);
    });
  });

  describe("close functionality", () => {
    it("should flush remaining data on close", async () => {
      handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);

      // Add some data but not enough to trigger batch
      await handler.addOutput("line 1\n");
      await handler.addOutput("line 2\n");

      expect(mockFetch).not.toHaveBeenCalled();

      // Close should flush
      await handler.close();

      expect(mockFetch).toHaveBeenCalledTimes(1);
      const call = mockFetch.mock.calls[0];
      expect(call).toBeDefined();
      const body = JSON.parse(call![1].body);
      expect(body.output).toEqual(["line 1", "line 2"]);
    });

    it("should not accept new data after close", async () => {
      handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);

      await handler.close();

      // Try to add data after close
      await handler.addOutput("should not be sent\n");

      // Should not have sent anything
      expect(mockFetch).not.toHaveBeenCalled();
    });
  });

  describe("data handling", () => {
    it("should filter out empty lines", async () => {
      handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);

      await handler.addOutput("line 1\n\n\nline 2\n\n");
      await handler.close();

      const call = mockFetch.mock.calls[0];
      expect(call).toBeDefined();
      const body = JSON.parse(call![1].body);
      expect(body.output).toEqual(["line 1", "line 2"]);
    });

    it("should handle data without newlines", async () => {
      handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);

      await handler.addOutput("single line");
      await handler.close();

      const call = mockFetch.mock.calls[0];
      expect(call).toBeDefined();
      const body = JSON.parse(call![1].body);
      expect(body.output).toEqual(["single line"]);
    });

    it("should handle multi-line input correctly", async () => {
      handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);

      await handler.addOutput("line 1\nline 2\nline 3");
      await handler.close();

      const call = mockFetch.mock.calls[0];
      expect(call).toBeDefined();
      const body = JSON.parse(call![1].body);
      expect(body.output).toEqual(["line 1", "line 2", "line 3"]);
    });
  });
});
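Taken together, the deleted tests above describe the contract of the now-removed StreamHandler: output is split into lines, batched (ten lines per POST in these tests), flushed on a timer, and flushed one last time on close(). A hedged usage sketch follows, assuming only the constructor and method names exercised by the tests; the endpoint and headers are made up.

// Usage sketch only; StreamHandler's batch size, flush interval, and token
// audience are taken from the deleted tests, not re-verified against code.
import { StreamHandler } from "../src/stream-handler";

async function streamProcessOutput(lines: AsyncIterable<string>): Promise<void> {
  const getToken = async (_audience: string) => process.env.OIDC_TOKEN ?? "";
  const handler = new StreamHandler(
    "https://progress.example.com/stream", // hypothetical progress endpoint
    { "X-Correlation-Id": "example-run" }, // optional extra headers
    getToken,
  );

  for await (const line of lines) {
    await handler.addOutput(`${line}\n`); // batched internally
  }

  await handler.close(); // flushes whatever remains in the buffer
}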
@@ -6,8 +6,8 @@ echo "Installing git hooks..."
 # Make sure hooks directory exists
 mkdir -p .git/hooks

-# Install pre-push hook
-cp scripts/pre-push .git/hooks/pre-push
-chmod +x .git/hooks/pre-push
+# Install pre-commit hook
+cp scripts/pre-commit .git/hooks/pre-commit
+chmod +x .git/hooks/pre-commit

 echo "Git hooks installed successfully!"
@@ -60,8 +60,6 @@ export function buildAllowedToolsString(
     "Bash(git diff:*)",
     "Bash(git log:*)",
     "Bash(git rm:*)",
-    "Bash(git config user.name:*)",
-    "Bash(git config user.email:*)",
   );
 }

@@ -81,44 +79,6 @@ export function buildAllowedToolsString(
   return allAllowedTools;
 }

-/**
- * Specialized allowed tools string for remote agent mode
- * Always uses MCP commit signing and excludes dangerous git commands
- */
-export function buildRemoteAgentAllowedToolsString(
-  customAllowedTools?: string[],
-  includeActionsTools: boolean = false,
-): string {
-  let baseTools = [...BASE_ALLOWED_TOOLS];
-
-  // Always include the comment update tool from the comment server
-  baseTools.push("mcp__github_comment__update_claude_comment");
-
-  // Remote agent mode always uses MCP commit signing
-  baseTools.push(
-    "mcp__github_file_ops__commit_files",
-    "mcp__github_file_ops__delete_files",
-  );
-
-  // Add safe git tools only (read-only operations)
-  baseTools.push("Bash(git status:*)", "Bash(git diff:*)", "Bash(git log:*)");
-
-  // Add GitHub Actions MCP tools if enabled
-  if (includeActionsTools) {
-    baseTools.push(
-      "mcp__github_ci__get_ci_status",
-      "mcp__github_ci__get_workflow_run_details",
-      "mcp__github_ci__download_job_log",
-    );
-  }
-
-  let allAllowedTools = baseTools.join(",");
-  if (customAllowedTools && customAllowedTools.length > 0) {
-    allAllowedTools = `${allAllowedTools},${customAllowedTools.join(",")}`;
-  }
-  return allAllowedTools;
-}
-
 export function buildDisallowedToolsString(
   customDisallowedTools?: string[],
   allowedTools?: string[],
@@ -851,12 +811,18 @@ f. If you are unable to complete certain steps, such as running a linter or test
   return promptContent;
 }

+export type CreatePromptResult = {
+  promptFile: string;
+  allowedTools: string;
+  disallowedTools: string;
+};
+
 export async function createPrompt(
   mode: Mode,
   modeContext: ModeContext,
   githubData: FetchDataResult,
   context: ParsedGitHubContext,
-) {
+): Promise<CreatePromptResult> {
   try {
     // Prepare the context for prompt generation
     let claudeCommentId: string = "";
@@ -928,8 +894,17 @@ export async function createPrompt(
       combinedAllowedTools,
     );

+    // TODO: Remove these environment variable exports once modes are updated to use return values
     core.exportVariable("ALLOWED_TOOLS", allAllowedTools);
     core.exportVariable("DISALLOWED_TOOLS", allDisallowedTools);

+    const promptFile = `${process.env.RUNNER_TEMP}/claude-prompts/claude-prompt.txt`;
+
+    return {
+      promptFile,
+      allowedTools: allAllowedTools,
+      disallowedTools: allDisallowedTools,
+    };
   } catch (error) {
     core.setFailed(`Create prompt failed with error: ${error}`);
     process.exit(1);
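The new CreatePromptResult return value is what the TODO above points at: it lets callers stop reading ALLOWED_TOOLS / DISALLOWED_TOOLS back out of the environment. The sketch below shows one way a caller could consume the return value once the modes are updated; the import paths and wrapper function are illustrative assumptions, not the current code path.

// Illustrative only: assumes the createPrompt signature introduced above and
// hypothetical import paths; the surrounding wiring is simplified.
import { createPrompt, type CreatePromptResult } from "./create-prompt";
import type { Mode, ModeContext } from "./modes/types";
import type { FetchDataResult } from "./github/data/fetcher";
import type { ParsedGitHubContext } from "./github/context";

export async function preparePromptForRun(
  mode: Mode,
  modeContext: ModeContext,
  githubData: FetchDataResult,
  context: ParsedGitHubContext,
): Promise<CreatePromptResult> {
  const result = await createPrompt(mode, modeContext, githubData, context);

  // The tool lists now travel as return values, so a caller can hand
  // result.allowedTools / result.disallowedTools and result.promptFile
  // straight to the Claude runner instead of re-reading process.env.
  return result;
}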
@@ -12,6 +12,7 @@ import { createOctokit } from "../github/api/client";
 import { parseGitHubContext, isEntityContext } from "../github/context";
 import { getMode, isValidMode, DEFAULT_MODE } from "../modes/registry";
 import type { ModeName } from "../modes/types";
+import { prepare } from "../prepare";

 async function run() {
   try {
@@ -59,19 +60,7 @@ async function run() {
   }

   // Step 4: Get mode and check trigger conditions
-  let mode;
-
-  // TEMPORARY HACK: Always use remote-agent mode for repository_dispatch events
-  // This ensures backward compatibility while we transition
-  if (context.eventName === "repository_dispatch") {
-    console.log(
-      "🔧 TEMPORARY HACK: Forcing remote-agent mode for repository_dispatch event",
-    );
-    mode = getMode("remote-agent", context);
-  } else {
-    mode = getMode(context.inputs.mode, context);
-  }
-
+  const mode = getMode(validatedMode, context);
   const containsTrigger = mode.shouldTrigger(context);

   // Set output for action.yml to check
@@ -83,9 +72,10 @@ async function run() {
   }

   // Step 5: Use the new modular prepare function
-  const result = await mode.prepare({
+  const result = await prepare({
     context,
     octokit,
+    mode,
     githubToken,
   });

@@ -1,118 +0,0 @@
#!/usr/bin/env bun

import * as core from "@actions/core";
import { reportClaudeComplete } from "../modes/remote-agent/system-progress-handler";
import type { SystemProgressConfig } from "../modes/remote-agent/progress-types";
import type { StreamConfig } from "../types/stream-config";
import { commitUncommittedChanges } from "../github/utils/git-common-utils";

async function run() {
  try {
    // Only run if we're in remote-agent mode
    const mode = process.env.MODE;
    if (mode !== "remote-agent") {
      console.log(
        "Not in remote-agent mode, skipping Claude completion reporting",
      );
      return;
    }

    // Check if we have stream config with system progress endpoint
    const streamConfigStr = process.env.STREAM_CONFIG;
    if (!streamConfigStr) {
      console.log(
        "No stream config available, skipping Claude completion reporting",
      );
      return;
    }

    let streamConfig: StreamConfig;
    try {
      streamConfig = JSON.parse(streamConfigStr);
    } catch (e) {
      console.error("Failed to parse stream config:", e);
      return;
    }

    if (!streamConfig.system_progress_endpoint) {
      console.log(
        "No system progress endpoint in stream config, skipping Claude completion reporting",
      );
      return;
    }

    // Extract the system progress config
    const systemProgressConfig: SystemProgressConfig = {
      endpoint: streamConfig.system_progress_endpoint,
      headers: streamConfig.headers || {},
    };

    // Get the OIDC token from Authorization header
    const authHeader = systemProgressConfig.headers?.["Authorization"];
    if (!authHeader || !authHeader.startsWith("Bearer ")) {
      console.error("No valid Authorization header in stream config");
      return;
    }
    const oidcToken = authHeader.substring(7); // Remove "Bearer " prefix

    // Get Claude execution status
    const claudeConclusion = process.env.CLAUDE_CONCLUSION || "failure";
    const exitCode = claudeConclusion === "success" ? 0 : 1;

    // Calculate duration if possible
    const startTime = process.env.CLAUDE_START_TIME;
    let durationMs = 0;
    if (startTime) {
      durationMs = Date.now() - parseInt(startTime, 10);
    }

    // Report Claude completion
    console.log(
      `Reporting Claude completion: exitCode=${exitCode}, duration=${durationMs}ms`,
    );
    reportClaudeComplete(systemProgressConfig, oidcToken, exitCode, durationMs);

    // Ensure that uncommitted changes are committed
    const claudeBranch = process.env.CLAUDE_BRANCH;
    const useCommitSigning = process.env.USE_COMMIT_SIGNING === "true";
    const githubToken = process.env.GITHUB_TOKEN;

    // Parse repository from GITHUB_REPOSITORY (format: owner/repo)
    const repository = process.env.GITHUB_REPOSITORY;
    if (!repository) {
      console.log("No GITHUB_REPOSITORY available, skipping branch cleanup");
      return;
    }

    const [repoOwner, repoName] = repository.split("/");

    if (claudeBranch && githubToken && repoOwner && repoName) {
      console.log(`Checking for uncommitted changes in remote-agent mode...`);

      try {
        const commitResult = await commitUncommittedChanges(
          repoOwner,
          repoName,
          claudeBranch,
          useCommitSigning,
        );

        if (commitResult) {
          console.log(`Committed uncommitted changes: ${commitResult.sha}`);
        } else {
          console.log("No uncommitted changes found");
        }
      } catch (error) {
        // Don't fail the action if commit fails
        core.warning(`Failed to commit changes: ${error}`);
      }
    }
  } catch (error) {
    // Don't fail the action if reporting fails
    core.warning(`Failed to report Claude completion: ${error}`);
  }
}

if (import.meta.main) {
  run();
}
145  src/entrypoints/run.ts  Normal file
@@ -0,0 +1,145 @@
#!/usr/bin/env bun

/**
 * Unified entrypoint that combines prepare and run-claude steps
 */

import * as core from "@actions/core";
import { setupGitHubToken } from "../github/token";
import { checkWritePermissions } from "../github/validation/permissions";
import { createOctokit } from "../github/api/client";
import { parseGitHubContext, isEntityContext } from "../github/context";
import { getMode, isValidMode, DEFAULT_MODE } from "../modes/registry";
import type { ModeName } from "../modes/types";
import { prepare } from "../prepare";
import { runClaudeCore } from "../../base-action/src/run-claude-core";
import { validateEnvironmentVariables } from "../../base-action/src/validate-env";
import { setupClaudeCodeSettings } from "../../base-action/src/setup-claude-code-settings";

async function run() {
  try {
    // Step 1: Get mode first to determine authentication method
    const modeInput = process.env.MODE || DEFAULT_MODE;

    // Validate mode input
    if (!isValidMode(modeInput)) {
      throw new Error(`Invalid mode: ${modeInput}`);
    }
    const validatedMode: ModeName = modeInput;

    // Step 2: Setup GitHub token based on mode
    let githubToken: string;
    if (validatedMode === "experimental-review") {
      // For experimental-review mode, use the default GitHub Action token
      githubToken = process.env.DEFAULT_WORKFLOW_TOKEN || "";
      if (!githubToken) {
        throw new Error(
          "DEFAULT_WORKFLOW_TOKEN not found for experimental-review mode",
        );
      }
      console.log("Using default GitHub Action token for review mode");
    } else {
      // For other modes, use the existing token exchange
      githubToken = await setupGitHubToken();
    }
    const octokit = createOctokit(githubToken);

    // Step 3: Parse GitHub context (once for all operations)
    const context = parseGitHubContext();

    // Step 4: Check write permissions (only for entity contexts)
    if (isEntityContext(context)) {
      const hasWritePermissions = await checkWritePermissions(
        octokit.rest,
        context,
      );
      if (!hasWritePermissions) {
        throw new Error(
          "Actor does not have write permissions to the repository",
        );
      }
    }

    // Step 5: Get mode and check trigger conditions
    const mode = getMode(validatedMode, context);
    const containsTrigger = mode.shouldTrigger(context);

    // Set output for action.yml to check (in case it's still needed)
    core.setOutput("contains_trigger", containsTrigger.toString());

    if (!containsTrigger) {
      console.log("No trigger found, skipping remaining steps");
      return;
    }

    // Step 6: Use the modular prepare function
    const prepareResult = await prepare({
      context,
      octokit,
      mode,
      githubToken,
    });

    // Set critical outputs immediately after prepare completes
    // This ensures they're available for cleanup even if Claude fails
    core.setOutput("GITHUB_TOKEN", githubToken);
    core.setOutput("mcp_config", prepareResult.mcpConfig);
    if (prepareResult.branchInfo.claudeBranch) {
      core.setOutput("branch_name", prepareResult.branchInfo.claudeBranch);
      core.setOutput("CLAUDE_BRANCH", prepareResult.branchInfo.claudeBranch);
    }
    core.setOutput("BASE_BRANCH", prepareResult.branchInfo.baseBranch);
    if (prepareResult.commentId) {
      core.setOutput("claude_comment_id", prepareResult.commentId.toString());
    }

    // Step 7: The mode.prepare() call already created the prompt and set up tools
    // We need to get the allowed/disallowed tools from environment variables
    // TODO: Update Mode interface to return tools from prepare() instead of relying on env vars
    const allowedTools = process.env.ALLOWED_TOOLS || "";
    const disallowedTools = process.env.DISALLOWED_TOOLS || "";
    const promptFile = `${process.env.RUNNER_TEMP}/claude-prompts/claude-prompt.txt`;

    // Step 8: Validate environment and setup Claude settings
    validateEnvironmentVariables();
    await setupClaudeCodeSettings(process.env.SETTINGS);

    // Step 9: Run Claude Code
    console.log("Running Claude Code...");

    // Build environment object to pass to Claude
    const claudeEnvObject: Record<string, string> = {
      GITHUB_TOKEN: githubToken,
      NODE_VERSION: process.env.NODE_VERSION || "18.x",
      DETAILED_PERMISSION_MESSAGES: "1",
      CLAUDE_CODE_ACTION: "1",
    };

    await runClaudeCore({
      promptFile,
      settings: process.env.SETTINGS,
      allowedTools,
      disallowedTools,
      maxTurns: process.env.MAX_TURNS,
      mcpConfig: prepareResult.mcpConfig,
      systemPrompt: "",
      appendSystemPrompt: "",
      claudeEnv: process.env.CLAUDE_ENV,
      fallbackModel: process.env.FALLBACK_MODEL,
      model: process.env.ANTHROPIC_MODEL || process.env.MODEL,
      timeoutMinutes: process.env.TIMEOUT_MINUTES || "30",
      env: claudeEnvObject,
    });
  } catch (error) {
    const errorMessage = error instanceof Error ? error.message : String(error);
    core.setFailed(`Action failed with error: ${errorMessage}`);
    // Also output the clean error message for the action to capture
    core.setOutput("prepare_error", errorMessage);
    core.setOutput("conclusion", "failure");
    process.exit(1);
  }
}

if (import.meta.main) {
  run();
}
@@ -6,7 +6,6 @@ import type {
   PullRequestEvent,
   PullRequestReviewEvent,
   PullRequestReviewCommentEvent,
-  RepositoryDispatchEvent,
 } from "@octokit/webhooks-types";
 // Custom types for GitHub Actions events that aren't webhooks
 export type WorkflowDispatchEvent = {
@@ -47,11 +46,7 @@ const ENTITY_EVENT_NAMES = [
   "pull_request_review_comment",
 ] as const;

-const AUTOMATION_EVENT_NAMES = [
-  "workflow_dispatch",
-  "schedule",
-  "repository_dispatch",
-] as const;
+const AUTOMATION_EVENT_NAMES = ["workflow_dispatch", "schedule"] as const;

 // Derive types from constants for better maintainability
 type EntityEventName = (typeof ENTITY_EVENT_NAMES)[number];
@@ -67,17 +62,6 @@ type BaseContext = {
     full_name: string;
   };
   actor: string;
-  payload:
-    | IssuesEvent
-    | IssueCommentEvent
-    | PullRequestEvent
-    | PullRequestReviewEvent
-    | PullRequestReviewCommentEvent
-    | RepositoryDispatchEvent
-    | WorkflowDispatchEvent
-    | ScheduleEvent;
-  entityNumber?: number;
-  isPR?: boolean;
   inputs: {
     mode: ModeName;
     triggerPhrase: string;
@@ -94,14 +78,6 @@ type BaseContext = {
     additionalPermissions: Map<string, string>;
     useCommitSigning: boolean;
   };
-  progressTracking?: {
-    headers?: Record<string, string>;
-    resumeEndpoint?: string;
-    sessionId?: string;
-    progressEndpoint: string;
-    systemProgressEndpoint?: string;
-    oauthTokenEndpoint?: string;
-  };
 };

 // Context for entity-based events (issues, PRs, comments)
@@ -120,7 +96,7 @@ export type ParsedGitHubContext = BaseContext & {
 // Context for automation events (workflow_dispatch, schedule)
 export type AutomationContext = BaseContext & {
   eventName: AutomationEventName;
-  payload: WorkflowDispatchEvent | ScheduleEvent | RepositoryDispatchEvent;
+  payload: WorkflowDispatchEvent | ScheduleEvent;
 };

 // Union type for all contexts
@@ -214,66 +190,6 @@ export function parseGitHubContext(): GitHubContext {
         isPR: true,
       };
     }
-    case "repository_dispatch": {
-      const payload = context.payload as RepositoryDispatchEvent;
-      // Extract task description from client_payload
-      const clientPayload = payload.client_payload as {
-        prompt?: string;
-        stream_endpoint?: string;
-        headers?: Record<string, string>;
-        resume_endpoint?: string;
-        session_id?: string;
-        endpoints?: {
-          resume?: string;
-          progress?: string;
-          system_progress?: string;
-          oauth_endpoint?: string;
-        };
-        overrideInputs?: {
-          model?: string;
-          base_branch?: string;
-        };
-      };
-
-      // Override directPrompt with the prompt
-      if (clientPayload.prompt) {
-        commonFields.inputs.directPrompt = clientPayload.prompt;
-      }
-
-      // Apply input overrides
-      if (clientPayload.overrideInputs) {
-        if (clientPayload.overrideInputs.base_branch) {
-          commonFields.inputs.baseBranch =
-            clientPayload.overrideInputs.base_branch;
-        }
-      }
-
-      // Set up progress tracking - prioritize endpoints object if available, fallback to individual fields
-      let progressTracking: ParsedGitHubContext["progressTracking"] = undefined;
-
-      if (clientPayload.endpoints?.progress || clientPayload.stream_endpoint) {
-        progressTracking = {
-          progressEndpoint:
-            clientPayload.endpoints?.progress ||
-            clientPayload.stream_endpoint ||
-            "",
-          headers: clientPayload.headers,
-          resumeEndpoint:
-            // clientPayload.endpoints?.resume || clientPayload.resume_endpoint,
-            clientPayload.resume_endpoint,
-          sessionId: clientPayload.session_id,
-          systemProgressEndpoint: clientPayload.endpoints?.system_progress,
-          oauthTokenEndpoint: clientPayload.endpoints?.oauth_endpoint,
-        };
-      }
-
-      return {
-        ...commonFields,
-        eventName: "repository_dispatch",
-        payload: payload,
-        progressTracking,
-      };
-    }
     case "workflow_dispatch": {
       return {
         ...commonFields,
@@ -371,9 +287,3 @@ export function isAutomationContext(
     context.eventName as AutomationEventName,
   );
 }
-
-export function isRepositoryDispatchEvent(
-  context: GitHubContext,
-): context is GitHubContext & { payload: RepositoryDispatchEvent } {
-  return context.eventName === "repository_dispatch";
-}
@@ -8,7 +8,7 @@

 import { $ } from "bun";
 import * as core from "@actions/core";
-import type { GitHubContext } from "../context";
+import type { ParsedGitHubContext } from "../context";
 import type { GitHubPullRequest } from "../types";
 import type { Octokits } from "../api/client";
 import type { FetchDataResult } from "../data/fetcher";
@@ -21,15 +21,15 @@ export type BranchInfo = {

 export async function setupBranch(
   octokits: Octokits,
-  githubData: FetchDataResult | null,
-  context: GitHubContext,
+  githubData: FetchDataResult,
+  context: ParsedGitHubContext,
 ): Promise<BranchInfo> {
   const { owner, repo } = context.repository;
   const entityNumber = context.entityNumber;
   const { baseBranch, branchPrefix } = context.inputs;
   const isPR = context.isPR;

-  if (isPR && githubData) {
+  if (isPR) {
     const prData = githubData.contextData as GitHubPullRequest;
     const prState = prData.state;

@@ -84,27 +84,19 @@ export async function setupBranch(
     sourceBranch = repoResponse.data.default_branch;
   }

-  // Generate branch name for either an issue, closed/merged PR, or repository_dispatch event
-  let branchName: string;
-
-  if (context.eventName === "repository_dispatch") {
-    // For repository_dispatch events, use run ID for uniqueness since there's no entity number
-    const now = new Date();
-    const timestamp = `${now.getFullYear()}${String(now.getMonth() + 1).padStart(2, "0")}${String(now.getDate()).padStart(2, "0")}-${String(now.getHours()).padStart(2, "0")}${String(now.getMinutes()).padStart(2, "0")}`;
-    branchName = `${branchPrefix}dispatch-${context.runId}-${timestamp}`;
-  } else {
-    // For issues and PRs, use the existing logic
-    const entityType = isPR ? "pr" : "issue";
-    const now = new Date();
-    const timestamp = `${now.getFullYear()}${String(now.getMonth() + 1).padStart(2, "0")}${String(now.getDate()).padStart(2, "0")}-${String(now.getHours()).padStart(2, "0")}${String(now.getMinutes()).padStart(2, "0")}`;
-    branchName = `${branchPrefix}${entityType}-${entityNumber}-${timestamp}`;
-  }
-
+  // Generate branch name for either an issue or closed/merged PR
+  const entityType = isPR ? "pr" : "issue";
+  // Create Kubernetes-compatible timestamp: lowercase, hyphens only, shorter format
+  const now = new Date();
+  const timestamp = `${now.getFullYear()}${String(now.getMonth() + 1).padStart(2, "0")}${String(now.getDate()).padStart(2, "0")}-${String(now.getHours()).padStart(2, "0")}${String(now.getMinutes()).padStart(2, "0")}`;
   // Ensure branch name is Kubernetes-compatible:
   // - Lowercase only
   // - Alphanumeric with hyphens
   // - No underscores
   // - Max 50 chars (to allow for prefixes)
+  const branchName = `${branchPrefix}${entityType}-${entityNumber}-${timestamp}`;
   const newBranch = branchName.toLowerCase().substring(0, 50);

   try {
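For reference, the retained naming path reduces to prefix + entity type + entity number + compact timestamp, then lowercased and truncated. A small standalone sketch, with the prefix and issue number as example values only:

// Sketch of the branch-name shape produced by the retained code path above;
// "claude/" and 123 are illustrative inputs, not values from this repository.
function exampleBranchName(branchPrefix: string, isPR: boolean, entityNumber: number): string {
  const entityType = isPR ? "pr" : "issue";
  const now = new Date();
  const timestamp = `${now.getFullYear()}${String(now.getMonth() + 1).padStart(2, "0")}${String(now.getDate()).padStart(2, "0")}-${String(now.getHours()).padStart(2, "0")}${String(now.getMinutes()).padStart(2, "0")}`;
  const branchName = `${branchPrefix}${entityType}-${entityNumber}-${timestamp}`;
  // Kubernetes-compatible: lowercase, max 50 chars
  return branchName.toLowerCase().substring(0, 50);
}

// e.g. exampleBranchName("claude/", false, 123) -> "claude/issue-123-20250114-0932" (date-dependent)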
@@ -140,18 +132,8 @@ export async function setupBranch(
   }

   // For non-signing case, create and checkout the branch locally only
-  const entityType =
-    context.eventName === "repository_dispatch"
-      ? "dispatch"
-      : isPR
-        ? "pr"
-        : "issue";
-  const entityId =
-    context.eventName === "repository_dispatch"
-      ? context.runId
-      : entityNumber!.toString();
   console.log(
-    `Creating local branch ${newBranch} for ${entityType} ${entityId} from source branch: ${sourceBranch}...`,
+    `Creating local branch ${newBranch} for ${entityType} #${entityNumber} from source branch: ${sourceBranch}...`,
   );

   // Fetch and checkout the source branch first to ensure we branch from the correct base
@@ -6,7 +6,7 @@
  */

 import { $ } from "bun";
-import type { GitHubContext } from "../context";
+import type { ParsedGitHubContext } from "../context";
 import { GITHUB_SERVER_URL } from "../api/config";

 type GitUser = {
@@ -16,7 +16,7 @@ type GitUser = {

 export async function configureGitAuth(
   githubToken: string,
-  context: GitHubContext,
+  context: ParsedGitHubContext,
   user: GitUser | null,
 ) {
   console.log("Configuring git authentication for non-signing mode");
@@ -1,533 +0,0 @@
/**
 * Git Common Utilities
 *
 * This module provides utilities for Git operations using both GitHub API and CLI.
 *
 * ## When to use API vs CLI:
 *
 * ### GitHub API (for signed commits):
 * - When commit signing is enabled (`useCommitSigning: true`)
 * - Required for signed commits as GitHub Apps can't sign commits locally
 * - Functions with "API" in the name use the GitHub REST API
 *
 * ### Git CLI (for unsigned commits):
 * - When commit signing is disabled (`useCommitSigning: false`)
 * - Faster for simple operations when signing isn't required
 * - Uses local git commands (`git add`, `git commit`, `git push`)
 */

import { readFile } from "fs/promises";
import { join } from "path";
import { $ } from "bun";
import { GITHUB_API_URL } from "../api/config";
import { retryWithBackoff } from "../../utils/retry";
import fetch from "node-fetch";

interface FileEntry {
  path: string;
  content?: string;
  deleted?: boolean;
}

interface CommitResult {
  sha: string;
  message: string;
}

interface GitHubRef {
  object: {
    sha: string;
  };
}

interface GitHubCommit {
  tree: {
    sha: string;
  };
}

interface GitHubTree {
  sha: string;
}

interface GitHubNewCommit {
  sha: string;
  message: string;
  author: {
    name: string;
    date: string;
  };
}

async function getUncommittedFiles(): Promise<FileEntry[]> {
  try {
    console.log("Getting uncommitted files...");
    const gitStatus = await $`git status --porcelain`.quiet();
    const statusOutput = gitStatus.stdout.toString().trim();

    if (!statusOutput) {
      console.log("No uncommitted files found (git status output is empty)");
      return [];
    }

    console.log("Git status output:");
    console.log(statusOutput);

    const files: FileEntry[] = [];
    const lines = statusOutput.split("\n");
    console.log(`Found ${lines.length} lines in git status output`);

    for (const line of lines) {
      const trimmedLine = line.trim();
      if (!trimmedLine) {
        continue;
      }

      // Parse git status output
      // Format: XY filename (e.g., "M file.txt", "A new.txt", "?? untracked.txt", "D deleted.txt")
      const statusCode = trimmedLine.substring(0, 1);
      const filePath = trimmedLine.substring(2).trim();
      console.log(`Processing: status='${statusCode}' path='${filePath}'`);

      // Skip files we shouldn't auto-commit
      if (filePath === "output.txt" || filePath.endsWith("/output.txt")) {
        console.log(`Skipping temporary file: ${filePath}`);
        continue;
      }

      const isDeleted = statusCode.includes("D");
      console.log(`File ${filePath}: deleted=${isDeleted}`);

      files.push({
        path: filePath,
        deleted: isDeleted,
      });
    }

    console.log(`Returning ${files.length} files to commit`);
    return files;
  } catch (error) {
    // If git status fails (e.g., not in a git repo), return empty array
    console.error("Error running git status:", error);
    return [];
  }
}

/**
 * Helper function to get or create branch reference via GitHub API
 * Used when we need to ensure a branch exists before committing via API
 */
async function getOrCreateBranchRefViaAPI(
  owner: string,
  repo: string,
  branch: string,
  githubToken: string,
): Promise<string> {
  // Try to get the branch reference
  const refUrl = `${GITHUB_API_URL}/repos/${owner}/${repo}/git/refs/heads/${branch}`;
  const refResponse = await fetch(refUrl, {
    headers: {
      Accept: "application/vnd.github+json",
      Authorization: `Bearer ${githubToken}`,
      "X-GitHub-Api-Version": "2022-11-28",
    },
  });

  if (refResponse.ok) {
    const refData = (await refResponse.json()) as GitHubRef;
    return refData.object.sha;
  }

  if (refResponse.status !== 404) {
    throw new Error(`Failed to get branch reference: ${refResponse.status}`);
  }

  const baseBranch = process.env.BASE_BRANCH!;

  // Get the SHA of the base branch
  const baseRefUrl = `${GITHUB_API_URL}/repos/${owner}/${repo}/git/refs/heads/${baseBranch}`;
  const baseRefResponse = await fetch(baseRefUrl, {
    headers: {
      Accept: "application/vnd.github+json",
      Authorization: `Bearer ${githubToken}`,
      "X-GitHub-Api-Version": "2022-11-28",
    },
  });

  let baseSha: string;

  if (!baseRefResponse.ok) {
    // If base branch doesn't exist, try default branch
    const repoUrl = `${GITHUB_API_URL}/repos/${owner}/${repo}`;
    const repoResponse = await fetch(repoUrl, {
      headers: {
        Accept: "application/vnd.github+json",
        Authorization: `Bearer ${githubToken}`,
        "X-GitHub-Api-Version": "2022-11-28",
      },
    });

    if (!repoResponse.ok) {
      throw new Error(`Failed to get repository info: ${repoResponse.status}`);
    }

    const repoData = (await repoResponse.json()) as {
      default_branch: string;
    };
    const defaultBranch = repoData.default_branch;

    // Try default branch
    const defaultRefUrl = `${GITHUB_API_URL}/repos/${owner}/${repo}/git/refs/heads/${defaultBranch}`;
    const defaultRefResponse = await fetch(defaultRefUrl, {
      headers: {
        Accept: "application/vnd.github+json",
        Authorization: `Bearer ${githubToken}`,
        "X-GitHub-Api-Version": "2022-11-28",
      },
    });

    if (!defaultRefResponse.ok) {
      throw new Error(
        `Failed to get default branch reference: ${defaultRefResponse.status}`,
      );
    }

    const defaultRefData = (await defaultRefResponse.json()) as GitHubRef;
    baseSha = defaultRefData.object.sha;
  } else {
    const baseRefData = (await baseRefResponse.json()) as GitHubRef;
    baseSha = baseRefData.object.sha;
  }

  // Create the new branch using the same pattern as octokit
  const createRefUrl = `${GITHUB_API_URL}/repos/${owner}/${repo}/git/refs`;
  const createRefResponse = await fetch(createRefUrl, {
    method: "POST",
    headers: {
      Accept: "application/vnd.github+json",
      Authorization: `Bearer ${githubToken}`,
      "X-GitHub-Api-Version": "2022-11-28",
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      ref: `refs/heads/${branch}`,
      sha: baseSha,
    }),
  });

  if (!createRefResponse.ok) {
    const errorText = await createRefResponse.text();
    throw new Error(
      `Failed to create branch: ${createRefResponse.status} - ${errorText}`,
    );
  }

  console.log(`Successfully created branch ${branch}`);
  return baseSha;
}

/**
 * Create a commit via GitHub API with the given files (for signed commits)
 * Handles both file updates and deletions
 * Used when commit signing is enabled - GitHub Apps can create signed commits via API
 */
async function createCommitViaAPI(
  owner: string,
  repo: string,
  branch: string,
  files: Array<string | FileEntry>,
  message: string,
  REPO_DIR: string = process.cwd(),
): Promise<CommitResult> {
  const githubToken = process.env.GITHUB_TOKEN;
  if (!githubToken) {
    throw new Error("GITHUB_TOKEN environment variable is required");
  }

  // Normalize file entries
  const fileEntries: FileEntry[] = files.map((f) => {
    if (typeof f === "string") {
      // Legacy string path format
      const path = f.startsWith("/") ? f.slice(1) : f;
      return { path, deleted: false };
    }
    // Already a FileEntry
    const path = f.path.startsWith("/") ? f.path.slice(1) : f.path;
    return { ...f, path };
  });

  // 1. Get the branch reference (create if doesn't exist)
  const baseSha = await getOrCreateBranchRefViaAPI(
    owner,
    repo,
    branch,
    githubToken,
  );

  // 2. Get the base commit
  const commitUrl = `${GITHUB_API_URL}/repos/${owner}/${repo}/git/commits/${baseSha}`;
  const commitResponse = await fetch(commitUrl, {
    headers: {
      Accept: "application/vnd.github+json",
      Authorization: `Bearer ${githubToken}`,
      "X-GitHub-Api-Version": "2022-11-28",
    },
  });

  if (!commitResponse.ok) {
    throw new Error(`Failed to get base commit: ${commitResponse.status}`);
  }

  const commitData = (await commitResponse.json()) as GitHubCommit;
  const baseTreeSha = commitData.tree.sha;

  // 3. Create tree entries for all files
  const treeEntries = await Promise.all(
    fileEntries.map(async (fileEntry) => {
      const { path: filePath, deleted } = fileEntry;

      // Handle deleted files by setting SHA to null
      if (deleted) {
        return {
          path: filePath,
          mode: "100644",
          type: "blob" as const,
          sha: null,
        };
      }

      const fullPath = filePath.startsWith("/")
        ? filePath
        : join(REPO_DIR, filePath);

      // Check if file is binary (images, etc.)
      const isBinaryFile =
        /\.(png|jpg|jpeg|gif|webp|ico|pdf|zip|tar|gz|exe|bin|woff|woff2|ttf|eot)$/i.test(
          filePath,
        );

      if (isBinaryFile) {
        // For binary files, create a blob first using the Blobs API
        const binaryContent = await readFile(fullPath);

        // Create blob using Blobs API (supports encoding parameter)
        const blobUrl = `${GITHUB_API_URL}/repos/${owner}/${repo}/git/blobs`;
        const blobResponse = await fetch(blobUrl, {
          method: "POST",
          headers: {
            Accept: "application/vnd.github+json",
            Authorization: `Bearer ${githubToken}`,
            "X-GitHub-Api-Version": "2022-11-28",
            "Content-Type": "application/json",
          },
          body: JSON.stringify({
            content: binaryContent.toString("base64"),
            encoding: "base64",
          }),
        });

        if (!blobResponse.ok) {
          const errorText = await blobResponse.text();
          throw new Error(
            `Failed to create blob for ${filePath}: ${blobResponse.status} - ${errorText}`,
          );
        }

        const blobData = (await blobResponse.json()) as { sha: string };

        // Return tree entry with blob SHA
        return {
          path: filePath,
          mode: "100644",
          type: "blob" as const,
          sha: blobData.sha,
        };
      } else {
        // For text files, include content directly in tree
        const content = await readFile(fullPath, "utf-8");
        return {
          path: filePath,
          mode: "100644",
          type: "blob" as const,
          content: content,
        };
      }
    }),
  );

  // 4. Create a new tree
  const treeUrl = `${GITHUB_API_URL}/repos/${owner}/${repo}/git/trees`;
  const treeResponse = await fetch(treeUrl, {
    method: "POST",
    headers: {
      Accept: "application/vnd.github+json",
      Authorization: `Bearer ${githubToken}`,
      "X-GitHub-Api-Version": "2022-11-28",
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      base_tree: baseTreeSha,
      tree: treeEntries,
    }),
  });

  if (!treeResponse.ok) {
    const errorText = await treeResponse.text();
    throw new Error(
      `Failed to create tree: ${treeResponse.status} - ${errorText}`,
    );
  }

  const treeData = (await treeResponse.json()) as GitHubTree;

  // 5. Create a new commit
  const newCommitUrl = `${GITHUB_API_URL}/repos/${owner}/${repo}/git/commits`;
  const newCommitResponse = await fetch(newCommitUrl, {
    method: "POST",
    headers: {
      Accept: "application/vnd.github+json",
      Authorization: `Bearer ${githubToken}`,
      "X-GitHub-Api-Version": "2022-11-28",
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      message: message,
      tree: treeData.sha,
      parents: [baseSha],
    }),
  });

  if (!newCommitResponse.ok) {
    const errorText = await newCommitResponse.text();
    throw new Error(
      `Failed to create commit: ${newCommitResponse.status} - ${errorText}`,
    );
  }

  const newCommitData = (await newCommitResponse.json()) as GitHubNewCommit;

  // 6. Update the reference to point to the new commit
  const updateRefUrl = `${GITHUB_API_URL}/repos/${owner}/${repo}/git/refs/heads/${branch}`;

  // We're seeing intermittent 403 "Resource not accessible by integration" errors
  // on certain repos when updating git references. These appear to be transient
  // GitHub API issues that succeed on retry.
  await retryWithBackoff(
    async () => {
      const updateRefResponse = await fetch(updateRefUrl, {
        method: "PATCH",
        headers: {
          Accept: "application/vnd.github+json",
          Authorization: `Bearer ${githubToken}`,
          "X-GitHub-Api-Version": "2022-11-28",
          "Content-Type": "application/json",
        },
        body: JSON.stringify({
          sha: newCommitData.sha,
          force: false,
        }),
      });
||||||
|
|
||||||
if (!updateRefResponse.ok) {
|
|
||||||
const errorText = await updateRefResponse.text();
|
|
||||||
const error = new Error(
|
|
||||||
`Failed to update reference: ${updateRefResponse.status} - ${errorText}`,
|
|
||||||
);
|
|
||||||
|
|
||||||
// Only retry on 403 errors - these are the intermittent failures we're targeting
|
|
||||||
if (updateRefResponse.status === 403) {
|
|
||||||
throw error;
|
|
||||||
}
|
|
||||||
|
|
||||||
// For non-403 errors, fail immediately without retry
|
|
||||||
console.error("Non-retryable error:", updateRefResponse.status);
|
|
||||||
throw error;
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
maxAttempts: 3,
|
|
||||||
initialDelayMs: 1000, // Start with 1 second delay
|
|
||||||
maxDelayMs: 5000, // Max 5 seconds delay
|
|
||||||
backoffFactor: 2, // Double the delay each time
|
|
||||||
},
|
|
||||||
);
|
|
||||||
|
|
||||||
return {
|
|
||||||
sha: newCommitData.sha,
|
|
||||||
message: newCommitData.message,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Commit uncommitted changes - automatically chooses API or CLI based on signing requirement
|
|
||||||
*
|
|
||||||
* @param useCommitSigning - If true, uses GitHub API for signed commits. If false, uses git CLI.
|
|
||||||
*/
|
|
||||||
export async function commitUncommittedChanges(
|
|
||||||
owner: string,
|
|
||||||
repo: string,
|
|
||||||
branch: string,
|
|
||||||
useCommitSigning: boolean,
|
|
||||||
): Promise<CommitResult | null> {
|
|
||||||
try {
|
|
||||||
// Check for uncommitted changes
|
|
||||||
const gitStatus = await $`git status --porcelain`.quiet();
|
|
||||||
const hasUncommittedChanges = gitStatus.stdout.toString().trim().length > 0;
|
|
||||||
|
|
||||||
if (!hasUncommittedChanges) {
|
|
||||||
console.log("No uncommitted changes found");
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
console.log("Found uncommitted changes, committing them...");
|
|
||||||
|
|
||||||
const runId = process.env.GITHUB_RUN_ID || "unknown";
|
|
||||||
const commitMessage = `Auto-commit: Save uncommitted changes from Claude\n\nRun ID: ${runId}`;
|
|
||||||
|
|
||||||
if (useCommitSigning) {
|
|
||||||
// Use GitHub API when commit signing is required
|
|
||||||
console.log("Using GitHub API for signed commit...");
|
|
||||||
|
|
||||||
const files = await getUncommittedFiles();
|
|
||||||
|
|
||||||
if (files.length === 0) {
|
|
||||||
console.log("No files to commit");
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
return await createCommitViaAPI(
|
|
||||||
owner,
|
|
||||||
repo,
|
|
||||||
branch,
|
|
||||||
files,
|
|
||||||
commitMessage,
|
|
||||||
);
|
|
||||||
} else {
|
|
||||||
// Use git CLI when commit signing is not required
|
|
||||||
console.log("Using git CLI for unsigned commit...");
|
|
||||||
|
|
||||||
// Add all changes
|
|
||||||
await $`git add -A`;
|
|
||||||
|
|
||||||
// Commit with a descriptive message
|
|
||||||
await $`git commit -m ${commitMessage}`;
|
|
||||||
|
|
||||||
// Push the changes
|
|
||||||
await $`git push origin ${branch}`;
|
|
||||||
|
|
||||||
console.log("✅ Successfully committed and pushed uncommitted changes");
|
|
||||||
|
|
||||||
// Get the commit SHA
|
|
||||||
const commitSha = await $`git rev-parse HEAD`.quiet();
|
|
||||||
|
|
||||||
return {
|
|
||||||
sha: commitSha.stdout.toString().trim(),
|
|
||||||
message: commitMessage,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
} catch (error) {
|
|
||||||
// If we can't check git status (e.g., not in a git repo during tests), return null
|
|
||||||
console.error("Error checking/committing changes:", error);
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -3,11 +3,17 @@ import path from "path";
|
|||||||
import type { Octokits } from "../api/client";
|
import type { Octokits } from "../api/client";
|
||||||
import { GITHUB_SERVER_URL } from "../api/config";
|
import { GITHUB_SERVER_URL } from "../api/config";
|
||||||
|
|
||||||
|
const escapedUrl = GITHUB_SERVER_URL.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
|
||||||
const IMAGE_REGEX = new RegExp(
|
const IMAGE_REGEX = new RegExp(
|
||||||
`!\\[[^\\]]*\\]\\((${GITHUB_SERVER_URL.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")}\\/user-attachments\\/assets\\/[^)]+)\\)`,
|
`!\\[[^\\]]*\\]\\((${escapedUrl}\\/user-attachments\\/assets\\/[^)]+)\\)`,
|
||||||
"g",
|
"g",
|
||||||
);
|
);
|
||||||
|
|
||||||
|
const HTML_IMG_REGEX = new RegExp(
|
||||||
|
`<img[^>]+src=["']([^"']*${escapedUrl}\\/user-attachments\\/assets\\/[^"']+)["'][^>]*>`,
|
||||||
|
"gi",
|
||||||
|
);
|
||||||
|
|
||||||
type IssueComment = {
|
type IssueComment = {
|
||||||
type: "issue_comment";
|
type: "issue_comment";
|
||||||
id: string;
|
id: string;
|
||||||
@@ -63,8 +69,16 @@ export async function downloadCommentImages(
|
|||||||
}> = [];
|
}> = [];
|
||||||
|
|
||||||
for (const comment of comments) {
|
for (const comment of comments) {
|
||||||
const imageMatches = [...comment.body.matchAll(IMAGE_REGEX)];
|
// Extract URLs from Markdown format
|
||||||
const urls = imageMatches.map((match) => match[1] as string);
|
const markdownMatches = [...comment.body.matchAll(IMAGE_REGEX)];
|
||||||
|
const markdownUrls = markdownMatches.map((match) => match[1] as string);
|
||||||
|
|
||||||
|
// Extract URLs from HTML format
|
||||||
|
const htmlMatches = [...comment.body.matchAll(HTML_IMG_REGEX)];
|
||||||
|
const htmlUrls = htmlMatches.map((match) => match[1] as string);
|
||||||
|
|
||||||
|
// Combine and deduplicate URLs
|
||||||
|
const urls = [...new Set([...markdownUrls, ...htmlUrls])];
|
||||||
|
|
||||||
if (urls.length > 0) {
|
if (urls.length > 0) {
|
||||||
commentsWithImages.push({ comment, urls });
|
commentsWithImages.push({ comment, urls });
|
||||||
|
|||||||
@@ -3,20 +3,12 @@
|
|||||||
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
|
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
|
||||||
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
|
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
|
||||||
import { z } from "zod";
|
import { z } from "zod";
|
||||||
import { readFile, access, stat } from "fs/promises";
|
import { readFile } from "fs/promises";
|
||||||
import { join } from "path";
|
import { join } from "path";
|
||||||
import { constants } from "fs";
|
|
||||||
import { execFile } from "child_process";
|
|
||||||
import { promisify } from "util";
|
|
||||||
import fetch from "node-fetch";
|
import fetch from "node-fetch";
|
||||||
import { GITHUB_API_URL } from "../github/api/config";
|
import { GITHUB_API_URL } from "../github/api/config";
|
||||||
import { retryWithBackoff } from "../utils/retry";
|
import { retryWithBackoff } from "../utils/retry";
|
||||||
|
|
||||||
// NOTE: We should extract out common git utilities into a shared module
|
|
||||||
// as we need to perform these operations outside of an MCP server. (See git-common-utils.ts)
|
|
||||||
|
|
||||||
const execFileAsync = promisify(execFile);
|
|
||||||
|
|
||||||
type GitHubRef = {
|
type GitHubRef = {
|
||||||
object: {
|
object: {
|
||||||
sha: string;
|
sha: string;
|
||||||
@@ -170,77 +162,6 @@ async function getOrCreateBranchRef(
|
|||||||
return baseSha;
|
return baseSha;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the appropriate Git file mode for a file
|
|
||||||
async function getFileMode(filePath: string): Promise<string> {
|
|
||||||
try {
|
|
||||||
const fileStat = await stat(filePath);
|
|
||||||
|
|
||||||
if (fileStat.isFile()) {
|
|
||||||
// Check if execute bit is set for user
|
|
||||||
if (fileStat.mode & constants.S_IXUSR) {
|
|
||||||
return "100755"; // Executable file
|
|
||||||
} else {
|
|
||||||
return "100644"; // Regular file
|
|
||||||
}
|
|
||||||
} else if (fileStat.isDirectory()) {
|
|
||||||
return "040000"; // Directory (tree)
|
|
||||||
} else if (fileStat.isSymbolicLink()) {
|
|
||||||
return "120000"; // Symbolic link
|
|
||||||
} else {
|
|
||||||
// Fallback for unknown file types
|
|
||||||
return "100644";
|
|
||||||
}
|
|
||||||
} catch (error) {
|
|
||||||
// If we can't stat the file, default to regular file
|
|
||||||
console.warn(
|
|
||||||
`Could not determine file mode for ${filePath}, using default: ${error}`,
|
|
||||||
);
|
|
||||||
return "100644";
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Helper function to run pre-commit hooks
|
|
||||||
async function runPreCommitHooks(repoDir: string): Promise<void> {
|
|
||||||
const hookPath = join(repoDir, ".git", "hooks", "pre-commit");
|
|
||||||
|
|
||||||
try {
|
|
||||||
// Check if pre-commit hook exists and is executable
|
|
||||||
await access(hookPath);
|
|
||||||
|
|
||||||
console.log("Running pre-commit hook...");
|
|
||||||
|
|
||||||
// Execute the pre-commit hook
|
|
||||||
const { stdout, stderr } = await execFileAsync(hookPath, [], {
|
|
||||||
cwd: repoDir,
|
|
||||||
env: {
|
|
||||||
...process.env,
|
|
||||||
GIT_INDEX_FILE: join(repoDir, ".git", "index"),
|
|
||||||
GIT_DIR: join(repoDir, ".git"),
|
|
||||||
},
|
|
||||||
});
|
|
||||||
|
|
||||||
if (stdout) console.log("Pre-commit hook stdout:", stdout);
|
|
||||||
if (stderr) console.log("Pre-commit hook stderr:", stderr);
|
|
||||||
|
|
||||||
console.log("Pre-commit hook passed");
|
|
||||||
} catch (error: any) {
|
|
||||||
if (error.code === "ENOENT") {
|
|
||||||
// Hook doesn't exist, that's fine
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (error.code === "EACCES") {
|
|
||||||
console.log("Pre-commit hook exists but is not executable, skipping");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hook failed with non-zero exit code
|
|
||||||
const errorMessage =
|
|
||||||
error.stderr || error.message || "Pre-commit hook failed";
|
|
||||||
throw new Error(`Pre-commit hook failed: ${errorMessage}`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Commit files tool
|
// Commit files tool
|
||||||
server.tool(
|
server.tool(
|
||||||
"commit_files",
|
"commit_files",
|
||||||
@@ -252,12 +173,8 @@ server.tool(
|
|||||||
'Array of file paths relative to repository root (e.g. ["src/main.js", "README.md"]). All files must exist locally.',
|
'Array of file paths relative to repository root (e.g. ["src/main.js", "README.md"]). All files must exist locally.',
|
||||||
),
|
),
|
||||||
message: z.string().describe("Commit message"),
|
message: z.string().describe("Commit message"),
|
||||||
noVerify: z
|
|
||||||
.boolean()
|
|
||||||
.optional()
|
|
||||||
.describe("Skip pre-commit hooks (equivalent to git commit --no-verify)"),
|
|
||||||
},
|
},
|
||||||
async ({ files, message, noVerify }) => {
|
async ({ files, message }) => {
|
||||||
const owner = REPO_OWNER;
|
const owner = REPO_OWNER;
|
||||||
const repo = REPO_NAME;
|
const repo = REPO_NAME;
|
||||||
const branch = BRANCH_NAME;
|
const branch = BRANCH_NAME;
|
||||||
@@ -267,11 +184,6 @@ server.tool(
|
|||||||
throw new Error("GITHUB_TOKEN environment variable is required");
|
throw new Error("GITHUB_TOKEN environment variable is required");
|
||||||
}
|
}
|
||||||
|
|
||||||
// Run pre-commit hooks unless explicitly skipped
|
|
||||||
if (!noVerify) {
|
|
||||||
await runPreCommitHooks(REPO_DIR);
|
|
||||||
}
|
|
||||||
|
|
||||||
const processedFiles = files.map((filePath) => {
|
const processedFiles = files.map((filePath) => {
|
||||||
if (filePath.startsWith("/")) {
|
if (filePath.startsWith("/")) {
|
||||||
return filePath.slice(1);
|
return filePath.slice(1);
|
||||||
@@ -311,9 +223,6 @@ server.tool(
|
|||||||
? filePath
|
? filePath
|
||||||
: join(REPO_DIR, filePath);
|
: join(REPO_DIR, filePath);
|
||||||
|
|
||||||
// Get the proper file mode based on file permissions
|
|
||||||
const fileMode = await getFileMode(fullPath);
|
|
||||||
|
|
||||||
// Check if file is binary (images, etc.)
|
// Check if file is binary (images, etc.)
|
||||||
const isBinaryFile =
|
const isBinaryFile =
|
||||||
/\.(png|jpg|jpeg|gif|webp|ico|pdf|zip|tar|gz|exe|bin|woff|woff2|ttf|eot)$/i.test(
|
/\.(png|jpg|jpeg|gif|webp|ico|pdf|zip|tar|gz|exe|bin|woff|woff2|ttf|eot)$/i.test(
|
||||||
@@ -352,7 +261,7 @@ server.tool(
|
|||||||
// Return tree entry with blob SHA
|
// Return tree entry with blob SHA
|
||||||
return {
|
return {
|
||||||
path: filePath,
|
path: filePath,
|
||||||
mode: fileMode,
|
mode: "100644",
|
||||||
type: "blob",
|
type: "blob",
|
||||||
sha: blobData.sha,
|
sha: blobData.sha,
|
||||||
};
|
};
|
||||||
@@ -361,7 +270,7 @@ server.tool(
|
|||||||
const content = await readFile(fullPath, "utf-8");
|
const content = await readFile(fullPath, "utf-8");
|
||||||
return {
|
return {
|
||||||
path: filePath,
|
path: filePath,
|
||||||
mode: fileMode,
|
mode: "100644",
|
||||||
type: "blob",
|
type: "blob",
|
||||||
content: content,
|
content: content,
|
||||||
};
|
};
|
||||||
|
|||||||
178
src/mcp/github-inline-comment-server.ts
Normal file
178
src/mcp/github-inline-comment-server.ts
Normal file
@@ -0,0 +1,178 @@
|
|||||||
|
#!/usr/bin/env node
|
||||||
|
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
|
||||||
|
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
|
||||||
|
import { z } from "zod";
|
||||||
|
import { createOctokit } from "../github/api/client";
|
||||||
|
|
||||||
|
// Get repository and PR information from environment variables
|
||||||
|
const REPO_OWNER = process.env.REPO_OWNER;
|
||||||
|
const REPO_NAME = process.env.REPO_NAME;
|
||||||
|
const PR_NUMBER = process.env.PR_NUMBER;
|
||||||
|
|
||||||
|
if (!REPO_OWNER || !REPO_NAME || !PR_NUMBER) {
|
||||||
|
console.error(
|
||||||
|
"Error: REPO_OWNER, REPO_NAME, and PR_NUMBER environment variables are required",
|
||||||
|
);
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// GitHub Inline Comment MCP Server - Provides inline PR comment functionality
|
||||||
|
// Provides an inline comment tool without exposing full PR review capabilities, so that
|
||||||
|
// Claude can't accidentally approve a PR
|
||||||
|
const server = new McpServer({
|
||||||
|
name: "GitHub Inline Comment Server",
|
||||||
|
version: "0.0.1",
|
||||||
|
});
|
||||||
|
|
||||||
|
server.tool(
|
||||||
|
"create_inline_comment",
|
||||||
|
"Create an inline comment on a specific line or lines in a PR file",
|
||||||
|
{
|
||||||
|
path: z
|
||||||
|
.string()
|
||||||
|
.describe("The file path to comment on (e.g., 'src/index.js')"),
|
||||||
|
body: z
|
||||||
|
.string()
|
||||||
|
.describe(
|
||||||
|
"The comment text (supports markdown and GitHub code suggestion blocks). " +
|
||||||
|
"For code suggestions, use: ```suggestion\\nreplacement code\\n```. " +
|
||||||
|
"IMPORTANT: The suggestion block will REPLACE the ENTIRE line range (single line or startLine to line). " +
|
||||||
|
"Ensure the replacement is syntactically complete and valid - it must work as a drop-in replacement for the selected lines.",
|
||||||
|
),
|
||||||
|
line: z
|
||||||
|
.number()
|
||||||
|
.optional()
|
||||||
|
.describe(
|
||||||
|
"Line number for single-line comments (required if startLine is not provided)",
|
||||||
|
),
|
||||||
|
startLine: z
|
||||||
|
.number()
|
||||||
|
.optional()
|
||||||
|
.describe(
|
||||||
|
"Start line for multi-line comments (use with line parameter for the end line)",
|
||||||
|
),
|
||||||
|
side: z
|
||||||
|
.enum(["LEFT", "RIGHT"])
|
||||||
|
.optional()
|
||||||
|
.default("RIGHT")
|
||||||
|
.describe(
|
||||||
|
"Side of the diff to comment on: LEFT (old code) or RIGHT (new code)",
|
||||||
|
),
|
||||||
|
commit_id: z
|
||||||
|
.string()
|
||||||
|
.optional()
|
||||||
|
.describe(
|
||||||
|
"Specific commit SHA to comment on (defaults to latest commit)",
|
||||||
|
),
|
||||||
|
},
|
||||||
|
async ({ path, body, line, startLine, side, commit_id }) => {
|
||||||
|
try {
|
||||||
|
const githubToken = process.env.GITHUB_TOKEN;
|
||||||
|
|
||||||
|
if (!githubToken) {
|
||||||
|
throw new Error("GITHUB_TOKEN environment variable is required");
|
||||||
|
}
|
||||||
|
|
||||||
|
const owner = REPO_OWNER;
|
||||||
|
const repo = REPO_NAME;
|
||||||
|
const pull_number = parseInt(PR_NUMBER, 10);
|
||||||
|
|
||||||
|
const octokit = createOctokit(githubToken).rest;
|
||||||
|
|
||||||
|
// Validate that either line or both startLine and line are provided
|
||||||
|
if (!line && !startLine) {
|
||||||
|
throw new Error(
|
||||||
|
"Either 'line' for single-line comments or both 'startLine' and 'line' for multi-line comments must be provided",
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// If only line is provided, it's a single-line comment
|
||||||
|
// If both startLine and line are provided, it's a multi-line comment
|
||||||
|
const isSingleLine = !startLine;
|
||||||
|
|
||||||
|
const pr = await octokit.pulls.get({
|
||||||
|
owner,
|
||||||
|
repo,
|
||||||
|
pull_number,
|
||||||
|
});
|
||||||
|
|
||||||
|
const params: Parameters<
|
||||||
|
typeof octokit.rest.pulls.createReviewComment
|
||||||
|
>[0] = {
|
||||||
|
owner,
|
||||||
|
repo,
|
||||||
|
pull_number,
|
||||||
|
body,
|
||||||
|
path,
|
||||||
|
side: side || "RIGHT",
|
||||||
|
commit_id: commit_id || pr.data.head.sha,
|
||||||
|
};
|
||||||
|
|
||||||
|
if (isSingleLine) {
|
||||||
|
// Single-line comment
|
||||||
|
params.line = line;
|
||||||
|
} else {
|
||||||
|
// Multi-line comment
|
||||||
|
params.start_line = startLine;
|
||||||
|
params.start_side = side || "RIGHT";
|
||||||
|
params.line = line;
|
||||||
|
}
|
||||||
|
|
||||||
|
const result = await octokit.rest.pulls.createReviewComment(params);
|
||||||
|
|
||||||
|
return {
|
||||||
|
content: [
|
||||||
|
{
|
||||||
|
type: "text",
|
||||||
|
text: JSON.stringify(
|
||||||
|
{
|
||||||
|
success: true,
|
||||||
|
comment_id: result.data.id,
|
||||||
|
html_url: result.data.html_url,
|
||||||
|
path: result.data.path,
|
||||||
|
line: result.data.line || result.data.original_line,
|
||||||
|
message: `Inline comment created successfully on ${path}${isSingleLine ? ` at line ${line}` : ` from line ${startLine} to ${line}`}`,
|
||||||
|
},
|
||||||
|
null,
|
||||||
|
2,
|
||||||
|
),
|
||||||
|
},
|
||||||
|
],
|
||||||
|
};
|
||||||
|
} catch (error) {
|
||||||
|
const errorMessage =
|
||||||
|
error instanceof Error ? error.message : String(error);
|
||||||
|
|
||||||
|
// Provide more helpful error messages for common issues
|
||||||
|
let helpMessage = "";
|
||||||
|
if (errorMessage.includes("Validation Failed")) {
|
||||||
|
helpMessage =
|
||||||
|
"\n\nThis usually means the line number doesn't exist in the diff or the file path is incorrect. Make sure you're commenting on lines that are part of the PR's changes.";
|
||||||
|
} else if (errorMessage.includes("Not Found")) {
|
||||||
|
helpMessage =
|
||||||
|
"\n\nThis usually means the PR number, repository, or file path is incorrect.";
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
content: [
|
||||||
|
{
|
||||||
|
type: "text",
|
||||||
|
text: `Error creating inline comment: ${errorMessage}${helpMessage}`,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
error: errorMessage,
|
||||||
|
isError: true,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
async function runServer() {
|
||||||
|
const transport = new StdioServerTransport();
|
||||||
|
await server.connect(transport);
|
||||||
|
process.on("exit", () => {
|
||||||
|
server.close();
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
runServer().catch(console.error);
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
import * as core from "@actions/core";
|
import * as core from "@actions/core";
|
||||||
import { GITHUB_API_URL, GITHUB_SERVER_URL } from "../github/api/config";
|
import { GITHUB_API_URL, GITHUB_SERVER_URL } from "../github/api/config";
|
||||||
import type { GitHubContext } from "../github/context";
|
import type { ParsedGitHubContext } from "../github/context";
|
||||||
import { Octokit } from "@octokit/rest";
|
import { Octokit } from "@octokit/rest";
|
||||||
|
|
||||||
type PrepareConfigParams = {
|
type PrepareConfigParams = {
|
||||||
@@ -12,7 +12,7 @@ type PrepareConfigParams = {
|
|||||||
additionalMcpConfig?: string;
|
additionalMcpConfig?: string;
|
||||||
claudeCommentId?: string;
|
claudeCommentId?: string;
|
||||||
allowedTools: string[];
|
allowedTools: string[];
|
||||||
context: GitHubContext;
|
context: ParsedGitHubContext;
|
||||||
};
|
};
|
||||||
|
|
||||||
async function checkActionsReadPermission(
|
async function checkActionsReadPermission(
|
||||||
@@ -73,23 +73,21 @@ export async function prepareMcpConfig(
|
|||||||
};
|
};
|
||||||
|
|
||||||
// Always include comment server for updating Claude comments
|
// Always include comment server for updating Claude comments
|
||||||
if (context.inputs.mode === "tag") {
|
baseMcpConfig.mcpServers.github_comment = {
|
||||||
baseMcpConfig.mcpServers.github_comment = {
|
command: "bun",
|
||||||
command: "bun",
|
args: [
|
||||||
args: [
|
"run",
|
||||||
"run",
|
`${process.env.GITHUB_ACTION_PATH}/src/mcp/github-comment-server.ts`,
|
||||||
`${process.env.GITHUB_ACTION_PATH}/src/mcp/github-comment-server.ts`,
|
],
|
||||||
],
|
env: {
|
||||||
env: {
|
GITHUB_TOKEN: githubToken,
|
||||||
GITHUB_TOKEN: githubToken,
|
REPO_OWNER: owner,
|
||||||
REPO_OWNER: owner,
|
REPO_NAME: repo,
|
||||||
REPO_NAME: repo,
|
...(claudeCommentId && { CLAUDE_COMMENT_ID: claudeCommentId }),
|
||||||
...(claudeCommentId && { CLAUDE_COMMENT_ID: claudeCommentId }),
|
GITHUB_EVENT_NAME: process.env.GITHUB_EVENT_NAME || "",
|
||||||
GITHUB_EVENT_NAME: process.env.GITHUB_EVENT_NAME || "",
|
GITHUB_API_URL: GITHUB_API_URL,
|
||||||
GITHUB_API_URL: GITHUB_API_URL,
|
},
|
||||||
},
|
};
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
// Include file ops server when commit signing is enabled
|
// Include file ops server when commit signing is enabled
|
||||||
if (context.inputs.useCommitSigning) {
|
if (context.inputs.useCommitSigning) {
|
||||||
@@ -113,6 +111,24 @@ export async function prepareMcpConfig(
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Include inline comment server for experimental review mode
|
||||||
|
if (context.inputs.mode === "experimental-review" && context.isPR) {
|
||||||
|
baseMcpConfig.mcpServers.github_inline_comment = {
|
||||||
|
command: "bun",
|
||||||
|
args: [
|
||||||
|
"run",
|
||||||
|
`${process.env.GITHUB_ACTION_PATH}/src/mcp/github-inline-comment-server.ts`,
|
||||||
|
],
|
||||||
|
env: {
|
||||||
|
GITHUB_TOKEN: githubToken,
|
||||||
|
REPO_OWNER: owner,
|
||||||
|
REPO_NAME: repo,
|
||||||
|
PR_NUMBER: context.entityNumber?.toString() || "",
|
||||||
|
GITHUB_API_URL: GITHUB_API_URL,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
// Only add CI server if we have actions:read permission and we're in a PR context
|
// Only add CI server if we have actions:read permission and we're in a PR context
|
||||||
const hasActionsReadPermission =
|
const hasActionsReadPermission =
|
||||||
context.inputs.additionalPermissions.get("actions") === "read";
|
context.inputs.additionalPermissions.get("actions") === "read";
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
import * as core from "@actions/core";
|
import * as core from "@actions/core";
|
||||||
|
import { mkdir, writeFile } from "fs/promises";
|
||||||
import type { Mode, ModeOptions, ModeResult } from "../types";
|
import type { Mode, ModeOptions, ModeResult } from "../types";
|
||||||
import { isAutomationContext } from "../../github/context";
|
import { isAutomationContext } from "../../github/context";
|
||||||
import type { PreparedContext } from "../../create-prompt/types";
|
import type { PreparedContext } from "../../create-prompt/types";
|
||||||
@@ -42,7 +43,23 @@ export const agentMode: Mode = {
|
|||||||
async prepare({ context }: ModeOptions): Promise<ModeResult> {
|
async prepare({ context }: ModeOptions): Promise<ModeResult> {
|
||||||
// Agent mode handles automation events (workflow_dispatch, schedule) only
|
// Agent mode handles automation events (workflow_dispatch, schedule) only
|
||||||
|
|
||||||
// Agent mode doesn't need to create prompt files here - handled by createPrompt
|
// TODO: handle by createPrompt (similar to tag and review modes)
|
||||||
|
// Create prompt directory
|
||||||
|
await mkdir(`${process.env.RUNNER_TEMP}/claude-prompts`, {
|
||||||
|
recursive: true,
|
||||||
|
});
|
||||||
|
// Write the prompt file - the base action requires a prompt_file parameter,
|
||||||
|
// so we must create this file even though agent mode typically uses
|
||||||
|
// override_prompt or direct_prompt. If neither is provided, we write
|
||||||
|
// a minimal prompt with just the repository information.
|
||||||
|
const promptContent =
|
||||||
|
context.inputs.overridePrompt ||
|
||||||
|
context.inputs.directPrompt ||
|
||||||
|
`Repository: ${context.repository.owner}/${context.repository.repo}`;
|
||||||
|
await writeFile(
|
||||||
|
`${process.env.RUNNER_TEMP}/claude-prompts/claude-prompt.txt`,
|
||||||
|
promptContent,
|
||||||
|
);
|
||||||
|
|
||||||
// Export tool environment variables for agent mode
|
// Export tool environment variables for agent mode
|
||||||
const baseTools = [
|
const baseTools = [
|
||||||
|
|||||||
@@ -16,15 +16,9 @@ import { agentMode } from "./agent";
|
|||||||
import { reviewMode } from "./review";
|
import { reviewMode } from "./review";
|
||||||
import type { GitHubContext } from "../github/context";
|
import type { GitHubContext } from "../github/context";
|
||||||
import { isAutomationContext } from "../github/context";
|
import { isAutomationContext } from "../github/context";
|
||||||
import { remoteAgentMode } from "./remote-agent";
|
|
||||||
|
|
||||||
export const DEFAULT_MODE = "tag" as const;
|
export const DEFAULT_MODE = "tag" as const;
|
||||||
export const VALID_MODES = [
|
export const VALID_MODES = ["tag", "agent", "experimental-review"] as const;
|
||||||
"tag",
|
|
||||||
"agent",
|
|
||||||
"remote-agent",
|
|
||||||
"experimental-review",
|
|
||||||
] as const;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* All available modes.
|
* All available modes.
|
||||||
@@ -34,7 +28,6 @@ const modes = {
|
|||||||
tag: tagMode,
|
tag: tagMode,
|
||||||
agent: agentMode,
|
agent: agentMode,
|
||||||
"experimental-review": reviewMode,
|
"experimental-review": reviewMode,
|
||||||
"remote-agent": remoteAgentMode,
|
|
||||||
} as const satisfies Record<ModeName, Mode>;
|
} as const satisfies Record<ModeName, Mode>;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -56,13 +49,7 @@ export function getMode(name: ModeName, context: GitHubContext): Mode {
|
|||||||
// Validate mode can handle the event type
|
// Validate mode can handle the event type
|
||||||
if (name === "tag" && isAutomationContext(context)) {
|
if (name === "tag" && isAutomationContext(context)) {
|
||||||
throw new Error(
|
throw new Error(
|
||||||
`Tag mode cannot handle ${context.eventName} events. Use 'agent' mode for automation events or 'remote-agent' mode for repository_dispatch events.`,
|
`Tag mode cannot handle ${context.eventName} events. Use 'agent' mode for automation events.`,
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (name === "remote-agent" && context.eventName !== "repository_dispatch") {
|
|
||||||
throw new Error(
|
|
||||||
`Remote agent mode can only handle repository_dispatch events. Use 'tag' mode for @claude mentions or 'agent' mode for other automation events.`,
|
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,157 +0,0 @@
|
|||||||
/**
|
|
||||||
* Branch handling for remote-agent mode with resume support
|
|
||||||
*/
|
|
||||||
|
|
||||||
import { $ } from "bun";
|
|
||||||
import * as core from "@actions/core";
|
|
||||||
import type { GitHubContext } from "../../github/context";
|
|
||||||
import type { Octokits } from "../../github/api/client";
|
|
||||||
import type { ResumeResponse, ResumeResult } from "../../types/resume";
|
|
||||||
import {
|
|
||||||
setupBranch as setupBaseBranch,
|
|
||||||
type BranchInfo,
|
|
||||||
} from "../../github/operations/branch";
|
|
||||||
|
|
||||||
export type RemoteBranchInfo = BranchInfo & {
|
|
||||||
resumeMessages?: ResumeResult["messages"];
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Attempts to resume from an existing session using the resume endpoint
|
|
||||||
* @param resumeEndpoint The URL to fetch the resume data from
|
|
||||||
* @param headers Headers to include in the request (including auth)
|
|
||||||
* @returns ResumeResult if successful, null otherwise
|
|
||||||
*/
|
|
||||||
async function fetchResumeData(
|
|
||||||
resumeEndpoint: string,
|
|
||||||
headers?: Record<string, string>,
|
|
||||||
): Promise<ResumeResult | null> {
|
|
||||||
try {
|
|
||||||
console.log(`Attempting to resume from: ${resumeEndpoint}`);
|
|
||||||
|
|
||||||
const response = await fetch(resumeEndpoint, {
|
|
||||||
method: "GET",
|
|
||||||
headers: headers || {},
|
|
||||||
});
|
|
||||||
|
|
||||||
if (!response.ok) {
|
|
||||||
console.log(
|
|
||||||
`Resume endpoint returned ${response.status}: ${response.statusText}`,
|
|
||||||
);
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
const data = (await response.json()) as ResumeResponse;
|
|
||||||
|
|
||||||
if (!data.log || !Array.isArray(data.log)) {
|
|
||||||
console.log("Resume endpoint returned invalid data structure");
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
console.log(
|
|
||||||
`Successfully fetched resume data with ${data.log.length} messages`,
|
|
||||||
);
|
|
||||||
|
|
||||||
// If a branch is specified in the response, we'll use it
|
|
||||||
// Otherwise, we'll determine the branch from the current git state
|
|
||||||
const branchName = data.branch || "";
|
|
||||||
|
|
||||||
return {
|
|
||||||
messages: data.log,
|
|
||||||
branchName,
|
|
||||||
};
|
|
||||||
} catch (error) {
|
|
||||||
console.error("Failed to fetch resume data:", error);
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Setup branch for remote-agent mode with resume support
|
|
||||||
* @param octokits GitHub API clients
|
|
||||||
* @param context GitHub context
|
|
||||||
* @param oidcToken OIDC token for authentication
|
|
||||||
* @returns Branch information with optional resume messages
|
|
||||||
*/
|
|
||||||
export async function setupBranchWithResume(
|
|
||||||
octokits: Octokits,
|
|
||||||
context: GitHubContext,
|
|
||||||
oidcToken: string,
|
|
||||||
): Promise<RemoteBranchInfo> {
|
|
||||||
const { owner, repo } = context.repository;
|
|
||||||
const { baseBranch } = context.inputs;
|
|
||||||
|
|
||||||
// Check if we have a resume endpoint
|
|
||||||
if (context.progressTracking?.resumeEndpoint) {
|
|
||||||
console.log("Resume endpoint detected, attempting to resume session...");
|
|
||||||
|
|
||||||
// Prepare headers with OIDC token
|
|
||||||
const headers: Record<string, string> = {
|
|
||||||
...(context.progressTracking.headers || {}),
|
|
||||||
Authorization: `Bearer ${oidcToken}`,
|
|
||||||
};
|
|
||||||
|
|
||||||
const resumeData = await fetchResumeData(
|
|
||||||
context.progressTracking.resumeEndpoint,
|
|
||||||
headers,
|
|
||||||
);
|
|
||||||
|
|
||||||
if (resumeData && resumeData.branchName) {
|
|
||||||
// Try to checkout the resumed branch
|
|
||||||
try {
|
|
||||||
console.log(`Resuming on branch: ${resumeData.branchName}`);
|
|
||||||
|
|
||||||
// Fetch the branch from origin
|
|
||||||
await $`git fetch origin ${resumeData.branchName}`;
|
|
||||||
|
|
||||||
// Checkout the branch
|
|
||||||
await $`git checkout ${resumeData.branchName}`;
|
|
||||||
|
|
||||||
console.log(`Successfully resumed on branch: ${resumeData.branchName}`);
|
|
||||||
|
|
||||||
// Get the base branch for this branch (we'll use the default branch as fallback)
|
|
||||||
let resumeBaseBranch = baseBranch;
|
|
||||||
if (!resumeBaseBranch) {
|
|
||||||
const repoResponse = await octokits.rest.repos.get({
|
|
||||||
owner,
|
|
||||||
repo,
|
|
||||||
});
|
|
||||||
resumeBaseBranch = repoResponse.data.default_branch;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set outputs for GitHub Actions
|
|
||||||
core.setOutput("CLAUDE_BRANCH", resumeData.branchName);
|
|
||||||
core.setOutput("BASE_BRANCH", resumeBaseBranch);
|
|
||||||
|
|
||||||
return {
|
|
||||||
baseBranch: resumeBaseBranch,
|
|
||||||
claudeBranch: resumeData.branchName,
|
|
||||||
currentBranch: resumeData.branchName,
|
|
||||||
resumeMessages: resumeData.messages,
|
|
||||||
};
|
|
||||||
} catch (error) {
|
|
||||||
console.error(
|
|
||||||
`Failed to checkout resumed branch ${resumeData.branchName}:`,
|
|
||||||
error,
|
|
||||||
);
|
|
||||||
console.log("Falling back to creating a new branch...");
|
|
||||||
// Fall through to normal branch creation
|
|
||||||
}
|
|
||||||
} else if (resumeData) {
|
|
||||||
console.log(
|
|
||||||
"Resume data fetched but no branch specified, will create new branch",
|
|
||||||
);
|
|
||||||
// We have messages but no branch, so we'll create a new branch
|
|
||||||
// but still pass along the messages
|
|
||||||
const branchInfo = await setupBaseBranch(octokits, null, context);
|
|
||||||
return {
|
|
||||||
...branchInfo,
|
|
||||||
resumeMessages: resumeData.messages,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// No resume endpoint or resume failed, use normal branch setup
|
|
||||||
console.log("No resume endpoint or resume failed, creating new branch...");
|
|
||||||
return setupBaseBranch(octokits, null, context);
|
|
||||||
}
|
|
||||||
@@ -1,444 +0,0 @@
|
|||||||
import * as core from "@actions/core";
|
|
||||||
import { mkdir, writeFile } from "fs/promises";
|
|
||||||
import type { Mode, ModeOptions, ModeResult } from "../types";
|
|
||||||
import { isRepositoryDispatchEvent } from "../../github/context";
|
|
||||||
import type { GitHubContext } from "../../github/context";
|
|
||||||
import { setupBranchWithResume } from "./branch";
|
|
||||||
import { prepareMcpConfig } from "../../mcp/install-mcp-server";
|
|
||||||
import { GITHUB_SERVER_URL } from "../../github/api/config";
|
|
||||||
import {
|
|
||||||
buildRemoteAgentAllowedToolsString,
|
|
||||||
buildDisallowedToolsString,
|
|
||||||
type PreparedContext,
|
|
||||||
} from "../../create-prompt";
|
|
||||||
import {
|
|
||||||
reportWorkflowInitialized,
|
|
||||||
reportClaudeStarting,
|
|
||||||
reportWorkflowFailed,
|
|
||||||
} from "./system-progress-handler";
|
|
||||||
import type { SystemProgressConfig } from "./progress-types";
|
|
||||||
import { fetchUserDisplayName } from "../../github/data/fetcher";
|
|
||||||
import { createOctokit } from "../../github/api/client";
|
|
||||||
import type { StreamConfig } from "../../types/stream-config";
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Fetches a Claude Code OAuth token from the specified endpoint using OIDC authentication
|
|
||||||
*/
|
|
||||||
async function fetchClaudeCodeOAuthToken(
|
|
||||||
oauthTokenEndpoint: string,
|
|
||||||
oidcToken?: string,
|
|
||||||
sessionId?: string,
|
|
||||||
): Promise<string> {
|
|
||||||
console.log(`Fetching Claude Code OAuth token from: ${oauthTokenEndpoint}`);
|
|
||||||
|
|
||||||
try {
|
|
||||||
if (!oidcToken) {
|
|
||||||
throw new Error("OIDC token is required for OAuth authentication");
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make request to OAuth token endpoint
|
|
||||||
const response = await fetch(oauthTokenEndpoint, {
|
|
||||||
method: "POST",
|
|
||||||
headers: {
|
|
||||||
Authorization: `Bearer ${oidcToken}`,
|
|
||||||
"Content-Type": "application/json",
|
|
||||||
},
|
|
||||||
body: JSON.stringify({
|
|
||||||
...(sessionId && { session_id: sessionId }),
|
|
||||||
}),
|
|
||||||
});
|
|
||||||
|
|
||||||
if (!response.ok) {
|
|
||||||
throw new Error(
|
|
||||||
`OAuth token request failed: ${response.status} ${response.statusText}`,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
const data = (await response.json()) as {
|
|
||||||
oauth_token?: string;
|
|
||||||
message?: string;
|
|
||||||
};
|
|
||||||
|
|
||||||
if (!data.oauth_token) {
|
|
||||||
const message = data.message || "Unknown error";
|
|
||||||
throw new Error(`OAuth token request failed: ${message}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
console.log("Successfully fetched Claude Code OAuth token");
|
|
||||||
return data.oauth_token;
|
|
||||||
} catch (error) {
|
|
||||||
console.error("Failed to fetch Claude Code OAuth token:", error);
|
|
||||||
throw error;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Remote Agent mode implementation.
|
|
||||||
*
|
|
||||||
* This mode is specifically designed for repository_dispatch events triggered by external APIs.
|
|
||||||
* It bypasses the standard trigger checking, comment tracking, and GitHub data fetching used by tag mode,
|
|
||||||
* making it ideal for automated tasks triggered via API calls with custom payloads.
|
|
||||||
*/
|
|
||||||
export const remoteAgentMode: Mode = {
|
|
||||||
name: "remote-agent",
|
|
||||||
description: "Remote automation mode for repository_dispatch events",
|
|
||||||
|
|
||||||
shouldTrigger(context) {
|
|
||||||
// Only trigger for repository_dispatch events
|
|
||||||
return isRepositoryDispatchEvent(context);
|
|
||||||
},
|
|
||||||
|
|
||||||
prepareContext(context, data) {
|
|
||||||
// Remote agent mode uses minimal context
|
|
||||||
return {
|
|
||||||
mode: "remote-agent",
|
|
||||||
githubContext: context,
|
|
||||||
baseBranch: data?.baseBranch,
|
|
||||||
claudeBranch: data?.claudeBranch,
|
|
||||||
};
|
|
||||||
},
|
|
||||||
|
|
||||||
getAllowedTools() {
|
|
||||||
return [];
|
|
||||||
},
|
|
||||||
|
|
||||||
getDisallowedTools() {
|
|
||||||
return [];
|
|
||||||
},
|
|
||||||
|
|
||||||
shouldCreateTrackingComment() {
|
|
||||||
return false;
|
|
||||||
},
|
|
||||||
|
|
||||||
async prepare({
|
|
||||||
context,
|
|
||||||
octokit,
|
|
||||||
githubToken,
|
|
||||||
}: ModeOptions): Promise<ModeResult> {
|
|
||||||
// Remote agent mode handles repository_dispatch events only
|
|
||||||
|
|
||||||
if (!isRepositoryDispatchEvent(context)) {
|
|
||||||
throw new Error(
|
|
||||||
"Remote agent mode can only handle repository_dispatch events",
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extract task details from client_payload
|
|
||||||
const payload = context.payload;
|
|
||||||
const clientPayload = payload.client_payload as {
|
|
||||||
prompt?: string;
|
|
||||||
stream_endpoint?: string;
|
|
||||||
headers?: Record<string, string>;
|
|
||||||
resume_endpoint?: string;
|
|
||||||
session_id?: string;
|
|
||||||
endpoints?: {
|
|
||||||
stream?: string;
|
|
||||||
progress?: string;
|
|
||||||
systemProgress?: string;
|
|
||||||
oauthToken?: string;
|
|
||||||
};
|
|
||||||
overrideInputs?: {
|
|
||||||
model?: string;
|
|
||||||
base_branch?: string;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
// Get OIDC token for streaming and potential OAuth token fetching
|
|
||||||
let oidcToken: string;
|
|
||||||
try {
|
|
||||||
oidcToken = await core.getIDToken("claude-code-github-action");
|
|
||||||
} catch (error) {
|
|
||||||
console.error("Failed to get OIDC token:", error);
|
|
||||||
throw new Error(
|
|
||||||
`OIDC token required for remote-agent mode. Please add 'id-token: write' to your workflow permissions. Error: ${error}`,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set up system progress config if endpoint is provided
|
|
||||||
let systemProgressConfig: SystemProgressConfig | null = null;
|
|
||||||
if (context.progressTracking?.systemProgressEndpoint) {
|
|
||||||
systemProgressConfig = {
|
|
||||||
endpoint: context.progressTracking.systemProgressEndpoint,
|
|
||||||
headers: context.progressTracking.headers,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle authentication - fetch OAuth token if needed
|
|
||||||
const anthropicApiKey = process.env.ANTHROPIC_API_KEY;
|
|
||||||
const claudeCodeOAuthToken = process.env.CLAUDE_CODE_OAUTH_TOKEN;
|
|
||||||
|
|
||||||
if (!anthropicApiKey && !claudeCodeOAuthToken) {
|
|
||||||
const oauthTokenEndpoint = context.progressTracking?.oauthTokenEndpoint;
|
|
||||||
|
|
||||||
if (oauthTokenEndpoint) {
|
|
||||||
console.log(
|
|
||||||
"No API key or OAuth token found, fetching OAuth token from endpoint",
|
|
||||||
);
|
|
||||||
try {
|
|
||||||
const fetchedToken = await fetchClaudeCodeOAuthToken(
|
|
||||||
oauthTokenEndpoint,
|
|
||||||
oidcToken,
|
|
||||||
context.progressTracking?.sessionId,
|
|
||||||
);
|
|
||||||
core.setOutput("claude_code_oauth_token", fetchedToken);
|
|
||||||
console.log(
|
|
||||||
"Successfully fetched and set OAuth token for Claude Code",
|
|
||||||
);
|
|
||||||
} catch (error) {
|
|
||||||
console.error("Failed to fetch OAuth token:", error);
|
|
||||||
throw new Error(
|
|
||||||
`Authentication failed: No API key or OAuth token available, and OAuth token fetching failed: ${error}`,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
throw new Error(
|
|
||||||
"No authentication available: Missing ANTHROPIC_API_KEY, CLAUDE_CODE_OAUTH_TOKEN, and no OAuth token endpoint provided",
|
|
||||||
);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
console.log("Using existing authentication (API key or OAuth token)");
|
|
||||||
}
|
|
||||||
|
|
||||||
const taskDescription =
|
|
||||||
clientPayload.prompt ||
|
|
||||||
context.inputs.directPrompt ||
|
|
||||||
"No task description provided";
|
|
||||||
|
|
||||||
// Setup branch for work isolation with resume support
|
|
||||||
let branchInfo;
|
|
||||||
try {
|
|
||||||
branchInfo = await setupBranchWithResume(octokit, context, oidcToken);
|
|
||||||
} catch (error) {
|
|
||||||
// Report failure if we have system progress config
|
|
||||||
if (systemProgressConfig) {
|
|
||||||
reportWorkflowFailed(
|
|
||||||
systemProgressConfig,
|
|
||||||
oidcToken,
|
|
||||||
"initialization",
|
|
||||||
error as Error,
|
|
||||||
"branch_setup_failed",
|
|
||||||
);
|
|
||||||
}
|
|
||||||
throw error;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remote agent mode always uses commit signing for security
|
|
||||||
// No git authentication configuration needed as we use GitHub API
|
|
||||||
|
|
||||||
// Handle resume messages if they exist
|
|
||||||
if (branchInfo.resumeMessages && branchInfo.resumeMessages.length > 0) {
|
|
||||||
console.log(
|
|
||||||
`Resumed session with ${branchInfo.resumeMessages.length} previous messages`,
|
|
||||||
);
|
|
||||||
// Store resume messages for later use
|
|
||||||
// These will be prepended to the conversation when Claude starts
|
|
||||||
core.setOutput(
|
|
||||||
"resume_messages",
|
|
||||||
JSON.stringify(branchInfo.resumeMessages),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Report workflow initialized
|
|
||||||
if (systemProgressConfig) {
|
|
||||||
reportWorkflowInitialized(
|
|
||||||
systemProgressConfig,
|
|
||||||
oidcToken,
|
|
||||||
branchInfo.claudeBranch || branchInfo.currentBranch,
|
|
||||||
branchInfo.baseBranch,
|
|
||||||
context.progressTracking?.sessionId,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create prompt directory
|
|
||||||
await mkdir(`${process.env.RUNNER_TEMP}/claude-prompts`, {
|
|
||||||
recursive: true,
|
|
||||||
});
|
|
||||||
|
|
||||||
// Fetch trigger user display name from context.actor
|
|
||||||
let triggerDisplayName: string | null | undefined;
|
|
||||||
if (context.actor) {
|
|
||||||
try {
|
|
||||||
const octokits = createOctokit(githubToken);
|
|
||||||
triggerDisplayName = await fetchUserDisplayName(
|
|
||||||
octokits,
|
|
||||||
context.actor,
|
|
||||||
);
|
|
||||||
} catch (error) {
|
|
||||||
console.warn(
|
|
||||||
`Failed to fetch user display name for ${context.actor}:`,
|
|
||||||
error,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate dispatch-specific prompt (just the task description)
|
|
||||||
const promptContent = generateDispatchPrompt(taskDescription);
|
|
||||||
|
|
||||||
console.log("Writing prompt file...");
|
|
||||||
console.log("Contents: ", promptContent);
|
|
||||||
// Write the prompt file
|
|
||||||
await writeFile(
|
|
||||||
`${process.env.RUNNER_TEMP}/claude-prompts/claude-prompt.txt`,
|
|
||||||
promptContent,
|
|
||||||
);
|
|
||||||
console.log(
|
|
||||||
`Prompt file written successfully to ${process.env.RUNNER_TEMP}/claude-prompts/claude-prompt.txt`,
|
|
||||||
);
|
|
||||||
|
|
||||||
// Set stream configuration for repository_dispatch events
|
|
||||||
if (context.progressTracking) {
|
|
||||||
const streamConfig: StreamConfig = {};
|
|
||||||
|
|
||||||
if (context.progressTracking.resumeEndpoint) {
|
|
||||||
streamConfig.resume_endpoint = context.progressTracking.resumeEndpoint;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (context.progressTracking.sessionId) {
|
|
||||||
streamConfig.session_id = context.progressTracking.sessionId;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (context.progressTracking.progressEndpoint) {
|
|
||||||
streamConfig.progress_endpoint =
|
|
||||||
context.progressTracking.progressEndpoint;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (context.progressTracking.systemProgressEndpoint) {
|
|
||||||
streamConfig.system_progress_endpoint =
|
|
||||||
context.progressTracking.systemProgressEndpoint;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Merge provided headers with OIDC token
|
|
||||||
const headers: Record<string, string> = {
|
|
||||||
...(context.progressTracking.headers || {}),
|
|
||||||
};
|
|
||||||
|
|
||||||
// Use existing OIDC token for streaming
|
|
||||||
headers["Authorization"] = `Bearer ${oidcToken}`;
|
|
||||||
|
|
||||||
if (Object.keys(headers).length > 0) {
|
|
||||||
streamConfig.headers = headers;
|
|
||||||
}
|
|
||||||
|
|
||||||
console.log("Setting stream config:", streamConfig);
|
|
||||||
core.setOutput("stream_config", JSON.stringify(streamConfig));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Export tool environment variables for remote agent mode
|
|
||||||
// Check if we have actions:read permission for CI tools
|
|
||||||
const hasActionsReadPermission =
|
|
||||||
context.inputs.additionalPermissions.get("actions") === "read";
|
|
||||||
|
|
||||||
const allowedToolsString = buildRemoteAgentAllowedToolsString(
|
|
||||||
context.inputs.allowedTools,
|
|
||||||
hasActionsReadPermission,
|
|
||||||
);
|
|
||||||
const disallowedToolsString = buildDisallowedToolsString(
|
|
||||||
context.inputs.disallowedTools,
|
|
||||||
);
|
|
||||||
|
|
||||||
core.exportVariable("ALLOWED_TOOLS", allowedToolsString);
|
|
||||||
core.exportVariable("DISALLOWED_TOOLS", disallowedToolsString);
|
|
||||||
|
|
||||||
// Handle model override from repository_dispatch payload
|
|
||||||
if (clientPayload.overrideInputs?.model) {
|
|
||||||
core.setOutput("anthropic_model", clientPayload.overrideInputs.model);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get minimal MCP configuration for remote agent mode
|
|
||||||
const additionalMcpConfig = process.env.MCP_CONFIG || "";
|
|
||||||
const mcpConfig = await prepareMcpConfig({
|
|
||||||
githubToken,
|
|
||||||
owner: context.repository.owner,
|
|
||||||
repo: context.repository.repo,
|
|
||||||
branch: branchInfo.claudeBranch || branchInfo.currentBranch,
|
|
||||||
baseBranch: branchInfo.baseBranch,
|
|
||||||
additionalMcpConfig,
|
|
||||||
claudeCommentId: "", // No comment ID for remote agent mode
|
|
||||||
allowedTools: context.inputs.allowedTools,
|
|
||||||
context,
|
|
||||||
});
|
|
||||||
|
|
||||||
core.setOutput("mcp_config", mcpConfig);
|
|
||||||
|
|
||||||
// Report Claude is starting
|
|
||||||
if (systemProgressConfig) {
|
|
||||||
reportClaudeStarting(systemProgressConfig, oidcToken);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Track Claude start time for duration calculation
|
|
||||||
core.setOutput("claude_start_time", Date.now().toString());
|
|
||||||
|
|
||||||
// Export system prompt for remote agent mode
|
|
||||||
const systemPrompt = generateDispatchSystemPrompt(
|
|
||||||
context,
|
|
||||||
branchInfo.baseBranch,
|
|
||||||
branchInfo.claudeBranch,
|
|
||||||
context.actor,
|
|
||||||
triggerDisplayName,
|
|
||||||
);
|
|
||||||
core.exportVariable("APPEND_SYSTEM_PROMPT", systemPrompt);
|
|
||||||
|
|
||||||
return {
|
|
||||||
commentId: undefined, // No comment tracking for remote agent mode
|
|
||||||
branchInfo,
|
|
||||||
mcpConfig,
|
|
||||||
};
|
|
||||||
},
|
|
||||||
|
|
||||||
generatePrompt(context: PreparedContext): string {
|
|
||||||
// TODO: update this to generate a more meaningful prompt
|
|
||||||
return `Repository: ${context.repository}`;
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Generates a task-focused prompt for repository_dispatch events
|
|
||||||
*/
|
|
||||||
function generateDispatchPrompt(taskDescription: string): string {
|
|
||||||
return taskDescription;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Generates the system prompt portion for repository_dispatch events
|
|
||||||
*/
|
|
||||||
function generateDispatchSystemPrompt(
|
|
||||||
context: GitHubContext,
|
|
||||||
baseBranch: string,
|
|
||||||
claudeBranch: string | undefined,
|
|
||||||
triggerUsername?: string,
|
|
||||||
triggerDisplayName?: string | null,
|
|
||||||
): string {
|
|
||||||
const { repository } = context;
|
|
||||||
|
|
||||||
const coAuthorLine =
|
|
||||||
triggerUsername && (triggerDisplayName || triggerUsername !== "Unknown")
|
|
||||||
? `Co-authored-by: ${triggerDisplayName ?? triggerUsername} <${triggerUsername}@users.noreply.github.com>`
|
|
||||||
: "";
|
|
||||||
|
|
||||||
// Remote agent mode always uses MCP for commit signing
|
|
||||||
let commitInstructions = `- Use mcp__github_file_ops__commit_files and mcp__github_file_ops__delete_files to commit and push changes`;
|
|
||||||
if (coAuthorLine) {
|
|
||||||
commitInstructions += `
|
|
||||||
- When pushing changes, include a Co-authored-by trailer in the commit message
|
|
||||||
- Use: "${coAuthorLine}"`;
|
|
||||||
}
|
|
||||||
|
|
||||||
return `You are Claude, an AI assistant designed to help with GitHub issues and pull requests. Think carefully as you analyze the context and respond appropriately. Here's the context for your current task:
|
|
||||||
|
|
||||||
Your task is to complete the request described in the task description.
|
|
||||||
|
|
||||||
Instructions:
|
|
||||||
1. For questions: Research the codebase and provide a detailed answer
|
|
||||||
2. For implementations: Make the requested changes, commit, and push
|
|
||||||
|
|
||||||
Key points:
|
|
||||||
- You're already on a new branch - NEVER create another branch (this is very important). ${claudeBranch} is the ONLY branch you should work on.
|
|
||||||
${commitInstructions}
|
|
||||||
${
|
|
||||||
claudeBranch
|
|
||||||
? `- After completing your work, provide a URL to create a PR in this format:
|
|
||||||
|
|
||||||
${GITHUB_SERVER_URL}/${repository.owner}/${repository.repo}/compare/${baseBranch}...${claudeBranch}?quick_pull=1`
|
|
||||||
: ""
|
|
||||||
}`;
|
|
||||||
}
|
|
||||||
@@ -1,78 +0,0 @@
|
|||||||
/**
|
|
||||||
* System progress tracking types for remote agent mode
|
|
||||||
*/
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Base event structure
|
|
||||||
*/
|
|
||||||
type BaseProgressEvent = {
|
|
||||||
timestamp: string; // ISO 8601
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Workflow initializing event
|
|
||||||
*/
|
|
||||||
export type WorkflowInitializingEvent = BaseProgressEvent & {
|
|
||||||
event_type: "workflow_initializing";
|
|
||||||
data: {
|
|
||||||
branch: string;
|
|
||||||
base_branch: string;
|
|
||||||
session_id?: string;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Claude starting event
|
|
||||||
*/
|
|
||||||
export type ClaudeStartingEvent = BaseProgressEvent & {
|
|
||||||
event_type: "claude_starting";
|
|
||||||
data: Record<string, never>; // No data needed
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Claude complete event
|
|
||||||
*/
|
|
||||||
export type ClaudeCompleteEvent = BaseProgressEvent & {
|
|
||||||
event_type: "claude_complete";
|
|
||||||
data: {
|
|
||||||
exit_code: number;
|
|
||||||
duration_ms: number;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Workflow failed event
|
|
||||||
*/
|
|
||||||
export type WorkflowFailedEvent = BaseProgressEvent & {
|
|
||||||
event_type: "workflow_failed";
|
|
||||||
data: {
|
|
||||||
error: {
|
|
||||||
phase: "initialization" | "claude_execution";
|
|
||||||
message: string;
|
|
||||||
code: string;
|
|
||||||
};
|
|
||||||
};
|
|
||||||
};
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Discriminated union of all progress events
|
|
||||||
*/
|
|
||||||
export type ProgressEvent =
|
|
||||||
| WorkflowInitializingEvent
|
|
||||||
| ClaudeStartingEvent
|
|
||||||
| ClaudeCompleteEvent
|
|
||||||
| WorkflowFailedEvent;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Payload sent to the system progress endpoint
|
|
||||||
*/
|
|
||||||
export type SystemProgressPayload = ProgressEvent;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Configuration for system progress reporting
|
|
||||||
*/
|
|
||||||
export type SystemProgressConfig = {
|
|
||||||
endpoint: string;
|
|
||||||
headers?: Record<string, string>;
|
|
||||||
timeout_ms?: number; // Default: 5000
|
|
||||||
};
|
|
||||||
@@ -1,149 +0,0 @@
import * as core from "@actions/core";
import type {
  ProgressEvent,
  SystemProgressPayload,
  SystemProgressConfig,
  WorkflowInitializingEvent,
  ClaudeStartingEvent,
  ClaudeCompleteEvent,
  WorkflowFailedEvent,
} from "./progress-types";

/**
 * Send a progress event to the system progress endpoint (fire-and-forget)
 */
function sendProgressEvent(
  event: ProgressEvent,
  config: SystemProgressConfig,
  oidcToken: string,
): void {
  const payload: SystemProgressPayload = event;

  console.log(
    `Sending system progress event: ${event.event_type}`,
    JSON.stringify(payload, null, 2),
  );

  // Fire and forget - don't await
  Promise.resolve().then(async () => {
    try {
      // Create an AbortController for timeout
      const controller = new AbortController();
      const timeoutId = setTimeout(
        () => controller.abort(),
        config.timeout_ms || 5000,
      );

      try {
        const response = await fetch(config.endpoint, {
          method: "POST",
          headers: {
            "Content-Type": "application/json",
            Authorization: `Bearer ${oidcToken}`,
            ...config.headers,
          },
          body: JSON.stringify(payload),
          signal: controller.signal,
        });

        if (!response.ok) {
          console.error(
            `System progress endpoint returned ${response.status}: ${response.statusText}`,
          );
        }
      } finally {
        clearTimeout(timeoutId);
      }
    } catch (error) {
      // Log but don't throw - we don't want progress reporting to interrupt the workflow
      core.warning(`Failed to send system progress event: ${error}`);
    }
  });
}

/**
 * Report workflow initialization complete
 */
export function reportWorkflowInitialized(
  config: SystemProgressConfig,
  oidcToken: string,
  branch: string,
  baseBranch: string,
  sessionId?: string,
): void {
  const event: WorkflowInitializingEvent = {
    timestamp: new Date().toISOString(),
    event_type: "workflow_initializing",
    data: {
      branch,
      base_branch: baseBranch,
      ...(sessionId && { session_id: sessionId }),
    },
  };

  sendProgressEvent(event, config, oidcToken);
}

/**
 * Report Claude is starting
 */
export function reportClaudeStarting(
  config: SystemProgressConfig,
  oidcToken: string,
): void {
  const event: ClaudeStartingEvent = {
    timestamp: new Date().toISOString(),
    event_type: "claude_starting",
    data: {},
  };

  sendProgressEvent(event, config, oidcToken);
}

/**
 * Report Claude completed
 */
export function reportClaudeComplete(
  config: SystemProgressConfig,
  oidcToken: string,
  exitCode: number,
  durationMs: number,
): void {
  const event: ClaudeCompleteEvent = {
    timestamp: new Date().toISOString(),
    event_type: "claude_complete",
    data: {
      exit_code: exitCode,
      duration_ms: durationMs,
    },
  };

  sendProgressEvent(event, config, oidcToken);
}

/**
 * Report workflow failed
 */
export function reportWorkflowFailed(
  config: SystemProgressConfig,
  oidcToken: string,
  phase: "initialization" | "claude_execution",
  error: Error | string,
  code: string,
): void {
  const errorMessage = error instanceof Error ? error.message : error;

  const event: WorkflowFailedEvent = {
    timestamp: new Date().toISOString(),
    event_type: "workflow_failed",
    data: {
      error: {
        phase,
        message: errorMessage,
        code,
      },
    },
  };

  sendProgressEvent(event, config, oidcToken);
}
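As a rough usage sketch, the reporters removed above were meant to be called around the Claude run roughly as follows; the endpoint URL and the token source here are assumptions for illustration, not values from the workflow.

```typescript
// Sketch only: wiring the removed reporters around a Claude execution.
// Endpoint and token acquisition are placeholders.
import type { SystemProgressConfig } from "./progress-types";
import { reportClaudeStarting, reportClaudeComplete } from "./system-progress";

const config: SystemProgressConfig = {
  endpoint: "https://example.com/system-progress", // placeholder endpoint
  timeout_ms: 5000,
};
const oidcToken = process.env.PROGRESS_OIDC_TOKEN ?? ""; // placeholder token source

reportClaudeStarting(config, oidcToken);
const startedAt = Date.now();
// ... run Claude Code here ...
reportClaudeComplete(config, oidcToken, 0, Date.now() - startedAt);
```

Because sendProgressEvent is fire-and-forget with a 5 s default timeout, a failing progress endpoint only produces a warning and never blocks the run.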
@@ -60,20 +60,8 @@ export const reviewMode: Mode = {

  getAllowedTools() {
    return [
      // Context tools - to know who the current user is
      "Bash(gh issue comment:*)",
      "mcp__github__get_me",
      "mcp__github_inline_comment__create_inline_comment",
      // Core review tools
      "mcp__github__create_pending_pull_request_review",
      "mcp__github__add_comment_to_pending_review",
      "mcp__github__submit_pending_pull_request_review",
      "mcp__github__delete_pending_pull_request_review",
      "mcp__github__create_and_submit_pull_request_review",
      // Comment tools
      "mcp__github__add_issue_comment",
      // PR information tools
      "mcp__github__get_pull_request",
      "mcp__github__get_pull_request_reviews",
      "mcp__github__get_pull_request_status",
    ];
  },
@@ -163,17 +151,13 @@ REVIEW MODE WORKFLOW:

1. First, understand the PR context:
   - You are reviewing PR #${eventData.isPR && eventData.prNumber ? eventData.prNumber : "[PR number]"} in ${context.repository}
   - Use mcp__github__get_pull_request to get PR metadata
   - Use the Read, Grep, and Glob tools to examine the modified files directly from disk
   - This provides the full context and latest state of the code
   - Look at the changed_files section above to see which files were modified

2. Create a pending review:
2. Add comments:
   - Use mcp__github__create_pending_pull_request_review to start your review
   - use Bash(gh issue comment:*) to add top-level comments
   - This allows you to batch comments before submitting
   - Use mcp__github_inline_comment__create_inline_comment to add inline comments (prefer this where possible)

3. Add inline comments:
   - Use mcp__github__add_comment_to_pending_review for each issue or suggestion
   - Parameters:
     * path: The file path (e.g., "src/index.js")
     * line: Line number for single-line comments
@@ -182,49 +166,6 @@ REVIEW MODE WORKFLOW:
     * subjectType: "line" for line-level comments
     * body: Your comment text

   - When to use multi-line comments:
     * When replacing multiple consecutive lines
     * When the fix requires changes across several lines
     * Example: To replace lines 19-20, use startLine: 19, line: 20

   - For code suggestions, use this EXACT format in the body:
     \`\`\`suggestion
     corrected code here
     \`\`\`

   CRITICAL: GitHub suggestion blocks must ONLY contain the replacement for the specific line(s) being commented on:
   - For single-line comments: Replace ONLY that line
   - For multi-line comments: Replace ONLY the lines in the range
   - Do NOT include surrounding context or function signatures
   - Do NOT suggest changes that span beyond the commented lines

   Example for line 19 \`var name = user.name;\`:
   WRONG:
   \\\`\\\`\\\`suggestion
   function processUser(user) {
     if (!user) throw new Error('Invalid user');
     const name = user.name;
   \\\`\\\`\\\`

   CORRECT:
   \\\`\\\`\\\`suggestion
   const name = user.name;
   \\\`\\\`\\\`

   For validation suggestions, comment on the function declaration line or create separate comments for each concern.

4. Submit your review:
   - Use mcp__github__submit_pending_pull_request_review
   - Parameters:
     * event: "COMMENT" (general feedback), "REQUEST_CHANGES" (issues found), or "APPROVE" (if appropriate)
     * body: Write a comprehensive review summary that includes:
       - Overview of what was reviewed (files, scope, focus areas)
       - Summary of all issues found (with counts by severity if applicable)
       - Key recommendations and action items
       - Highlights of good practices observed
       - Overall assessment and recommendation
     - The body should be detailed and informative since it's the main review content
     - Structure the body with clear sections using markdown headers

REVIEW GUIDELINES:
@@ -301,6 +242,7 @@ This ensures users get value from the review even before checking individual inl
      claudeBranch: branchInfo.claudeBranch,
    });

    // TODO: Capture and return the allowed/disallowed tools from createPrompt
    await createPrompt(reviewMode, modeContext, githubData, context);

    // Export tool environment variables for review mode
@@ -98,6 +98,7 @@ export const tagMode: Mode = {
      claudeBranch: branchInfo.claudeBranch,
    });

    // TODO: Capture and return the allowed/disallowed tools from createPrompt
    await createPrompt(tagMode, modeContext, githubData, context);

    // Get MCP configuration
@@ -3,7 +3,7 @@ import type { PreparedContext } from "../create-prompt/types";
import type { FetchDataResult } from "../github/data/fetcher";
import type { Octokits } from "../github/api/client";

export type ModeName = "tag" | "agent" | "remote-agent" | "experimental-review";
export type ModeName = "tag" | "agent" | "experimental-review";

export type ModeContext = {
  mode: ModeName;
@@ -10,6 +10,7 @@ export type PrepareResult = {
    currentBranch: string;
  };
  mcpConfig: string;
  // TODO: Add allowedTools and disallowedTools here once modes are updated
};

export type PrepareOptions = {
@@ -1,29 +0,0 @@
/**
 * Types for resume endpoint functionality
 */

/**
 * Message structure from the resume endpoint
 * This matches the structure used in Claude CLI's teleport feature
 */
export type ResumeMessage = {
  role: "user" | "assistant" | "system";
  content: string | Array<{ type: string; text?: string; [key: string]: any }>;
  [key: string]: any;
};

/**
 * Response structure from the resume endpoint
 */
export type ResumeResponse = {
  log: ResumeMessage[];
  branch?: string;
};

/**
 * Result after processing resume endpoint
 */
export type ResumeResult = {
  messages: ResumeMessage[];
  branchName: string;
};
@@ -1,19 +0,0 @@
/**
 * Configuration for streaming and progress tracking
 */
export type StreamConfig = {
  /** Endpoint for streaming Claude execution progress */
  progress_endpoint?: string;

  /** Endpoint for system-level progress reporting (workflow lifecycle events) */
  system_progress_endpoint?: string;

  /** Resume endpoint for teleport functionality */
  resume_endpoint?: string;

  /** Session ID for tracking */
  session_id?: string;

  /** Headers to include with streaming requests (includes Authorization) */
  headers?: Record<string, string>;
};
@@ -7,7 +7,6 @@ import {
  getEventTypeAndContext,
  buildAllowedToolsString,
  buildDisallowedToolsString,
  buildRemoteAgentAllowedToolsString,
} from "../src/create-prompt";
import type { PreparedContext } from "../src/create-prompt";
import type { Mode } from "../src/modes/types";
@@ -1042,8 +1041,6 @@ describe("buildAllowedToolsString", () => {
    expect(result).toContain("Bash(git diff:*)");
    expect(result).toContain("Bash(git log:*)");
    expect(result).toContain("Bash(git rm:*)");
    expect(result).toContain("Bash(git config user.name:*)");
    expect(result).toContain("Bash(git config user.email:*)");

    // Comment tool from minimal server should be included
    expect(result).toContain("mcp__github_comment__update_claude_comment");
@@ -1150,117 +1147,3 @@ describe("buildDisallowedToolsString", () => {
    expect(result).toBe("BadTool1,BadTool2");
  });
});

describe("buildRemoteAgentAllowedToolsString", () => {
  test("should return correct tools for remote agent mode (always uses commit signing)", () => {
    const result = buildRemoteAgentAllowedToolsString();

    // Base tools should be present
    expect(result).toContain("Edit");
    expect(result).toContain("Glob");
    expect(result).toContain("Grep");
    expect(result).toContain("LS");
    expect(result).toContain("Read");
    expect(result).toContain("Write");

    // Comment tool should always be included
    expect(result).toContain("mcp__github_comment__update_claude_comment");

    // MCP commit signing tools should always be included
    expect(result).toContain("mcp__github_file_ops__commit_files");
    expect(result).toContain("mcp__github_file_ops__delete_files");

    // Safe git tools should be included
    expect(result).toContain("Bash(git status:*)");
    expect(result).toContain("Bash(git diff:*)");
    expect(result).toContain("Bash(git log:*)");

    // Dangerous git tools should NOT be included
    expect(result).not.toContain("Bash(git commit:*)");
    expect(result).not.toContain("Bash(git add:*)");
    expect(result).not.toContain("Bash(git push:*)");
    expect(result).not.toContain("Bash(git config");
    expect(result).not.toContain("Bash(git rm:*)");
  });

  test("should include custom tools when provided", () => {
    const customTools = ["CustomTool1", "CustomTool2"];
    const result = buildRemoteAgentAllowedToolsString(customTools);

    // Base tools should be present
    expect(result).toContain("Edit");
    expect(result).toContain("Glob");

    // Custom tools should be included
    expect(result).toContain("CustomTool1");
    expect(result).toContain("CustomTool2");

    // MCP commit signing tools should still be included
    expect(result).toContain("mcp__github_file_ops__commit_files");
    expect(result).toContain("mcp__github_file_ops__delete_files");

    // Dangerous git tools should still NOT be included
    expect(result).not.toContain("Bash(git commit:*)");
    expect(result).not.toContain("Bash(git config");
  });

  test("should include GitHub Actions tools when includeActionsTools is true", () => {
    const result = buildRemoteAgentAllowedToolsString([], true);

    // Base tools should be present
    expect(result).toContain("Edit");
    expect(result).toContain("Glob");

    // GitHub Actions tools should be included
    expect(result).toContain("mcp__github_ci__get_ci_status");
    expect(result).toContain("mcp__github_ci__get_workflow_run_details");
    expect(result).toContain("mcp__github_ci__download_job_log");

    // MCP commit signing tools should still be included
    expect(result).toContain("mcp__github_file_ops__commit_files");
    expect(result).toContain("mcp__github_file_ops__delete_files");

    // Dangerous git tools should still NOT be included
    expect(result).not.toContain("Bash(git commit:*)");
    expect(result).not.toContain("Bash(git config");
  });

  test("should include both custom and Actions tools when both provided", () => {
    const customTools = ["CustomTool1"];
    const result = buildRemoteAgentAllowedToolsString(customTools, true);

    // Base tools should be present
    expect(result).toContain("Edit");

    // Custom tools should be included
    expect(result).toContain("CustomTool1");

    // GitHub Actions tools should be included
    expect(result).toContain("mcp__github_ci__get_ci_status");

    // MCP commit signing tools should still be included
    expect(result).toContain("mcp__github_file_ops__commit_files");

    // Dangerous git tools should still NOT be included
    expect(result).not.toContain("Bash(git commit:*)");
    expect(result).not.toContain("Bash(git config");
  });

  test("should never include dangerous git tools regardless of parameters", () => {
    const dangerousCustomTools = ["Bash(git commit:*)", "Bash(git config:*)"];
    const result = buildRemoteAgentAllowedToolsString(
      dangerousCustomTools,
      true,
    );

    // The function should still include dangerous tools if explicitly provided in custom tools
    // This is by design - if someone explicitly adds them, they should be included
    expect(result).toContain("Bash(git commit:*)");
    expect(result).toContain("Bash(git config:*)");

    // But the base function should not add them automatically
    const resultWithoutCustom = buildRemoteAgentAllowedToolsString([], true);
    expect(resultWithoutCustom).not.toContain("Bash(git commit:*)");
    expect(resultWithoutCustom).not.toContain("Bash(git config");
  });
});
@@ -662,4 +662,255 @@ describe("downloadCommentImages", () => {
    );
    expect(result.get(imageUrl2)).toBeUndefined();
  });

  test("should detect and download images from HTML img tags", async () => {
    const mockOctokit = createMockOctokit();
    const imageUrl =
      "https://github.com/user-attachments/assets/html-image.png";
    const signedUrl =
      "https://private-user-images.githubusercontent.com/html.png?jwt=token";

    // Mock octokit response
    // @ts-expect-error Mock implementation doesn't match full type signature
    mockOctokit.rest.issues.getComment = jest.fn().mockResolvedValue({
      data: {
        body_html: `<img src="${signedUrl}">`,
      },
    });

    // Mock fetch for image download
    const mockArrayBuffer = new ArrayBuffer(8);
    fetchSpy = spyOn(global, "fetch").mockResolvedValue({
      ok: true,
      arrayBuffer: async () => mockArrayBuffer,
    } as Response);

    const comments: CommentWithImages[] = [
      {
        type: "issue_comment",
        id: "777",
        body: `Here's an HTML image: <img src="${imageUrl}" alt="test">`,
      },
    ];

    const result = await downloadCommentImages(
      mockOctokit,
      "owner",
      "repo",
      comments,
    );

    expect(mockOctokit.rest.issues.getComment).toHaveBeenCalledWith({
      owner: "owner",
      repo: "repo",
      comment_id: 777,
      mediaType: { format: "full+json" },
    });

    expect(fetchSpy).toHaveBeenCalledWith(signedUrl);
    expect(fsWriteFileSpy).toHaveBeenCalledWith(
      "/tmp/github-images/image-1704067200000-0.png",
      Buffer.from(mockArrayBuffer),
    );

    expect(result.size).toBe(1);
    expect(result.get(imageUrl)).toBe(
      "/tmp/github-images/image-1704067200000-0.png",
    );
    expect(consoleLogSpy).toHaveBeenCalledWith(
      "Found 1 image(s) in issue_comment 777",
    );
    expect(consoleLogSpy).toHaveBeenCalledWith(`Downloading ${imageUrl}...`);
    expect(consoleLogSpy).toHaveBeenCalledWith(
      "✓ Saved: /tmp/github-images/image-1704067200000-0.png",
    );
  });

  test("should handle HTML img tags with different quote styles", async () => {
    const mockOctokit = createMockOctokit();
    const imageUrl1 =
      "https://github.com/user-attachments/assets/single-quote.jpg";
    const imageUrl2 =
      "https://github.com/user-attachments/assets/double-quote.png";
    const signedUrl1 =
      "https://private-user-images.githubusercontent.com/single.jpg?jwt=token1";
    const signedUrl2 =
      "https://private-user-images.githubusercontent.com/double.png?jwt=token2";

    // @ts-expect-error Mock implementation doesn't match full type signature
    mockOctokit.rest.issues.getComment = jest.fn().mockResolvedValue({
      data: {
        body_html: `<img src="${signedUrl1}"><img src="${signedUrl2}">`,
      },
    });

    fetchSpy = spyOn(global, "fetch").mockResolvedValue({
      ok: true,
      arrayBuffer: async () => new ArrayBuffer(8),
    } as Response);

    const comments: CommentWithImages[] = [
      {
        type: "issue_comment",
        id: "888",
        body: `Single quote: <img src='${imageUrl1}' alt="test"> and double quote: <img src="${imageUrl2}" alt="test">`,
      },
    ];

    const result = await downloadCommentImages(
      mockOctokit,
      "owner",
      "repo",
      comments,
    );

    expect(fetchSpy).toHaveBeenCalledTimes(2);
    expect(result.size).toBe(2);
    expect(result.get(imageUrl1)).toBe(
      "/tmp/github-images/image-1704067200000-0.jpg",
    );
    expect(result.get(imageUrl2)).toBe(
      "/tmp/github-images/image-1704067200000-1.png",
    );
    expect(consoleLogSpy).toHaveBeenCalledWith(
      "Found 2 image(s) in issue_comment 888",
    );
  });

  test("should handle mixed Markdown and HTML images", async () => {
    const mockOctokit = createMockOctokit();
    const markdownUrl =
      "https://github.com/user-attachments/assets/markdown.png";
    const htmlUrl = "https://github.com/user-attachments/assets/html.jpg";
    const signedUrl1 =
      "https://private-user-images.githubusercontent.com/md.png?jwt=token1";
    const signedUrl2 =
      "https://private-user-images.githubusercontent.com/html.jpg?jwt=token2";

    // @ts-expect-error Mock implementation doesn't match full type signature
    mockOctokit.rest.issues.getComment = jest.fn().mockResolvedValue({
      data: {
        body_html: `<img src="${signedUrl1}"><img src="${signedUrl2}">`,
      },
    });

    fetchSpy = spyOn(global, "fetch").mockResolvedValue({
      ok: true,
      arrayBuffer: async () => new ArrayBuffer(8),
    } as Response);

    const comments: CommentWithImages[] = [
      {
        type: "issue_comment",
        id: "999",
        body: `Markdown: ![](${markdownUrl}) and HTML: <img src="${htmlUrl}" alt="test">`,
      },
    ];

    const result = await downloadCommentImages(
      mockOctokit,
      "owner",
      "repo",
      comments,
    );

    expect(fetchSpy).toHaveBeenCalledTimes(2);
    expect(result.size).toBe(2);
    expect(result.get(markdownUrl)).toBe(
      "/tmp/github-images/image-1704067200000-0.png",
    );
    expect(result.get(htmlUrl)).toBe(
      "/tmp/github-images/image-1704067200000-1.jpg",
    );
    expect(consoleLogSpy).toHaveBeenCalledWith(
      "Found 2 image(s) in issue_comment 999",
    );
  });

  test("should deduplicate identical URLs from Markdown and HTML", async () => {
    const mockOctokit = createMockOctokit();
    const imageUrl = "https://github.com/user-attachments/assets/duplicate.png";
    const signedUrl =
      "https://private-user-images.githubusercontent.com/dup.png?jwt=token";

    // @ts-expect-error Mock implementation doesn't match full type signature
    mockOctokit.rest.issues.getComment = jest.fn().mockResolvedValue({
      data: {
        body_html: `<img src="${signedUrl}">`,
      },
    });

    fetchSpy = spyOn(global, "fetch").mockResolvedValue({
      ok: true,
      arrayBuffer: async () => new ArrayBuffer(8),
    } as Response);

    const comments: CommentWithImages[] = [
      {
        type: "issue_comment",
        id: "1000",
        body: `Same image twice: ![](${imageUrl}) and <img src="${imageUrl}" alt="test">`,
      },
    ];

    const result = await downloadCommentImages(
      mockOctokit,
      "owner",
      "repo",
      comments,
    );

    expect(fetchSpy).toHaveBeenCalledTimes(1); // Only downloaded once
    expect(result.size).toBe(1);
    expect(result.get(imageUrl)).toBe(
      "/tmp/github-images/image-1704067200000-0.png",
    );
    expect(consoleLogSpy).toHaveBeenCalledWith(
      "Found 1 image(s) in issue_comment 1000",
    );
  });

  test("should handle HTML img tags with additional attributes", async () => {
    const mockOctokit = createMockOctokit();
    const imageUrl =
      "https://github.com/user-attachments/assets/complex-tag.webp";
    const signedUrl =
      "https://private-user-images.githubusercontent.com/complex.webp?jwt=token";

    // @ts-expect-error Mock implementation doesn't match full type signature
    mockOctokit.rest.issues.getComment = jest.fn().mockResolvedValue({
      data: {
        body_html: `<img src="${signedUrl}">`,
      },
    });

    fetchSpy = spyOn(global, "fetch").mockResolvedValue({
      ok: true,
      arrayBuffer: async () => new ArrayBuffer(8),
    } as Response);

    const comments: CommentWithImages[] = [
      {
        type: "issue_comment",
        id: "1001",
        body: `Complex tag: <img class="image" src="${imageUrl}" alt="test image" width="100" height="200">`,
      },
    ];

    const result = await downloadCommentImages(
      mockOctokit,
      "owner",
      "repo",
      comments,
    );

    expect(fetchSpy).toHaveBeenCalledTimes(1);
    expect(result.size).toBe(1);
    expect(result.get(imageUrl)).toBe(
      "/tmp/github-images/image-1704067200000-0.webp",
    );
    expect(consoleLogSpy).toHaveBeenCalledWith(
      "Found 1 image(s) in issue_comment 1001",
    );
  });
});
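The new tests above pin down the expected behaviour for HTML `<img>` tags: both quote styles, extra attributes, mixed Markdown and HTML sources, and de-duplication of repeated URLs. A pattern of roughly the shape below would satisfy them; the actual extractor in the repository may be structured differently, and the function and variable names here are illustrative only.

```typescript
// Sketch of an image-URL extractor matching what the new tests expect.
// Names are illustrative, not taken from the source tree.
function extractImageUrls(body: string): string[] {
  const urls = new Set<string>(); // Set de-duplicates URLs referenced via both Markdown and HTML
  const htmlImg = /<img\b[^>]*\bsrc\s*=\s*["']([^"']+)["']/gi; // single or double quotes, extra attributes allowed
  const markdownImg = /!\[[^\]]*\]\(([^)\s]+)\)/g; // standard Markdown image syntax
  for (const match of body.matchAll(htmlImg)) urls.add(match[1]!);
  for (const match of body.matchAll(markdownImg)) urls.add(match[1]!);
  return [...urls];
}
```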
@@ -39,13 +39,13 @@ describe("Mode Registry", () => {

  test("getMode throws error for tag mode with workflow_dispatch event", () => {
    expect(() => getMode("tag", mockWorkflowDispatchContext)).toThrow(
      "Tag mode cannot handle workflow_dispatch events. Use 'agent' mode for automation events or 'remote-agent' mode for repository_dispatch events.",
      "Tag mode cannot handle workflow_dispatch events. Use 'agent' mode for automation events.",
    );
  });

  test("getMode throws error for tag mode with schedule event", () => {
    expect(() => getMode("tag", mockScheduleContext)).toThrow(
      "Tag mode cannot handle schedule events. Use 'agent' mode for automation events or 'remote-agent' mode for repository_dispatch events.",
      "Tag mode cannot handle schedule events. Use 'agent' mode for automation events.",
    );
  });

@@ -64,7 +64,7 @@ describe("Mode Registry", () => {
  test("getMode throws error for invalid mode", () => {
    const invalidMode = "invalid" as unknown as ModeName;
    expect(() => getMode(invalidMode, mockContext)).toThrow(
      "Invalid mode 'invalid'. Valid modes are: 'tag', 'agent', 'remote-agent', 'experimental-review'. Please check your workflow configuration.",
      "Invalid mode 'invalid'. Valid modes are: 'tag', 'agent', 'experimental-review'. Please check your workflow configuration.",
    );
  });

@@ -72,7 +72,6 @@ describe("Mode Registry", () => {
    expect(isValidMode("tag")).toBe(true);
    expect(isValidMode("agent")).toBe(true);
    expect(isValidMode("experimental-review")).toBe(true);
    expect(isValidMode("remote-agent")).toBe(true);
  });

  test("isValidMode returns false for invalid mode", () => {
@@ -1,28 +0,0 @@
import { describe, test, expect } from "bun:test";
import type { StreamConfig } from "../src/types/stream-config";

describe("report-claude-complete", () => {
  test("StreamConfig type should include system_progress_endpoint", () => {
    const config: StreamConfig = {
      progress_endpoint: "https://example.com/progress",
      system_progress_endpoint: "https://example.com/system-progress",
      resume_endpoint: "https://example.com/resume",
      session_id: "test-session",
      headers: {
        Authorization: "Bearer test-token",
      },
    };

    expect(config.system_progress_endpoint).toBe(
      "https://example.com/system-progress",
    );
  });

  test("StreamConfig type should allow optional fields", () => {
    const config: StreamConfig = {};

    expect(config.system_progress_endpoint).toBeUndefined();
    expect(config.progress_endpoint).toBeUndefined();
    expect(config.headers).toBeUndefined();
  });
});