Compare commits

...

19 Commits
v1.0.29 ... eap

Author SHA1 Message Date
ollie-anthropic
9278e59355 typecheck 2025-08-28 13:03:27 -07:00
ollie-anthropic
2ef669b4c0 format 2025-08-28 13:03:27 -07:00
ollie-anthropic
7bd5b28434 merge to eap 2025-08-28 13:03:27 -07:00
Ashwin Bhat
3fdfa8eea7 fix conflict 2025-08-21 20:20:39 -07:00
Ashwin Bhat
733e2f5302 Merge pull request #8 from anthropic-labs/ashwin/resumefix
feat: add resume endpoint support for remote-agent mode
2025-08-21 20:20:39 -07:00
Chris Lloyd
1e24c646ef feat: add pre-commit hook support to GitHub MCP commit tool
- Execute .git/hooks/pre-commit before creating commits via GitHub API
- Add noVerify parameter to skip hooks (like git commit --no-verify)
- Handle hook failures by preventing commit creation
- Set proper Git environment variables for hook execution

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-08-21 20:20:39 -07:00
Chris Lloyd
e9ad08ee09 Fix file mode permissions in GitHub file operations
- Add getFileMode() function to detect proper file permissions
- Update commit_files tool to preserve execute permissions
- Support Git file modes: 100644 (regular), 100755 (executable)
- Prevent executable files from losing execute permissions

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-08-21 20:20:39 -07:00
Chris Lloyd
baeaddf546 Fix file mode permissions in commit signing operations
- Add getFileMode() function to detect proper file permissions
- Update commit_files tool to preserve execute permissions
- Support all Git file modes: 100644, 100755, 040000, 120000
- Prevent executable files from losing execute permissions
- Add resign-commits.ts and branch cleanup logic for commit signing

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-08-21 20:20:39 -07:00
claude[bot]
e55fe60b4e style: apply prettier formatting
Co-authored-by: Chris Lloyd <chrislloyd@users.noreply.github.com>
2025-08-21 20:20:39 -07:00
Chris Lloyd
a328bf4b16 feat: enforce MCP-only commits in remote agent mode for enhanced security
Remote agent mode now exclusively uses MCP tools for all commit operations,
eliminating the security risks associated with direct git command execution.

## Key Changes

### Security Enhancements
- **Removed git authentication setup**: No longer configures local git credentials
- **Eliminated dangerous git tools**: Blocked `git commit`, `git add`, `git push`, `git config`, `git rm`
- **Enforced API-based commits**: All commits go through GitHub API with proper authentication
- **Maintained read-only git access**: Preserved safe tools like `git status`, `git diff`, `git log`

### Implementation Details
- **New specialized function**: `buildRemoteAgentAllowedToolsString()` replaces general tool builder
- **Simplified system prompts**: Removed conditional logic since MCP is always used
- **Cleaner codebase**: Eliminated git configuration complexity for remote agents

### Tool Changes
**Added (always present):**
- `mcp__github_file_ops__commit_files` - Atomic multi-file commits via GitHub API
- `mcp__github_file_ops__delete_files` - File deletion via GitHub API

**Removed (security risks):**
- `Bash(git commit:*)` - Direct git commits
- `Bash(git add:*)` - Git staging
- `Bash(git push:*)` - Direct git pushes
- `Bash(git config:*)` - Git configuration
- `Bash(git rm:*)` - Git file removal

**Preserved (safe operations):**
- `Bash(git status:*)` - Repository status
- `Bash(git diff:*)` - Change inspection
- `Bash(git log:*)` - History viewing

## Testing
- Added comprehensive test suite for `buildRemoteAgentAllowedToolsString()`
- Verified security boundaries prevent dangerous tool inclusion
- Ensured custom tools and GitHub Actions integration still work
- All existing functionality preserved through MCP layer

## Benefits
- **Enhanced Security**: All commits are signed and authenticated via GitHub API
- **Consistent Attribution**: Proper commit authorship through GitHub's systems
- **Audit Trail**: Complete tracking of all repository modifications
- **Reduced Attack Surface**: No local git configuration or direct repository access

Remote agent mode is now significantly more secure while maintaining full
functionality through the existing MCP infrastructure.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-08-21 20:20:39 -07:00
Ashwin Bhat
4cdae8adfc prompt improvements 2025-08-12 14:29:04 -07:00
Ashwin Bhat
e5d38c6b74 log prompt 2025-08-12 13:53:52 -07:00
Ashwin Bhat
af398fcc95 only install comment server in tag mode 2025-08-08 08:22:29 -07:00
Ashwin Bhat
aeda2d62c0 version 2025-08-06 13:00:05 -07:00
Ashwin Bhat
2ce0b1c9b2 70 2025-08-06 12:32:34 -07:00
Ashwin Bhat
fd041f9b80 next 2025-08-06 12:23:04 -07:00
Ashwin Bhat
544983d6bf tmp 2025-08-06 10:03:09 -07:00
Ashwin Bhat
4d3cbe2826 test 2025-08-06 09:29:47 -07:00
Ashwin Bhat
52c2f5881b feat: add repository_dispatch event support
- Add new progress MCP server for reporting task status via API
- Support repository_dispatch events with task description and progress endpoint
- Introduce isDispatch flag to unify dispatch event handling
- Make GitHub data optional for dispatch events without issues/PRs
- Update prompt generation with dispatch-specific instructions

Enables triggering Claude via repository_dispatch with:
{
  "event_type": "claude_task",
  "client_payload": {
    "description": "Task description",
    "progress_endpoint": "https://api.example.com/progress"
  }
}
2025-08-05 10:56:07 -07:00
28 changed files with 2763 additions and 77 deletions

2
.npmrc
View File

@@ -1,2 +0,0 @@
engine-strict=true
registry=https://registry.npmjs.org/

View File

@@ -162,6 +162,10 @@ runs:
ADDITIONAL_PERMISSIONS: ${{ inputs.additional_permissions }} ADDITIONAL_PERMISSIONS: ${{ inputs.additional_permissions }}
USE_COMMIT_SIGNING: ${{ inputs.use_commit_signing }} USE_COMMIT_SIGNING: ${{ inputs.use_commit_signing }}
# Authentication for remote-agent mode
ANTHROPIC_API_KEY: ${{ inputs.anthropic_api_key }}
CLAUDE_CODE_OAUTH_TOKEN: ${{ inputs.claude_code_oauth_token }}
- name: Install Base Action Dependencies - name: Install Base Action Dependencies
if: steps.prepare.outputs.contains_trigger == 'true' if: steps.prepare.outputs.contains_trigger == 'true'
shell: bash shell: bash
@@ -172,7 +176,7 @@ runs:
echo "Base-action dependencies installed" echo "Base-action dependencies installed"
cd - cd -
# Install Claude Code globally # Install Claude Code globally
bun install -g @anthropic-ai/claude-code@1.0.67 bun install -g @anthropic-ai/claude-code
- name: Setup Network Restrictions - name: Setup Network Restrictions
if: steps.prepare.outputs.contains_trigger == 'true' && inputs.experimental_allowed_domains != '' if: steps.prepare.outputs.contains_trigger == 'true' && inputs.experimental_allowed_domains != ''
@@ -188,7 +192,6 @@ runs:
if: steps.prepare.outputs.contains_trigger == 'true' if: steps.prepare.outputs.contains_trigger == 'true'
shell: bash shell: bash
run: | run: |
# Run the base-action # Run the base-action
bun run ${GITHUB_ACTION_PATH}/base-action/src/index.ts bun run ${GITHUB_ACTION_PATH}/base-action/src/index.ts
env: env:
@@ -206,16 +209,17 @@ runs:
INPUT_CLAUDE_ENV: ${{ inputs.claude_env }} INPUT_CLAUDE_ENV: ${{ inputs.claude_env }}
INPUT_FALLBACK_MODEL: ${{ inputs.fallback_model }} INPUT_FALLBACK_MODEL: ${{ inputs.fallback_model }}
INPUT_EXPERIMENTAL_SLASH_COMMANDS_DIR: ${{ github.action_path }}/slash-commands INPUT_EXPERIMENTAL_SLASH_COMMANDS_DIR: ${{ github.action_path }}/slash-commands
INPUT_STREAM_CONFIG: ${{ steps.prepare.outputs.stream_config }}
# Model configuration # Model configuration
ANTHROPIC_MODEL: ${{ inputs.model || inputs.anthropic_model }} ANTHROPIC_MODEL: ${{ steps.prepare.outputs.anthropic_model || inputs.model || inputs.anthropic_model }}
GITHUB_TOKEN: ${{ steps.prepare.outputs.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ steps.prepare.outputs.GITHUB_TOKEN }}
NODE_VERSION: ${{ env.NODE_VERSION }} NODE_VERSION: ${{ env.NODE_VERSION }}
DETAILED_PERMISSION_MESSAGES: "1" DETAILED_PERMISSION_MESSAGES: "1"
# Provider configuration # Provider configuration
ANTHROPIC_API_KEY: ${{ inputs.anthropic_api_key }} ANTHROPIC_API_KEY: ${{ inputs.anthropic_api_key }}
CLAUDE_CODE_OAUTH_TOKEN: ${{ inputs.claude_code_oauth_token }} CLAUDE_CODE_OAUTH_TOKEN: ${{ steps.prepare.outputs.claude_code_oauth_token || inputs.claude_code_oauth_token }}
ANTHROPIC_BASE_URL: ${{ env.ANTHROPIC_BASE_URL }} ANTHROPIC_BASE_URL: ${{ env.ANTHROPIC_BASE_URL }}
CLAUDE_CODE_USE_BEDROCK: ${{ inputs.use_bedrock == 'true' && '1' || '' }} CLAUDE_CODE_USE_BEDROCK: ${{ inputs.use_bedrock == 'true' && '1' || '' }}
CLAUDE_CODE_USE_VERTEX: ${{ inputs.use_vertex == 'true' && '1' || '' }} CLAUDE_CODE_USE_VERTEX: ${{ inputs.use_vertex == 'true' && '1' || '' }}
@@ -238,6 +242,21 @@ runs:
VERTEX_REGION_CLAUDE_3_5_SONNET: ${{ env.VERTEX_REGION_CLAUDE_3_5_SONNET }} VERTEX_REGION_CLAUDE_3_5_SONNET: ${{ env.VERTEX_REGION_CLAUDE_3_5_SONNET }}
VERTEX_REGION_CLAUDE_3_7_SONNET: ${{ env.VERTEX_REGION_CLAUDE_3_7_SONNET }} VERTEX_REGION_CLAUDE_3_7_SONNET: ${{ env.VERTEX_REGION_CLAUDE_3_7_SONNET }}
- name: Report Claude completion
if: steps.prepare.outputs.contains_trigger == 'true' && always()
shell: bash
run: |
bun run ${GITHUB_ACTION_PATH}/src/entrypoints/report-claude-complete.ts
env:
MODE: ${{ inputs.mode }}
STREAM_CONFIG: ${{ steps.prepare.outputs.stream_config }}
CLAUDE_CONCLUSION: ${{ steps.claude-code.outputs.conclusion }}
CLAUDE_START_TIME: ${{ steps.prepare.outputs.claude_start_time }}
CLAUDE_BRANCH: ${{ steps.prepare.outputs.CLAUDE_BRANCH }}
USE_COMMIT_SIGNING: ${{ inputs.use_commit_signing }}
GITHUB_TOKEN: ${{ steps.prepare.outputs.GITHUB_TOKEN }}
GITHUB_REPOSITORY: ${{ github.repository }}
- name: Update comment with job link - name: Update comment with job link
if: steps.prepare.outputs.contains_trigger == 'true' && steps.prepare.outputs.claude_comment_id && always() if: steps.prepare.outputs.contains_trigger == 'true' && steps.prepare.outputs.claude_comment_id && always()
shell: bash shell: bash

View File

@@ -102,7 +102,7 @@ runs:
- name: Setup Node.js - name: Setup Node.js
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # https://github.com/actions/setup-node/releases/tag/v4.4.0 uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # https://github.com/actions/setup-node/releases/tag/v4.4.0
with: with:
node-version: ${{ env.NODE_VERSION || '18.x' }} node-version: ${{ env.NODE_VERSION || '22.x' }}
cache: ${{ inputs.use_node_cache == 'true' && 'npm' || '' }} cache: ${{ inputs.use_node_cache == 'true' && 'npm' || '' }}
- name: Install Bun - name: Install Bun
@@ -118,7 +118,9 @@ runs:
- name: Install Claude Code - name: Install Claude Code
shell: bash shell: bash
run: bun install -g @anthropic-ai/claude-code@1.0.67 run: |
# Install Claude Code
bun install -g @anthropic-ai/claude-code
- name: Run Claude Code Action - name: Run Claude Code Action
shell: bash shell: bash

View File

@@ -30,7 +30,7 @@ async function run() {
appendSystemPrompt: process.env.INPUT_APPEND_SYSTEM_PROMPT, appendSystemPrompt: process.env.INPUT_APPEND_SYSTEM_PROMPT,
claudeEnv: process.env.INPUT_CLAUDE_ENV, claudeEnv: process.env.INPUT_CLAUDE_ENV,
fallbackModel: process.env.INPUT_FALLBACK_MODEL, fallbackModel: process.env.INPUT_FALLBACK_MODEL,
model: process.env.ANTHROPIC_MODEL, streamConfig: process.env.INPUT_STREAM_CONFIG,
}); });
} catch (error) { } catch (error) {
core.setFailed(`Action failed with error: ${error}`); core.setFailed(`Action failed with error: ${error}`);

View File

@@ -4,6 +4,7 @@ import { promisify } from "util";
import { unlink, writeFile, stat } from "fs/promises"; import { unlink, writeFile, stat } from "fs/promises";
import { createWriteStream } from "fs"; import { createWriteStream } from "fs";
import { spawn } from "child_process"; import { spawn } from "child_process";
import { StreamHandler } from "./stream-handler";
const execAsync = promisify(exec); const execAsync = promisify(exec);
@@ -21,7 +22,14 @@ export type ClaudeOptions = {
claudeEnv?: string; claudeEnv?: string;
fallbackModel?: string; fallbackModel?: string;
timeoutMinutes?: string; timeoutMinutes?: string;
model?: string; streamConfig?: string;
};
export type StreamConfig = {
progress_endpoint?: string;
headers?: Record<string, string>;
resume_endpoint?: string;
session_id?: string;
}; };
type PreparedConfig = { type PreparedConfig = {
@@ -95,9 +103,6 @@ export function prepareRunConfig(
if (options.fallbackModel) { if (options.fallbackModel) {
claudeArgs.push("--fallback-model", options.fallbackModel); claudeArgs.push("--fallback-model", options.fallbackModel);
} }
if (options.model) {
claudeArgs.push("--model", options.model);
}
if (options.timeoutMinutes) { if (options.timeoutMinutes) {
const timeoutMinutesNum = parseInt(options.timeoutMinutes, 10); const timeoutMinutesNum = parseInt(options.timeoutMinutes, 10);
if (isNaN(timeoutMinutesNum) || timeoutMinutesNum <= 0) { if (isNaN(timeoutMinutesNum) || timeoutMinutesNum <= 0) {
@@ -106,6 +111,22 @@ export function prepareRunConfig(
); );
} }
} }
// Parse stream config for session_id and resume_endpoint
if (options.streamConfig) {
try {
const streamConfig: StreamConfig = JSON.parse(options.streamConfig);
// Add --session-id if session_id is provided
if (streamConfig.session_id) {
claudeArgs.push("--session-id", streamConfig.session_id);
}
// Only add --teleport if we have both session_id AND resume_endpoint
if (streamConfig.session_id && streamConfig.resume_endpoint) {
claudeArgs.push("--teleport", streamConfig.session_id);
}
} catch (e) {
console.error("Failed to parse stream_config JSON:", e);
}
}
// Parse custom environment variables // Parse custom environment variables
const customEnv = parseCustomEnvVars(options.claudeEnv); const customEnv = parseCustomEnvVars(options.claudeEnv);
@@ -120,6 +141,34 @@ export function prepareRunConfig(
export async function runClaude(promptPath: string, options: ClaudeOptions) { export async function runClaude(promptPath: string, options: ClaudeOptions) {
const config = prepareRunConfig(promptPath, options); const config = prepareRunConfig(promptPath, options);
// Set up streaming if endpoint is provided in stream config
let streamHandler: StreamHandler | null = null;
let streamConfig: StreamConfig | null = null;
if (options.streamConfig) {
try {
streamConfig = JSON.parse(options.streamConfig);
if (streamConfig?.progress_endpoint) {
const customHeaders = streamConfig.headers || {};
console.log("parsed headers", customHeaders);
Object.keys(customHeaders).forEach((key) => {
console.log(`Custom header: ${key} = ${customHeaders[key]}`);
});
streamHandler = new StreamHandler(
streamConfig.progress_endpoint,
customHeaders,
);
console.log(`Streaming output to: ${streamConfig.progress_endpoint}`);
if (Object.keys(customHeaders).length > 0) {
console.log(
`Custom streaming headers: ${Object.keys(customHeaders).join(", ")}`,
);
}
}
} catch (e) {
console.error("Failed to parse stream_config JSON:", e);
}
}
// Create a named pipe // Create a named pipe
try { try {
await unlink(PIPE_PATH); await unlink(PIPE_PATH);
@@ -162,12 +211,31 @@ export async function runClaude(promptPath: string, options: ClaudeOptions) {
pipeStream.destroy(); pipeStream.destroy();
}); });
// Prepare environment variables
const processEnv = {
...process.env,
...config.env,
};
// If both session_id and resume_endpoint are provided, set environment variables
if (streamConfig?.session_id && streamConfig?.resume_endpoint) {
processEnv.TELEPORT_RESUME_URL = streamConfig.resume_endpoint;
console.log(
`Setting TELEPORT_RESUME_URL to: ${streamConfig.resume_endpoint}`,
);
if (streamConfig.headers && Object.keys(streamConfig.headers).length > 0) {
processEnv.TELEPORT_HEADERS = JSON.stringify(streamConfig.headers);
console.log(`Setting TELEPORT_HEADERS for resume endpoint`);
}
}
// Log the full Claude command being executed
console.log(`Running Claude with args: ${config.claudeArgs.join(" ")}`);
const claudeProcess = spawn("claude", config.claudeArgs, { const claudeProcess = spawn("claude", config.claudeArgs, {
stdio: ["pipe", "pipe", "inherit"], stdio: ["pipe", "pipe", "inherit"],
env: { env: processEnv,
...process.env,
...config.env,
},
}); });
// Handle Claude process errors // Handle Claude process errors
@@ -178,32 +246,51 @@ export async function runClaude(promptPath: string, options: ClaudeOptions) {
// Capture output for parsing execution metrics // Capture output for parsing execution metrics
let output = ""; let output = "";
claudeProcess.stdout.on("data", (data) => { let lineBuffer = ""; // Buffer for incomplete lines
claudeProcess.stdout.on("data", async (data) => {
const text = data.toString(); const text = data.toString();
output += text;
// Try to parse as JSON and pretty print if it's on a single line // Add new data to line buffer
const lines = text.split("\n"); lineBuffer += text;
lines.forEach((line: string, index: number) => {
if (line.trim() === "") return;
// Split into lines - the last element might be incomplete
const lines = lineBuffer.split("\n");
// The last element is either empty (if text ended with \n) or incomplete
lineBuffer = lines.pop() || "";
// Process complete lines
for (let index = 0; index < lines.length; index++) {
const line = lines[index];
if (!line || line.trim() === "") continue;
// Try to parse as JSON and pretty print if it's on a single line
try { try {
// Check if this line is a JSON object // Check if this line is a JSON object
const parsed = JSON.parse(line); const parsed = JSON.parse(line);
const prettyJson = JSON.stringify(parsed, null, 2); const prettyJson = JSON.stringify(parsed, null, 2);
process.stdout.write(prettyJson); process.stdout.write(prettyJson);
if (index < lines.length - 1 || text.endsWith("\n")) { process.stdout.write("\n");
process.stdout.write("\n");
// Send valid JSON to stream handler if available
if (streamHandler) {
try {
// Send the original line (which is valid JSON) with newline for proper splitting
const dataToSend = line + "\n";
await streamHandler.addOutput(dataToSend);
} catch (error) {
core.warning(`Failed to stream output: ${error}`);
}
} }
} catch (e) { } catch (e) {
// Not a JSON object, print as is // Not a JSON object, print as is
process.stdout.write(line); process.stdout.write(line);
if (index < lines.length - 1 || text.endsWith("\n")) { process.stdout.write("\n");
process.stdout.write("\n"); // Don't send non-JSON lines to stream handler
}
} }
}); }
output += text;
}); });
// Handle stdout errors // Handle stdout errors
@@ -257,8 +344,33 @@ export async function runClaude(promptPath: string, options: ClaudeOptions) {
} }
}, timeoutMs); }, timeoutMs);
claudeProcess.on("close", (code) => { claudeProcess.on("close", async (code) => {
if (!resolved) { if (!resolved) {
// Process any remaining data in the line buffer
if (lineBuffer.trim()) {
// Try to parse and print the remaining line
try {
const parsed = JSON.parse(lineBuffer);
const prettyJson = JSON.stringify(parsed, null, 2);
process.stdout.write(prettyJson);
process.stdout.write("\n");
// Send valid JSON to stream handler if available
if (streamHandler) {
try {
const dataToSend = lineBuffer + "\n";
await streamHandler.addOutput(dataToSend);
} catch (error) {
core.warning(`Failed to stream final output: ${error}`);
}
}
} catch (e) {
process.stdout.write(lineBuffer);
process.stdout.write("\n");
// Don't send non-JSON lines to stream handler
}
}
clearTimeout(timeoutId); clearTimeout(timeoutId);
resolved = true; resolved = true;
resolve(code || 0); resolve(code || 0);
@@ -275,6 +387,15 @@ export async function runClaude(promptPath: string, options: ClaudeOptions) {
}); });
}); });
// Clean up streaming
if (streamHandler) {
try {
await streamHandler.close();
} catch (error) {
core.warning(`Failed to close stream handler: ${error}`);
}
}
// Clean up processes // Clean up processes
try { try {
catProcess.kill("SIGTERM"); catProcess.kill("SIGTERM");

View File

@@ -0,0 +1,152 @@
import * as core from "@actions/core";
export function parseStreamHeaders(
headersInput?: string,
): Record<string, string> {
if (!headersInput || headersInput.trim() === "") {
return {};
}
try {
return JSON.parse(headersInput);
} catch (e) {
console.error("Failed to parse stream headers as JSON:", e);
return {};
}
}
export type TokenGetter = (audience: string) => Promise<string>;
export class StreamHandler {
private endpoint: string;
private customHeaders: Record<string, string>;
private tokenGetter: TokenGetter;
private token: string | null = null;
private tokenFetchTime: number = 0;
private buffer: string[] = [];
private flushTimer: NodeJS.Timeout | null = null;
private isClosed = false;
private readonly TOKEN_LIFETIME_MS = 4 * 60 * 1000; // 4 minutes
private readonly BATCH_SIZE = 10;
private readonly BATCH_TIMEOUT_MS = 1000;
private readonly REQUEST_TIMEOUT_MS = 5000;
constructor(
endpoint: string,
customHeaders: Record<string, string> = {},
tokenGetter?: TokenGetter,
) {
this.endpoint = endpoint;
this.customHeaders = customHeaders;
this.tokenGetter = tokenGetter || ((audience) => core.getIDToken(audience));
}
async addOutput(data: string): Promise<void> {
if (this.isClosed) return;
// Split by newlines and add to buffer
const lines = data.split("\n").filter((line) => line.length > 0);
this.buffer.push(...lines);
// Check if we should flush
if (this.buffer.length >= this.BATCH_SIZE) {
await this.flush();
} else {
// Set or reset the timer
this.resetFlushTimer();
}
}
private resetFlushTimer(): void {
if (this.flushTimer) {
clearTimeout(this.flushTimer);
}
this.flushTimer = setTimeout(() => {
this.flush().catch((err) => {
core.warning(`Failed to flush stream buffer: ${err}`);
});
}, this.BATCH_TIMEOUT_MS);
}
private async getToken(): Promise<string> {
const now = Date.now();
// Check if we need a new token
if (!this.token || now - this.tokenFetchTime >= this.TOKEN_LIFETIME_MS) {
try {
this.token = await this.tokenGetter("claude-code-github-action");
this.tokenFetchTime = now;
core.debug("Fetched new OIDC token for streaming");
} catch (error) {
throw new Error(`Failed to get OIDC token: ${error}`);
}
}
return this.token;
}
private async flush(): Promise<void> {
if (this.buffer.length === 0) return;
// Clear the flush timer
if (this.flushTimer) {
clearTimeout(this.flushTimer);
this.flushTimer = null;
}
// Get the current buffer and clear it
const output = [...this.buffer];
this.buffer = [];
try {
const token = await this.getToken();
const payload = {
timestamp: new Date().toISOString(),
output: output,
};
// Create an AbortController for timeout
const controller = new AbortController();
const timeoutId = setTimeout(
() => controller.abort(),
this.REQUEST_TIMEOUT_MS,
);
try {
await fetch(this.endpoint, {
method: "POST",
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${token}`,
...this.customHeaders,
},
body: JSON.stringify(payload),
signal: controller.signal,
});
} finally {
clearTimeout(timeoutId);
}
} catch (error) {
// Log but don't throw - we don't want to interrupt Claude's execution
core.warning(`Failed to stream output: ${error}`);
}
}
async close(): Promise<void> {
// Clear any pending timer
if (this.flushTimer) {
clearTimeout(this.flushTimer);
this.flushTimer = null;
}
// Flush any remaining output
if (this.buffer.length > 0) {
await this.flush();
}
// Mark as closed after flushing
this.isClosed = true;
}
}

View File

@@ -0,0 +1,97 @@
import { describe, it, expect } from "bun:test";
import { prepareRunConfig } from "../src/run-claude";
describe("resume endpoint functionality", () => {
it("should add --teleport flag when both session_id and resume_endpoint are provided", () => {
const streamConfig = JSON.stringify({
session_id: "12345",
resume_endpoint: "https://example.com/resume/12345",
});
const config = prepareRunConfig("/path/to/prompt", {
streamConfig,
});
expect(config.claudeArgs).toContain("--teleport");
expect(config.claudeArgs).toContain("12345");
});
it("should not add --teleport flag when no streamConfig is provided", () => {
const config = prepareRunConfig("/path/to/prompt", {
allowedTools: "Edit",
});
expect(config.claudeArgs).not.toContain("--teleport");
});
it("should not add --teleport flag when only session_id is provided without resume_endpoint", () => {
const streamConfig = JSON.stringify({
session_id: "12345",
// No resume_endpoint
});
const config = prepareRunConfig("/path/to/prompt", {
streamConfig,
});
expect(config.claudeArgs).not.toContain("--teleport");
});
it("should not add --teleport flag when only resume_endpoint is provided without session_id", () => {
const streamConfig = JSON.stringify({
resume_endpoint: "https://example.com/resume/12345",
// No session_id
});
const config = prepareRunConfig("/path/to/prompt", {
streamConfig,
});
expect(config.claudeArgs).not.toContain("--teleport");
});
it("should maintain order of arguments with session_id", () => {
const streamConfig = JSON.stringify({
session_id: "12345",
resume_endpoint: "https://example.com/resume/12345",
});
const config = prepareRunConfig("/path/to/prompt", {
allowedTools: "Edit",
streamConfig,
maxTurns: "5",
});
const teleportIndex = config.claudeArgs.indexOf("--teleport");
const maxTurnsIndex = config.claudeArgs.indexOf("--max-turns");
expect(teleportIndex).toBeGreaterThan(-1);
expect(maxTurnsIndex).toBeGreaterThan(-1);
});
it("should handle progress_endpoint and headers in streamConfig", () => {
const streamConfig = JSON.stringify({
progress_endpoint: "https://example.com/progress",
headers: { "X-Test": "value" },
});
const config = prepareRunConfig("/path/to/prompt", {
streamConfig,
});
// This test just verifies parsing doesn't fail - actual streaming logic
// is tested elsewhere as it requires environment setup
expect(config.claudeArgs).toBeDefined();
});
it("should handle session_id with resume_endpoint and headers", () => {
const streamConfig = JSON.stringify({
session_id: "abc123",
resume_endpoint: "https://example.com/resume/abc123",
headers: { Authorization: "Bearer token" },
progress_endpoint: "https://example.com/progress",
});
const config = prepareRunConfig("/path/to/prompt", {
streamConfig,
});
expect(config.claudeArgs).toContain("--teleport");
expect(config.claudeArgs).toContain("abc123");
// Note: Environment variable setup (TELEPORT_RESUME_URL, TELEPORT_HEADERS) is tested in integration tests
});
});

View File

@@ -0,0 +1,364 @@
import { describe, it, expect, beforeEach, mock } from "bun:test";
import {
StreamHandler,
parseStreamHeaders,
type TokenGetter,
} from "../src/stream-handler";
describe("parseStreamHeaders", () => {
it("should return empty object for empty input", () => {
expect(parseStreamHeaders("")).toEqual({});
expect(parseStreamHeaders(undefined)).toEqual({});
expect(parseStreamHeaders(" ")).toEqual({});
});
it("should parse single header", () => {
const result = parseStreamHeaders('{"X-Correlation-Id": "12345"}');
expect(result).toEqual({ "X-Correlation-Id": "12345" });
});
it("should parse multiple headers", () => {
const headers = JSON.stringify({
"X-Correlation-Id": "12345",
"X-Custom-Header": "custom-value",
Authorization: "Bearer token123",
});
const result = parseStreamHeaders(headers);
expect(result).toEqual({
"X-Correlation-Id": "12345",
"X-Custom-Header": "custom-value",
Authorization: "Bearer token123",
});
});
it("should handle headers with spaces", () => {
const headers = JSON.stringify({
"X-Header-One": "value with spaces",
"X-Header-Two": "another value",
});
const result = parseStreamHeaders(headers);
expect(result).toEqual({
"X-Header-One": "value with spaces",
"X-Header-Two": "another value",
});
});
it("should skip empty lines and comments", () => {
const headers = JSON.stringify({
"X-Header-One": "value1",
"X-Header-Two": "value2",
"X-Header-Three": "value3",
});
const result = parseStreamHeaders(headers);
expect(result).toEqual({
"X-Header-One": "value1",
"X-Header-Two": "value2",
"X-Header-Three": "value3",
});
});
it("should skip lines without colons", () => {
const headers = JSON.stringify({
"X-Header-One": "value1",
"X-Header-Two": "value2",
});
const result = parseStreamHeaders(headers);
expect(result).toEqual({
"X-Header-One": "value1",
"X-Header-Two": "value2",
});
});
it("should handle headers with colons in values", () => {
const headers = JSON.stringify({
"X-URL": "https://example.com:8080/path",
"X-Time": "10:30:45",
});
const result = parseStreamHeaders(headers);
expect(result).toEqual({
"X-URL": "https://example.com:8080/path",
"X-Time": "10:30:45",
});
});
});
describe("StreamHandler", () => {
let handler: StreamHandler;
let mockFetch: ReturnType<typeof mock>;
let mockTokenGetter: TokenGetter;
const mockEndpoint = "https://test.example.com/stream";
const mockToken = "mock-oidc-token";
beforeEach(() => {
// Mock fetch
mockFetch = mock(() => Promise.resolve({ ok: true }));
global.fetch = mockFetch as any;
// Mock token getter
mockTokenGetter = mock(() => Promise.resolve(mockToken));
});
describe("basic functionality", () => {
it("should batch lines up to BATCH_SIZE", async () => {
handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);
// Add 9 lines (less than batch size of 10)
for (let i = 1; i <= 9; i++) {
await handler.addOutput(`line ${i}\n`);
}
// Should not have sent anything yet
expect(mockFetch).not.toHaveBeenCalled();
// Add the 10th line to trigger flush
await handler.addOutput("line 10\n");
// Should have sent the batch
expect(mockFetch).toHaveBeenCalledTimes(1);
expect(mockFetch).toHaveBeenCalledWith(mockEndpoint, {
method: "POST",
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${mockToken}`,
},
body: expect.stringContaining(
'"output":["line 1","line 2","line 3","line 4","line 5","line 6","line 7","line 8","line 9","line 10"]',
),
signal: expect.any(AbortSignal),
});
});
it("should flush on timeout", async () => {
handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);
// Add a few lines
await handler.addOutput("line 1\n");
await handler.addOutput("line 2\n");
// Should not have sent anything yet
expect(mockFetch).not.toHaveBeenCalled();
// Wait for the timeout to trigger
await new Promise((resolve) => setTimeout(resolve, 1100));
// Should have sent the batch
expect(mockFetch).toHaveBeenCalledTimes(1);
const call = mockFetch.mock.calls[0];
expect(call).toBeDefined();
const body = JSON.parse(call![1].body);
expect(body.output).toEqual(["line 1", "line 2"]);
});
it("should include custom headers", async () => {
const customHeaders = {
"X-Correlation-Id": "12345",
"X-Custom": "value",
};
handler = new StreamHandler(mockEndpoint, customHeaders, mockTokenGetter);
// Trigger a batch
for (let i = 1; i <= 10; i++) {
await handler.addOutput(`line ${i}\n`);
}
expect(mockFetch).toHaveBeenCalledWith(mockEndpoint, {
method: "POST",
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${mockToken}`,
"X-Correlation-Id": "12345",
"X-Custom": "value",
},
body: expect.any(String),
signal: expect.any(AbortSignal),
});
});
it("should include timestamp in payload", async () => {
handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);
const beforeTime = new Date().toISOString();
// Trigger a batch
for (let i = 1; i <= 10; i++) {
await handler.addOutput(`line ${i}\n`);
}
const afterTime = new Date().toISOString();
const call = mockFetch.mock.calls[0];
expect(call).toBeDefined();
const body = JSON.parse(call![1].body);
expect(body).toHaveProperty("timestamp");
expect(new Date(body.timestamp).toISOString()).toBe(body.timestamp);
expect(body.timestamp >= beforeTime).toBe(true);
expect(body.timestamp <= afterTime).toBe(true);
});
});
describe("token management", () => {
it("should fetch token on first request", async () => {
handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);
// Trigger a flush
for (let i = 1; i <= 10; i++) {
await handler.addOutput(`line ${i}\n`);
}
expect(mockTokenGetter).toHaveBeenCalledWith("claude-code-github-action");
expect(mockTokenGetter).toHaveBeenCalledTimes(1);
});
it("should reuse token within 4 minutes", async () => {
handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);
// First batch
for (let i = 1; i <= 10; i++) {
await handler.addOutput(`line ${i}\n`);
}
// Second batch immediately (within 4 minutes)
for (let i = 11; i <= 20; i++) {
await handler.addOutput(`line ${i}\n`);
}
// Should have only fetched token once
expect(mockTokenGetter).toHaveBeenCalledTimes(1);
});
it("should handle token fetch errors", async () => {
const errorTokenGetter = mock(() =>
Promise.reject(new Error("Token fetch failed")),
);
handler = new StreamHandler(mockEndpoint, {}, errorTokenGetter);
// Try to send data
for (let i = 1; i <= 10; i++) {
await handler.addOutput(`line ${i}\n`);
}
// Should not have made fetch request
expect(mockFetch).not.toHaveBeenCalled();
});
});
describe("error handling", () => {
it("should handle fetch errors gracefully", async () => {
mockFetch.mockImplementation(() =>
Promise.reject(new Error("Network error")),
);
handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);
// Send data - should not throw
for (let i = 1; i <= 10; i++) {
await handler.addOutput(`line ${i}\n`);
}
// Should have attempted to fetch
expect(mockFetch).toHaveBeenCalledTimes(1);
});
it("should continue processing after errors", async () => {
handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);
// First batch - make it fail
let callCount = 0;
mockFetch.mockImplementation(() => {
callCount++;
if (callCount === 1) {
return Promise.reject(new Error("First batch failed"));
}
return Promise.resolve({ ok: true });
});
for (let i = 1; i <= 10; i++) {
await handler.addOutput(`line ${i}\n`);
}
// Second batch - should work
for (let i = 11; i <= 20; i++) {
await handler.addOutput(`line ${i}\n`);
}
// Should have attempted both batches
expect(mockFetch).toHaveBeenCalledTimes(2);
});
});
describe("close functionality", () => {
it("should flush remaining data on close", async () => {
handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);
// Add some data but not enough to trigger batch
await handler.addOutput("line 1\n");
await handler.addOutput("line 2\n");
expect(mockFetch).not.toHaveBeenCalled();
// Close should flush
await handler.close();
expect(mockFetch).toHaveBeenCalledTimes(1);
const call = mockFetch.mock.calls[0];
expect(call).toBeDefined();
const body = JSON.parse(call![1].body);
expect(body.output).toEqual(["line 1", "line 2"]);
});
it("should not accept new data after close", async () => {
handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);
await handler.close();
// Try to add data after close
await handler.addOutput("should not be sent\n");
// Should not have sent anything
expect(mockFetch).not.toHaveBeenCalled();
});
});
describe("data handling", () => {
it("should filter out empty lines", async () => {
handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);
await handler.addOutput("line 1\n\n\nline 2\n\n");
await handler.close();
const call = mockFetch.mock.calls[0];
expect(call).toBeDefined();
const body = JSON.parse(call![1].body);
expect(body.output).toEqual(["line 1", "line 2"]);
});
it("should handle data without newlines", async () => {
handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);
await handler.addOutput("single line");
await handler.close();
const call = mockFetch.mock.calls[0];
expect(call).toBeDefined();
const body = JSON.parse(call![1].body);
expect(body.output).toEqual(["single line"]);
});
it("should handle multi-line input correctly", async () => {
handler = new StreamHandler(mockEndpoint, {}, mockTokenGetter);
await handler.addOutput("line 1\nline 2\nline 3");
await handler.close();
const call = mockFetch.mock.calls[0];
expect(call).toBeDefined();
const body = JSON.parse(call![1].body);
expect(body.output).toEqual(["line 1", "line 2", "line 3"]);
});
});
});

View File

@@ -81,6 +81,44 @@ export function buildAllowedToolsString(
return allAllowedTools; return allAllowedTools;
} }
/**
 * Specialized allowed-tools string for remote agent mode.
 * Always uses MCP commit signing and excludes dangerous git commands —
 * only read-only git operations (status/diff/log) are allowed via Bash.
 *
 * @param customAllowedTools - extra tool names appended verbatim at the end
 * @param includeActionsTools - when true, adds the GitHub Actions CI MCP tools
 * @returns comma-separated allowed-tools string
 */
export function buildRemoteAgentAllowedToolsString(
  customAllowedTools?: string[],
  includeActionsTools: boolean = false,
): string {
  // Build the list once and join at the end instead of repeated string
  // concatenation; the binding never changes, so it is const.
  const tools: string[] = [
    ...BASE_ALLOWED_TOOLS,
    // Always include the comment update tool from the comment server
    "mcp__github_comment__update_claude_comment",
    // Remote agent mode always uses MCP commit signing
    "mcp__github_file_ops__commit_files",
    "mcp__github_file_ops__delete_files",
    // Add safe git tools only (read-only operations)
    "Bash(git status:*)",
    "Bash(git diff:*)",
    "Bash(git log:*)",
  ];
  // Add GitHub Actions MCP tools if enabled
  if (includeActionsTools) {
    tools.push(
      "mcp__github_ci__get_ci_status",
      "mcp__github_ci__get_workflow_run_details",
      "mcp__github_ci__download_job_log",
    );
  }
  if (customAllowedTools && customAllowedTools.length > 0) {
    tools.push(...customAllowedTools);
  }
  return tools.join(",");
}
export function buildDisallowedToolsString( export function buildDisallowedToolsString(
customDisallowedTools?: string[], customDisallowedTools?: string[],
allowedTools?: string[], allowedTools?: string[],

View File

@@ -12,7 +12,6 @@ import { createOctokit } from "../github/api/client";
import { parseGitHubContext, isEntityContext } from "../github/context"; import { parseGitHubContext, isEntityContext } from "../github/context";
import { getMode, isValidMode, DEFAULT_MODE } from "../modes/registry"; import { getMode, isValidMode, DEFAULT_MODE } from "../modes/registry";
import type { ModeName } from "../modes/types"; import type { ModeName } from "../modes/types";
import { prepare } from "../prepare";
async function run() { async function run() {
try { try {
@@ -60,7 +59,19 @@ async function run() {
} }
// Step 4: Get mode and check trigger conditions // Step 4: Get mode and check trigger conditions
const mode = getMode(validatedMode, context); let mode;
// TEMPORARY HACK: Always use remote-agent mode for repository_dispatch events
// This ensures backward compatibility while we transition
if (context.eventName === "repository_dispatch") {
console.log(
"🔧 TEMPORARY HACK: Forcing remote-agent mode for repository_dispatch event",
);
mode = getMode("remote-agent", context);
} else {
mode = getMode(context.inputs.mode, context);
}
const containsTrigger = mode.shouldTrigger(context); const containsTrigger = mode.shouldTrigger(context);
// Set output for action.yml to check // Set output for action.yml to check
@@ -72,10 +83,9 @@ async function run() {
} }
// Step 5: Use the new modular prepare function // Step 5: Use the new modular prepare function
const result = await prepare({ const result = await mode.prepare({
context, context,
octokit, octokit,
mode,
githubToken, githubToken,
}); });

View File

@@ -0,0 +1,118 @@
#!/usr/bin/env bun
import * as core from "@actions/core";
import { reportClaudeComplete } from "../modes/remote-agent/system-progress-handler";
import type { SystemProgressConfig } from "../modes/remote-agent/progress-types";
import type { StreamConfig } from "../types/stream-config";
import { commitUncommittedChanges } from "../github/utils/git-common-utils";
/**
 * Post-execution step for remote-agent mode: reports Claude's completion
 * status to the system progress endpoint, then commits any changes Claude
 * left uncommitted on its working branch.
 *
 * All failures are downgraded to warnings — this step must never fail the
 * surrounding GitHub Action.
 */
async function run() {
  try {
    // Only run if we're in remote-agent mode
    const mode = process.env.MODE;
    if (mode !== "remote-agent") {
      console.log(
        "Not in remote-agent mode, skipping Claude completion reporting",
      );
      return;
    }
    // Check if we have stream config with system progress endpoint
    const streamConfigStr = process.env.STREAM_CONFIG;
    if (!streamConfigStr) {
      console.log(
        "No stream config available, skipping Claude completion reporting",
      );
      return;
    }
    let streamConfig: StreamConfig;
    try {
      streamConfig = JSON.parse(streamConfigStr);
    } catch (e) {
      console.error("Failed to parse stream config:", e);
      return;
    }
    if (!streamConfig.system_progress_endpoint) {
      console.log(
        "No system progress endpoint in stream config, skipping Claude completion reporting",
      );
      return;
    }
    // Extract the system progress config
    const systemProgressConfig: SystemProgressConfig = {
      endpoint: streamConfig.system_progress_endpoint,
      headers: streamConfig.headers || {},
    };
    // Get the OIDC token from Authorization header
    const authHeader = systemProgressConfig.headers?.["Authorization"];
    if (!authHeader || !authHeader.startsWith("Bearer ")) {
      console.error("No valid Authorization header in stream config");
      return;
    }
    const oidcToken = authHeader.substring(7); // Remove "Bearer " prefix
    // Get Claude execution status; defaults to failure so a missing env
    // var is never reported as success.
    const claudeConclusion = process.env.CLAUDE_CONCLUSION || "failure";
    const exitCode = claudeConclusion === "success" ? 0 : 1;
    // Calculate duration if possible; guard against a malformed
    // CLAUDE_START_TIME producing a NaN duration.
    const startTime = process.env.CLAUDE_START_TIME;
    let durationMs = 0;
    if (startTime) {
      const startMs = parseInt(startTime, 10);
      if (Number.isFinite(startMs)) {
        durationMs = Date.now() - startMs;
      }
    }
    // Report Claude completion. Awaited so the request finishes (and any
    // rejection lands in the catch below) before the process can exit.
    console.log(
      `Reporting Claude completion: exitCode=${exitCode}, duration=${durationMs}ms`,
    );
    await reportClaudeComplete(
      systemProgressConfig,
      oidcToken,
      exitCode,
      durationMs,
    );
    // Ensure that uncommitted changes are committed
    const claudeBranch = process.env.CLAUDE_BRANCH;
    const useCommitSigning = process.env.USE_COMMIT_SIGNING === "true";
    const githubToken = process.env.GITHUB_TOKEN;
    // Parse repository from GITHUB_REPOSITORY (format: owner/repo)
    const repository = process.env.GITHUB_REPOSITORY;
    if (!repository) {
      console.log("No GITHUB_REPOSITORY available, skipping branch cleanup");
      return;
    }
    const [repoOwner, repoName] = repository.split("/");
    if (claudeBranch && githubToken && repoOwner && repoName) {
      console.log(`Checking for uncommitted changes in remote-agent mode...`);
      try {
        const commitResult = await commitUncommittedChanges(
          repoOwner,
          repoName,
          claudeBranch,
          useCommitSigning,
        );
        if (commitResult) {
          console.log(`Committed uncommitted changes: ${commitResult.sha}`);
        } else {
          console.log("No uncommitted changes found");
        }
      } catch (error) {
        // Don't fail the action if commit fails
        core.warning(`Failed to commit changes: ${error}`);
      }
    }
  } catch (error) {
    // Don't fail the action if reporting fails
    core.warning(`Failed to report Claude completion: ${error}`);
  }
}
// Run only when this file is executed directly (not when imported as a module).
if (import.meta.main) {
run();
}

View File

@@ -6,6 +6,7 @@ import type {
PullRequestEvent, PullRequestEvent,
PullRequestReviewEvent, PullRequestReviewEvent,
PullRequestReviewCommentEvent, PullRequestReviewCommentEvent,
RepositoryDispatchEvent,
} from "@octokit/webhooks-types"; } from "@octokit/webhooks-types";
// Custom types for GitHub Actions events that aren't webhooks // Custom types for GitHub Actions events that aren't webhooks
export type WorkflowDispatchEvent = { export type WorkflowDispatchEvent = {
@@ -46,7 +47,11 @@ const ENTITY_EVENT_NAMES = [
"pull_request_review_comment", "pull_request_review_comment",
] as const; ] as const;
const AUTOMATION_EVENT_NAMES = ["workflow_dispatch", "schedule"] as const; const AUTOMATION_EVENT_NAMES = [
"workflow_dispatch",
"schedule",
"repository_dispatch",
] as const;
// Derive types from constants for better maintainability // Derive types from constants for better maintainability
type EntityEventName = (typeof ENTITY_EVENT_NAMES)[number]; type EntityEventName = (typeof ENTITY_EVENT_NAMES)[number];
@@ -62,6 +67,17 @@ type BaseContext = {
full_name: string; full_name: string;
}; };
actor: string; actor: string;
payload:
| IssuesEvent
| IssueCommentEvent
| PullRequestEvent
| PullRequestReviewEvent
| PullRequestReviewCommentEvent
| RepositoryDispatchEvent
| WorkflowDispatchEvent
| ScheduleEvent;
entityNumber?: number;
isPR?: boolean;
inputs: { inputs: {
mode: ModeName; mode: ModeName;
triggerPhrase: string; triggerPhrase: string;
@@ -78,6 +94,14 @@ type BaseContext = {
additionalPermissions: Map<string, string>; additionalPermissions: Map<string, string>;
useCommitSigning: boolean; useCommitSigning: boolean;
}; };
progressTracking?: {
headers?: Record<string, string>;
resumeEndpoint?: string;
sessionId?: string;
progressEndpoint: string;
systemProgressEndpoint?: string;
oauthTokenEndpoint?: string;
};
}; };
// Context for entity-based events (issues, PRs, comments) // Context for entity-based events (issues, PRs, comments)
@@ -96,7 +120,7 @@ export type ParsedGitHubContext = BaseContext & {
// Context for automation events (workflow_dispatch, schedule) // Context for automation events (workflow_dispatch, schedule)
export type AutomationContext = BaseContext & { export type AutomationContext = BaseContext & {
eventName: AutomationEventName; eventName: AutomationEventName;
payload: WorkflowDispatchEvent | ScheduleEvent; payload: WorkflowDispatchEvent | ScheduleEvent | RepositoryDispatchEvent;
}; };
// Union type for all contexts // Union type for all contexts
@@ -190,6 +214,66 @@ export function parseGitHubContext(): GitHubContext {
isPR: true, isPR: true,
}; };
} }
case "repository_dispatch": {
const payload = context.payload as RepositoryDispatchEvent;
// Extract task description from client_payload
const clientPayload = payload.client_payload as {
prompt?: string;
stream_endpoint?: string;
headers?: Record<string, string>;
resume_endpoint?: string;
session_id?: string;
endpoints?: {
resume?: string;
progress?: string;
system_progress?: string;
oauth_endpoint?: string;
};
overrideInputs?: {
model?: string;
base_branch?: string;
};
};
// Override directPrompt with the prompt
if (clientPayload.prompt) {
commonFields.inputs.directPrompt = clientPayload.prompt;
}
// Apply input overrides
if (clientPayload.overrideInputs) {
if (clientPayload.overrideInputs.base_branch) {
commonFields.inputs.baseBranch =
clientPayload.overrideInputs.base_branch;
}
}
// Set up progress tracking - prioritize endpoints object if available, fallback to individual fields
let progressTracking: ParsedGitHubContext["progressTracking"] = undefined;
if (clientPayload.endpoints?.progress || clientPayload.stream_endpoint) {
progressTracking = {
progressEndpoint:
clientPayload.endpoints?.progress ||
clientPayload.stream_endpoint ||
"",
headers: clientPayload.headers,
resumeEndpoint:
// clientPayload.endpoints?.resume || clientPayload.resume_endpoint,
clientPayload.resume_endpoint,
sessionId: clientPayload.session_id,
systemProgressEndpoint: clientPayload.endpoints?.system_progress,
oauthTokenEndpoint: clientPayload.endpoints?.oauth_endpoint,
};
}
return {
...commonFields,
eventName: "repository_dispatch",
payload: payload,
progressTracking,
};
}
case "workflow_dispatch": { case "workflow_dispatch": {
return { return {
...commonFields, ...commonFields,
@@ -287,3 +371,9 @@ export function isAutomationContext(
context.eventName as AutomationEventName, context.eventName as AutomationEventName,
); );
} }
/**
 * Type guard: narrows a GitHubContext to one carrying a
 * RepositoryDispatchEvent payload, based on its event name.
 */
export function isRepositoryDispatchEvent(
  context: GitHubContext,
): context is GitHubContext & { payload: RepositoryDispatchEvent } {
  const { eventName } = context;
  return eventName === "repository_dispatch";
}

View File

@@ -8,7 +8,7 @@
import { $ } from "bun"; import { $ } from "bun";
import * as core from "@actions/core"; import * as core from "@actions/core";
import type { ParsedGitHubContext } from "../context"; import type { GitHubContext } from "../context";
import type { GitHubPullRequest } from "../types"; import type { GitHubPullRequest } from "../types";
import type { Octokits } from "../api/client"; import type { Octokits } from "../api/client";
import type { FetchDataResult } from "../data/fetcher"; import type { FetchDataResult } from "../data/fetcher";
@@ -21,15 +21,15 @@ export type BranchInfo = {
export async function setupBranch( export async function setupBranch(
octokits: Octokits, octokits: Octokits,
githubData: FetchDataResult, githubData: FetchDataResult | null,
context: ParsedGitHubContext, context: GitHubContext,
): Promise<BranchInfo> { ): Promise<BranchInfo> {
const { owner, repo } = context.repository; const { owner, repo } = context.repository;
const entityNumber = context.entityNumber; const entityNumber = context.entityNumber;
const { baseBranch, branchPrefix } = context.inputs; const { baseBranch, branchPrefix } = context.inputs;
const isPR = context.isPR; const isPR = context.isPR;
if (isPR) { if (isPR && githubData) {
const prData = githubData.contextData as GitHubPullRequest; const prData = githubData.contextData as GitHubPullRequest;
const prState = prData.state; const prState = prData.state;
@@ -84,19 +84,27 @@ export async function setupBranch(
sourceBranch = repoResponse.data.default_branch; sourceBranch = repoResponse.data.default_branch;
} }
// Generate branch name for either an issue or closed/merged PR // Generate branch name for either an issue, closed/merged PR, or repository_dispatch event
const entityType = isPR ? "pr" : "issue"; let branchName: string;
// Create Kubernetes-compatible timestamp: lowercase, hyphens only, shorter format if (context.eventName === "repository_dispatch") {
const now = new Date(); // For repository_dispatch events, use run ID for uniqueness since there's no entity number
const timestamp = `${now.getFullYear()}${String(now.getMonth() + 1).padStart(2, "0")}${String(now.getDate()).padStart(2, "0")}-${String(now.getHours()).padStart(2, "0")}${String(now.getMinutes()).padStart(2, "0")}`; const now = new Date();
const timestamp = `${now.getFullYear()}${String(now.getMonth() + 1).padStart(2, "0")}${String(now.getDate()).padStart(2, "0")}-${String(now.getHours()).padStart(2, "0")}${String(now.getMinutes()).padStart(2, "0")}`;
branchName = `${branchPrefix}dispatch-${context.runId}-${timestamp}`;
} else {
// For issues and PRs, use the existing logic
const entityType = isPR ? "pr" : "issue";
const now = new Date();
const timestamp = `${now.getFullYear()}${String(now.getMonth() + 1).padStart(2, "0")}${String(now.getDate()).padStart(2, "0")}-${String(now.getHours()).padStart(2, "0")}${String(now.getMinutes()).padStart(2, "0")}`;
branchName = `${branchPrefix}${entityType}-${entityNumber}-${timestamp}`;
}
// Ensure branch name is Kubernetes-compatible: // Ensure branch name is Kubernetes-compatible:
// - Lowercase only // - Lowercase only
// - Alphanumeric with hyphens // - Alphanumeric with hyphens
// - No underscores // - No underscores
// - Max 50 chars (to allow for prefixes) // - Max 50 chars (to allow for prefixes)
const branchName = `${branchPrefix}${entityType}-${entityNumber}-${timestamp}`;
const newBranch = branchName.toLowerCase().substring(0, 50); const newBranch = branchName.toLowerCase().substring(0, 50);
try { try {
@@ -132,8 +140,18 @@ export async function setupBranch(
} }
// For non-signing case, create and checkout the branch locally only // For non-signing case, create and checkout the branch locally only
const entityType =
context.eventName === "repository_dispatch"
? "dispatch"
: isPR
? "pr"
: "issue";
const entityId =
context.eventName === "repository_dispatch"
? context.runId
: entityNumber!.toString();
console.log( console.log(
`Creating local branch ${newBranch} for ${entityType} #${entityNumber} from source branch: ${sourceBranch}...`, `Creating local branch ${newBranch} for ${entityType} ${entityId} from source branch: ${sourceBranch}...`,
); );
// Fetch and checkout the source branch first to ensure we branch from the correct base // Fetch and checkout the source branch first to ensure we branch from the correct base

View File

@@ -6,7 +6,7 @@
*/ */
import { $ } from "bun"; import { $ } from "bun";
import type { ParsedGitHubContext } from "../context"; import type { GitHubContext } from "../context";
import { GITHUB_SERVER_URL } from "../api/config"; import { GITHUB_SERVER_URL } from "../api/config";
type GitUser = { type GitUser = {
@@ -16,7 +16,7 @@ type GitUser = {
export async function configureGitAuth( export async function configureGitAuth(
githubToken: string, githubToken: string,
context: ParsedGitHubContext, context: GitHubContext,
user: GitUser | null, user: GitUser | null,
) { ) {
console.log("Configuring git authentication for non-signing mode"); console.log("Configuring git authentication for non-signing mode");

View File

@@ -0,0 +1,533 @@
/**
* Git Common Utilities
*
* This module provides utilities for Git operations using both GitHub API and CLI.
*
* ## When to use API vs CLI:
*
* ### GitHub API (for signed commits):
* - When commit signing is enabled (`useCommitSigning: true`)
* - Required for signed commits as GitHub Apps can't sign commits locally
* - Functions with "API" in the name use the GitHub REST API
*
* ### Git CLI (for unsigned commits):
* - When commit signing is disabled (`useCommitSigning: false`)
* - Faster for simple operations when signing isn't required
* - Uses local git commands (`git add`, `git commit`, `git push`)
*/
import { readFile } from "fs/promises";
import { join } from "path";
import { $ } from "bun";
import { GITHUB_API_URL } from "../api/config";
import { retryWithBackoff } from "../../utils/retry";
import fetch from "node-fetch";
// A single file to include in a commit; `deleted: true` marks the path for
// removal from the tree rather than an update.
interface FileEntry {
path: string;
content?: string;
deleted?: boolean;
}
// Result of a successful commit operation.
interface CommitResult {
sha: string;
message: string;
}
// Minimal shape of the GitHub "get a reference" API response.
interface GitHubRef {
object: {
sha: string;
};
}
// Minimal shape of the GitHub "get a commit" API response (only the tree
// SHA is consumed here).
interface GitHubCommit {
tree: {
sha: string;
};
}
// Minimal shape of the GitHub "create a tree" API response.
interface GitHubTree {
sha: string;
}
// Minimal shape of the GitHub "create a commit" API response.
interface GitHubNewCommit {
sha: string;
message: string;
author: {
name: string;
date: string;
};
}
/**
 * Lists files with uncommitted changes by parsing `git status --porcelain`.
 *
 * Fixes over the previous version:
 * - Lines are no longer trimmed before parsing: in porcelain v1 format the
 *   line is "XY path" where X is the index column and Y the worktree column,
 *   and a leading space is a meaningful status character (e.g. " D" means
 *   deleted in the worktree only).
 * - Both status columns are checked for "D", so stage/worktree deletions
 *   (" D", "AD", "D ") are all treated as deletions.
 * - Renames ("R  old -> new") contribute the new path.
 *
 * @returns file entries to commit; empty array on any git failure
 */
async function getUncommittedFiles(): Promise<FileEntry[]> {
  try {
    console.log("Getting uncommitted files...");
    const gitStatus = await $`git status --porcelain`.quiet();
    const statusOutput = gitStatus.stdout.toString();
    if (!statusOutput.trim()) {
      console.log("No uncommitted files found (git status output is empty)");
      return [];
    }
    console.log("Git status output:");
    console.log(statusOutput.trim());
    const files: FileEntry[] = [];
    const lines = statusOutput
      .split("\n")
      .filter((line) => line.trim().length > 0);
    console.log(`Found ${lines.length} lines in git status output`);
    for (const line of lines) {
      // Two-character status code, then a separator space, then the path.
      const statusCode = line.substring(0, 2);
      let filePath = line.substring(3).trim();
      // Renames are reported as "old -> new"; the new path is what exists.
      const arrowIndex = filePath.indexOf(" -> ");
      if (arrowIndex !== -1) {
        filePath = filePath.substring(arrowIndex + 4).trim();
      }
      console.log(`Processing: status='${statusCode}' path='${filePath}'`);
      // Skip files we shouldn't auto-commit
      if (filePath === "output.txt" || filePath.endsWith("/output.txt")) {
        console.log(`Skipping temporary file: ${filePath}`);
        continue;
      }
      // Deleted if either the index or the worktree column reports a delete.
      const isDeleted = statusCode.includes("D");
      console.log(`File ${filePath}: deleted=${isDeleted}`);
      files.push({
        path: filePath,
        deleted: isDeleted,
      });
    }
    console.log(`Returning ${files.length} files to commit`);
    return files;
  } catch (error) {
    // If git status fails (e.g., not in a git repo), return empty array
    console.error("Error running git status:", error);
    return [];
  }
}
/**
* Helper function to get or create branch reference via GitHub API
* Used when we need to ensure a branch exists before committing via API
*/
async function getOrCreateBranchRefViaAPI(
  owner: string,
  repo: string,
  branch: string,
  githubToken: string,
): Promise<string> {
  // Shared headers for all GitHub REST calls in this function.
  const apiHeaders = {
    Accept: "application/vnd.github+json",
    Authorization: `Bearer ${githubToken}`,
    "X-GitHub-Api-Version": "2022-11-28",
  };
  // 1. If the branch already exists, return its head SHA.
  const refUrl = `${GITHUB_API_URL}/repos/${owner}/${repo}/git/refs/heads/${branch}`;
  const refResponse = await fetch(refUrl, { headers: apiHeaders });
  if (refResponse.ok) {
    const refData = (await refResponse.json()) as GitHubRef;
    return refData.object.sha;
  }
  if (refResponse.status !== 404) {
    throw new Error(`Failed to get branch reference: ${refResponse.status}`);
  }
  // 2. Branch doesn't exist — find a base SHA to branch from. The previous
  //    version used `process.env.BASE_BRANCH!` (non-null assertion), which
  //    issued a bogus `.../heads/undefined` request when unset; handle the
  //    unset case explicitly instead.
  const baseBranch = process.env.BASE_BRANCH;
  let baseSha: string | undefined;
  if (baseBranch) {
    const baseRefUrl = `${GITHUB_API_URL}/repos/${owner}/${repo}/git/refs/heads/${baseBranch}`;
    const baseRefResponse = await fetch(baseRefUrl, { headers: apiHeaders });
    if (baseRefResponse.ok) {
      const baseRefData = (await baseRefResponse.json()) as GitHubRef;
      baseSha = baseRefData.object.sha;
    }
    // On failure fall through to the default branch below.
  }
  if (baseSha === undefined) {
    // Fall back to the repository's default branch.
    const repoUrl = `${GITHUB_API_URL}/repos/${owner}/${repo}`;
    const repoResponse = await fetch(repoUrl, { headers: apiHeaders });
    if (!repoResponse.ok) {
      throw new Error(`Failed to get repository info: ${repoResponse.status}`);
    }
    const repoData = (await repoResponse.json()) as {
      default_branch: string;
    };
    const defaultBranch = repoData.default_branch;
    const defaultRefUrl = `${GITHUB_API_URL}/repos/${owner}/${repo}/git/refs/heads/${defaultBranch}`;
    const defaultRefResponse = await fetch(defaultRefUrl, {
      headers: apiHeaders,
    });
    if (!defaultRefResponse.ok) {
      throw new Error(
        `Failed to get default branch reference: ${defaultRefResponse.status}`,
      );
    }
    const defaultRefData = (await defaultRefResponse.json()) as GitHubRef;
    baseSha = defaultRefData.object.sha;
  }
  // 3. Create the new branch ref pointing at the chosen base SHA.
  const createRefUrl = `${GITHUB_API_URL}/repos/${owner}/${repo}/git/refs`;
  const createRefResponse = await fetch(createRefUrl, {
    method: "POST",
    headers: {
      ...apiHeaders,
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      ref: `refs/heads/${branch}`,
      sha: baseSha,
    }),
  });
  if (!createRefResponse.ok) {
    const errorText = await createRefResponse.text();
    throw new Error(
      `Failed to create branch: ${createRefResponse.status} - ${errorText}`,
    );
  }
  console.log(`Successfully created branch ${branch}`);
  return baseSha;
}
/**
* Create a commit via GitHub API with the given files (for signed commits)
* Handles both file updates and deletions
* Used when commit signing is enabled - GitHub Apps can create signed commits via API
*/
async function createCommitViaAPI(
  owner: string,
  repo: string,
  branch: string,
  files: Array<string | FileEntry>,
  message: string,
  REPO_DIR: string = process.cwd(),
): Promise<CommitResult> {
  const githubToken = process.env.GITHUB_TOKEN;
  if (!githubToken) {
    throw new Error("GITHUB_TOKEN environment variable is required");
  }
  // Normalize file entries
  // Accepts both legacy string paths and FileEntry objects; strips a
  // leading "/" so paths are repo-relative as the Trees API requires.
  const fileEntries: FileEntry[] = files.map((f) => {
    if (typeof f === "string") {
      // Legacy string path format
      const path = f.startsWith("/") ? f.slice(1) : f;
      return { path, deleted: false };
    }
    // Already a FileEntry
    const path = f.path.startsWith("/") ? f.path.slice(1) : f.path;
    return { ...f, path };
  });
  // 1. Get the branch reference (create if doesn't exist)
  const baseSha = await getOrCreateBranchRefViaAPI(
    owner,
    repo,
    branch,
    githubToken,
  );
  // 2. Get the base commit (needed for its tree SHA, which the new tree
  //    is layered on via base_tree below)
  const commitUrl = `${GITHUB_API_URL}/repos/${owner}/${repo}/git/commits/${baseSha}`;
  const commitResponse = await fetch(commitUrl, {
    headers: {
      Accept: "application/vnd.github+json",
      Authorization: `Bearer ${githubToken}`,
      "X-GitHub-Api-Version": "2022-11-28",
    },
  });
  if (!commitResponse.ok) {
    throw new Error(`Failed to get base commit: ${commitResponse.status}`);
  }
  const commitData = (await commitResponse.json()) as GitHubCommit;
  const baseTreeSha = commitData.tree.sha;
  // 3. Create tree entries for all files
  // NOTE(review): mode is always "100644" below, so the executable bit of
  // changed files is not preserved — confirm whether getFileMode support
  // (added elsewhere in this changeset) is also needed here.
  const treeEntries = await Promise.all(
    fileEntries.map(async (fileEntry) => {
      const { path: filePath, deleted } = fileEntry;
      // Handle deleted files by setting SHA to null
      if (deleted) {
        return {
          path: filePath,
          mode: "100644",
          type: "blob" as const,
          sha: null,
        };
      }
      const fullPath = filePath.startsWith("/")
        ? filePath
        : join(REPO_DIR, filePath);
      // Check if file is binary (images, etc.)
      // NOTE(review): binary detection is by extension only — confirm no
      // binary content ships under other extensions, which would be sent
      // as UTF-8 text and corrupted.
      const isBinaryFile =
        /\.(png|jpg|jpeg|gif|webp|ico|pdf|zip|tar|gz|exe|bin|woff|woff2|ttf|eot)$/i.test(
          filePath,
        );
      if (isBinaryFile) {
        // For binary files, create a blob first using the Blobs API
        const binaryContent = await readFile(fullPath);
        // Create blob using Blobs API (supports encoding parameter)
        const blobUrl = `${GITHUB_API_URL}/repos/${owner}/${repo}/git/blobs`;
        const blobResponse = await fetch(blobUrl, {
          method: "POST",
          headers: {
            Accept: "application/vnd.github+json",
            Authorization: `Bearer ${githubToken}`,
            "X-GitHub-Api-Version": "2022-11-28",
            "Content-Type": "application/json",
          },
          body: JSON.stringify({
            content: binaryContent.toString("base64"),
            encoding: "base64",
          }),
        });
        if (!blobResponse.ok) {
          const errorText = await blobResponse.text();
          throw new Error(
            `Failed to create blob for ${filePath}: ${blobResponse.status} - ${errorText}`,
          );
        }
        const blobData = (await blobResponse.json()) as { sha: string };
        // Return tree entry with blob SHA
        return {
          path: filePath,
          mode: "100644",
          type: "blob" as const,
          sha: blobData.sha,
        };
      } else {
        // For text files, include content directly in tree
        const content = await readFile(fullPath, "utf-8");
        return {
          path: filePath,
          mode: "100644",
          type: "blob" as const,
          content: content,
        };
      }
    }),
  );
  // 4. Create a new tree on top of the base commit's tree
  const treeUrl = `${GITHUB_API_URL}/repos/${owner}/${repo}/git/trees`;
  const treeResponse = await fetch(treeUrl, {
    method: "POST",
    headers: {
      Accept: "application/vnd.github+json",
      Authorization: `Bearer ${githubToken}`,
      "X-GitHub-Api-Version": "2022-11-28",
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      base_tree: baseTreeSha,
      tree: treeEntries,
    }),
  });
  if (!treeResponse.ok) {
    const errorText = await treeResponse.text();
    throw new Error(
      `Failed to create tree: ${treeResponse.status} - ${errorText}`,
    );
  }
  const treeData = (await treeResponse.json()) as GitHubTree;
  // 5. Create a new commit pointing at the new tree, parented on baseSha.
  //    Commits created this way by a GitHub App are signed server-side.
  const newCommitUrl = `${GITHUB_API_URL}/repos/${owner}/${repo}/git/commits`;
  const newCommitResponse = await fetch(newCommitUrl, {
    method: "POST",
    headers: {
      Accept: "application/vnd.github+json",
      Authorization: `Bearer ${githubToken}`,
      "X-GitHub-Api-Version": "2022-11-28",
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      message: message,
      tree: treeData.sha,
      parents: [baseSha],
    }),
  });
  if (!newCommitResponse.ok) {
    const errorText = await newCommitResponse.text();
    throw new Error(
      `Failed to create commit: ${newCommitResponse.status} - ${errorText}`,
    );
  }
  const newCommitData = (await newCommitResponse.json()) as GitHubNewCommit;
  // 6. Update the reference to point to the new commit
  const updateRefUrl = `${GITHUB_API_URL}/repos/${owner}/${repo}/git/refs/heads/${branch}`;
  // We're seeing intermittent 403 "Resource not accessible by integration" errors
  // on certain repos when updating git references. These appear to be transient
  // GitHub API issues that succeed on retry.
  await retryWithBackoff(
    async () => {
      const updateRefResponse = await fetch(updateRefUrl, {
        method: "PATCH",
        headers: {
          Accept: "application/vnd.github+json",
          Authorization: `Bearer ${githubToken}`,
          "X-GitHub-Api-Version": "2022-11-28",
          "Content-Type": "application/json",
        },
        body: JSON.stringify({
          sha: newCommitData.sha,
          force: false,
        }),
      });
      if (!updateRefResponse.ok) {
        const errorText = await updateRefResponse.text();
        const error = new Error(
          `Failed to update reference: ${updateRefResponse.status} - ${errorText}`,
        );
        // Only retry on 403 errors - these are the intermittent failures we're targeting
        if (updateRefResponse.status === 403) {
          throw error;
        }
        // For non-403 errors, fail immediately without retry
        // NOTE(review): both branches throw the same plain Error, so
        // retryWithBackoff appears unable to distinguish retryable from
        // non-retryable failures here — confirm retryWithBackoff's contract
        // (e.g. a non-retryable error marker) and whether non-403 statuses
        // are in fact retried.
        console.error("Non-retryable error:", updateRefResponse.status);
        throw error;
      }
    },
    {
      maxAttempts: 3,
      initialDelayMs: 1000, // Start with 1 second delay
      maxDelayMs: 5000, // Max 5 seconds delay
      backoffFactor: 2, // Double the delay each time
    },
  );
  return {
    sha: newCommitData.sha,
    message: newCommitData.message,
  };
}
/**
* Commit uncommitted changes - automatically chooses API or CLI based on signing requirement
*
* @param useCommitSigning - If true, uses GitHub API for signed commits. If false, uses git CLI.
*/
/**
 * Commits any uncommitted working-tree changes on the given branch.
 *
 * Chooses the mechanism automatically: the GitHub API when commit signing
 * is required, the local git CLI otherwise.
 *
 * @param useCommitSigning - If true, uses GitHub API for signed commits. If false, uses git CLI.
 * @returns the created commit, or null when there was nothing to commit or
 *          the attempt failed (failures are logged, never thrown)
 */
export async function commitUncommittedChanges(
  owner: string,
  repo: string,
  branch: string,
  useCommitSigning: boolean,
): Promise<CommitResult | null> {
  try {
    // Bail out early when the working tree is clean.
    const statusResult = await $`git status --porcelain`.quiet();
    if (statusResult.stdout.toString().trim().length === 0) {
      console.log("No uncommitted changes found");
      return null;
    }
    console.log("Found uncommitted changes, committing them...");
    const runId = process.env.GITHUB_RUN_ID || "unknown";
    const commitMessage = `Auto-commit: Save uncommitted changes from Claude\n\nRun ID: ${runId}`;
    if (!useCommitSigning) {
      // Use git CLI when commit signing is not required
      console.log("Using git CLI for unsigned commit...");
      await $`git add -A`;
      await $`git commit -m ${commitMessage}`;
      await $`git push origin ${branch}`;
      console.log("✅ Successfully committed and pushed uncommitted changes");
      const headSha = await $`git rev-parse HEAD`.quiet();
      return {
        sha: headSha.stdout.toString().trim(),
        message: commitMessage,
      };
    }
    // Use GitHub API when commit signing is required
    console.log("Using GitHub API for signed commit...");
    const files = await getUncommittedFiles();
    if (files.length === 0) {
      console.log("No files to commit");
      return null;
    }
    return await createCommitViaAPI(owner, repo, branch, files, commitMessage);
  } catch (error) {
    // If we can't check git status (e.g., not in a git repo during tests), return null
    console.error("Error checking/committing changes:", error);
    return null;
  }
}

View File

@@ -3,12 +3,20 @@
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { z } from "zod"; import { z } from "zod";
import { readFile } from "fs/promises"; import { readFile, access, stat } from "fs/promises";
import { join } from "path"; import { join } from "path";
import { constants } from "fs";
import { execFile } from "child_process";
import { promisify } from "util";
import fetch from "node-fetch"; import fetch from "node-fetch";
import { GITHUB_API_URL } from "../github/api/config"; import { GITHUB_API_URL } from "../github/api/config";
import { retryWithBackoff } from "../utils/retry"; import { retryWithBackoff } from "../utils/retry";
// NOTE: We should extract out common git utilities into a shared module
// as we need to perform these operations outside of an MCP server. (See git-common-utils.ts)
const execFileAsync = promisify(execFile);
type GitHubRef = { type GitHubRef = {
object: { object: {
sha: string; sha: string;
@@ -162,6 +170,77 @@ async function getOrCreateBranchRef(
return baseSha; return baseSha;
} }
// Get the appropriate Git file mode for a file
async function getFileMode(filePath: string): Promise<string> {
try {
const fileStat = await stat(filePath);
if (fileStat.isFile()) {
// Check if execute bit is set for user
if (fileStat.mode & constants.S_IXUSR) {
return "100755"; // Executable file
} else {
return "100644"; // Regular file
}
} else if (fileStat.isDirectory()) {
return "040000"; // Directory (tree)
} else if (fileStat.isSymbolicLink()) {
return "120000"; // Symbolic link
} else {
// Fallback for unknown file types
return "100644";
}
} catch (error) {
// If we can't stat the file, default to regular file
console.warn(
`Could not determine file mode for ${filePath}, using default: ${error}`,
);
return "100644";
}
}
// Helper function to run pre-commit hooks
async function runPreCommitHooks(repoDir: string): Promise<void> {
const hookPath = join(repoDir, ".git", "hooks", "pre-commit");
try {
// Check if pre-commit hook exists and is executable
await access(hookPath);
console.log("Running pre-commit hook...");
// Execute the pre-commit hook
const { stdout, stderr } = await execFileAsync(hookPath, [], {
cwd: repoDir,
env: {
...process.env,
GIT_INDEX_FILE: join(repoDir, ".git", "index"),
GIT_DIR: join(repoDir, ".git"),
},
});
if (stdout) console.log("Pre-commit hook stdout:", stdout);
if (stderr) console.log("Pre-commit hook stderr:", stderr);
console.log("Pre-commit hook passed");
} catch (error: any) {
if (error.code === "ENOENT") {
// Hook doesn't exist, that's fine
return;
}
if (error.code === "EACCES") {
console.log("Pre-commit hook exists but is not executable, skipping");
return;
}
// Hook failed with non-zero exit code
const errorMessage =
error.stderr || error.message || "Pre-commit hook failed";
throw new Error(`Pre-commit hook failed: ${errorMessage}`);
}
}
// Commit files tool // Commit files tool
server.tool( server.tool(
"commit_files", "commit_files",
@@ -173,8 +252,12 @@ server.tool(
'Array of file paths relative to repository root (e.g. ["src/main.js", "README.md"]). All files must exist locally.', 'Array of file paths relative to repository root (e.g. ["src/main.js", "README.md"]). All files must exist locally.',
), ),
message: z.string().describe("Commit message"), message: z.string().describe("Commit message"),
noVerify: z
.boolean()
.optional()
.describe("Skip pre-commit hooks (equivalent to git commit --no-verify)"),
}, },
async ({ files, message }) => { async ({ files, message, noVerify }) => {
const owner = REPO_OWNER; const owner = REPO_OWNER;
const repo = REPO_NAME; const repo = REPO_NAME;
const branch = BRANCH_NAME; const branch = BRANCH_NAME;
@@ -184,6 +267,11 @@ server.tool(
throw new Error("GITHUB_TOKEN environment variable is required"); throw new Error("GITHUB_TOKEN environment variable is required");
} }
// Run pre-commit hooks unless explicitly skipped
if (!noVerify) {
await runPreCommitHooks(REPO_DIR);
}
const processedFiles = files.map((filePath) => { const processedFiles = files.map((filePath) => {
if (filePath.startsWith("/")) { if (filePath.startsWith("/")) {
return filePath.slice(1); return filePath.slice(1);
@@ -223,6 +311,9 @@ server.tool(
? filePath ? filePath
: join(REPO_DIR, filePath); : join(REPO_DIR, filePath);
// Get the proper file mode based on file permissions
const fileMode = await getFileMode(fullPath);
// Check if file is binary (images, etc.) // Check if file is binary (images, etc.)
const isBinaryFile = const isBinaryFile =
/\.(png|jpg|jpeg|gif|webp|ico|pdf|zip|tar|gz|exe|bin|woff|woff2|ttf|eot)$/i.test( /\.(png|jpg|jpeg|gif|webp|ico|pdf|zip|tar|gz|exe|bin|woff|woff2|ttf|eot)$/i.test(
@@ -261,7 +352,7 @@ server.tool(
// Return tree entry with blob SHA // Return tree entry with blob SHA
return { return {
path: filePath, path: filePath,
mode: "100644", mode: fileMode,
type: "blob", type: "blob",
sha: blobData.sha, sha: blobData.sha,
}; };
@@ -270,7 +361,7 @@ server.tool(
const content = await readFile(fullPath, "utf-8"); const content = await readFile(fullPath, "utf-8");
return { return {
path: filePath, path: filePath,
mode: "100644", mode: fileMode,
type: "blob", type: "blob",
content: content, content: content,
}; };

View File

@@ -1,6 +1,6 @@
import * as core from "@actions/core"; import * as core from "@actions/core";
import { GITHUB_API_URL, GITHUB_SERVER_URL } from "../github/api/config"; import { GITHUB_API_URL, GITHUB_SERVER_URL } from "../github/api/config";
import type { ParsedGitHubContext } from "../github/context"; import type { GitHubContext } from "../github/context";
import { Octokit } from "@octokit/rest"; import { Octokit } from "@octokit/rest";
type PrepareConfigParams = { type PrepareConfigParams = {
@@ -12,7 +12,7 @@ type PrepareConfigParams = {
additionalMcpConfig?: string; additionalMcpConfig?: string;
claudeCommentId?: string; claudeCommentId?: string;
allowedTools: string[]; allowedTools: string[];
context: ParsedGitHubContext; context: GitHubContext;
}; };
async function checkActionsReadPermission( async function checkActionsReadPermission(
@@ -73,21 +73,23 @@ export async function prepareMcpConfig(
}; };
// Always include comment server for updating Claude comments // Always include comment server for updating Claude comments
baseMcpConfig.mcpServers.github_comment = { if (context.inputs.mode === "tag") {
command: "bun", baseMcpConfig.mcpServers.github_comment = {
args: [ command: "bun",
"run", args: [
`${process.env.GITHUB_ACTION_PATH}/src/mcp/github-comment-server.ts`, "run",
], `${process.env.GITHUB_ACTION_PATH}/src/mcp/github-comment-server.ts`,
env: { ],
GITHUB_TOKEN: githubToken, env: {
REPO_OWNER: owner, GITHUB_TOKEN: githubToken,
REPO_NAME: repo, REPO_OWNER: owner,
...(claudeCommentId && { CLAUDE_COMMENT_ID: claudeCommentId }), REPO_NAME: repo,
GITHUB_EVENT_NAME: process.env.GITHUB_EVENT_NAME || "", ...(claudeCommentId && { CLAUDE_COMMENT_ID: claudeCommentId }),
GITHUB_API_URL: GITHUB_API_URL, GITHUB_EVENT_NAME: process.env.GITHUB_EVENT_NAME || "",
}, GITHUB_API_URL: GITHUB_API_URL,
}; },
};
}
// Include file ops server when commit signing is enabled // Include file ops server when commit signing is enabled
if (context.inputs.useCommitSigning) { if (context.inputs.useCommitSigning) {

View File

@@ -16,9 +16,15 @@ import { agentMode } from "./agent";
import { reviewMode } from "./review"; import { reviewMode } from "./review";
import type { GitHubContext } from "../github/context"; import type { GitHubContext } from "../github/context";
import { isAutomationContext } from "../github/context"; import { isAutomationContext } from "../github/context";
import { remoteAgentMode } from "./remote-agent";
export const DEFAULT_MODE = "tag" as const; export const DEFAULT_MODE = "tag" as const;
export const VALID_MODES = ["tag", "agent", "experimental-review"] as const; export const VALID_MODES = [
"tag",
"agent",
"remote-agent",
"experimental-review",
] as const;
/** /**
* All available modes. * All available modes.
@@ -28,6 +34,7 @@ const modes = {
tag: tagMode, tag: tagMode,
agent: agentMode, agent: agentMode,
"experimental-review": reviewMode, "experimental-review": reviewMode,
"remote-agent": remoteAgentMode,
} as const satisfies Record<ModeName, Mode>; } as const satisfies Record<ModeName, Mode>;
/** /**
@@ -49,7 +56,13 @@ export function getMode(name: ModeName, context: GitHubContext): Mode {
// Validate mode can handle the event type // Validate mode can handle the event type
if (name === "tag" && isAutomationContext(context)) { if (name === "tag" && isAutomationContext(context)) {
throw new Error( throw new Error(
`Tag mode cannot handle ${context.eventName} events. Use 'agent' mode for automation events.`, `Tag mode cannot handle ${context.eventName} events. Use 'agent' mode for automation events or 'remote-agent' mode for repository_dispatch events.`,
);
}
if (name === "remote-agent" && context.eventName !== "repository_dispatch") {
throw new Error(
`Remote agent mode can only handle repository_dispatch events. Use 'tag' mode for @claude mentions or 'agent' mode for other automation events.`,
); );
} }

View File

@@ -0,0 +1,157 @@
/**
* Branch handling for remote-agent mode with resume support
*/
import { $ } from "bun";
import * as core from "@actions/core";
import type { GitHubContext } from "../../github/context";
import type { Octokits } from "../../github/api/client";
import type { ResumeResponse, ResumeResult } from "../../types/resume";
import {
setupBranch as setupBaseBranch,
type BranchInfo,
} from "../../github/operations/branch";
/** Branch info extended with the conversation recovered from a resumed session. */
export type RemoteBranchInfo = BranchInfo & {
  // Present only when a prior session was successfully fetched via the resume endpoint.
  resumeMessages?: ResumeResult["messages"];
};
/**
 * Fetches prior-session data from the resume endpoint.
 *
 * @param resumeEndpoint - URL to GET the resume payload from
 * @param headers - Optional request headers (including auth)
 * @returns The parsed ResumeResult, or null on any failure (non-2xx status,
 *   malformed payload, or network error) so the caller can fall back to a
 *   fresh session.
 */
async function fetchResumeData(
  resumeEndpoint: string,
  headers?: Record<string, string>,
): Promise<ResumeResult | null> {
  try {
    console.log(`Attempting to resume from: ${resumeEndpoint}`);
    const response = await fetch(resumeEndpoint, {
      method: "GET",
      headers: headers || {},
    });
    if (!response.ok) {
      console.log(
        `Resume endpoint returned ${response.status}: ${response.statusText}`,
      );
      return null;
    }
    const payload = (await response.json()) as ResumeResponse;
    // A valid resume payload must carry a message log array.
    if (!payload.log || !Array.isArray(payload.log)) {
      console.log("Resume endpoint returned invalid data structure");
      return null;
    }
    console.log(
      `Successfully fetched resume data with ${payload.log.length} messages`,
    );
    // Prefer the branch named by the server; an empty string signals the
    // caller to determine the branch from current git state instead.
    return {
      messages: payload.log,
      branchName: payload.branch || "",
    };
  } catch (error) {
    console.error("Failed to fetch resume data:", error);
    return null;
  }
}
/**
 * Setup branch for remote-agent mode with resume support.
 *
 * When a resume endpoint is configured, tries to re-attach to the previous
 * session's branch (fetch + checkout) and carries the prior conversation
 * forward via `resumeMessages`. Any resume failure falls back to normal
 * branch creation. Side effects: runs git commands in the checkout and sets
 * the CLAUDE_BRANCH / BASE_BRANCH Actions outputs on the resume path.
 *
 * @param octokits GitHub API clients
 * @param context GitHub context
 * @param oidcToken OIDC token for authentication against the resume endpoint
 * @returns Branch information with optional resume messages
 */
export async function setupBranchWithResume(
  octokits: Octokits,
  context: GitHubContext,
  oidcToken: string,
): Promise<RemoteBranchInfo> {
  const { owner, repo } = context.repository;
  const { baseBranch } = context.inputs;
  // Check if we have a resume endpoint
  if (context.progressTracking?.resumeEndpoint) {
    console.log("Resume endpoint detected, attempting to resume session...");
    // Prepare headers with OIDC token (caller-supplied headers first, then auth)
    const headers: Record<string, string> = {
      ...(context.progressTracking.headers || {}),
      Authorization: `Bearer ${oidcToken}`,
    };
    const resumeData = await fetchResumeData(
      context.progressTracking.resumeEndpoint,
      headers,
    );
    if (resumeData && resumeData.branchName) {
      // Try to checkout the resumed branch
      try {
        console.log(`Resuming on branch: ${resumeData.branchName}`);
        // Fetch the branch from origin
        await $`git fetch origin ${resumeData.branchName}`;
        // Checkout the branch
        await $`git checkout ${resumeData.branchName}`;
        console.log(`Successfully resumed on branch: ${resumeData.branchName}`);
        // Get the base branch for this branch (we'll use the default branch as fallback)
        let resumeBaseBranch = baseBranch;
        if (!resumeBaseBranch) {
          const repoResponse = await octokits.rest.repos.get({
            owner,
            repo,
          });
          resumeBaseBranch = repoResponse.data.default_branch;
        }
        // Set outputs for GitHub Actions
        core.setOutput("CLAUDE_BRANCH", resumeData.branchName);
        core.setOutput("BASE_BRANCH", resumeBaseBranch);
        return {
          baseBranch: resumeBaseBranch,
          claudeBranch: resumeData.branchName,
          currentBranch: resumeData.branchName,
          resumeMessages: resumeData.messages,
        };
      } catch (error) {
        // Checkout can fail if the branch was deleted or never pushed.
        console.error(
          `Failed to checkout resumed branch ${resumeData.branchName}:`,
          error,
        );
        console.log("Falling back to creating a new branch...");
        // Fall through to normal branch creation
      }
    } else if (resumeData) {
      console.log(
        "Resume data fetched but no branch specified, will create new branch",
      );
      // We have messages but no branch, so we'll create a new branch
      // but still pass along the messages
      const branchInfo = await setupBaseBranch(octokits, null, context);
      return {
        ...branchInfo,
        resumeMessages: resumeData.messages,
      };
    }
  }
  // No resume endpoint or resume failed, use normal branch setup
  console.log("No resume endpoint or resume failed, creating new branch...");
  return setupBaseBranch(octokits, null, context);
}

View File

@@ -0,0 +1,444 @@
import * as core from "@actions/core";
import { mkdir, writeFile } from "fs/promises";
import type { Mode, ModeOptions, ModeResult } from "../types";
import { isRepositoryDispatchEvent } from "../../github/context";
import type { GitHubContext } from "../../github/context";
import { setupBranchWithResume } from "./branch";
import { prepareMcpConfig } from "../../mcp/install-mcp-server";
import { GITHUB_SERVER_URL } from "../../github/api/config";
import {
buildRemoteAgentAllowedToolsString,
buildDisallowedToolsString,
type PreparedContext,
} from "../../create-prompt";
import {
reportWorkflowInitialized,
reportClaudeStarting,
reportWorkflowFailed,
} from "./system-progress-handler";
import type { SystemProgressConfig } from "./progress-types";
import { fetchUserDisplayName } from "../../github/data/fetcher";
import { createOctokit } from "../../github/api/client";
import type { StreamConfig } from "../../types/stream-config";
/**
 * Fetches a Claude Code OAuth token from the specified endpoint using OIDC
 * authentication.
 *
 * @param oauthTokenEndpoint - Endpoint to POST the token request to
 * @param oidcToken - OIDC bearer token; required, despite the optional type
 * @param sessionId - Optional session identifier forwarded as `session_id`
 * @returns The fetched OAuth token
 * @throws When the OIDC token is missing, the request fails, or the
 *   response carries no `oauth_token`.
 */
async function fetchClaudeCodeOAuthToken(
  oauthTokenEndpoint: string,
  oidcToken?: string,
  sessionId?: string,
): Promise<string> {
  console.log(`Fetching Claude Code OAuth token from: ${oauthTokenEndpoint}`);
  try {
    if (!oidcToken) {
      throw new Error("OIDC token is required for OAuth authentication");
    }
    // Make request to OAuth token endpoint
    const tokenResponse = await fetch(oauthTokenEndpoint, {
      method: "POST",
      headers: {
        Authorization: `Bearer ${oidcToken}`,
        "Content-Type": "application/json",
      },
      // Only include session_id in the body when one was provided.
      body: JSON.stringify({
        ...(sessionId && { session_id: sessionId }),
      }),
    });
    if (!tokenResponse.ok) {
      throw new Error(
        `OAuth token request failed: ${tokenResponse.status} ${tokenResponse.statusText}`,
      );
    }
    const body = (await tokenResponse.json()) as {
      oauth_token?: string;
      message?: string;
    };
    if (!body.oauth_token) {
      throw new Error(
        `OAuth token request failed: ${body.message || "Unknown error"}`,
      );
    }
    console.log("Successfully fetched Claude Code OAuth token");
    return body.oauth_token;
  } catch (error) {
    // Log with context, then propagate so the caller can decide how to fail.
    console.error("Failed to fetch Claude Code OAuth token:", error);
    throw error;
  }
}
/**
 * Remote Agent mode implementation.
 *
 * This mode is specifically designed for repository_dispatch events triggered by external APIs.
 * It bypasses the standard trigger checking, comment tracking, and GitHub data fetching used by tag mode,
 * making it ideal for automated tasks triggered via API calls with custom payloads.
 */
export const remoteAgentMode: Mode = {
  name: "remote-agent",
  description: "Remote automation mode for repository_dispatch events",
  // Triggers exclusively on repository_dispatch events.
  shouldTrigger(context) {
    // Only trigger for repository_dispatch events
    return isRepositoryDispatchEvent(context);
  },
  // Builds the minimal mode context; branch names come from `data` when present.
  prepareContext(context, data) {
    // Remote agent mode uses minimal context
    return {
      mode: "remote-agent",
      githubContext: context,
      baseBranch: data?.baseBranch,
      claudeBranch: data?.claudeBranch,
    };
  },
  // Tool allow/deny lists are computed in prepare() from inputs, not here.
  getAllowedTools() {
    return [];
  },
  getDisallowedTools() {
    return [];
  },
  // Remote agent mode never posts a tracking comment.
  shouldCreateTrackingComment() {
    return false;
  },
  /**
   * Prepares a remote-agent run end to end: acquires an OIDC token, resolves
   * authentication (existing API key / OAuth token, else fetches one from the
   * configured endpoint), sets up the working branch (with optional session
   * resume), writes the prompt file, and exports stream/tool/MCP
   * configuration via GitHub Actions outputs and environment variables.
   */
  async prepare({
    context,
    octokit,
    githubToken,
  }: ModeOptions): Promise<ModeResult> {
    // Remote agent mode handles repository_dispatch events only
    if (!isRepositoryDispatchEvent(context)) {
      throw new Error(
        "Remote agent mode can only handle repository_dispatch events",
      );
    }
    // Extract task details from client_payload
    // NOTE(review): this cast is unvalidated — confirm external senders
    // conform to this shape before relying on optional fields.
    const payload = context.payload;
    const clientPayload = payload.client_payload as {
      prompt?: string;
      stream_endpoint?: string;
      headers?: Record<string, string>;
      resume_endpoint?: string;
      session_id?: string;
      endpoints?: {
        stream?: string;
        progress?: string;
        systemProgress?: string;
        oauthToken?: string;
      };
      overrideInputs?: {
        model?: string;
        base_branch?: string;
      };
    };
    // Get OIDC token for streaming and potential OAuth token fetching
    let oidcToken: string;
    try {
      oidcToken = await core.getIDToken("claude-code-github-action");
    } catch (error) {
      console.error("Failed to get OIDC token:", error);
      throw new Error(
        `OIDC token required for remote-agent mode. Please add 'id-token: write' to your workflow permissions. Error: ${error}`,
      );
    }
    // Set up system progress config if endpoint is provided
    let systemProgressConfig: SystemProgressConfig | null = null;
    if (context.progressTracking?.systemProgressEndpoint) {
      systemProgressConfig = {
        endpoint: context.progressTracking.systemProgressEndpoint,
        headers: context.progressTracking.headers,
      };
    }
    // Handle authentication - fetch OAuth token if needed
    const anthropicApiKey = process.env.ANTHROPIC_API_KEY;
    const claudeCodeOAuthToken = process.env.CLAUDE_CODE_OAUTH_TOKEN;
    if (!anthropicApiKey && !claudeCodeOAuthToken) {
      const oauthTokenEndpoint = context.progressTracking?.oauthTokenEndpoint;
      if (oauthTokenEndpoint) {
        console.log(
          "No API key or OAuth token found, fetching OAuth token from endpoint",
        );
        try {
          const fetchedToken = await fetchClaudeCodeOAuthToken(
            oauthTokenEndpoint,
            oidcToken,
            context.progressTracking?.sessionId,
          );
          core.setOutput("claude_code_oauth_token", fetchedToken);
          console.log(
            "Successfully fetched and set OAuth token for Claude Code",
          );
        } catch (error) {
          console.error("Failed to fetch OAuth token:", error);
          throw new Error(
            `Authentication failed: No API key or OAuth token available, and OAuth token fetching failed: ${error}`,
          );
        }
      } else {
        throw new Error(
          "No authentication available: Missing ANTHROPIC_API_KEY, CLAUDE_CODE_OAUTH_TOKEN, and no OAuth token endpoint provided",
        );
      }
    } else {
      console.log("Using existing authentication (API key or OAuth token)");
    }
    // Payload prompt wins over the workflow's direct_prompt input.
    const taskDescription =
      clientPayload.prompt ||
      context.inputs.directPrompt ||
      "No task description provided";
    // Setup branch for work isolation with resume support
    let branchInfo;
    try {
      branchInfo = await setupBranchWithResume(octokit, context, oidcToken);
    } catch (error) {
      // Report failure if we have system progress config
      if (systemProgressConfig) {
        reportWorkflowFailed(
          systemProgressConfig,
          oidcToken,
          "initialization",
          error as Error,
          "branch_setup_failed",
        );
      }
      throw error;
    }
    // Remote agent mode always uses commit signing for security
    // No git authentication configuration needed as we use GitHub API
    // Handle resume messages if they exist
    if (branchInfo.resumeMessages && branchInfo.resumeMessages.length > 0) {
      console.log(
        `Resumed session with ${branchInfo.resumeMessages.length} previous messages`,
      );
      // Store resume messages for later use
      // These will be prepended to the conversation when Claude starts
      core.setOutput(
        "resume_messages",
        JSON.stringify(branchInfo.resumeMessages),
      );
    }
    // Report workflow initialized
    if (systemProgressConfig) {
      reportWorkflowInitialized(
        systemProgressConfig,
        oidcToken,
        branchInfo.claudeBranch || branchInfo.currentBranch,
        branchInfo.baseBranch,
        context.progressTracking?.sessionId,
      );
    }
    // Create prompt directory
    await mkdir(`${process.env.RUNNER_TEMP}/claude-prompts`, {
      recursive: true,
    });
    // Fetch trigger user display name from context.actor
    // (best-effort: failure only loses the co-author display name)
    let triggerDisplayName: string | null | undefined;
    if (context.actor) {
      try {
        const octokits = createOctokit(githubToken);
        triggerDisplayName = await fetchUserDisplayName(
          octokits,
          context.actor,
        );
      } catch (error) {
        console.warn(
          `Failed to fetch user display name for ${context.actor}:`,
          error,
        );
      }
    }
    // Generate dispatch-specific prompt (just the task description)
    const promptContent = generateDispatchPrompt(taskDescription);
    console.log("Writing prompt file...");
    console.log("Contents: ", promptContent);
    // Write the prompt file
    await writeFile(
      `${process.env.RUNNER_TEMP}/claude-prompts/claude-prompt.txt`,
      promptContent,
    );
    console.log(
      `Prompt file written successfully to ${process.env.RUNNER_TEMP}/claude-prompts/claude-prompt.txt`,
    );
    // Set stream configuration for repository_dispatch events
    if (context.progressTracking) {
      const streamConfig: StreamConfig = {};
      if (context.progressTracking.resumeEndpoint) {
        streamConfig.resume_endpoint = context.progressTracking.resumeEndpoint;
      }
      if (context.progressTracking.sessionId) {
        streamConfig.session_id = context.progressTracking.sessionId;
      }
      if (context.progressTracking.progressEndpoint) {
        streamConfig.progress_endpoint =
          context.progressTracking.progressEndpoint;
      }
      if (context.progressTracking.systemProgressEndpoint) {
        streamConfig.system_progress_endpoint =
          context.progressTracking.systemProgressEndpoint;
      }
      // Merge provided headers with OIDC token
      const headers: Record<string, string> = {
        ...(context.progressTracking.headers || {}),
      };
      // Use existing OIDC token for streaming
      headers["Authorization"] = `Bearer ${oidcToken}`;
      if (Object.keys(headers).length > 0) {
        streamConfig.headers = headers;
      }
      console.log("Setting stream config:", streamConfig);
      core.setOutput("stream_config", JSON.stringify(streamConfig));
    }
    // Export tool environment variables for remote agent mode
    // Check if we have actions:read permission for CI tools
    const hasActionsReadPermission =
      context.inputs.additionalPermissions.get("actions") === "read";
    const allowedToolsString = buildRemoteAgentAllowedToolsString(
      context.inputs.allowedTools,
      hasActionsReadPermission,
    );
    const disallowedToolsString = buildDisallowedToolsString(
      context.inputs.disallowedTools,
    );
    core.exportVariable("ALLOWED_TOOLS", allowedToolsString);
    core.exportVariable("DISALLOWED_TOOLS", disallowedToolsString);
    // Handle model override from repository_dispatch payload
    if (clientPayload.overrideInputs?.model) {
      core.setOutput("anthropic_model", clientPayload.overrideInputs.model);
    }
    // Get minimal MCP configuration for remote agent mode
    const additionalMcpConfig = process.env.MCP_CONFIG || "";
    const mcpConfig = await prepareMcpConfig({
      githubToken,
      owner: context.repository.owner,
      repo: context.repository.repo,
      branch: branchInfo.claudeBranch || branchInfo.currentBranch,
      baseBranch: branchInfo.baseBranch,
      additionalMcpConfig,
      claudeCommentId: "", // No comment ID for remote agent mode
      allowedTools: context.inputs.allowedTools,
      context,
    });
    core.setOutput("mcp_config", mcpConfig);
    // Report Claude is starting
    if (systemProgressConfig) {
      reportClaudeStarting(systemProgressConfig, oidcToken);
    }
    // Track Claude start time for duration calculation
    core.setOutput("claude_start_time", Date.now().toString());
    // Export system prompt for remote agent mode
    const systemPrompt = generateDispatchSystemPrompt(
      context,
      branchInfo.baseBranch,
      branchInfo.claudeBranch,
      context.actor,
      triggerDisplayName,
    );
    core.exportVariable("APPEND_SYSTEM_PROMPT", systemPrompt);
    return {
      commentId: undefined, // No comment tracking for remote agent mode
      branchInfo,
      mcpConfig,
    };
  },
  generatePrompt(context: PreparedContext): string {
    // TODO: update this to generate a more meaningful prompt
    return `Repository: ${context.repository}`;
  },
};
/**
 * Builds the user-facing prompt for a repository_dispatch run.
 * Currently the prompt is the task description passed through verbatim;
 * this indirection exists so the prompt format can evolve in one place.
 */
function generateDispatchPrompt(taskDescription: string): string {
  return taskDescription;
}
/**
 * Generates the system prompt portion for repository_dispatch events.
 *
 * @param context - GitHub context (only `repository` is read)
 * @param baseBranch - Branch the work is based on (used in the PR compare URL)
 * @param claudeBranch - Dedicated working branch, if one was created
 * @param triggerUsername - GitHub login of the triggering user, if known
 * @param triggerDisplayName - Resolved display name of the triggering user
 * @returns The system prompt text appended to Claude's instructions
 */
function generateDispatchSystemPrompt(
  context: GitHubContext,
  baseBranch: string,
  claudeBranch: string | undefined,
  triggerUsername?: string,
  triggerDisplayName?: string | null,
): string {
  const { repository } = context;
  // Skip the trailer when the user is unknown (no display name and the
  // placeholder login "Unknown").
  const coAuthorLine =
    triggerUsername && (triggerDisplayName || triggerUsername !== "Unknown")
      ? `Co-authored-by: ${triggerDisplayName ?? triggerUsername} <${triggerUsername}@users.noreply.github.com>`
      : "";
  // Remote agent mode always uses MCP for commit signing
  let commitInstructions = `- Use mcp__github_file_ops__commit_files and mcp__github_file_ops__delete_files to commit and push changes`;
  if (coAuthorLine) {
    commitInstructions += `
- When pushing changes, include a Co-authored-by trailer in the commit message
- Use: "${coAuthorLine}"`;
  }
  // Bug fix: only name the working branch when one exists. Previously an
  // undefined claudeBranch was interpolated into the prompt as the literal
  // string "undefined is the ONLY branch you should work on."
  const branchInstruction = claudeBranch
    ? `- You're already on a new branch - NEVER create another branch (this is very important). ${claudeBranch} is the ONLY branch you should work on.`
    : `- Work on the current branch - NEVER create another branch (this is very important).`;
  return `You are Claude, an AI assistant designed to help with GitHub issues and pull requests. Think carefully as you analyze the context and respond appropriately. Here's the context for your current task:
Your task is to complete the request described in the task description.
Instructions:
1. For questions: Research the codebase and provide a detailed answer
2. For implementations: Make the requested changes, commit, and push
Key points:
${branchInstruction}
${commitInstructions}
${
  claudeBranch
    ? `- After completing your work, provide a URL to create a PR in this format:
  ${GITHUB_SERVER_URL}/${repository.owner}/${repository.repo}/compare/${baseBranch}...${claudeBranch}?quick_pull=1`
    : ""
}`;
}

View File

@@ -0,0 +1,78 @@
/**
 * System progress tracking types for remote agent mode
 */
/**
 * Fields shared by every progress event.
 */
type BaseProgressEvent = {
  timestamp: string; // ISO 8601
};
/**
 * Emitted once workflow initialization (branch setup) has completed.
 */
export type WorkflowInitializingEvent = BaseProgressEvent & {
  event_type: "workflow_initializing";
  data: {
    branch: string;
    base_branch: string;
    session_id?: string;
  };
};
/**
 * Emitted immediately before Claude execution begins.
 */
export type ClaudeStartingEvent = BaseProgressEvent & {
  event_type: "claude_starting";
  data: Record<string, never>; // No data needed
};
/**
 * Emitted when Claude execution finishes, with its exit status and duration.
 */
export type ClaudeCompleteEvent = BaseProgressEvent & {
  event_type: "claude_complete";
  data: {
    exit_code: number;
    duration_ms: number;
  };
};
/**
 * Emitted when the workflow fails, identifying the phase and an error code.
 */
export type WorkflowFailedEvent = BaseProgressEvent & {
  event_type: "workflow_failed";
  data: {
    error: {
      phase: "initialization" | "claude_execution";
      message: string;
      code: string;
    };
  };
};
/**
 * Discriminated union (on `event_type`) of all progress events.
 */
export type ProgressEvent =
  | WorkflowInitializingEvent
  | ClaudeStartingEvent
  | ClaudeCompleteEvent
  | WorkflowFailedEvent;
/**
 * Payload sent to the system progress endpoint (currently one event per request).
 */
export type SystemProgressPayload = ProgressEvent;
/**
 * Configuration for system progress reporting
 */
export type SystemProgressConfig = {
  endpoint: string;
  headers?: Record<string, string>;
  timeout_ms?: number; // Default: 5000
};

View File

@@ -0,0 +1,149 @@
import * as core from "@actions/core";
import type {
ProgressEvent,
SystemProgressPayload,
SystemProgressConfig,
WorkflowInitializingEvent,
ClaudeStartingEvent,
ClaudeCompleteEvent,
WorkflowFailedEvent,
} from "./progress-types";
/**
 * POSTs a progress event to the system progress endpoint.
 *
 * Fire-and-forget: the request runs in the background, is aborted after
 * `config.timeout_ms` (default 5000 ms), and any failure is logged as a
 * warning rather than thrown, so progress reporting can never interrupt
 * the workflow itself.
 */
function sendProgressEvent(
  event: ProgressEvent,
  config: SystemProgressConfig,
  oidcToken: string,
): void {
  const payload: SystemProgressPayload = event;
  console.log(
    `Sending system progress event: ${event.event_type}`,
    JSON.stringify(payload, null, 2),
  );
  const deliver = async (): Promise<void> => {
    // Abort the request if the endpoint is too slow to respond.
    const controller = new AbortController();
    const timer = setTimeout(
      () => controller.abort(),
      config.timeout_ms || 5000,
    );
    try {
      const response = await fetch(config.endpoint, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          Authorization: `Bearer ${oidcToken}`,
          ...config.headers,
        },
        body: JSON.stringify(payload),
        signal: controller.signal,
      });
      if (!response.ok) {
        console.error(
          `System progress endpoint returned ${response.status}: ${response.statusText}`,
        );
      }
    } finally {
      clearTimeout(timer);
    }
  };
  // Fire and forget - don't await
  deliver().catch((error) => {
    // Log but don't throw - we don't want progress reporting to interrupt the workflow
    core.warning(`Failed to send system progress event: ${error}`);
  });
}
/**
 * Report that workflow initialization has completed (fire-and-forget).
 *
 * @param branch - Working branch Claude will use
 * @param baseBranch - Branch the work is based on
 * @param sessionId - Optional session identifier; omitted from the event when absent
 */
export function reportWorkflowInitialized(
  config: SystemProgressConfig,
  oidcToken: string,
  branch: string,
  baseBranch: string,
  sessionId?: string,
): void {
  const data: WorkflowInitializingEvent["data"] = {
    branch,
    base_branch: baseBranch,
  };
  if (sessionId) {
    data.session_id = sessionId;
  }
  sendProgressEvent(
    {
      timestamp: new Date().toISOString(),
      event_type: "workflow_initializing",
      data,
    },
    config,
    oidcToken,
  );
}
/**
 * Report that Claude execution is about to begin (fire-and-forget).
 */
export function reportClaudeStarting(
  config: SystemProgressConfig,
  oidcToken: string,
): void {
  const startingEvent: ClaudeStartingEvent = {
    event_type: "claude_starting",
    timestamp: new Date().toISOString(),
    data: {},
  };
  sendProgressEvent(startingEvent, config, oidcToken);
}
/**
 * Report that Claude execution finished (fire-and-forget).
 *
 * @param exitCode - Claude process exit code
 * @param durationMs - Wall-clock execution time in milliseconds
 */
export function reportClaudeComplete(
  config: SystemProgressConfig,
  oidcToken: string,
  exitCode: number,
  durationMs: number,
): void {
  sendProgressEvent(
    {
      timestamp: new Date().toISOString(),
      event_type: "claude_complete",
      data: { exit_code: exitCode, duration_ms: durationMs },
    },
    config,
    oidcToken,
  );
}
/**
 * Report that the workflow failed (fire-and-forget).
 *
 * @param phase - Which phase of the workflow failed
 * @param error - The failure, as an Error or a plain message string
 * @param code - Machine-readable error code for the consumer
 */
export function reportWorkflowFailed(
  config: SystemProgressConfig,
  oidcToken: string,
  phase: "initialization" | "claude_execution",
  error: Error | string,
  code: string,
): void {
  const message = typeof error === "string" ? error : error.message;
  const failureEvent: WorkflowFailedEvent = {
    timestamp: new Date().toISOString(),
    event_type: "workflow_failed",
    data: {
      error: { phase, message, code },
    },
  };
  sendProgressEvent(failureEvent, config, oidcToken);
}

View File

@@ -3,7 +3,7 @@ import type { PreparedContext } from "../create-prompt/types";
import type { FetchDataResult } from "../github/data/fetcher"; import type { FetchDataResult } from "../github/data/fetcher";
import type { Octokits } from "../github/api/client"; import type { Octokits } from "../github/api/client";
export type ModeName = "tag" | "agent" | "experimental-review"; export type ModeName = "tag" | "agent" | "remote-agent" | "experimental-review";
export type ModeContext = { export type ModeContext = {
mode: ModeName; mode: ModeName;

29
src/types/resume.ts Normal file
View File

@@ -0,0 +1,29 @@
/**
* Types for resume endpoint functionality
*/
/**
* Message structure from the resume endpoint
* This matches the structure used in Claude CLI's teleport feature
*/
export type ResumeMessage = {
role: "user" | "assistant" | "system";
content: string | Array<{ type: string; text?: string; [key: string]: any }>;
[key: string]: any;
};
/**
* Response structure from the resume endpoint
*/
export type ResumeResponse = {
log: ResumeMessage[];
branch?: string;
};
/**
* Result after processing resume endpoint
*/
export type ResumeResult = {
messages: ResumeMessage[];
branchName: string;
};

View File

@@ -0,0 +1,19 @@
/**
* Configuration for streaming and progress tracking
*/
export type StreamConfig = {
/** Endpoint for streaming Claude execution progress */
progress_endpoint?: string;
/** Endpoint for system-level progress reporting (workflow lifecycle events) */
system_progress_endpoint?: string;
/** Resume endpoint for teleport functionality */
resume_endpoint?: string;
/** Session ID for tracking */
session_id?: string;
/** Headers to include with streaming requests (includes Authorization) */
headers?: Record<string, string>;
};

View File

@@ -7,6 +7,7 @@ import {
getEventTypeAndContext, getEventTypeAndContext,
buildAllowedToolsString, buildAllowedToolsString,
buildDisallowedToolsString, buildDisallowedToolsString,
buildRemoteAgentAllowedToolsString,
} from "../src/create-prompt"; } from "../src/create-prompt";
import type { PreparedContext } from "../src/create-prompt"; import type { PreparedContext } from "../src/create-prompt";
import type { Mode } from "../src/modes/types"; import type { Mode } from "../src/modes/types";
@@ -1149,3 +1150,117 @@ describe("buildDisallowedToolsString", () => {
expect(result).toBe("BadTool1,BadTool2"); expect(result).toBe("BadTool1,BadTool2");
}); });
}); });
describe("buildRemoteAgentAllowedToolsString", () => {
test("should return correct tools for remote agent mode (always uses commit signing)", () => {
const result = buildRemoteAgentAllowedToolsString();
// Base tools should be present
expect(result).toContain("Edit");
expect(result).toContain("Glob");
expect(result).toContain("Grep");
expect(result).toContain("LS");
expect(result).toContain("Read");
expect(result).toContain("Write");
// Comment tool should always be included
expect(result).toContain("mcp__github_comment__update_claude_comment");
// MCP commit signing tools should always be included
expect(result).toContain("mcp__github_file_ops__commit_files");
expect(result).toContain("mcp__github_file_ops__delete_files");
// Safe git tools should be included
expect(result).toContain("Bash(git status:*)");
expect(result).toContain("Bash(git diff:*)");
expect(result).toContain("Bash(git log:*)");
// Dangerous git tools should NOT be included
expect(result).not.toContain("Bash(git commit:*)");
expect(result).not.toContain("Bash(git add:*)");
expect(result).not.toContain("Bash(git push:*)");
expect(result).not.toContain("Bash(git config");
expect(result).not.toContain("Bash(git rm:*)");
});
test("should include custom tools when provided", () => {
const customTools = ["CustomTool1", "CustomTool2"];
const result = buildRemoteAgentAllowedToolsString(customTools);
// Base tools should be present
expect(result).toContain("Edit");
expect(result).toContain("Glob");
// Custom tools should be included
expect(result).toContain("CustomTool1");
expect(result).toContain("CustomTool2");
// MCP commit signing tools should still be included
expect(result).toContain("mcp__github_file_ops__commit_files");
expect(result).toContain("mcp__github_file_ops__delete_files");
// Dangerous git tools should still NOT be included
expect(result).not.toContain("Bash(git commit:*)");
expect(result).not.toContain("Bash(git config");
});
test("should include GitHub Actions tools when includeActionsTools is true", () => {
const result = buildRemoteAgentAllowedToolsString([], true);
// Base tools should be present
expect(result).toContain("Edit");
expect(result).toContain("Glob");
// GitHub Actions tools should be included
expect(result).toContain("mcp__github_ci__get_ci_status");
expect(result).toContain("mcp__github_ci__get_workflow_run_details");
expect(result).toContain("mcp__github_ci__download_job_log");
// MCP commit signing tools should still be included
expect(result).toContain("mcp__github_file_ops__commit_files");
expect(result).toContain("mcp__github_file_ops__delete_files");
// Dangerous git tools should still NOT be included
expect(result).not.toContain("Bash(git commit:*)");
expect(result).not.toContain("Bash(git config");
});
test("should include both custom and Actions tools when both provided", () => {
const customTools = ["CustomTool1"];
const result = buildRemoteAgentAllowedToolsString(customTools, true);
// Base tools should be present
expect(result).toContain("Edit");
// Custom tools should be included
expect(result).toContain("CustomTool1");
// GitHub Actions tools should be included
expect(result).toContain("mcp__github_ci__get_ci_status");
// MCP commit signing tools should still be included
expect(result).toContain("mcp__github_file_ops__commit_files");
// Dangerous git tools should still NOT be included
expect(result).not.toContain("Bash(git commit:*)");
expect(result).not.toContain("Bash(git config");
});
test("should never include dangerous git tools regardless of parameters", () => {
const dangerousCustomTools = ["Bash(git commit:*)", "Bash(git config:*)"];
const result = buildRemoteAgentAllowedToolsString(
dangerousCustomTools,
true,
);
// The function should still include dangerous tools if explicitly provided in custom tools
// This is by design - if someone explicitly adds them, they should be included
expect(result).toContain("Bash(git commit:*)");
expect(result).toContain("Bash(git config:*)");
// But the base function should not add them automatically
const resultWithoutCustom = buildRemoteAgentAllowedToolsString([], true);
expect(resultWithoutCustom).not.toContain("Bash(git commit:*)");
expect(resultWithoutCustom).not.toContain("Bash(git config");
});
});

View File

@@ -39,13 +39,13 @@ describe("Mode Registry", () => {
test("getMode throws error for tag mode with workflow_dispatch event", () => { test("getMode throws error for tag mode with workflow_dispatch event", () => {
expect(() => getMode("tag", mockWorkflowDispatchContext)).toThrow( expect(() => getMode("tag", mockWorkflowDispatchContext)).toThrow(
"Tag mode cannot handle workflow_dispatch events. Use 'agent' mode for automation events.", "Tag mode cannot handle workflow_dispatch events. Use 'agent' mode for automation events or 'remote-agent' mode for repository_dispatch events.",
); );
}); });
test("getMode throws error for tag mode with schedule event", () => { test("getMode throws error for tag mode with schedule event", () => {
expect(() => getMode("tag", mockScheduleContext)).toThrow( expect(() => getMode("tag", mockScheduleContext)).toThrow(
"Tag mode cannot handle schedule events. Use 'agent' mode for automation events.", "Tag mode cannot handle schedule events. Use 'agent' mode for automation events or 'remote-agent' mode for repository_dispatch events.",
); );
}); });
@@ -64,7 +64,7 @@ describe("Mode Registry", () => {
test("getMode throws error for invalid mode", () => { test("getMode throws error for invalid mode", () => {
const invalidMode = "invalid" as unknown as ModeName; const invalidMode = "invalid" as unknown as ModeName;
expect(() => getMode(invalidMode, mockContext)).toThrow( expect(() => getMode(invalidMode, mockContext)).toThrow(
"Invalid mode 'invalid'. Valid modes are: 'tag', 'agent', 'experimental-review'. Please check your workflow configuration.", "Invalid mode 'invalid'. Valid modes are: 'tag', 'agent', 'remote-agent', 'experimental-review'. Please check your workflow configuration.",
); );
}); });
@@ -72,6 +72,7 @@ describe("Mode Registry", () => {
expect(isValidMode("tag")).toBe(true); expect(isValidMode("tag")).toBe(true);
expect(isValidMode("agent")).toBe(true); expect(isValidMode("agent")).toBe(true);
expect(isValidMode("experimental-review")).toBe(true); expect(isValidMode("experimental-review")).toBe(true);
expect(isValidMode("remote-agent")).toBe(true);
}); });
test("isValidMode returns false for invalid mode", () => { test("isValidMode returns false for invalid mode", () => {

View File

@@ -0,0 +1,28 @@
import { describe, test, expect } from "bun:test";
import type { StreamConfig } from "../src/types/stream-config";
describe("report-claude-complete", () => {
test("StreamConfig type should include system_progress_endpoint", () => {
const config: StreamConfig = {
progress_endpoint: "https://example.com/progress",
system_progress_endpoint: "https://example.com/system-progress",
resume_endpoint: "https://example.com/resume",
session_id: "test-session",
headers: {
Authorization: "Bearer test-token",
},
};
expect(config.system_progress_endpoint).toBe(
"https://example.com/system-progress",
);
});
test("StreamConfig type should allow optional fields", () => {
const config: StreamConfig = {};
expect(config.system_progress_endpoint).toBeUndefined();
expect(config.progress_endpoint).toBeUndefined();
expect(config.headers).toBeUndefined();
});
});