Compare commits

..

2 Commits

Author: Claude · SHA1: a3ac1fb750 · Date: 2025-11-14 17:22:35 +00:00

chore: clean up markdown formatting

- Remove unnecessary blank lines in documentation files
- Improve readability and consistency across markdown files
- No content changes, only formatting improvements

Author: Claude · SHA1: f2ceddc1a3 · Date: 2025-11-14 17:21:35 +00:00

docs: add Examples section to README with links to workflow examples

- Added new "Examples" section highlighting the examples/ folder
- Included descriptions for all example workflow files
- Provides clear guidance on how users can utilize the examples
- Positioned strategically after Solutions & Use Cases section
16 changed files with 18 additions and 951 deletions

View File

@@ -17,7 +17,6 @@ TASK OVERVIEW:
1. First, fetch the list of labels available in this repository by running: `gh label list`. Run exactly this command and nothing else.
2. Next, use gh commands to get context about the issue:
- Use `gh issue view ${{ github.event.issue.number }}` to retrieve the current issue's details
- Use `gh search issues` to find similar issues that might provide context for proper categorization
- You have access to these Bash commands:
@@ -27,7 +26,6 @@ TASK OVERVIEW:
- Bash(gh search:*) - to search for similar issues
3. Analyze the issue content, considering:
- The issue title and description
- The type of issue (bug report, feature request, question, etc.)
- Technical areas mentioned
@@ -36,7 +34,6 @@ TASK OVERVIEW:
- Components affected
4. Select appropriate labels from the available labels list provided above:
- Choose labels that accurately reflect the issue's nature
- Be specific but comprehensive
- IMPORTANT: Add a priority label (P1, P2, or P3) based on the label descriptions from gh label list
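For context, a minimal sketch of how the triage prompt above might be wired as a workflow step. The `allowed_tools` entries mirror the `Bash(gh …)` patterns the prompt names; the step layout and the exact tool set are assumptions, not part of this diff.

```yaml
- name: Triage incoming issue
  uses: ./base-action
  with:
    anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
    # Mirrors the Bash(...) allowlist from the prompt above; exact set assumed.
    allowed_tools: "Bash(gh label:*),Bash(gh issue view:*),Bash(gh search:*)"
    prompt: |
      Fetch labels with `gh label list`, review the issue with
      `gh issue view ${{ github.event.issue.number }}`, then apply
      fitting labels, including a P1/P2/P3 priority label.
```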

View File

@@ -1,335 +0,0 @@
name: Test Structured Outputs (Optimized)
# This workflow uses EXPLICIT prompts that tell Claude exactly what to return.
# This keeps tests fast and deterministic, and focuses them on testing OUR code, not Claude's reasoning.
#
# NOTE: Disabled until Agent SDK structured outputs feature is released
# The --json-schema flag is not yet available in public Claude Code releases
on:
# Disabled - uncomment when feature is released
# push:
# branches: [main]
# pull_request:
workflow_dispatch:
permissions:
contents: read
jobs:
test-basic-types:
name: Test Basic Type Conversions
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Test with explicit values
id: test
uses: ./base-action
with:
# EXPLICIT: Tell Claude exactly what to return - no reasoning needed
prompt: |
Run this command: echo "test"
Then return EXACTLY these values:
- text_field: "hello"
- number_field: 42
- boolean_true: true
- boolean_false: false
json_schema: |
{
"type": "object",
"properties": {
"text_field": {"type": "string"},
"number_field": {"type": "number"},
"boolean_true": {"type": "boolean"},
"boolean_false": {"type": "boolean"}
},
"required": ["text_field", "number_field", "boolean_true", "boolean_false"]
}
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
allowed_tools: "Bash"
- name: Verify outputs
run: |
# Test string pass-through
if [ "${{ steps.test.outputs.text_field }}" != "hello" ]; then
echo "❌ String: expected 'hello', got '${{ steps.test.outputs.text_field }}'"
exit 1
fi
# Test number → string conversion
if [ "${{ steps.test.outputs.number_field }}" != "42" ]; then
echo "❌ Number: expected '42', got '${{ steps.test.outputs.number_field }}'"
exit 1
fi
# Test boolean → "true" conversion
if [ "${{ steps.test.outputs.boolean_true }}" != "true" ]; then
echo "❌ Boolean true: expected 'true', got '${{ steps.test.outputs.boolean_true }}'"
exit 1
fi
# Test boolean → "false" conversion
if [ "${{ steps.test.outputs.boolean_false }}" != "false" ]; then
echo "❌ Boolean false: expected 'false', got '${{ steps.test.outputs.boolean_false }}'"
exit 1
fi
echo "✅ All basic type conversions correct"
test-complex-types:
name: Test Arrays and Objects
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Test complex types
id: test
uses: ./base-action
with:
# EXPLICIT: No file reading, no analysis
prompt: |
Run: echo "ready"
Return EXACTLY:
- items: ["apple", "banana", "cherry"]
- config: {"key": "value", "count": 3}
- empty_array: []
json_schema: |
{
"type": "object",
"properties": {
"items": {
"type": "array",
"items": {"type": "string"}
},
"config": {"type": "object"},
"empty_array": {"type": "array"}
},
"required": ["items", "config", "empty_array"]
}
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
allowed_tools: "Bash"
- name: Verify JSON stringification
run: |
# Arrays should be JSON stringified
ITEMS='${{ steps.test.outputs.items }}'
if ! echo "$ITEMS" | jq -e '. | length == 3' > /dev/null; then
echo "❌ Array not properly stringified: $ITEMS"
exit 1
fi
# Objects should be JSON stringified
CONFIG='${{ steps.test.outputs.config }}'
if ! echo "$CONFIG" | jq -e '.key == "value"' > /dev/null; then
echo "❌ Object not properly stringified: $CONFIG"
exit 1
fi
# Empty arrays should work
EMPTY='${{ steps.test.outputs.empty_array }}'
if ! echo "$EMPTY" | jq -e '. | length == 0' > /dev/null; then
echo "❌ Empty array not properly stringified: $EMPTY"
exit 1
fi
echo "✅ All complex types JSON stringified correctly"
test-edge-cases:
name: Test Edge Cases
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Test edge cases
id: test
uses: ./base-action
with:
prompt: |
Run: echo "test"
Return EXACTLY:
- zero: 0
- empty_string: ""
- negative: -5
- decimal: 3.14
json_schema: |
{
"type": "object",
"properties": {
"zero": {"type": "number"},
"empty_string": {"type": "string"},
"negative": {"type": "number"},
"decimal": {"type": "number"}
},
"required": ["zero", "empty_string", "negative", "decimal"]
}
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
allowed_tools: "Bash"
- name: Verify edge cases
run: |
# Zero should be "0", not empty or falsy
if [ "${{ steps.test.outputs.zero }}" != "0" ]; then
echo "❌ Zero: expected '0', got '${{ steps.test.outputs.zero }}'"
exit 1
fi
# Empty string should be empty (not "null" or missing)
if [ "${{ steps.test.outputs.empty_string }}" != "" ]; then
echo "❌ Empty string: expected '', got '${{ steps.test.outputs.empty_string }}'"
exit 1
fi
# Negative numbers should work
if [ "${{ steps.test.outputs.negative }}" != "-5" ]; then
echo "❌ Negative: expected '-5', got '${{ steps.test.outputs.negative }}'"
exit 1
fi
# Decimals should preserve precision
if [ "${{ steps.test.outputs.decimal }}" != "3.14" ]; then
echo "❌ Decimal: expected '3.14', got '${{ steps.test.outputs.decimal }}'"
exit 1
fi
echo "✅ All edge cases handled correctly"
test-name-sanitization:
name: Test Output Name Sanitization
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Test special characters in field names
id: test
uses: ./base-action
with:
prompt: |
Run: echo "test"
Return EXACTLY: {test-result: "passed", item_count: 10}
json_schema: |
{
"type": "object",
"properties": {
"test-result": {"type": "string"},
"item_count": {"type": "number"}
},
"required": ["test-result", "item_count"]
}
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
allowed_tools: "Bash"
- name: Verify sanitized names work
run: |
# Hyphens should be preserved (GitHub Actions allows them)
if [ "${{ steps.test.outputs.test-result }}" != "passed" ]; then
echo "❌ Hyphenated name failed"
exit 1
fi
# Underscores should work
if [ "${{ steps.test.outputs.item_count }}" != "10" ]; then
echo "❌ Underscore name failed"
exit 1
fi
echo "✅ Name sanitization works"
test-execution-file-structure:
name: Test Execution File Format
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Run with structured output
id: test
uses: ./base-action
with:
prompt: "Run: echo 'complete'. Return: {done: true}"
json_schema: |
{
"type": "object",
"properties": {
"done": {"type": "boolean"}
},
"required": ["done"]
}
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
allowed_tools: "Bash"
- name: Verify execution file contains structured_output
run: |
FILE="${{ steps.test.outputs.execution_file }}"
# Check file exists
if [ ! -f "$FILE" ]; then
echo "❌ Execution file missing"
exit 1
fi
# Check for structured_output field
if ! jq -e '.[] | select(.type == "result") | .structured_output' "$FILE" > /dev/null; then
echo "❌ No structured_output in execution file"
cat "$FILE"
exit 1
fi
# Verify the actual value
DONE=$(jq -r '.[] | select(.type == "result") | .structured_output.done' "$FILE")
if [ "$DONE" != "true" ]; then
echo "❌ Wrong value in execution file"
exit 1
fi
echo "✅ Execution file format correct"
test-summary:
name: Summary
runs-on: ubuntu-latest
needs:
- test-basic-types
- test-complex-types
- test-edge-cases
- test-name-sanitization
- test-execution-file-structure
if: always()
steps:
- name: Generate Summary
run: |
echo "# Structured Output Tests (Optimized)" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "Fast, deterministic tests using explicit prompts" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "| Test | Result |" >> $GITHUB_STEP_SUMMARY
echo "|------|--------|" >> $GITHUB_STEP_SUMMARY
echo "| Basic Types | ${{ needs.test-basic-types.result == 'success' && '✅ PASS' || '❌ FAIL' }} |" >> $GITHUB_STEP_SUMMARY
echo "| Complex Types | ${{ needs.test-complex-types.result == 'success' && '✅ PASS' || '❌ FAIL' }} |" >> $GITHUB_STEP_SUMMARY
echo "| Edge Cases | ${{ needs.test-edge-cases.result == 'success' && '✅ PASS' || '❌ FAIL' }} |" >> $GITHUB_STEP_SUMMARY
echo "| Name Sanitization | ${{ needs.test-name-sanitization.result == 'success' && '✅ PASS' || '❌ FAIL' }} |" >> $GITHUB_STEP_SUMMARY
echo "| Execution File | ${{ needs.test-execution-file-structure.result == 'success' && '✅ PASS' || '❌ FAIL' }} |" >> $GITHUB_STEP_SUMMARY
# Check if all passed
ALL_PASSED=${{
needs.test-basic-types.result == 'success' &&
needs.test-complex-types.result == 'success' &&
needs.test-edge-cases.result == 'success' &&
needs.test-name-sanitization.result == 'success' &&
needs.test-execution-file-structure.result == 'success'
}}
if [ "$ALL_PASSED" = "true" ]; then
echo "" >> $GITHUB_STEP_SUMMARY
echo "## ✅ All Tests Passed" >> $GITHUB_STEP_SUMMARY
else
echo "" >> $GITHUB_STEP_SUMMARY
echo "## ❌ Some Tests Failed" >> $GITHUB_STEP_SUMMARY
exit 1
fi
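For reference when reading the jq checks above, a minimal sketch of the execution file's shape, assuming only the fields this diff's own mocks and queries rely on (`type`, `cost_usd`, `duration_ms`, `structured_output`); real runs emit additional message types and fields.

```json
[
  { "type": "system", "subtype": "init" },
  {
    "type": "result",
    "cost_usd": 0.01,
    "duration_ms": 1000,
    "structured_output": { "done": true }
  }
]
```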

View File

@@ -47,6 +47,21 @@ Looking for specific automation patterns? Check our **[Solutions Guide](./docs/s
Each solution includes complete working examples, configuration details, and expected outcomes.
## 📁 Examples
The [`examples/`](./examples/) folder contains ready-to-use GitHub Actions workflow files that demonstrate various automation patterns:
- **[`pr-review-comprehensive.yml`](./examples/pr-review-comprehensive.yml)** - Complete PR review automation with progress tracking
- **[`pr-review-filtered-paths.yml`](./examples/pr-review-filtered-paths.yml)** - Trigger reviews only for specific file paths
- **[`pr-review-filtered-authors.yml`](./examples/pr-review-filtered-authors.yml)** - Special handling for external contributors
- **[`issue-triage.yml`](./examples/issue-triage.yml)** - Automatic issue categorization and labeling
- **[`issue-deduplication.yml`](./examples/issue-deduplication.yml)** - Detect and handle duplicate issues
- **[`ci-failure-auto-fix.yml`](./examples/ci-failure-auto-fix.yml)** - Automatic CI failure analysis and fixes
- **[`manual-code-analysis.yml`](./examples/manual-code-analysis.yml)** - On-demand code analysis workflows
- **[`claude.yml`](./examples/claude.yml)** - Basic setup for @claude mentions
Copy these examples directly into your `.github/workflows/` directory and customize them for your specific needs. Each example includes detailed comments explaining the configuration and expected behavior.
## Documentation
- **[Solutions Guide](./docs/solutions.md)** - **🎯 Ready-to-use automation patterns**

View File

@@ -113,10 +113,6 @@ inputs:
description: "Newline-separated list of Claude Code plugin marketplace Git URLs to install from (e.g., 'https://github.com/user/marketplace1.git\nhttps://github.com/user/marketplace2.git')"
required: false
default: ""
json_schema:
description: "JSON schema for structured output validation. When provided, Claude will return validated JSON matching this schema, and the action will automatically set GitHub Action outputs for each field."
required: false
default: ""
outputs:
execution_file:
@@ -178,7 +174,6 @@ runs:
TRACK_PROGRESS: ${{ inputs.track_progress }}
ADDITIONAL_PERMISSIONS: ${{ inputs.additional_permissions }}
CLAUDE_ARGS: ${{ inputs.claude_args }}
JSON_SCHEMA: ${{ inputs.json_schema }}
ALL_INPUTS: ${{ toJson(inputs) }}
- name: Install Base Action Dependencies
@@ -194,7 +189,7 @@ runs:
# Install Claude Code if no custom executable is provided
if [ -z "${{ inputs.path_to_claude_code_executable }}" ]; then
echo "Installing Claude Code..."
curl -fsSL https://claude.ai/install.sh | bash -s 2.0.42
curl -fsSL https://claude.ai/install.sh | bash -s 2.0.37
echo "$HOME/.local/bin" >> "$GITHUB_PATH"
else
echo "Using custom Claude Code executable: ${{ inputs.path_to_claude_code_executable }}"
@@ -233,7 +228,6 @@ runs:
INPUT_SHOW_FULL_OUTPUT: ${{ inputs.show_full_output }}
INPUT_PLUGINS: ${{ inputs.plugins }}
INPUT_PLUGIN_MARKETPLACES: ${{ inputs.plugin_marketplaces }}
JSON_SCHEMA: ${{ inputs.json_schema }}
# Model configuration
GITHUB_TOKEN: ${{ steps.prepare.outputs.GITHUB_TOKEN }}
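For context on the removed `json_schema` input: a minimal sketch of its pre-removal use, following the patterns in the deleted test workflow above; the step id and schema are illustrative.

```yaml
- name: Classify
  id: classify
  uses: ./base-action
  with:
    anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
    allowed_tools: "Bash"
    prompt: 'Run: echo ok. Return EXACTLY: {severity: "low"}'
    json_schema: |
      {
        "type": "object",
        "properties": { "severity": { "type": "string" } },
        "required": ["severity"]
      }

# Each schema field became its own action output.
- name: Read the field output
  run: echo "severity=${{ steps.classify.outputs.severity }}"
```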

View File

@@ -57,7 +57,6 @@ Thank you for your interest in contributing to Claude Code Base Action! This doc
```
This script:
- Installs `act` if not present (requires Homebrew on macOS)
- Runs the GitHub Action workflow locally using Docker
- Requires your `ANTHROPIC_API_KEY` to be set

View File

@@ -490,7 +490,6 @@ This example shows how to use OIDC authentication with GCP Vertex AI:
To securely use your Anthropic API key:
1. Add your API key as a repository secret:
- Go to your repository's Settings
- Navigate to "Secrets and variables" → "Actions"
- Click "New repository secret"

View File

@@ -67,10 +67,6 @@ inputs:
description: "Newline-separated list of Claude Code plugin marketplace Git URLs to install from (e.g., 'https://github.com/user/marketplace1.git\nhttps://github.com/user/marketplace2.git')"
required: false
default: ""
json_schema:
description: "JSON schema for structured output validation. When provided, Claude will return validated JSON matching this schema, and the action will automatically set GitHub Action outputs for each field (e.g., access via steps.id.outputs.field_name)"
required: false
default: ""
outputs:
conclusion:
@@ -115,7 +111,7 @@ runs:
run: |
if [ -z "${{ inputs.path_to_claude_code_executable }}" ]; then
echo "Installing Claude Code..."
curl -fsSL https://claude.ai/install.sh | bash -s 2.0.42
curl -fsSL https://claude.ai/install.sh | bash -s 2.0.37
else
echo "Using custom Claude Code executable: ${{ inputs.path_to_claude_code_executable }}"
# Add the directory containing the custom executable to PATH
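Both action.yml files pin the installer to a single release, and the branch above skips that install when a custom executable is supplied. A minimal sketch, assuming a pre-provisioned binary (the path is hypothetical):

```yaml
- uses: ./base-action
  with:
    anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
    # Hypothetical path to a pre-installed binary; bypasses the pinned installer.
    path_to_claude_code_executable: /opt/claude/bin/claude
    prompt: "Run: echo ok"
```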

View File

@@ -1,7 +1,7 @@
import * as core from "@actions/core";
import { exec } from "child_process";
import { promisify } from "util";
import { unlink, writeFile, stat, readFile } from "fs/promises";
import { unlink, writeFile, stat } from "fs/promises";
import { createWriteStream } from "fs";
import { spawn } from "child_process";
import { parse as parseShellArgs } from "shell-quote";
@@ -12,11 +12,6 @@ const PIPE_PATH = `${process.env.RUNNER_TEMP}/claude_prompt_pipe`;
const EXECUTION_FILE = `${process.env.RUNNER_TEMP}/claude-execution-output.json`;
const BASE_ARGS = ["--verbose", "--output-format", "stream-json"];
type ExecutionMessage = {
type: string;
structured_output?: Record<string, unknown>;
};
/**
* Sanitizes JSON output to remove sensitive information when full output is disabled
* Returns a safe summary message or null if the message should be completely suppressed
@@ -127,88 +122,6 @@ export function prepareRunConfig(
};
}
/**
* Sanitizes output field names to meet GitHub Actions output naming requirements
* GitHub outputs must be alphanumeric, hyphen, or underscore only
*/
function sanitizeOutputName(name: string): string {
return name.replace(/[^a-zA-Z0-9_-]/g, "_");
}
/**
* Converts values to string format for GitHub Actions outputs
* GitHub outputs must always be strings
*/
function convertToString(value: unknown): string {
switch (typeof value) {
case "string":
return value;
case "boolean":
case "number":
return String(value);
case "object":
return value === null ? "" : JSON.stringify(value);
case "undefined":
return "";
default:
// Handle Symbol, Function, etc.
return String(value);
}
}
/**
* Parses structured_output from execution file and sets GitHub Action outputs
* Only runs if json_schema was explicitly provided by the user
*/
async function parseAndSetStructuredOutputs(
executionFile: string,
): Promise<void> {
try {
const content = await readFile(executionFile, "utf-8");
const messages = JSON.parse(content) as ExecutionMessage[];
const result = messages.find(
(m) => m.type === "result" && m.structured_output,
);
if (!result?.structured_output) {
const error = new Error(
"json_schema was provided but Claude did not return structured_output. " +
"The schema may be invalid or Claude failed to call the StructuredOutput tool.",
);
core.setFailed(error.message);
throw error;
}
// Set GitHub Action output for each field
const entries = Object.entries(result.structured_output);
core.info(`Setting ${entries.length} structured output(s)`);
for (const [key, value] of entries) {
const sanitizedKey = sanitizeOutputName(key);
if (!sanitizedKey) {
core.warning(`Skipping invalid output key: "${key}"`);
continue;
}
const stringValue = convertToString(value);
// Truncate long values in logs for readability
const displayValue =
stringValue.length > 100
? `${stringValue.slice(0, 97)}...`
: stringValue;
core.setOutput(sanitizedKey, stringValue);
core.info(`${sanitizedKey}=${displayValue}`);
}
} catch (error) {
const errorMsg = `Failed to parse structured outputs: ${error}`;
core.setFailed(errorMsg);
throw new Error(errorMsg);
}
}
export async function runClaude(promptPath: string, options: ClaudeOptions) {
const config = prepareRunConfig(promptPath, options);
@@ -397,11 +310,6 @@ export async function runClaude(promptPath: string, options: ClaudeOptions) {
core.setOutput("conclusion", "success");
core.setOutput("execution_file", EXECUTION_FILE);
// Parse and set structured outputs only if user provided json_schema
if (process.env.JSON_SCHEMA) {
await parseAndSetStructuredOutputs(EXECUTION_FILE);
}
} else {
core.setOutput("conclusion", "failure");

View File

@@ -78,19 +78,5 @@ describe("prepareRunConfig", () => {
"stream-json",
]);
});
test("should include json-schema flag when provided", () => {
const options: ClaudeOptions = {
claudeArgs:
'--json-schema \'{"type":"object","properties":{"result":{"type":"boolean"}}}\'',
};
const prepared = prepareRunConfig("/tmp/test-prompt.txt", options);
expect(prepared.claudeArgs).toContain("--json-schema");
expect(prepared.claudeArgs).toContain(
'{"type":"object","properties":{"result":{"type":"boolean"}}}',
);
});
});
});

View File

@@ -1,341 +0,0 @@
#!/usr/bin/env bun
import { describe, test, expect, afterEach } from "bun:test";
import { writeFile, unlink } from "fs/promises";
import { tmpdir } from "os";
import { join } from "path";
// Import the type for testing
type ExecutionMessage = {
type: string;
structured_output?: Record<string, unknown>;
};
// Mock execution file path
const TEST_EXECUTION_FILE = join(tmpdir(), "test-execution-output.json");
// Helper to create mock execution file with structured output
async function createMockExecutionFile(
structuredOutput?: Record<string, unknown>,
includeResult: boolean = true,
): Promise<void> {
const messages: ExecutionMessage[] = [
{ type: "system", subtype: "init" } as any,
{ type: "turn", content: "test" } as any,
];
if (includeResult) {
messages.push({
type: "result",
cost_usd: 0.01,
duration_ms: 1000,
structured_output: structuredOutput,
} as any);
}
await writeFile(TEST_EXECUTION_FILE, JSON.stringify(messages));
}
describe("Structured Output - Pure Functions", () => {
afterEach(async () => {
try {
await unlink(TEST_EXECUTION_FILE);
} catch {
// Ignore if file doesn't exist
}
});
describe("sanitizeOutputName", () => {
test("should keep valid characters", () => {
const sanitize = (name: string) => name.replace(/[^a-zA-Z0-9_-]/g, "_");
expect(sanitize("valid_name-123")).toBe("valid_name-123");
});
test("should replace invalid characters with underscores", () => {
const sanitize = (name: string) => name.replace(/[^a-zA-Z0-9_-]/g, "_");
expect(sanitize("invalid@name!")).toBe("invalid_name_");
expect(sanitize("has spaces")).toBe("has_spaces");
expect(sanitize("has.dots")).toBe("has_dots");
});
test("should handle special characters", () => {
const sanitize = (name: string) => name.replace(/[^a-zA-Z0-9_-]/g, "_");
expect(sanitize("$field%name&")).toBe("_field_name_");
expect(sanitize("field[0]")).toBe("field_0_");
});
});
describe("convertToString", () => {
const convertToString = (value: unknown): string => {
switch (typeof value) {
case "string":
return value;
case "boolean":
case "number":
return String(value);
case "object":
return value === null ? "" : JSON.stringify(value);
default:
return JSON.stringify(value);
}
};
test("should keep strings as-is", () => {
expect(convertToString("hello")).toBe("hello");
expect(convertToString("")).toBe("");
});
test("should convert booleans to strings", () => {
expect(convertToString(true)).toBe("true");
expect(convertToString(false)).toBe("false");
});
test("should convert numbers to strings", () => {
expect(convertToString(42)).toBe("42");
expect(convertToString(3.14)).toBe("3.14");
expect(convertToString(0)).toBe("0");
});
test("should convert null to empty string", () => {
expect(convertToString(null)).toBe("");
});
test("should JSON stringify objects", () => {
expect(convertToString({ foo: "bar" })).toBe('{"foo":"bar"}');
});
test("should JSON stringify arrays", () => {
expect(convertToString([1, 2, 3])).toBe("[1,2,3]");
expect(convertToString(["a", "b"])).toBe('["a","b"]');
});
test("should handle nested structures", () => {
const nested = { items: [{ id: 1, name: "test" }] };
expect(convertToString(nested)).toBe(
'{"items":[{"id":1,"name":"test"}]}',
);
});
});
describe("parseAndSetStructuredOutputs integration", () => {
test("should parse and set simple structured outputs", async () => {
await createMockExecutionFile({
is_antonly: true,
confidence: 0.95,
risk: "low",
});
// In a real test, we'd import and call parseAndSetStructuredOutputs
// For now, we simulate the behavior
const content = await Bun.file(TEST_EXECUTION_FILE).text();
const messages = JSON.parse(content) as ExecutionMessage[];
const result = messages.find(
(m) => m.type === "result" && m.structured_output,
);
expect(result?.structured_output).toEqual({
is_antonly: true,
confidence: 0.95,
risk: "low",
});
});
test("should handle array outputs", async () => {
await createMockExecutionFile({
affected_areas: ["auth", "database", "api"],
severity: "high",
});
const content = await Bun.file(TEST_EXECUTION_FILE).text();
const messages = JSON.parse(content) as ExecutionMessage[];
const result = messages.find(
(m) => m.type === "result" && m.structured_output,
);
expect(result?.structured_output?.affected_areas).toEqual([
"auth",
"database",
"api",
]);
});
test("should handle nested objects", async () => {
await createMockExecutionFile({
analysis: {
category: "test",
details: { count: 5, passed: true },
},
});
const content = await Bun.file(TEST_EXECUTION_FILE).text();
const messages = JSON.parse(content) as ExecutionMessage[];
const result = messages.find(
(m) => m.type === "result" && m.structured_output,
);
expect(result?.structured_output?.analysis).toEqual({
category: "test",
details: { count: 5, passed: true },
});
});
test("should handle missing structured_output", async () => {
await createMockExecutionFile(undefined, true);
const content = await Bun.file(TEST_EXECUTION_FILE).text();
const messages = JSON.parse(content) as ExecutionMessage[];
const result = messages.find(
(m) => m.type === "result" && m.structured_output,
);
expect(result).toBeUndefined();
});
test("should handle empty structured_output", async () => {
await createMockExecutionFile({});
const content = await Bun.file(TEST_EXECUTION_FILE).text();
const messages = JSON.parse(content) as ExecutionMessage[];
const result = messages.find(
(m) => m.type === "result" && m.structured_output,
);
expect(result?.structured_output).toEqual({});
});
test("should handle all supported types", async () => {
await createMockExecutionFile({
string_field: "hello",
number_field: 42,
boolean_field: true,
null_field: null,
array_field: [1, 2, 3],
object_field: { nested: "value" },
});
const content = await Bun.file(TEST_EXECUTION_FILE).text();
const messages = JSON.parse(content) as ExecutionMessage[];
const result = messages.find(
(m) => m.type === "result" && m.structured_output,
);
expect(result?.structured_output).toMatchObject({
string_field: "hello",
number_field: 42,
boolean_field: true,
null_field: null,
array_field: [1, 2, 3],
object_field: { nested: "value" },
});
});
});
describe("output naming with prefix", () => {
test("should apply prefix correctly", () => {
const prefix = "CLAUDE_";
const key = "is_antonly";
const sanitizedKey = key.replace(/[^a-zA-Z0-9_-]/g, "_");
const outputName = prefix + sanitizedKey;
expect(outputName).toBe("CLAUDE_is_antonly");
});
test("should handle empty prefix", () => {
const prefix = "";
const key = "result";
const sanitizedKey = key.replace(/[^a-zA-Z0-9_-]/g, "_");
const outputName = prefix + sanitizedKey;
expect(outputName).toBe("result");
});
test("should sanitize and prefix invalid keys", () => {
const prefix = "OUT_";
const key = "invalid@key!";
const sanitizedKey = key.replace(/[^a-zA-Z0-9_-]/g, "_");
const outputName = prefix + sanitizedKey;
expect(outputName).toBe("OUT_invalid_key_");
});
});
describe("error scenarios", () => {
test("should handle malformed JSON", async () => {
await writeFile(TEST_EXECUTION_FILE, "invalid json {");
let error: Error | undefined;
try {
const content = await Bun.file(TEST_EXECUTION_FILE).text();
JSON.parse(content);
} catch (e) {
error = e as Error;
}
expect(error).toBeDefined();
expect(error?.message).toContain("JSON");
});
test("should handle empty execution file", async () => {
await writeFile(TEST_EXECUTION_FILE, "[]");
const content = await Bun.file(TEST_EXECUTION_FILE).text();
const messages = JSON.parse(content) as ExecutionMessage[];
const result = messages.find(
(m) => m.type === "result" && m.structured_output,
);
expect(result).toBeUndefined();
});
test("should handle missing result message", async () => {
const messages = [
{ type: "system", subtype: "init" },
{ type: "turn", content: "test" },
];
await writeFile(TEST_EXECUTION_FILE, JSON.stringify(messages));
const content = await Bun.file(TEST_EXECUTION_FILE).text();
const parsed = JSON.parse(content) as ExecutionMessage[];
const result = parsed.find(
(m) => m.type === "result" && m.structured_output,
);
expect(result).toBeUndefined();
});
});
describe("value truncation in logs", () => {
test("should truncate long string values for display", () => {
const longValue = "a".repeat(150);
const displayValue =
longValue.length > 100 ? `${longValue.slice(0, 97)}...` : longValue;
expect(displayValue).toBe("a".repeat(97) + "...");
expect(displayValue.length).toBe(100);
});
test("should not truncate short values", () => {
const shortValue = "short";
const displayValue =
shortValue.length > 100 ? `${shortValue.slice(0, 97)}...` : shortValue;
expect(displayValue).toBe("short");
});
test("should truncate exactly 100 character values", () => {
const value = "a".repeat(100);
const displayValue =
value.length > 100 ? `${value.slice(0, 97)}...` : value;
expect(displayValue).toBe(value);
});
test("should truncate 101 character values", () => {
const value = "a".repeat(101);
const displayValue =
value.length > 100 ? `${value.slice(0, 97)}...` : value;
expect(displayValue).toBe("a".repeat(97) + "...");
});
});
});

View File

@@ -116,7 +116,6 @@ The `additional_permissions` input allows Claude to access GitHub Actions workfl
To allow Claude to view workflow run results, job logs, and CI status:
1. **Grant the necessary permission to your GitHub token**:
- When using the default `GITHUB_TOKEN`, add the `actions: read` permission to your workflow:
```yaml

View File

@@ -228,12 +228,10 @@ jobs:
The action now automatically detects the appropriate mode:
1. **If `prompt` is provided** → Runs in **automation mode**
- Executes immediately without waiting for @claude mentions
- Perfect for scheduled tasks, PR automation, etc.
2. **If no `prompt` but @claude is mentioned** → Runs in **interactive mode**
- Waits for and responds to @claude mentions
- Creates tracking comments with progress
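A minimal sketch of the two modes described above; the prompt text is illustrative, not taken from this diff:

```yaml
# Automation mode: a prompt is provided, so the action runs immediately.
- uses: anthropics/claude-code-action@main
  with:
    anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
    prompt: "Summarize the changes in this PR."

# Interactive mode: no prompt, so the action waits for @claude mentions.
- uses: anthropics/claude-code-action@main
  with:
    anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
```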

View File

@@ -31,27 +31,23 @@ The fastest way to create a custom GitHub App is using our pre-configured manife
**🚀 [Download the Quick Setup Tool](./create-app.html)** (Right-click → "Save Link As" or "Download Linked File")
After downloading, open `create-app.html` in your web browser:
- **For Personal Accounts:** Click the "Create App for Personal Account" button
- **For Organizations:** Enter your organization name and click "Create App for Organization"
The tool will automatically configure all required permissions and submit the manifest.
Alternatively, you can use the manifest file directly:
- Use the [`github-app-manifest.json`](../github-app-manifest.json) file from this repository
- Visit https://github.com/settings/apps/new (for personal) or your organization's app settings
- Look for the "Create from manifest" option and paste the JSON content
2. **Complete the creation flow:**
- GitHub will show you a preview of the app configuration
- Confirm the app name (you can customize it)
- Click "Create GitHub App"
- The app will be created with all required permissions automatically configured
3. **Generate and download a private key:**
- After creating the app, you'll be redirected to the app settings
- Scroll down to "Private keys"
- Click "Generate a private key"
@@ -64,7 +60,6 @@ The fastest way to create a custom GitHub App is using our pre-configured manife
If you prefer to configure the app manually or need custom permissions:
1. **Create a new GitHub App:**
- Go to https://github.com/settings/apps (for personal apps) or your organization's settings
- Click "New GitHub App"
- Configure the app with these minimum permissions:
@@ -77,19 +72,16 @@ If you prefer to configure the app manually or need custom permissions:
- Create the app
2. **Generate and download a private key:**
- After creating the app, scroll down to "Private keys"
- Click "Generate a private key"
- Download the `.pem` file (keep this secure!)
3. **Install the app on your repository:**
- Go to the app's settings page
- Click "Install App"
- Select the repositories where you want to use Claude
4. **Add the app credentials to your repository secrets:**
- Go to your repository's Settings → Secrets and variables → Actions
- Add these secrets:
- `APP_ID`: Your GitHub App's ID (found in the app settings)
@@ -138,7 +130,6 @@ For more information on creating GitHub Apps, see the [GitHub documentation](htt
To securely use your Anthropic API key:
1. Add your API key as a repository secret:
- Go to your repository's Settings
- Navigate to "Secrets and variables" → "Actions"
- Click "New repository secret"

View File

@@ -1,113 +0,0 @@
name: Auto-Retry Flaky Tests
# This example demonstrates using structured outputs to detect flaky test failures
# and automatically retry them, reducing noise from intermittent failures.
#
# Use case: When CI fails, automatically determine if it's likely flaky and retry if so.
on:
workflow_run:
workflows: ["CI"]
types: [completed]
permissions:
contents: read
actions: write
jobs:
detect-flaky:
runs-on: ubuntu-latest
if: ${{ github.event.workflow_run.conclusion == 'failure' }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Detect flaky test failures
id: detect
uses: anthropics/claude-code-action@main
with:
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
prompt: |
The CI workflow failed: ${{ github.event.workflow_run.html_url }}
Check the logs: gh run view ${{ github.event.workflow_run.id }} --log-failed
Determine if this looks like a flaky test failure by checking for:
- Timeout errors
- Race conditions
- Network errors
- "Expected X but got Y" intermittent failures
- Tests that passed in previous commits
Return:
- is_flaky: true if likely flaky, false if real bug
- confidence: number 0-1 indicating confidence level
- summary: brief one-sentence explanation
json_schema: |
{
"type": "object",
"properties": {
"is_flaky": {
"type": "boolean",
"description": "Whether this appears to be a flaky test failure"
},
"confidence": {
"type": "number",
"minimum": 0,
"maximum": 1,
"description": "Confidence level in the determination"
},
"summary": {
"type": "string",
"description": "One-sentence explanation of the failure"
}
},
"required": ["is_flaky", "confidence", "summary"]
}
# Auto-retry only if flaky AND high confidence (>= 0.7)
- name: Retry flaky tests
if: |
steps.detect.outputs.is_flaky == 'true' &&
steps.detect.outputs.confidence >= '0.7'
env:
GH_TOKEN: ${{ github.token }}
run: |
echo "🔄 Flaky test detected (confidence: ${{ steps.detect.outputs.confidence }})"
echo "Summary: ${{ steps.detect.outputs.summary }}"
echo ""
echo "Triggering automatic retry..."
gh workflow run "${{ github.event.workflow_run.name }}" \
--ref "${{ github.event.workflow_run.head_branch }}"
# Low confidence flaky detection - skip retry
- name: Low confidence detection
if: |
steps.detect.outputs.is_flaky == 'true' &&
steps.detect.outputs.confidence < '0.7'
run: |
echo "⚠️ Possible flaky test but confidence too low (${{ steps.detect.outputs.confidence }})"
echo "Not retrying automatically - manual review recommended"
# Comment on PR if this was a PR build
- name: Comment on PR
if: github.event.workflow_run.event == 'pull_request'
env:
GH_TOKEN: ${{ github.token }}
run: |
pr_number=$(gh pr list --head "${{ github.event.workflow_run.head_branch }}" --json number --jq '.[0].number')
if [ -n "$pr_number" ]; then
gh pr comment "$pr_number" --body "$(cat <<EOF
## ${{ steps.detect.outputs.is_flaky == 'true' && '🔄 Flaky Test Detected' || '❌ Test Failure' }}
**Analysis**: ${{ steps.detect.outputs.summary }}
**Confidence**: ${{ steps.detect.outputs.confidence }}
${{ steps.detect.outputs.is_flaky == 'true' && '✅ Automatically retrying the workflow' || '⚠️ This appears to be a real bug - manual intervention needed' }}
[View workflow run](${{ github.event.workflow_run.html_url }})
EOF
)"
fi

View File

@@ -149,19 +149,6 @@ export const agentMode: Mode = {
claudeArgs = `--mcp-config '${escapedOurConfig}'`;
}
// Add JSON schema if provided
const jsonSchema = process.env.JSON_SCHEMA || "";
if (jsonSchema) {
// Validate it's valid JSON
try {
JSON.parse(jsonSchema);
} catch (e) {
throw new Error(`Invalid JSON schema provided: ${e}`);
}
const escapedSchema = jsonSchema.replace(/'/g, "'\\''");
claudeArgs += ` --json-schema '${escapedSchema}'`;
}
// Append user's claude_args (which may have more --mcp-config flags)
claudeArgs = `${claudeArgs} ${userClaudeArgs}`.trim();

View File

@@ -177,19 +177,6 @@ export const tagMode: Mode = {
// Add required tools for tag mode
claudeArgs += ` --allowedTools "${tagModeTools.join(",")}"`;
// Add JSON schema if provided
const jsonSchema = process.env.JSON_SCHEMA || "";
if (jsonSchema) {
// Validate it's valid JSON
try {
JSON.parse(jsonSchema);
} catch (e) {
throw new Error(`Invalid JSON schema provided: ${e}`);
}
const escapedSchema = jsonSchema.replace(/'/g, "'\\''");
claudeArgs += ` --json-schema '${escapedSchema}'`;
}
// Append user's claude_args (which may have more --mcp-config flags)
if (userClaudeArgs) {
claudeArgs += ` ${userClaudeArgs}`;