-
Notifications
You must be signed in to change notification settings - Fork 3.2k
Expand file tree
/
Copy pathshannon
More file actions
executable file
·375 lines (328 loc) · 13.4 KB
/
shannon
File metadata and controls
executable file
·375 lines (328 loc) · 13.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
#!/bin/bash
# Shannon CLI - AI Penetration Testing Framework
set -e

# On MSYS (Git Bash), the runtime rewrites Unix-style paths (e.g. /repos/my-repo)
# into Windows paths before handing them to docker; disable that conversion.
if [[ "$OSTYPE" == msys* ]]; then
    export MSYS_NO_PATHCONV=1
fi

# Detect Podman vs Docker and pick compose files accordingly.
# Podman doesn't support host-gateway, so the Docker-only override (which adds
# extra_hosts for Linux localhost access) is included only for actual Docker.
COMPOSE_BASE="docker-compose.yml"
if command -v podman &>/dev/null; then
    # Podman present (native or Docker Desktop shim) - base config only
    COMPOSE_OVERRIDE=""
else
    COMPOSE_OVERRIDE="-f docker-compose.docker.yml"
fi
COMPOSE_FILE="$COMPOSE_BASE"

# Auto-export every variable from .env (if present) into the environment
if [ -f .env ]; then
    set -a
    source .env
    set +a
fi
# Print the banner and CLI usage text to stdout.
# The heredoc delimiter is quoted ('EOF') so the ASCII art and any $-characters
# are emitted literally, with no parameter or command expansion.
show_help() {
    cat << 'EOF'
███████╗██╗ ██╗ █████╗ ███╗ ██╗███╗ ██╗ ██████╗ ███╗ ██╗
██╔════╝██║ ██║██╔══██╗████╗ ██║████╗ ██║██╔═══██╗████╗ ██║
███████╗███████║███████║██╔██╗ ██║██╔██╗ ██║██║ ██║██╔██╗ ██║
╚════██║██╔══██║██╔══██║██║╚██╗██║██║╚██╗██║██║ ██║██║╚██╗██║
███████║██║ ██║██║ ██║██║ ╚████║██║ ╚████║╚██████╔╝██║ ╚████║
╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝ ╚═══╝ ╚═════╝ ╚═╝ ╚═══╝
AI Penetration Testing Framework
Usage:
./shannon start URL=<url> REPO=<name> Start a pentest workflow
./shannon workspaces List all workspaces
./shannon logs ID=<workflow-id> Tail logs for a specific workflow
./shannon stop Stop all containers
./shannon help Show this help message
Options for 'start':
REPO=<name> Folder name under ./repos/ (e.g. REPO=repo-name)
CONFIG=<path> Configuration file (YAML)
OUTPUT=<path> Output directory for reports (default: ./audit-logs/)
WORKSPACE=<name> Named workspace (auto-resumes if exists, creates if new)
PIPELINE_TESTING=true Use minimal prompts for fast testing
ROUTER=true Route requests through claude-code-router (multi-model support)
Options for 'stop':
CLEAN=true Remove all data including volumes
Examples:
./shannon start URL=https://example.com REPO=repo-name
./shannon start URL=https://example.com REPO=repo-name WORKSPACE=q1-audit
./shannon start URL=https://example.com REPO=repo-name CONFIG=./config.yaml
./shannon start URL=https://example.com REPO=repo-name OUTPUT=./my-reports
./shannon workspaces
./shannon logs ID=example.com_shannon-1234567890
./shannon stop CLEAN=true
Monitor workflows at http://localhost:8233
EOF
}
# Parse KEY=value arguments into (global) variables.
# Recognized keys: URL REPO CONFIG OUTPUT ID CLEAN PIPELINE_TESTING REBUILD
# ROUTER WORKSPACE. Anything else is silently ignored; values may themselves
# contain '=' characters (only the leading "KEY=" prefix is stripped).
parse_args() {
    local kv
    for kv in "$@"; do
        case "$kv" in
            URL=*)              URL=${kv#URL=} ;;
            REPO=*)             REPO=${kv#REPO=} ;;
            CONFIG=*)           CONFIG=${kv#CONFIG=} ;;
            OUTPUT=*)           OUTPUT=${kv#OUTPUT=} ;;
            ID=*)               ID=${kv#ID=} ;;
            CLEAN=*)            CLEAN=${kv#CLEAN=} ;;
            PIPELINE_TESTING=*) PIPELINE_TESTING=${kv#PIPELINE_TESTING=} ;;
            REBUILD=*)          REBUILD=${kv#REBUILD=} ;;
            ROUTER=*)           ROUTER=${kv#ROUTER=} ;;
            WORKSPACE=*)        WORKSPACE=${kv#WORKSPACE=} ;;
        esac
    done
}
# Check if Temporal is running and healthy.
# Runs the Temporal CLI health check inside the temporal container and
# returns 0 only when its output contains "SERVING".
# NOTE: $COMPOSE_OVERRIDE is intentionally left unquoted so an empty value
# vanishes instead of becoming an empty argument to docker.
is_temporal_ready() {
    docker compose -f "$COMPOSE_FILE" $COMPOSE_OVERRIDE exec -T temporal \
        temporal operator cluster health --address localhost:7233 2>/dev/null | grep -q "SERVING"
}
# Ensure containers are running with correct mounts.
# Fast path: if Temporal already answers the health check, do nothing.
# Otherwise bring the stack up (optionally rebuilding the worker image when
# REBUILD=true) and poll until Temporal is healthy or ~60s have elapsed.
ensure_containers() {
    # If custom OUTPUT_DIR is set, always refresh worker to ensure correct volume mount
    # Docker compose will only recreate if the mount actually changed
    if [ -n "$OUTPUT_DIR" ]; then
        echo "Ensuring worker has correct output mount..."
        # Best-effort (|| true): the full `up -d --build` below is the fallback
        docker compose -f "$COMPOSE_FILE" $COMPOSE_OVERRIDE up -d worker 2>/dev/null || true
    fi
    # Quick check: if Temporal is already healthy, we're good
    if is_temporal_ready; then
        return 0
    fi
    # Need to start containers
    echo "Starting Shannon containers..."
    if [ "$REBUILD" = "true" ]; then
        # Force rebuild without cache (use when code changes aren't being picked up)
        echo "Rebuilding with --no-cache..."
        docker compose -f "$COMPOSE_FILE" $COMPOSE_OVERRIDE build --no-cache worker
    fi
    docker compose -f "$COMPOSE_FILE" $COMPOSE_OVERRIDE up -d --build
    # Wait for Temporal to be ready: poll every 2s, up to 30 attempts.
    # The i==30 branch bounds the wait and makes the loop's only exits
    # "ready" (return 0) or "timeout" (exit 1).
    echo "Waiting for Temporal to be ready..."
    for i in $(seq 1 30); do
        if is_temporal_ready; then
            echo "Temporal is ready!"
            return 0
        fi
        if [ "$i" -eq 30 ]; then
            echo "Timeout waiting for Temporal"
            exit 1
        fi
        sleep 2
    done
}
# Submit a pentest workflow: ./shannon start URL=<url> REPO=<name> [...]
# Validates credentials for the selected provider (Anthropic key/OAuth token,
# AWS Bedrock, GCP Vertex, or router mode), resolves the target repo to its
# in-container path, prepares writable output directories, ensures the stack
# is up, then invokes the Temporal client inside the worker container.
# FIX: optional client arguments are now collected in a Bash array instead of
# a whitespace-joined string, so CONFIG/OUTPUT/WORKSPACE values containing
# spaces survive as single arguments instead of being word-split.
cmd_start() {
    parse_args "$@"

    # Validate required vars
    if [ -z "$URL" ] || [ -z "$REPO" ]; then
        echo "ERROR: URL and REPO are required"
        echo "Usage: ./shannon start URL=<url> REPO=<name>"
        exit 1
    fi

    # Check for API key (Bedrock and router modes can bypass this)
    if [ -z "$ANTHROPIC_API_KEY" ] && [ -z "$CLAUDE_CODE_OAUTH_TOKEN" ]; then
        if [ "$CLAUDE_CODE_USE_BEDROCK" = "1" ]; then
            # Bedrock mode — validate required AWS credentials
            MISSING=""
            [ -z "$AWS_REGION" ] && MISSING="$MISSING AWS_REGION"
            [ -z "$AWS_BEARER_TOKEN_BEDROCK" ] && MISSING="$MISSING AWS_BEARER_TOKEN_BEDROCK"
            [ -z "$ANTHROPIC_SMALL_MODEL" ] && MISSING="$MISSING ANTHROPIC_SMALL_MODEL"
            [ -z "$ANTHROPIC_MEDIUM_MODEL" ] && MISSING="$MISSING ANTHROPIC_MEDIUM_MODEL"
            [ -z "$ANTHROPIC_LARGE_MODEL" ] && MISSING="$MISSING ANTHROPIC_LARGE_MODEL"
            if [ -n "$MISSING" ]; then
                echo "ERROR: Bedrock mode requires the following env vars in .env:$MISSING"
                exit 1
            fi
        elif [ "$CLAUDE_CODE_USE_VERTEX" = "1" ]; then
            # Vertex AI mode — validate required GCP credentials
            MISSING=""
            [ -z "$CLOUD_ML_REGION" ] && MISSING="$MISSING CLOUD_ML_REGION"
            [ -z "$ANTHROPIC_VERTEX_PROJECT_ID" ] && MISSING="$MISSING ANTHROPIC_VERTEX_PROJECT_ID"
            [ -z "$ANTHROPIC_SMALL_MODEL" ] && MISSING="$MISSING ANTHROPIC_SMALL_MODEL"
            [ -z "$ANTHROPIC_MEDIUM_MODEL" ] && MISSING="$MISSING ANTHROPIC_MEDIUM_MODEL"
            [ -z "$ANTHROPIC_LARGE_MODEL" ] && MISSING="$MISSING ANTHROPIC_LARGE_MODEL"
            if [ -n "$MISSING" ]; then
                echo "ERROR: Vertex AI mode requires the following env vars in .env:$MISSING"
                exit 1
            fi
            # Validate service account key file (must be inside ./credentials/ for Docker mount)
            if [ -z "$GOOGLE_APPLICATION_CREDENTIALS" ]; then
                echo "ERROR: Vertex AI mode requires GOOGLE_APPLICATION_CREDENTIALS in .env"
                echo " Place your service account key in ./credentials/ and set:"
                echo " GOOGLE_APPLICATION_CREDENTIALS=./credentials/gcp-sa-key.json"
                exit 1
            fi
            if [ ! -f "$GOOGLE_APPLICATION_CREDENTIALS" ]; then
                echo "ERROR: Service account key file not found: $GOOGLE_APPLICATION_CREDENTIALS"
                echo " Download a key from the GCP Console (IAM > Service Accounts > Keys)"
                exit 1
            fi
        elif [ "$ROUTER" = "true" ] && { [ -n "$OPENAI_API_KEY" ] || [ -n "$OPENROUTER_API_KEY" ]; }; then
            # Router mode with alternative provider - set a placeholder for SDK init
            export ANTHROPIC_API_KEY="router-mode"
        else
            echo "ERROR: Set ANTHROPIC_API_KEY or CLAUDE_CODE_OAUTH_TOKEN in .env"
            echo " (or use CLAUDE_CODE_USE_BEDROCK=1 for AWS Bedrock,"
            echo " CLAUDE_CODE_USE_VERTEX=1 for Google Vertex AI,"
            echo " or ROUTER=true with OPENAI_API_KEY or OPENROUTER_API_KEY)"
            exit 1
        fi
    fi

    # Determine container path for REPO
    # - If REPO is already a container path (/benchmarks/*, /repos/*), use as-is
    # - Otherwise, treat as a folder name under ./repos/ (mounted at /repos in container)
    case "$REPO" in
        /benchmarks/*|/repos/*)
            CONTAINER_REPO="$REPO"
            ;;
        *)
            if [ ! -d "./repos/$REPO" ]; then
                echo "ERROR: Repository not found at ./repos/$REPO"
                echo ""
                echo "Place your target repository under the ./repos/ directory"
                exit 1
            fi
            CONTAINER_REPO="/repos/$REPO"
            ;;
    esac

    # Handle custom OUTPUT directory
    # Export OUTPUT_DIR for docker-compose volume mount BEFORE starting containers
    if [ -n "$OUTPUT" ]; then
        # Create output directory with write permissions for container user (UID 1001)
        mkdir -p "$OUTPUT"
        chmod 777 "$OUTPUT"
        export OUTPUT_DIR="$OUTPUT"
    fi

    # Handle ROUTER flag - start claude-code-router for multi-model support
    if [ "$ROUTER" = "true" ]; then
        # Check if router is already running
        if docker compose -f "$COMPOSE_FILE" $COMPOSE_OVERRIDE --profile router ps router 2>/dev/null | grep -q "running"; then
            echo "Router already running, skipping startup..."
        else
            echo "Starting claude-code-router..."
            # Check for provider API keys
            if [ -z "$OPENAI_API_KEY" ] && [ -z "$OPENROUTER_API_KEY" ]; then
                echo "WARNING: No provider API key set (OPENAI_API_KEY or OPENROUTER_API_KEY). Router may not work."
            fi
            # Start router with profile
            docker compose -f "$COMPOSE_FILE" $COMPOSE_OVERRIDE --profile router up -d router
            # Give router a few seconds to start (health check disabled for now - TODO: debug later)
            echo "Waiting for router to start..."
            sleep 5
        fi
        # Set ANTHROPIC_BASE_URL to route through router
        export ANTHROPIC_BASE_URL="http://router:3456"
        # Set auth token to match router's APIKEY
        export ANTHROPIC_AUTH_TOKEN="shannon-router-key"
    fi

    # Ensure audit-logs directory exists with write permissions for container user (UID 1001)
    mkdir -p ./audit-logs ./credentials
    chmod 777 ./audit-logs
    # Ensure repo deliverables directory is writable by container user (UID 1001)
    if [ -d "./repos/$REPO" ]; then
        mkdir -p "./repos/$REPO/deliverables"
        chmod 777 "./repos/$REPO/deliverables"
    fi

    # Ensure containers are running (starts them if needed)
    ensure_containers

    # Build optional args as an array so values containing spaces are passed
    # through as single arguments (not word-split like a flat string would be)
    ARGS=()
    [ -n "$CONFIG" ] && ARGS+=(--config "$CONFIG")
    # Pass container path for output (where OUTPUT_DIR is mounted)
    # Also pass display path so client can show the host path to user
    if [ -n "$OUTPUT" ]; then
        ARGS+=(--output /app/output --display-output "$OUTPUT")
    fi
    [ "$PIPELINE_TESTING" = "true" ] && ARGS+=(--pipeline-testing)
    [ -n "$WORKSPACE" ] && ARGS+=(--workspace "$WORKSPACE")

    # Run the client to submit workflow
    docker compose -f "$COMPOSE_FILE" $COMPOSE_OVERRIDE exec -T worker \
        node dist/temporal/client.js "$URL" "$CONTAINER_REPO" "${ARGS[@]}"
}
# Tail the workflow log for a given workflow ID: ./shannon logs ID=<id>
cmd_logs() {
    parse_args "$@"

    if [ -z "$ID" ]; then
        echo "ERROR: ID is required"
        echo "Usage: ./shannon logs ID=<workflow-id>"
        exit 1
    fi

    # Locate the log file by trying candidates in order:
    #   1. the exact ID under ./audit-logs/
    #   2. the base workspace of a resume ID (strip "_resume_*")
    #   3. the base workspace of a named-workspace ID (strip "_shannon-*")
    # Re-checking an unchanged candidate is harmless — that path already missed.
    WORKFLOW_LOG=""
    for CANDIDATE in "$ID" "${ID%%_resume_*}" "${ID%%_shannon-*}"; do
        if [ -f "./audit-logs/${CANDIDATE}/workflow.log" ]; then
            WORKFLOW_LOG="./audit-logs/${CANDIDATE}/workflow.log"
            break
        fi
    done

    # Last resort: search nearby directories (handles custom OUTPUT paths)
    if [ -z "$WORKFLOW_LOG" ]; then
        FOUND=$(find . -maxdepth 3 -path "*/${ID}/workflow.log" -type f 2>/dev/null | head -1)
        if [ -n "$FOUND" ]; then
            WORKFLOW_LOG="$FOUND"
        fi
    fi

    if [ -n "$WORKFLOW_LOG" ]; then
        echo "Tailing workflow log: $WORKFLOW_LOG"
        tail -f "$WORKFLOW_LOG"
    else
        echo "ERROR: Workflow log not found for ID: $ID"
        echo ""
        echo "Possible causes:"
        echo " - Workflow hasn't started yet"
        echo " - Workflow ID is incorrect"
        echo ""
        echo "Check the Temporal Web UI at http://localhost:8233 for workflow details"
        exit 1
    fi
}
# List all workspaces by running the workspace tool inside the worker container.
cmd_workspaces() {
    # The listing executes via node in the worker, so the stack must be up first
    ensure_containers
    docker compose -f "$COMPOSE_FILE" $COMPOSE_OVERRIDE exec -T worker node dist/temporal/workspaces.js
}
# Stop all containers: ./shannon stop [CLEAN=true]
# CLEAN=true additionally removes named volumes (wipes all persisted data).
cmd_stop() {
    parse_args "$@"
    local down_args=(--profile router down)
    [ "$CLEAN" = "true" ] && down_args+=(-v)
    docker compose -f "$COMPOSE_FILE" $COMPOSE_OVERRIDE "${down_args[@]}"
}
# Main command dispatch
# Defaults to 'help' when no command is given.
# FIX: previously the last arm was `help|--help|-h|*)`, so an unknown or
# misspelled command silently printed help and exited 0; now unknown commands
# report an error to stderr and exit non-zero.
case "${1:-help}" in
    start)
        shift
        cmd_start "$@"
        ;;
    logs)
        shift
        cmd_logs "$@"
        ;;
    workspaces)
        shift
        cmd_workspaces
        ;;
    stop)
        shift
        cmd_stop "$@"
        ;;
    help|--help|-h)
        show_help
        ;;
    *)
        echo "ERROR: Unknown command: $1" >&2
        show_help
        exit 1
        ;;
esac