|
| 1 | +#!/bin/bash |
| 2 | +set -euo pipefail |
| 3 | + |
| 4 | +# This library processes Bazel Event Protocol output and creates Buildkite annotations |
| 5 | + |
| 6 | +# Function to get random quote for annotation footer |
| 7 | +get_random_quote() { |
| 8 | + local quotes=( |
| 9 | + "\"The best error message is the one that never shows up.\" - Thomas Fuchs" |
| 10 | + "\"First, solve the problem. Then, write the code.\" - John Johnson" |
| 11 | + "\"Make it work, make it right, make it fast.\" - Kent Beck" |
| 12 | + "\"Programming isn't about what you know; it's about what you can figure out.\" - Chris Pine" |
| 13 | + "\"The only way to learn a new programming language is by writing programs in it.\" - Dennis Ritchie" |
| 14 | + "\"Testing can only prove the presence of bugs, not their absence.\" - Edsger W. Dijkstra" |
| 15 | + "\"It's not a bug – it's an undocumented feature.\" - Anonymous" |
| 16 | + "\"Good code is its own best documentation.\" - Steve McConnell" |
| 17 | + "\"Any fool can write code that a computer can understand. Good programmers write code that humans can understand.\" - Martin Fowler" |
| 18 | + "\"The sooner you start to code, the longer the program will take.\" - Roy Carlson" |
| 19 | + "\"Optimism is an occupational hazard of programming; feedback is the treatment.\" - Kent Beck" |
| 20 | + "\"Simplicity is the soul of efficiency.\" - Austin Freeman" |
| 21 | + ) |
| 22 | + |
| 23 | + echo "${quotes[RANDOM % ${#quotes[@]}]}" |
| 24 | +} |
| 25 | + |
| 26 | +# Function to create a Buildkite annotation with the given style and content |
| 27 | +create_annotation() { |
| 28 | + local style="$1" |
| 29 | + local content="$2" |
| 30 | + local context_id="bazel-bep-results" |
| 31 | + |
| 32 | + # Check if we're running in Buildkite |
| 33 | + if [ -n "${BUILDKITE:-}" ] && command -v buildkite-agent >/dev/null 2>&1; then |
| 34 | + echo "Creating Buildkite annotation..." |
| 35 | + buildkite-agent annotate "$content" --style "$style" --context "$context_id" |
| 36 | + else |
| 37 | + # We're not in Buildkite, just display the content on stdout |
| 38 | + echo "Not running in Buildkite. Would create annotation with style '$style':" |
| 39 | + echo "$content" |
| 40 | + fi |
| 41 | +} |
| 42 | + |
| 43 | +# Process the BEP file to extract useful information |
| 44 | +process_bep() { |
| 45 | + local BEP_FILE="$1" |
| 46 | + local BAZEL_COMMAND="${2:-}" |
| 47 | + |
| 48 | + # Count of target statuses |
| 49 | + local success_count=0 |
| 50 | + local fail_count=0 |
| 51 | + local skip_count=0 |
| 52 | + local cached_count=0 |
| 53 | + |
| 54 | + # Arrays for successful targets |
| 55 | + declare -a successful_targets=() |
| 56 | + |
| 57 | + # Arrays for test performance tracking |
| 58 | + declare -a slowest_tests=() |
| 59 | + declare -a slowest_times=() |
| 60 | + local slowest_count=0 |
| 61 | + |
| 62 | + # Collect failures for detailed annotation |
| 63 | + local failure_details="" |
| 64 | + |
| 65 | + # Performance data |
| 66 | + local build_start_time=0 |
| 67 | + local build_end_time=0 |
| 68 | + |
| 69 | + echo "Processing BEP file: $BEP_FILE" |
| 70 | + |
| 71 | + # Parse the JSON stream |
| 72 | + while read -r line; do |
| 73 | + # Skip empty lines or invalid JSON |
| 74 | + if [ -z "$line" ] || ! echo "$line" | jq -e '.' > /dev/null 2>&1; then |
| 75 | + continue |
| 76 | + fi |
| 77 | + |
| 78 | + # Extract build start and finish times |
| 79 | + if echo "$line" | jq -e '.id.buildStarted != null' > /dev/null 2>&1; then |
| 80 | + build_start_time=$(echo "$line" | jq -r '.buildStarted.startTimeMillis // 0') |
| 81 | + fi |
| 82 | + |
| 83 | + if echo "$line" | jq -e '.id.buildFinished != null' > /dev/null 2>&1; then |
| 84 | + build_end_time=$(echo "$line" | jq -r '.buildFinished.finishTimeMillis // 0') |
| 85 | + fi |
| 86 | + |
| 87 | + # Extract target information |
| 88 | + if echo "$line" | jq -e '.id.targetCompleted != null' > /dev/null 2>&1; then |
| 89 | + local label=$(echo "$line" | jq -r '.id.targetCompleted.label // "unknown"') |
| 90 | + local success=$(echo "$line" | jq -r '.completed.success // "false"') |
| 91 | + local is_cached=false |
| 92 | + |
| 93 | + # Check if the target was cached |
| 94 | + if echo "$line" | jq -e '.completed.outputGroup != null and (.completed.outputGroup[] | select(.name == "bazel-out") | .fileSets[] | select(.id != null))' > /dev/null 2>&1; then |
| 95 | + # If it has output files but doesn't have actionExecuted, it's likely cached |
| 96 | + if ! echo "$line" | jq -e '.completed.actionExecuted != null' > /dev/null 2>&1; then |
| 97 | + is_cached=true |
| 98 | + ((cached_count++)) |
| 99 | + fi |
| 100 | + fi |
| 101 | + |
| 102 | + if [ "$success" = "true" ]; then |
| 103 | + ((success_count++)) |
| 104 | + successful_targets+=("$label") |
| 105 | + else |
| 106 | + ((fail_count++)) |
| 107 | + # Get failure details with proper highlighting |
| 108 | + local errors=$(echo "$line" | jq -r '.completed.failureDetail.message // "Unknown error"') |
| 109 | + failure_details+="### ❌ Failed: $label\n\`\`\`diff\n- ERROR: $errors\n\`\`\`\n\n" |
| 110 | + |
| 111 | + # Check for missing dependency or deleted package errors |
| 112 | + if echo "$errors" | grep -q "no such target\|no such package\|Package is considered deleted"; then |
| 113 | + # Extract relevant part of the error message |
| 114 | + local error_detail=$(echo "$errors" | grep -o "'[^']*'\|Package [^:]*" | head -1) |
| 115 | + failure_details+="**🔍 Possible Fix:** $error_detail might be missing, renamed, or deleted. Add it to --deleted_packages flag if it's intentionally deleted.\n\n" |
| 116 | + fi |
| 117 | + fi |
| 118 | + fi |
| 119 | + |
| 120 | + # Also check for configured targets |
| 121 | + if echo "$line" | jq -e '.id.configured != null' > /dev/null 2>&1; then |
| 122 | + local label=$(echo "$line" | jq -r '.id.configured.targetLabel // "unknown"') |
| 123 | + if [[ ! " ${successful_targets[*]} " =~ " ${label} " ]]; then |
| 124 | + # Only add if it's not already counted |
| 125 | + ((success_count++)) |
| 126 | + successful_targets+=("$label") |
| 127 | + fi |
| 128 | + fi |
| 129 | + |
| 130 | + # Extract skipped targets |
| 131 | + if echo "$line" | jq -e '.id.targetSkipped != null' > /dev/null 2>&1; then |
| 132 | + local label=$(echo "$line" | jq -r '.id.targetSkipped.label // "unknown"') |
| 133 | + ((skip_count++)) |
| 134 | + fi |
| 135 | + |
| 136 | + # Extract test results if available |
| 137 | + if echo "$line" | jq -e '.id.testResult != null' > /dev/null 2>&1; then |
| 138 | + local test_label=$(echo "$line" | jq -r '.id.testResult.label // "unknown"') |
| 139 | + local test_status=$(echo "$line" | jq -r '.testResult.status // "UNKNOWN"') |
| 140 | + local test_time=$(echo "$line" | jq -r '.testResult.testActionDurationMillis // 0') |
| 141 | + |
| 142 | + # Default to 1.0s if no duration available |
| 143 | + if [ "$test_time" -eq 0 ]; then |
| 144 | + test_time=1000 |
| 145 | + fi |
| 146 | + |
| 147 | + test_time=$(echo "scale=2; $test_time/1000" | bc) |
| 148 | + |
| 149 | + # Add this test result to counts based on status |
| 150 | + if [ "$test_status" = "PASSED" ]; then |
| 151 | + ((success_count++)) |
| 152 | + successful_targets+=("$test_label (test)") |
| 153 | + elif [ "$test_status" = "FLAKY" ]; then |
| 154 | + # Flaky tests are considered successful but with warning |
| 155 | + ((success_count++)) |
| 156 | + successful_targets+=("$test_label (⚠️ flaky)") |
| 157 | + else |
| 158 | + ((fail_count++)) |
| 159 | + fi |
| 160 | + |
| 161 | + # Always include test duration in the performance tracking |
| 162 | + slowest_tests[$slowest_count]="$test_label" |
| 163 | + slowest_times[$slowest_count]="$test_time" |
| 164 | + ((slowest_count++)) |
| 165 | + |
| 166 | + if [ "$test_status" != "PASSED" ]; then |
| 167 | + # Get test failure details if available |
| 168 | + local test_errors="No detailed logs available" |
| 169 | + if echo "$line" | jq -e 'has("testResult") and .testResult | has("testActionOutput") and .testResult.testActionOutput != null' > /dev/null 2>&1; then |
| 170 | + test_errors=$(echo "$line" | jq -r '.testResult.testActionOutput | if . == null then "No logs available" else (.[] | .name + ": " + .uri) end' 2>/dev/null || echo "No detailed logs available") |
| 171 | + fi |
| 172 | + |
| 173 | + local status_emoji="❌" |
| 174 | + if [ "$test_status" = "FLAKY" ]; then |
| 175 | + status_emoji="⚠️" |
| 176 | + elif [ "$test_status" = "TIMEOUT" ]; then |
| 177 | + status_emoji="⏱️" |
| 178 | + fi |
| 179 | + |
| 180 | + failure_details+="### $status_emoji Failed Test: $test_label ($test_status in ${test_time}s)\n\`\`\`diff\n- $test_errors\n\`\`\`\n\n" |
| 181 | + |
| 182 | + # Add stack trace if available |
| 183 | + if echo "$line" | jq -e 'has("testResult") and .testResult | has("testActionOutput") and .testResult.testActionOutput != null' > /dev/null 2>&1; then |
| 184 | + if echo "$line" | jq -e '.testResult.testActionOutput[] | select(.name == "test.log") | .uri' > /dev/null 2>&1; then |
| 185 | + local log_uri=$(echo "$line" | jq -r '.testResult.testActionOutput[] | select(.name == "test.log") | .uri') |
| 186 | + failure_details+="[View Full Test Log]($log_uri)\n\n" |
| 187 | + fi |
| 188 | + fi |
| 189 | + fi |
| 190 | + fi |
| 191 | + done < "$BEP_FILE" |
| 192 | + |
| 193 | + # Calculate total build time |
| 194 | + local total_build_time=0 |
| 195 | + if [ $build_end_time -gt 0 ] && [ $build_start_time -gt 0 ]; then |
| 196 | + total_build_time=$(( (build_end_time - build_start_time) / 1000 )) |
| 197 | + fi |
| 198 | + |
| 199 | + # Create the summary annotation |
| 200 | + local style="info" |
| 201 | + if [ "$fail_count" -gt 0 ]; then |
| 202 | + style="error" |
| 203 | + fi |
| 204 | + |
| 205 | + # Create a status indicator emoji for the summary line |
| 206 | + local status_emoji="✅" |
| 207 | + if [ "$fail_count" -gt 0 ]; then |
| 208 | + status_emoji="❌" |
| 209 | + elif [ "$skip_count" -gt 0 ] && [ "$success_count" -eq 0 ]; then |
| 210 | + status_emoji="⏭️" |
| 211 | + fi |
| 212 | + |
| 213 | + # Clean header for the output |
| 214 | + local summary="## 🚀 Bazel Results\n\n" |
| 215 | + |
| 216 | + # Add command used |
| 217 | + if [ -n "$BAZEL_COMMAND" ]; then |
| 218 | + summary+="**🏃 Command:** \`$BAZEL_COMMAND\`\n\n" |
| 219 | + fi |
| 220 | + |
| 221 | + if [ $total_build_time -gt 0 ]; then |
| 222 | + summary+="**⏱️ Duration:** ${total_build_time}s | " |
| 223 | + fi |
| 224 | + |
| 225 | + # Build status summary with emoji and counts |
| 226 | + summary+="**Status:** " |
| 227 | + summary+="✅ $success_count " |
| 228 | + if [ "$cached_count" -gt 0 ]; then |
| 229 | + summary+="| 🔄 $cached_count cached " |
| 230 | + fi |
| 231 | + if [ "$fail_count" -gt 0 ]; then |
| 232 | + summary+="| ❌ $fail_count failed " |
| 233 | + fi |
| 234 | + if [ "$skip_count" -gt 0 ]; then |
| 235 | + summary+="| ⏭️ $skip_count skipped " |
| 236 | + fi |
| 237 | + summary+="\n\n" |
| 238 | + |
| 239 | + # Add performance section with test timings in a collapsible section |
| 240 | + if [ ${#slowest_tests[@]} -gt 0 ]; then |
| 241 | + summary+="\n<details>\n<summary><strong>⏱️ Test Durations</strong> (${#slowest_tests[@]} tests)</summary>\n\n" |
| 242 | + |
| 243 | + # First sort the tests by duration (longest first) |
| 244 | + local sorted_indexes=() |
| 245 | + local sorted_times=() |
| 246 | + local sorted_tests=() |
| 247 | + |
| 248 | + # Create a temporary file to sort the data |
| 249 | + local tmp_file=$(mktemp) |
| 250 | + |
| 251 | + # Populate the temp file with "time test_name" format for sorting |
| 252 | + for i in "${!slowest_tests[@]}"; do |
| 253 | + echo "${slowest_times[$i]} ${slowest_tests[$i]}" >> "$tmp_file" |
| 254 | + done |
| 255 | + |
| 256 | + # Sort by the first field (time) in descending order |
| 257 | + while read -r time test; do |
| 258 | + sorted_times+=("$time") |
| 259 | + sorted_tests+=("$test") |
| 260 | + done < <(sort -rn "$tmp_file") |
| 261 | + |
| 262 | + # Clean up |
| 263 | + rm -f "$tmp_file" |
| 264 | + |
| 265 | + # Output the sorted results |
| 266 | + for i in "${!sorted_tests[@]}"; do |
| 267 | + summary+="- \`${sorted_tests[$i]}\`: ${sorted_times[$i]}s\n" |
| 268 | + if [ $i -ge 9 ]; then # Show only top 10 slowest tests |
| 269 | + if [ ${#sorted_tests[@]} -gt 10 ]; then |
| 270 | + summary+="- _...and $((${#sorted_tests[@]} - 10)) more_\n" |
| 271 | + fi |
| 272 | + break |
| 273 | + fi |
| 274 | + done |
| 275 | + summary+="</details>\n" |
| 276 | + fi |
| 277 | + |
| 278 | + # Add list of successful targets in a collapsible section |
| 279 | + if [ ${#successful_targets[@]} -gt 0 ]; then |
| 280 | + summary+="\n<details>\n<summary><strong>✅ Successfully Built</strong> (${#successful_targets[@]} targets)</summary>\n\n" |
| 281 | + |
| 282 | + # Sort the targets for better readability |
| 283 | + IFS=$'\n' successful_targets_sorted=($(sort <<<"${successful_targets[*]}")) |
| 284 | + unset IFS |
| 285 | + |
| 286 | + # Show all targets |
| 287 | + for target in "${successful_targets_sorted[@]}"; do |
| 288 | + summary+="- \`$target\`\n" |
| 289 | + done |
| 290 | + |
| 291 | + summary+="</details>\n" |
| 292 | + fi |
| 293 | + |
| 294 | + # Add details for failures if any in a collapsible section, but auto-expanded |
| 295 | + if [ -n "$failure_details" ]; then |
| 296 | + summary+="\n<details open>\n<summary><strong>❌ Failure Details</strong> ($fail_count failures)</summary>\n\n" |
| 297 | + summary+="$failure_details" |
| 298 | + summary+="</details>\n" |
| 299 | + fi |
| 300 | + |
| 301 | + # Add random inspirational quote |
| 302 | + summary+="\n---\n\n💡 **Random Dev Wisdom:**\n\n_$(get_random_quote)_\n" |
| 303 | + |
| 304 | + # Create the annotation |
| 305 | + create_annotation "$style" "$summary" |
| 306 | +} |