Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
127 changes: 127 additions & 0 deletions .github/workflows/presubmit.yml
Original file line number Diff line number Diff line change
Expand Up @@ -178,6 +178,133 @@ jobs:
-DVULKAN_LIB_DIR='${{ github.workspace }}'/Vulkan-Loader/build/loader/ \
${CMAKE_ADDITIONAL_CONFIG_ARGS}
cmake --build . --parallel

tests:
  name: Run Tests (pocl)
  runs-on: ubuntu-24.04
  steps:
    - uses: actions/checkout@v6
    # Scan every PR commit message and the PR description for
    # "[run-test: <command>]" tags (the pattern is anchored to the start
    # of a line).  When none are found, run_tests=false skips all later steps.
    - name: Parse triggers
      id: check_triggers
      env:
        GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      run: |
        MESSAGES=$(gh api -X GET /repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/commits --jq '.[].commit.message')
        PR_BODY=$(gh api -X GET /repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }} --jq '.body')
        COMMANDS=$(echo -e "$MESSAGES\n$PR_BODY" | grep -oP '^\[run-tests?:\s*\K[^\]]+' | sort -u || true)
        if [ -z "$COMMANDS" ]; then
          echo "No '[run-test: ...]' triggers found. Skipping job."
          echo "run_tests=false" >> $GITHUB_OUTPUT
        else
          echo "Found the following test triggers:"
          echo "$COMMANDS"
          echo "run_tests=true" >> $GITHUB_OUTPUT
          echo "$COMMANDS" > test_commands.txt
        fi
    # Out-of-tree install prefixes shared by the build steps below.
    - name: Environment variables
      if: steps.check_triggers.outputs.run_tests == 'true'
      run: |
        echo "pocl=$(realpath ${{ github.workspace }}/../pocl)" >> $GITHUB_ENV
        echo "loader=$(realpath ${{ github.workspace }}/../loader)" >> $GITHUB_ENV
        echo "cl-headers=$(realpath ${{ github.workspace }}/../cl-headers)" >> $GITHUB_ENV
        echo "spv-headers=$(realpath ${{ github.workspace }}/../spv-headers)" >> $GITHUB_ENV
    - name: Clone Headers
      if: steps.check_triggers.outputs.run_tests == 'true'
      run: |
        git clone --depth 1 https://github.com/KhronosGroup/OpenCL-Headers.git ${{ env.cl-headers }}
        git clone --depth 1 https://github.com/KhronosGroup/SPIRV-Headers.git ${{ env.spv-headers }}
    # NOTE(review): these two actions are pinned to mutable branch refs
    # (@master / @main); consider pinning to a release tag or commit SHA.
    - name: Setup Ninja
      # Fix: gate on the trigger check like every other step, so a PR
      # without [run-test: ...] tags does not pay for this setup.
      if: steps.check_triggers.outputs.run_tests == 'true'
      uses: seanmiddleditch/gha-setup-ninja@master
    - name: Install Vulkan SDK
      # Fix: gate on the trigger check like every other step.
      if: steps.check_triggers.outputs.run_tests == 'true'
      uses: humbletim/install-vulkan-sdk@main
      with:
        version: 1.4.309.0
        cache: true
    - name: Install pocl dependencies
      if: steps.check_triggers.outputs.run_tests == 'true'
      run: |
        sudo apt-get update
        export LLVM_VERSION=20
        sudo apt-get install -y \
          python3-dev libpython3-dev build-essential ocl-icd-libopencl1 \
          cmake git pkg-config libclang-${LLVM_VERSION}-dev clang-${LLVM_VERSION} \
          llvm-${LLVM_VERSION} make ninja-build ocl-icd-libopencl1 ocl-icd-dev \
          ocl-icd-opencl-dev libhwloc-dev zlib1g zlib1g-dev clinfo dialog apt-utils \
          libxml2-dev libclang-cpp${LLVM_VERSION}-dev libclang-cpp${LLVM_VERSION} \
          llvm-${LLVM_VERSION}-dev
    # Cache the installed pocl prefix; bump the key when the pocl tag changes.
    - name: Cache pocl
      id: cache-pocl
      if: steps.check_triggers.outputs.run_tests == 'true'
      uses: actions/cache@v4
      with:
        path: ${{ env.pocl }}
        key: ubuntu-24.04-pocl-v7.1
    - name: Build pocl
      if: steps.check_triggers.outputs.run_tests == 'true' && steps.cache-pocl.outputs.cache-hit != 'true'
      run: |
        git clone --depth 1 --branch v7.1 https://github.com/pocl/pocl.git
        cmake -B pocl/build -S pocl -G Ninja \
          -DCMAKE_INSTALL_PREFIX=${{ env.pocl }} \
          -DCMAKE_BUILD_TYPE=Release \
          -DENABLE_ICD=ON
        cmake --build pocl/build
        cmake --install pocl/build
    - name: Build loader
      if: steps.check_triggers.outputs.run_tests == 'true'
      shell: bash
      run: |
        git clone --depth 1 https://github.com/KhronosGroup/OpenCL-ICD-Loader.git
        cmake -B OpenCL-ICD-Loader/build -S OpenCL-ICD-Loader -G Ninja \
          -DCMAKE_INSTALL_PREFIX=${{ env.loader }} \
          -DCMAKE_BUILD_TYPE=Release \
          -DOPENCL_ICD_LOADER_HEADERS_DIR='${{ env.cl-headers }}'
        cmake --build OpenCL-ICD-Loader/build
        cmake --install OpenCL-ICD-Loader/build
    - name: Build OpenCL CTS
      if: steps.check_triggers.outputs.run_tests == 'true'
      shell: bash
      run: |
        cmake -B build -S . -G Ninja \
          -DCMAKE_BUILD_TYPE=Release \
          -DCL_INCLUDE_DIR='${{ env.cl-headers }}' \
          -DCL_LIB_DIR=${{ env.loader }}/lib \
          -DSPIRV_INCLUDE_DIR='${{ env.spv-headers }}' \
          -DCMAKE_RUNTIME_OUTPUT_DIRECTORY='${{ github.workspace }}'/bin \
          -DOPENCL_LIBRARIES="-lOpenCL -lpthread" \
          -DUSE_CL_EXPERIMENTAL=ON \
          -DGL_IS_SUPPORTED=OFF \
          -DVULKAN_IS_SUPPORTED=OFF
        cmake --build build
    # Run each triggered binary against pocl.  "|| true" keeps the loop
    # going on failures — the Check results step decides overall pass/fail
    # from the JSON files written via CL_CONFORMANCE_RESULTS_FILENAME.
    - name: Run tests
      if: steps.check_triggers.outputs.run_tests == 'true'
      shell: bash
      run: |
        export OCL_ICD_ENABLE_TRACE=1
        export OCL_ICD_VENDORS=${{ env.pocl }}/etc/OpenCL/vendors
        export LD_LIBRARY_PATH=${{ env.loader }}/lib:$LD_LIBRARY_PATH
        mkdir -p test_results
        while IFS= read -r cmd; do
          if [ -n "$cmd" ]; then
            SAFE_NAME=$(echo "$cmd" | sed -e 's/[^A-Za-z0-9._-]/_/g')
            RESULT_FILE="test_results/${SAFE_NAME}.json"
            export CL_CONFORMANCE_RESULTS_FILENAME="$RESULT_FILE"
            echo "=================================================="
            echo "Executing: ./$cmd"
            echo "Writing JSON result to: $RESULT_FILE"
            echo "=================================================="
            ./bin/$cmd || true
            cat ${RESULT_FILE}
          fi
        done < test_commands.txt
    # Compare the collected JSON results against the checked-in golden
    # reference; a non-zero exit from the script fails the job.
    - name: Check results
      if: steps.check_triggers.outputs.run_tests == 'true'
      shell: bash
      run: |
        ls test_results
        python3 ./ci/compare_results.py \
          --golden ./ci/pocl/golden.json \
          --results-dir test_results/

formatcheck:
name: Check code format
runs-on: ubuntu-22.04
Expand Down
21 changes: 21 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -130,3 +130,24 @@ PRs to the repository are required to be `clang-format` clean to pass CI.
Developers can either use the `git-clang-format` tool locally to verify this
before contributing, or update their PR based on the diff provided by a failing
CI job.

## Running Targeted CI Tests on Pull Requests

To help verify fixes or check for regressions without running the entire
conformance test suite, our continuous integration pipeline allows contributors
to trigger specific tests on Pull Requests against the `pocl` implementation.

### How to Trigger Tests

Testing is triggered by adding a special tag to either your
**Pull Request description** or in any of your **commit messages**.

The CI parses the text for the following syntax (the tag must appear at the
start of a line):
`[run-test: <command>]`

Multiple tags in a single Pull Request are supported.

### Examples

```text
[run-test: test_bruteforce exp -w -1]
```
132 changes: 132 additions & 0 deletions ci/compare_results.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,132 @@
#!/usr/bin/env python3
import argparse
import json
import os
import sys
from pathlib import Path

# Terminal Color Codes (ANSI SGR escape sequences) for the comparison report.
RED = '\033[91m'     # used for REGRESSION messages and the failure summary
GREEN = '\033[92m'   # used for FIX messages and the success summary
YELLOW = '\033[93m'  # used for other status differences (e.g. pass -> skip)
RESET = '\033[0m'    # restores the default terminal color

def parse_arguments():
    """Read the command-line options for the comparison tool.

    Returns:
        argparse.Namespace with `golden` (path to the golden reference
        JSON) and `results_dir` (directory holding PR result JSONs).
        Both options are mandatory; argparse exits with an error otherwise.
    """
    cli = argparse.ArgumentParser(
        description="Compare PR test results against a golden reference.")
    cli.add_argument("--golden", required=True,
                     help="Path to golden reference JSON")
    cli.add_argument("--results-dir", required=True,
                     help="Directory containing PR test JSON results")
    return cli.parse_args()

def load_golden_reference(filepath):
    """Parse the golden reference JSON file.

    Exits the process with status 1 (after printing the reason) if the
    file cannot be opened or parsed; otherwise returns the decoded data.
    """
    try:
        with open(filepath, 'r') as handle:
            golden = json.load(handle)
    except Exception as err:
        print(f"Failed to load golden reference '{filepath}': {err}")
        sys.exit(1)
    return golden

def categorize_difference(cmd, sub_test, expected, actual):
    """Build a colored one-line message for a result mismatch.

    An actual status of 'pass' is reported as a FIX (green), 'fail' as a
    REGRESSION (red), and anything else (e.g. 'skip') as a neutral
    DIFFERENCE (yellow).
    """
    color, label = {
        "pass": (GREEN, "FIX"),
        "fail": (RED, "REGRESSION"),
    }.get(actual, (YELLOW, "DIFFERENCE"))
    return (f"{color} {label}: [{cmd}] '{sub_test}' expected "
            f"'{expected}', but got '{actual}'.{RESET}")

def compare_test_subset(cmd, actual_results, expected_results, differences, missing_refs):
    """Check each executed sub-test of `cmd` against the golden expectations.

    Appends human-readable messages to `differences` and `missing_refs`
    (both mutated in place) and returns True when any problem was found.
    """
    problem_found = False
    for name, got in actual_results.items():
        # Rule 1: every executed sub-test must exist in the reference.
        if name not in expected_results:
            missing_refs.append(
                f"[{cmd}] Sub-test '{name}' not found in golden reference.")
            problem_found = True
            continue
        # Rule 2: report ANY status difference (fix, regression, skip, ...).
        want = expected_results[name]
        if got != want:
            differences.append(categorize_difference(cmd, name, want, got))
            problem_found = True
    return problem_found

def process_all_results(results_dir, golden_data):
    """Compare every *.json result in `results_dir` against `golden_data`.

    All files are processed (no early stop) so a single bad file does not
    hide remaining differences.

    Returns:
        (has_error, differences, missing_refs): aggregate error flag plus
        the human-readable difference / missing-reference message lists.
    Exits:
        With status 1 when the directory contains no JSON results at all.
    """
    has_error = False
    differences = []
    missing_refs = []

    # Glob once (sorted for deterministic output order) instead of twice.
    result_files = sorted(Path(results_dir).glob("*.json"))
    if not result_files:
        print(f"Error: Results directory '{results_dir}' is empty.")
        sys.exit(1)

    for result_file in result_files:
        with open(result_file, 'r') as f:
            try:
                data = json.load(f)
            except json.JSONDecodeError:
                print(f"Error parsing {result_file}. Skipping.")
                has_error = True
                continue

        # Bug fix: check for a missing/empty 'cmd' key BEFORE calling
        # os.path.basename — basename(None) raises TypeError, which made
        # the "missing the 'cmd' key" branch unreachable.
        raw_cmd = data.get("cmd")
        if not raw_cmd:
            print(f"File {result_file} is missing the 'cmd' key. Skipping.")
            has_error = True
            continue
        cmd = os.path.basename(raw_cmd)
        results = data.get("results", {})

        if cmd not in golden_data:
            missing_refs.append(f"cmd '{cmd}' not found in golden reference.")
            has_error = True
            continue

        # Check the specific results against the reference entry.
        if compare_test_subset(cmd, results, golden_data[cmd], differences, missing_refs):
            has_error = True

    return has_error, differences, missing_refs

def print_report_and_exit(has_error, differences, missing_refs):
    """Print the final comparison report and terminate the process.

    Exits with status 1 when any error was recorded, 0 otherwise.
    """
    if missing_refs:
        print("\n--- Missing References ---")
        print("\n".join(missing_refs))
        print("\nPlease update the golden reference to include these missing cmd/tests.")

    if differences:
        print("\n--- Test Differences ---")
        print("\n".join(differences))

    if not has_error:
        print(f"\n{GREEN} All run tests match the golden reference perfectly!{RESET}")
        sys.exit(0)
    print(f"\n{RED} Errors found during comparison. Failing the check.{RESET}")
    sys.exit(1)

def main():
    """Entry point: load inputs, compare results, report, set exit code."""
    args = parse_arguments()

    results_dir = Path(args.results_dir)
    if not results_dir.is_dir():
        print(f"Error: Results directory '{args.results_dir}' does not exist.")
        sys.exit(1)

    golden_data = load_golden_reference(args.golden)

    # Gather every difference before reporting so one bad file does not
    # hide the rest; print_report_and_exit sets the process exit code.
    has_error, differences, missing_refs = process_all_results(results_dir, golden_data)
    print_report_and_exit(has_error, differences, missing_refs)

if __name__ == "__main__":
main()
9 changes: 9 additions & 0 deletions ci/pocl/golden.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
{
"test_computeinfo": {
"computeinfo": "pass",
"device_uuid": "skip",
"extended_versioning": "pass",
"conformance_version": "pass",
"pci_bus_info": "skip"
}
}
Loading