chore: add .bestpractices.json + lint workflow (#17) #2
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
name: Best Practices JSON Lint

# Keeps .bestpractices.json parseable and on-schema between manual
# re-ingests: the BadgeApp at bestpractices.dev pulls this file from the
# repo root (per docs/bestpractices-json.md upstream) when the maintainer
# clicks "Save (and continue) 🤖" on the project's edit page.
#
# The lint checks that:
#   1. the JSON parses (no trailing commas, no malformed escapes);
#   2. every <criterion>_status value is one of: Met, Unmet, N/A, ?;
#   3. every <criterion>_status comes with a <criterion>_justification
#      (the BadgeApp doesn't reject a missing one — the criterion just
#      renders blank);
#   4. all 67 passing-tier criterion ids from the upstream criteria.yml
#      are answered (or explicitly set to ?).

on:
  push:
    branches: [main]
    paths:
      - .bestpractices.json
      - .github/workflows/bestpractices.yml
  pull_request:
    paths:
      - .bestpractices.json
      - .github/workflows/bestpractices.yml
  workflow_dispatch:

permissions:
  contents: read

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        # actions/checkout@v4, pinned to a full commit SHA
        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
      - name: Validate .bestpractices.json
        env:
          # Authoritative list of passing-tier criterion ids, mirrored
          # from coreinfrastructure/best-practices-badge criteria.yml
          # (level "0" / passing). Sync this list any time the upstream
          # adds, retires, or renames a passing-tier criterion.
          PASSING_CRITERIA: |
            description_good
            interact
            contribution
            contribution_requirements
            floss_license
            floss_license_osi
            license_location
            documentation_basics
            documentation_interface
            sites_https
            discussion
            english
            maintained
            repo_public
            repo_track
            repo_interim
            repo_distributed
            version_unique
            version_semver
            version_tags
            release_notes
            release_notes_vulns
            report_process
            report_tracker
            report_responses
            enhancement_responses
            report_archive
            vulnerability_report_process
            vulnerability_report_private
            vulnerability_report_response
            build
            build_common_tools
            build_floss_tools
            test
            test_invocation
            test_most
            test_continuous_integration
            test_policy
            tests_are_added
            tests_documented_added
            warnings
            warnings_fixed
            warnings_strict
            know_secure_design
            know_common_errors
            crypto_published
            crypto_call
            crypto_floss
            crypto_keylength
            crypto_working
            crypto_weaknesses
            crypto_pfs
            crypto_password_storage
            crypto_random
            delivery_mitm
            delivery_unsigned
            vulnerabilities_fixed_60_days
            vulnerabilities_critical_fixed
            no_leaked_credentials
            static_analysis
            static_analysis_common_vulnerabilities
            static_analysis_fixed
            static_analysis_often
            dynamic_analysis
            dynamic_analysis_unsafe
            dynamic_analysis_enable_assertions
            dynamic_analysis_fixed
        run: |
          set -euo pipefail
          python3 - <<'PY'
# Sanity-check the .bestpractices.json badge entry before the BadgeApp
# re-ingests it.  On problems, prints one "ERROR: ..." line per finding
# to stderr and exits non-zero; on success, prints a Met/Unmet/N/A/?
# tally.
import json
import os
import sys
from collections import Counter

# Status values the BadgeApp accepts for a <criterion>_status key.
ALLOWED_STATUSES = {"Met", "Unmet", "N/A", "?"}

# Non-criterion metadata keys tolerated at the top level of the file.
META_KEYS = {
    "_comment", "name", "description", "homepage_url", "repo_url",
    "license", "homepage_url_status", "homepage_url_justification",
}


def collect_errors(data, criteria):
    """Return a list of human-readable problems found in *data*.

    data: parsed top-level JSON object from .bestpractices.json.
    criteria: passing-tier criterion ids that must all be answered.

    Checks, in the order their messages appear:
      1. every *_status value is one of ALLOWED_STATUSES;
      2. every *_status key has a string *_justification twin — treated
         as an error here precisely because the BadgeApp itself accepts
         a missing justification and just shows the criterion blank;
      3. every passing-tier criterion id is answered;
      4. *_status keys whose base id is not a known criterion (likely
         typos or upstream renames).
    """
    errors = []
    # 1. status values are valid
    for k, v in data.items():
        if k.endswith("_status") and v not in ALLOWED_STATUSES:
            errors.append(f"{k} = {v!r}; expected one of {sorted(ALLOWED_STATUSES)}")
    # 2. each _status has a matching string _justification
    for k in data:
        if k.endswith("_status"):
            jk = k[: -len("_status")] + "_justification"
            if jk not in data:
                errors.append(f"missing {jk} for {k}")
            elif not isinstance(data[jk], str):
                errors.append(f"{jk} must be a string")
    # 3. every passing-tier criterion is present
    for m in (c for c in criteria if (c + "_status") not in data):
        errors.append(f"passing criterion {m!r} not answered (add {m}_status + {m}_justification)")
    # 4. unknown criterion keys (likely typos)
    known = set(criteria)
    for k in data:
        if k in META_KEYS:
            continue
        if k.endswith("_status") and k[: -len("_status")] not in known:
            errors.append(f"unknown criterion key: {k} (typo? upstream rename?)")
    return errors


def main():
    """Load, validate, and summarize .bestpractices.json; exit 1 on problems."""
    path = ".bestpractices.json"
    with open(path) as f:
        data = json.load(f)  # a JSONDecodeError here fails the job, as intended
    if not isinstance(data, dict):
        sys.exit(f"{path}: top-level must be an object, got {type(data).__name__}")
    # Criterion ids arrive one-per-line in the PASSING_CRITERIA env var.
    criteria = [c.strip() for c in os.environ["PASSING_CRITERIA"].splitlines() if c.strip()]
    errors = collect_errors(data, criteria)
    if errors:
        for e in errors:
            print(f"ERROR: {e}", file=sys.stderr)
        sys.exit(f"\n{len(errors)} error(s) in {path}")
    # Tally the answered statuses in a single pass.
    tally = Counter(v for k, v in data.items() if k.endswith("_status"))
    met, unmet, na, unk = (tally[s] for s in ("Met", "Unmet", "N/A", "?"))
    total = met + unmet + na + unk
    print(f"OK: {path} parses cleanly")
    print(f" {total} criteria answered: {met} Met / {unmet} Unmet / {na} N/A / {unk} ?")


if __name__ == "__main__":  # the CI heredoc (python3 - <<'PY') runs as __main__
    main()
| PY |