-
Notifications
You must be signed in to change notification settings - Fork 0
183 lines (169 loc) · 6.67 KB
/
bestpractices.yml
File metadata and controls
183 lines (169 loc) · 6.67 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
name: Best Practices JSON Lint
# Validates .bestpractices.json on every push to main and on PRs that
# touch the file. The BadgeApp at bestpractices.dev pulls this file
# from the repo root (per docs/bestpractices-json.md upstream) when
# the maintainer clicks "Save (and continue) 🤖" on the project's
# edit page, so it must stay parseable and on-schema between manual
# re-ingests.
#
# Lint covers:
#   1. JSON parses (no trailing commas, no malformed escapes).
#   2. All <criterion>_status values are one of: Met, Unmet, N/A, ?.
#   3. Every <criterion>_status has a matching <criterion>_justification.
#      The BadgeApp doesn't fail without one — the criterion just shows
#      blank — but this lint enforces presence so intent stays recorded.
#   4. The 66 "passing" tier criterion ids from the upstream
#      criteria.yml are all answered (or set to ?).

on:
  push:
    branches: [main]
    paths:
      - .bestpractices.json
      - .github/workflows/bestpractices.yml
  pull_request:
    paths:
      - .bestpractices.json
      - .github/workflows/bestpractices.yml
  workflow_dispatch:

permissions:
  contents: read

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        # actions/checkout@v4, pinned to a full commit SHA.
        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11

      - name: Validate .bestpractices.json
        env:
          # Authoritative list of passing-tier criterion ids, mirrored
          # from coreinfrastructure/best-practices-badge criteria.yml
          # (level "0" / passing). Sync this list any time the upstream
          # adds, retires, or renames a passing-tier criterion.
          PASSING_CRITERIA: |
            description_good
            interact
            contribution
            contribution_requirements
            floss_license
            floss_license_osi
            license_location
            documentation_interface
            sites_https
            discussion
            english
            maintained
            repo_public
            repo_track
            repo_interim
            repo_distributed
            version_unique
            version_semver
            version_tags
            release_notes
            release_notes_vulns
            report_process
            report_tracker
            report_responses
            enhancement_responses
            report_archive
            vulnerability_report_process
            vulnerability_report_private
            vulnerability_report_response
            build
            build_common_tools
            build_floss_tools
            test
            test_invocation
            test_most
            test_continuous_integration
            test_policy
            tests_are_added
            tests_documented_added
            warnings
            warnings_fixed
            warnings_strict
            know_secure_design
            know_common_errors
            crypto_published
            crypto_call
            crypto_floss
            crypto_keylength
            crypto_working
            crypto_weaknesses
            crypto_pfs
            crypto_password_storage
            crypto_random
            delivery_mitm
            delivery_unsigned
            vulnerabilities_fixed_60_days
            vulnerabilities_critical_fixed
            no_leaked_credentials
            static_analysis
            static_analysis_common_vulnerabilities
            static_analysis_fixed
            static_analysis_often
            dynamic_analysis
            dynamic_analysis_unsafe
            dynamic_analysis_enable_assertions
            dynamic_analysis_fixed
        run: |
          set -euo pipefail
          python3 - <<'PY'
          import json, os, sys

          path = ".bestpractices.json"
          with open(path) as f:
              data = json.load(f)
          if not isinstance(data, dict):
              sys.exit(f"{path}: top-level must be an object, got {type(data).__name__}")

          allowed = {"Met", "Unmet", "N/A", "?"}
          errors = []

          # 1. status values are valid
          for k, v in data.items():
              if k.endswith("_status") and v not in allowed:
                  errors.append(f"{k} = {v!r}; expected one of {sorted(allowed)}")

          # 2. each _status has a matching string _justification. Enforced
          #    as an error here even though the BadgeApp tolerates a
          #    missing one (the criterion would just render blank).
          for k in data:
              if k.endswith("_status"):
                  base = k[: -len("_status")]
                  jk = base + "_justification"
                  if jk not in data:
                      errors.append(f"missing {jk} for {k}")
                  elif not isinstance(data[jk], str):
                      errors.append(f"{jk} must be a string")

          # 3. every passing-tier criterion is present
          criteria = [c.strip() for c in os.environ["PASSING_CRITERIA"].splitlines() if c.strip()]
          missing = [c for c in criteria if (c + "_status") not in data]
          for m in missing:
              errors.append(f"passing criterion {m!r} not answered (add {m}_status + {m}_justification)")

          # 4. unknown criterion keys (likely typos)
          known = set(criteria)
          # tolerate a few additional metadata keys. The non-criterion
          # fields ($schema, project_id, level, badge_url,
          # project_page_url, evidence, audit) mirror the schema used
          # by other RandomCodeSpace projects on bestpractices.dev so
          # the BadgeApp's auto-ingest reads our intent unambiguously.
          meta = {"_comment", "$schema", "name", "description", "homepage_url",
                  "repo_url", "license", "project_id", "level", "badge_url",
                  "project_page_url", "evidence", "audit",
                  "homepage_url_status", "homepage_url_justification"}
          for k in data:
              if k in meta:
                  continue
              if k.endswith("_status"):
                  base = k[: -len("_status")]
                  if base not in known:
                      errors.append(f"unknown criterion key: {k} (typo? upstream rename?)")
              elif k.endswith("_justification"):
                  # orphan justification: text with no status key to anchor it
                  base = k[: -len("_justification")]
                  if (base + "_status") not in data:
                      errors.append(f"orphan justification: {k} has no {base}_status")

          if errors:
              for e in errors:
                  print(f"ERROR: {e}", file=sys.stderr)
              sys.exit(f"\n{len(errors)} error(s) in {path}")

          met = sum(1 for k, v in data.items() if k.endswith("_status") and v == "Met")
          unmet = sum(1 for k, v in data.items() if k.endswith("_status") and v == "Unmet")
          na = sum(1 for k, v in data.items() if k.endswith("_status") and v == "N/A")
          unk = sum(1 for k, v in data.items() if k.endswith("_status") and v == "?")
          total = met + unmet + na + unk
          print(f"OK: {path} parses cleanly")
          print(f"    {total} criteria answered: {met} Met / {unmet} Unmet / {na} N/A / {unk} ?")
          PY