HS tests: scrape v0.9.90 upstream in full, flip silent stubs to loud SKIPs
- scrape-hs-upstream.py: new scraper walks /tmp/hs-upstream/test/**/*.js
and emits body-style records for all 1,496 v0.9.90 tests (up from 831).
Widens coverage into 66 previously-missing categories — templates,
reactivity, behavior, worker, classRef, make, throw, htmx, tailwind,
viewTransition, and more.
- build-hs-manifest.py + hyperscript-upstream-manifest.{json,md}:
coverage manifest tagging each upstream test with a status
(runnable / skip-listed / untranslated / missing) and block reason.
- generate-sx-tests.py: emit (error "SKIP (...)") instead of silent
(hs-cleanup!) no-op for both skip-listed tests and generator-
untranslatable bodies. Stub counter now reports both buckets.
- hyperscript-feature-audit-0.9.90.md: gap audit against the 0.9.90
spec; pre-0.9.90.json backs up prior 831-test snapshot.
New honest baseline (ocaml runner, test-hyperscript-behavioral):
831 -> 1,496 tests; 645 -> 1,013 passing (67.7% conformance).
483 failures split: 45 skip-list, 151 untranslated, 287 real.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
549
tests/playwright/build-hs-manifest.py
Normal file
549
tests/playwright/build-hs-manifest.py
Normal file
@@ -0,0 +1,549 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Build coverage manifest for _hyperscript v0.9.90 upstream tests.
|
||||
|
||||
Strategy:
|
||||
1. Parse every test file under /tmp/hs-upstream/test/ for `test(...)` and
|
||||
`test.describe(...)` calls (Playwright style).
|
||||
2. Stack nested describes into name prefixes (the describe is the category
|
||||
marker — but our JSON keys by filename basename, so we use that).
|
||||
3. Match against /root/rose-ash/spec/tests/hyperscript-upstream-tests.json.
|
||||
4. Classify complexity, status, block_reason.
|
||||
5. Emit manifest JSON + markdown summary.
|
||||
"""
|
||||
import ast
import json
import os
import re
import subprocess
import sys
from collections import Counter, defaultdict
from pathlib import Path
|
||||
|
||||
# Input/output locations. All paths are absolute because this is a one-shot
# maintenance script, not an importable module.
HS_ROOT = Path('/tmp/hs-upstream')    # clone of the _hyperscript v0.9.90 tag
TEST_ROOT = HS_ROOT / 'test'          # upstream Playwright test tree
OUR_JSON = Path('/root/rose-ash/spec/tests/hyperscript-upstream-tests.json')      # our scraped snapshot
GEN_PY = Path('/root/rose-ash/tests/playwright/generate-sx-tests.py')             # generator holding SKIP_TEST_NAMES
OUT_JSON = Path('/root/rose-ash/spec/tests/hyperscript-upstream-manifest.json')   # manifest output (JSON)
OUT_MD = Path('/root/rose-ash/spec/tests/hyperscript-upstream-manifest.md')       # manifest output (markdown summary)
|
||||
|
||||
# ---------------------------------------------------------------------------
# Load SKIP_TEST_NAMES from generator
# ---------------------------------------------------------------------------
def _parse_skip_name(line):
    """Parse one entry of the generator's SKIP_TEST_NAMES set literal.

    Args:
        line: one raw source line from inside the set literal.

    Returns:
        The unescaped test name, or None for blank lines, comment lines,
        and lines that do not start with a string literal.
    """
    line = line.strip().rstrip(',')
    if not line or line.startswith('#'):
        return None
    # First string literal on the line; names may contain escaped quotes,
    # and a trailing comma and/or `# comment` may follow the closing quote.
    mm = re.match(r'^(["\'])(.*)\1\s*,?\s*(#.*)?$', line)
    if not mm:
        return None
    quote, raw = mm.group(1), mm.group(2)
    try:
        # ast.literal_eval applies Python string-escape semantics exactly.
        # (The previous encode('utf-8').decode('unicode_escape') round-trip
        # mangled any non-Latin-1 character in a test name.)
        return ast.literal_eval(quote + raw + quote)
    except (ValueError, SyntaxError):
        # Malformed literal: keep the raw text rather than dropping the name
        # entirely — a silently dropped skip name would mislabel that test
        # 'runnable' downstream.
        return raw


gen_src = GEN_PY.read_text()
m = re.search(r'SKIP_TEST_NAMES\s*=\s*\{(.*?)\n\}', gen_src, re.DOTALL)
assert m, "could not find SKIP_TEST_NAMES"
SKIP_NAMES = set()
for line in m.group(1).splitlines():
    name = _parse_skip_name(line)
    if name is not None:
        SKIP_NAMES.add(name)
print(f"Loaded {len(SKIP_NAMES)} skip names", file=sys.stderr)
|
||||
|
||||
# ---------------------------------------------------------------------------
# Load our snapshot JSON
# ---------------------------------------------------------------------------
def norm(s):
    """Normalize a test name for matching: trim, collapse runs of
    whitespace to a single space, and lowercase."""
    return re.sub(r'\s+', ' ', s.strip()).lower()


# read_text() instead of open() so no file handle is left dangling
# (the original json.load(OUR_JSON.open()) never closed the file).
our_tests = json.loads(OUR_JSON.read_text())
# Index by (category, normalized name); on collision the later entry wins,
# matching the original insertion loop.
our_map = {(t['category'], norm(t['name'])): t for t in our_tests}
print(f"Our JSON: {len(our_tests)} tests, {len(our_map)} unique (cat,name)", file=sys.stderr)
|
||||
|
||||
# ---------------------------------------------------------------------------
# Extract upstream tests
# ---------------------------------------------------------------------------
# Pattern: test(<quoted-string>, ... or test.describe(<quoted-string>, ...
# We need to parse balanced parens to find the body snippet for classification.

# Infrastructure files that define fixtures/config, not test() calls.
SKIP_FILES = {'fixtures.js', 'global-setup.js', 'global-teardown.js',
              'entry.js', 'htmx-fixtures.js', 'playwright.config.js'}
|
||||
|
||||
def parse_string_literal(src, i):
    """src[i] must be quote; return (value, next_i).

    Decodes \\n, \\t, \\\\ and the escaped quote character; any other
    backslash escape collapses to just the character that follows it.
    Raises ValueError if the closing quote is never found.
    """
    q = src[i]
    assert q in ('"', "'", '`'), f"not a quote at {i}: {src[i-5:i+5]!r}"
    n = len(src)
    pieces = []
    i += 1
    while i < n:
        ch = src[i]
        if ch == q:
            # closing quote — done
            return ''.join(pieces), i + 1
        if ch == '\\':
            follower = src[i + 1] if i + 1 < n else ''
            # preserve the common escapes; unknown escapes drop the backslash
            pieces.append({'n': '\n', 't': '\t', '\\': '\\', q: q}.get(follower, follower))
            i += 2
        else:
            pieces.append(ch)
            i += 1
    raise ValueError(f"unterminated string starting at {i}")
|
||||
|
||||
def find_matching_paren(src, start):
    """start is index of '('; return index of matching ')', or -1 if none.

    Depth-counts parens while skipping string literals, line/block comments,
    and (heuristically) regex literals so parens inside them are ignored.
    Returns -1 for unbalanced input — including an unterminated string
    literal, which previously escaped as a ValueError crash (the sibling
    scraper's find_matching already returned -1 in that case).
    """
    depth = 0
    i = start
    while i < len(src):
        c = src[i]
        if c in ('"', "'", '`'):
            try:
                _, i = parse_string_literal(src, i)
            except ValueError:
                return -1  # unterminated literal: treat the call as unbalanced
            continue
        if c == '/' and i + 1 < len(src):
            # regex or comment
            if src[i+1] == '/':
                # line comment — skip to end of line
                j = src.find('\n', i)
                i = len(src) if j == -1 else j + 1
                continue
            if src[i+1] == '*':
                # block comment — skip past the terminator
                j = src.find('*/', i)
                i = len(src) if j == -1 else j + 2
                continue
            # regex literal heuristic: only treat '/' as opening a regex when
            # the previous non-space char is an operator/open-bracket, the
            # start of input, or the keyword `return`; otherwise it's division.
            k = i - 1
            while k >= 0 and src[k].isspace():
                k -= 1
            prev = src[k] if k >= 0 else ''
            is_return = src[k-5:k+1] == 'return' if k >= 5 else False
            if prev in '(,;=!?&|:+-*/<>%^~{[' or prev == '' or is_return:
                # scan to the regex's closing '/', honoring escapes and
                # character classes; give up at end of line
                j = i + 1
                while j < len(src):
                    cc = src[j]
                    if cc == '\\':
                        j += 2
                        continue
                    if cc == '[':
                        # character class: '/' inside [...] is literal
                        while j < len(src) and src[j] != ']':
                            if src[j] == '\\':
                                j += 2
                            else:
                                j += 1
                        j += 1
                        continue
                    if cc == '/':
                        j += 1
                        # swallow trailing flags (e.g. /gi)
                        while j < len(src) and src[j].isalpha():
                            j += 1
                        break
                    if cc == '\n':
                        break
                    j += 1
                i = j
                continue
        if c == '(':
            depth += 1
            i += 1
        elif c == ')':
            depth -= 1
            if depth == 0:
                return i
            i += 1
        else:
            i += 1
    return -1
|
||||
|
||||
def extract_tests_from_file(path):
    """Extract all `test(...)` calls from a Playwright test file.

    Returns a list of dicts {name, body, describe}: the test's name string,
    the full source text of the test(...) call, and the name of the
    enclosing test.describe block.  Tests inside nested describes are
    collected once per enclosing describe; the caller de-duplicates.
    """
    src = path.read_text()
    tests = []

    # Pass 1: locate every test.describe(...) and its balanced-paren extent.
    # NOTE: no trailing \s* in the pattern — m.end()-1 must land exactly on
    # the '('.  The old pattern (`test\.describe\s*\(\s*`) swallowed any
    # whitespace after the paren, so find_matching_paren started on a space
    # and returned a bogus end index for `test.describe( 'name', ...)`.
    describes = []  # (name, index_of_open_paren, index_of_close_paren)
    i = 0
    while i < len(src):
        m = re.search(r'test\.describe\s*\(', src[i:])
        if not m:
            break
        start = i + m.start()
        paren = i + m.end() - 1  # the '(' right after describe
        # the describe's name is the first string literal after '('
        j = paren + 1
        while j < len(src) and src[j].isspace():
            j += 1
        if j >= len(src) or src[j] not in ('"', "'", '`'):
            i = start + len(m.group(0))
            continue
        try:
            name, j = parse_string_literal(src, j)
        except ValueError:  # narrowed from bare Exception: only error raised here
            i = start + len(m.group(0))
            continue
        endp = find_matching_paren(src, paren)
        if endp == -1:
            i = start + len(m.group(0))
            continue
        describes.append((name, paren, endp))
        i = paren + 1  # keep scanning inside — describes can nest

    # Pass 2: inside each describe, find plain `test(` calls.  The negative
    # lookbehind rejects word chars and '.', so test.describe / test.skip /
    # foo.test don't match.
    for dname, dstart, dend in describes:
        region = src[dstart:dend]
        k = 0
        while k < len(region):
            m = re.search(r'(?<![a-zA-Z0-9_.])test\s*\(', region[k:])
            if not m:
                break
            abs_paren = dstart + k + m.end() - 1
            # belt-and-braces: the lookbehind should already exclude
            # test.describe, but skip it explicitly if it slips through
            if 'describe' in region[k + m.start(): k + m.end()]:
                k += m.end()
                continue
            # parse the test's name (first string-literal argument)
            j = abs_paren + 1
            while j < len(src) and src[j].isspace():
                j += 1
            if j >= len(src) or src[j] not in ('"', "'", '`'):
                k += m.end()
                continue
            try:
                tname, j = parse_string_literal(src, j)
            except ValueError:
                k += m.end()
                continue
            endp = find_matching_paren(src, abs_paren)
            if endp == -1:
                k += m.end()
                continue
            tests.append({'name': tname, 'body': src[abs_paren:endp+1], 'describe': dname})
            # advance past this test
            k = (endp + 1) - dstart
    return tests
|
||||
|
||||
# ---------------------------------------------------------------------------
# Extract from all files
# ---------------------------------------------------------------------------
upstream_tests = []
for path in sorted(TEST_ROOT.rglob('*.js')):
    # skip fixture/config files and vendored or manual-only trees
    if path.name in SKIP_FILES:
        continue
    if 'vendor' in path.parts or 'node_modules' in path.parts or 'manual' in path.parts:
        continue
    rel = path.relative_to(HS_ROOT)
    category = path.stem  # filename without .js — this is our category key
    # Special cases for organization
    tests = extract_tests_from_file(path)
    for t in tests:
        upstream_tests.append({
            'category': category,
            'name': t['name'],
            'body': t['body'],
            'describe': t['describe'],
            'upstream_file': str(rel),
        })

print(f"Extracted {len(upstream_tests)} upstream tests", file=sys.stderr)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Identify post-snapshot additions
|
||||
# ---------------------------------------------------------------------------
|
||||
# A test is "post-snapshot" if it appears in a file modified in the window
|
||||
# AND not in our JSON. We'll compute based on the in_our_json check instead.
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Classify each test
|
||||
# ---------------------------------------------------------------------------
|
||||
def classify_complexity(body, name, existing):
    """Infer a complexity tag for an upstream test.

    When the test already exists in our snapshot JSON, its recorded
    complexity wins; otherwise classify from the test body's content.
    """
    if existing:
        return existing.get('complexity', 'simple')
    if 'sinon.' in body:
        return 'sinon'
    if any(tag in body for tag in ('<script type="text/hyperscript"',
                                   "<script type='text/hyperscript'")):
        return 'script-tag'
    # dialog: explicit API use, or .close() on something that looks like <dialog>
    if 'showModal' in body or 'HTMLDialogElement' in body or ('.close()' in body and '<dialog' in body.lower()):
        return 'dialog'
    if 'new Promise' in body or 'async function' in body or (' await ' in body and ('setTimeout' in body or 'resolve' in body)):
        # many tests use async functions trivially; require an actual Promise / delay
        if 'new Promise' in body or 'setTimeout' in body:
            return 'promise'
    if 'evaluate' in body and 'html' not in body and 'find' not in body:
        return 'eval-only'
    if '_hyperscript.evaluate' in body or 'evaluate(' in body:
        return 'evaluate'
    if 'runCmd' in body or "_hyperscript('" in body or '_hyperscript("' in body:
        return 'run-eval'
    return 'simple'
|
||||
|
||||
def classify_block_reason(body, name, category):
    """Classify why a test is blocked; returns a single block-reason string.

    Checks concrete infrastructure gaps first (sinon, script tags, dialog,
    worker, SSE, websockets, observers, transitions, focus), then looks for
    unimplemented hyperscript syntax patterns in quoted fragments.

    NOTE(review): the original docstring promised "(block_reason,
    unlocks_hint) or (None, None)", but every path returns one string;
    callers treat 'translation-TBD' as "no concrete gap identified".
    """
    # sinon-based mocking
    if 'sinon.' in body:
        return 'needs-sinon'
    # inline hyperscript / hypertemplate via <script> tags
    if '<script type="text/hyperscript"' in body or "<script type='text/hyperscript'" in body:
        return 'needs-script-tag'
    if '<script type="text/hypertemplate"' in body or "<script type='text/hypertemplate'" in body:
        return 'needs-script-tag'
    # dialog element / API
    if 'showModal' in body or '<dialog' in body.lower():
        return 'needs-dialog-api'
    # web worker
    if 'Worker(' in body or 'worker.js' in body:
        return 'needs-worker'
    # server-sent events
    if 'EventSource' in body or 'eventsource' in body.lower():
        return 'needs-eventsource'
    # websockets (the whole 'socket' category is websocket-driven)
    if 'WebSocket' in body or category == 'socket':
        return 'needs-websocket'
    # mutation observers / focus/blur bubbling / transition events
    if 'MutationObserver' in body or 'mutationobserver' in body.lower():
        return 'needs-dom-mutation-observer'
    if 'transitionend' in body or 'transition-' in body or 'viewTransition' in body or category == 'transition' or category == 'viewTransition':
        return 'needs-css-transitions'
    if '.focus()' in body or '.blur()' in body or 'focusin' in body or 'focusout' in body:
        # NOTE(review): only the 'focus' category returns here; focus usage in
        # other categories falls through to the later checks — confirm intended.
        if category == 'focus':
            return 'needs-dom-focus'
    if 'resize' in body and 'ResizeObserver' in body:
        return 'needs-resize-observer'
    if 'IntersectionObserver' in body:
        return 'needs-intersection-observer'
    # patterns in hs strings
    # NOTE(review): hs_strings is computed but never read — dead code, kept
    # byte-identical here (the re.findall calls have no side effects).
    hs_strings = re.findall(r"_=(['\"])([^'\"]*)\1", body) + re.findall(r"['\"]on\s+[^'\"]+['\"]", body)
    # Grab any quoted hs fragment
    all_quotes = re.findall(r"['\"`]([^'\"`]{3,})['\"`]", body)
    combined = ' '.join(all_quotes)
    # positional expr `<sel/>`; the second clause guards against '/>' that is
    # fixture HTML appearing before the first expect()
    if re.search(r'<[^<>]*\/>', combined) and not '/>' in body.split('expect')[0][:1000]:
        # detect <sel/>
        if re.search(r"<[a-zA-Z#.][^<>]*\/>", combined):
            return 'needs-pattern:<sel/>'
    # template interpolation ${}
    if '${' in combined:
        return 'needs-pattern:${}'
    # {css}
    # NOTE(review): this whole branch is a no-op — neither arm returns;
    # presumably a planned 'needs-pattern:{css}' tag that was found too
    # noisy and neutered.  Kept for behavior identity.
    if re.search(r'\{[^{}]*[:\s][^{}]+\}', combined):
        # css-like block literal inside HS string
        if re.search(r"'\s*\{", combined) or re.search(r'"\s*\{', combined):
            pass # too noisy
    if re.search(r'\[@[\w-]+', combined):
        return 'needs-pattern:[@attr]'
    # DOM required
    # NOTE(review): both arms below return the same value; the html(/find(
    # check is vestigial.
    if 'html(' in body and 'find(' in body:
        return 'translation-TBD'
    return 'translation-TBD'
|
||||
|
||||
# Files touched in the post-snapshot window (between our 2026-04-09 scrape
# and the v0.9.90 tag); used later to tag missing tests 'added-post-snapshot'.
import subprocess

git_cmd = ['git', 'log', '--after=2026-04-09', '--before=2026-04-14',
           '--name-only', '--pretty=format:', '--', 'test/']
log_text = subprocess.check_output(git_cmd, cwd=str(HS_ROOT)).decode()
POST_SNAPSHOT_FILES = {
    entry.strip()
    for entry in log_text.splitlines()
    if entry.strip().startswith('test/') and entry.strip().endswith('.js')
}
print(f"Post-snapshot touched files: {len(POST_SNAPSHOT_FILES)}", file=sys.stderr)
|
||||
|
||||
# Categories our snapshot already knows about (for missing-test triage below).
OUR_CATEGORIES = set(t['category'] for t in our_tests)

# Build manifest: one record per upstream test with status + block reason.
manifest = []
for t in upstream_tests:
    key = (t['category'], norm(t['name']))
    existing = our_map.get(key)
    in_our_json = existing is not None
    complexity = classify_complexity(t['body'], t['name'], existing)
    # Status: in snapshot & unguarded -> runnable; in snapshot but guarded in
    # the generator -> skip-listed; otherwise missing.
    # NOTE(review): SKIP_NAMES is matched on the raw (non-normalized) name;
    # a name differing only in case/whitespace would be treated runnable —
    # confirm that is intended.
    if not in_our_json:
        status = 'missing'
    elif t['name'] in SKIP_NAMES:
        status = 'skip-listed'
    else:
        status = 'runnable'
    # Block reason
    if status == 'runnable':
        block_reason = None
    else:
        # First check for concrete infra gaps (sinon, worker, patterns, etc.)
        br = classify_block_reason(t['body'], t['name'], t['category'])
        if br != 'translation-TBD':
            block_reason = br
        elif status == 'missing':
            # No concrete infra gap — categorise by why it's missing
            if t['category'] not in OUR_CATEGORIES:
                block_reason = 'unscraped-category'
            elif t['upstream_file'] in POST_SNAPSHOT_FILES:
                block_reason = 'added-post-snapshot'
            else:
                block_reason = 'unscraped-in-known-category'
        else:
            # skip-listed with no concrete gap found: keep 'translation-TBD'
            block_reason = br
    manifest.append({
        'category': t['category'],
        'name': t['name'],
        'complexity': complexity,
        'status': status,
        'block_reason': block_reason,
        'upstream_file': t['upstream_file'],
        'in_our_json': in_our_json,
    })
|
||||
|
||||
# ---------------------------------------------------------------------------
# De-duplicate (a test could be collected twice if nested describes overlap)
# ---------------------------------------------------------------------------
# Keyed on (file, category, name); first occurrence wins, order is preserved
# by dict insertion order.
_unique = {}
for entry in manifest:
    _unique.setdefault((entry['upstream_file'], entry['category'], entry['name']), entry)
manifest = list(_unique.values())
print(f"Final manifest: {len(manifest)} tests", file=sys.stderr)
|
||||
|
||||
# ---------------------------------------------------------------------------
# Write JSON
# ---------------------------------------------------------------------------
# Serialize first, then write in one shot with a trailing newline.
serialized = json.dumps(manifest, indent=2, ensure_ascii=False)
OUT_JSON.write_text(serialized + '\n')
print(f"Wrote {OUT_JSON}", file=sys.stderr)
|
||||
|
||||
# ---------------------------------------------------------------------------
# Stats + markdown
# ---------------------------------------------------------------------------
# Headline counts per status.
status_counter = Counter(entry['status'] for entry in manifest)
total = len(manifest)
runnable = status_counter['runnable']
skip_listed = status_counter['skip-listed']
missing = status_counter['missing']

# Per-category breakdown for the markdown table.
cat_stats = defaultdict(lambda: {'total': 0, 'runnable': 0, 'skip-listed': 0, 'missing': 0})
for entry in manifest:
    bucket = cat_stats[entry['category']]
    bucket['total'] += 1
    bucket[entry['status']] += 1

# Unlock tables: count missing / skip-listed tests per block reason and
# remember the first example of each reason for the tables.
missing_by_reason = Counter()
example_by_reason = {}
skip_by_reason = Counter()
skip_example = {}
for entry in manifest:
    reason = entry['block_reason'] or 'translation-TBD'
    label = f"{entry['category']} / {entry['name']}"
    if entry['status'] == 'missing':
        missing_by_reason[reason] += 1
        example_by_reason.setdefault(reason, label)
    elif entry['status'] == 'skip-listed':
        skip_by_reason[reason] += 1
        skip_example.setdefault(reason, label)
|
||||
|
||||
# Effort estimates keyed by block_reason — rendered into the markdown unlock
# tables below.  Purely informational; not read anywhere else in the script.
EFFORT = {
    'needs-sinon': 'medium — build fetch-mock shim keyed by URL/response',
    'needs-script-tag': 'medium — emit <script type="text/hyperscript"> wrapper in generator',
    'needs-dialog-api': 'low — stub showModal/close on HTMLDialogElement in fixture DOM',
    'needs-worker': 'high — Web Worker host adapter',
    'needs-eventsource': 'high — EventSource mock + streaming',
    'needs-websocket': 'high — WebSocket mock server',
    'needs-dom-mutation-observer': 'medium — hook MutationObserver into event queue',
    'needs-css-transitions': 'medium — transitionend event dispatch in fixtures',
    'needs-dom-focus': 'low — focus/blur dispatch in fixture browser',
    'needs-resize-observer': 'medium — ResizeObserver mock',
    'needs-intersection-observer': 'medium — IntersectionObserver mock',
    'needs-pattern:<sel/>': 'low — parser rule for <sel/> positional expr',
    'needs-pattern:${}': 'low — template string interpolation in HS parser',
    'needs-pattern:[@attr]': 'low — attribute-ref parser rule',
    'needs-dom': 'medium — fixture DOM extensions',
    'added-post-snapshot': 'low — re-scrape upstream, bump JSON snapshot',
    'unscraped-category': 'low — extend scraper to cover these upstream files',
    'unscraped-in-known-category': 'low — re-scrape; file was walked but these cases missed',
    'translation-TBD': 'unknown — needs case-by-case generator work',
}
|
||||
|
||||
# Render the markdown summary.  Built as a list of lines, joined once at the
# end (avoids quadratic string concatenation).
md = []
# header + headline counts
md.append("# _hyperscript v0.9.90 upstream test coverage manifest")
md.append("")
md.append(f"- Upstream tag: `v0.9.90` (commit `a13de2ca`, 2026-04-13)")
md.append(f"- Our snapshot JSON: `spec/tests/hyperscript-upstream-tests.json` (831 tests, scraped 2026-04-09)")
md.append(f"- Total upstream tests: **{total}**")
md.append(f"- Runnable (present + not skip-listed): **{runnable}** ({runnable*100/total:.1f}%)")
md.append(f"- Skip-listed (present but guarded in generator): **{skip_listed}** ({skip_listed*100/total:.1f}%)")
md.append(f"- Missing from our snapshot: **{missing}** ({missing*100/total:.1f}%)")
md.append("")
md.append(f"Current conformance: runnable / total = **{runnable}/{total} = {runnable*100/total:.1f}%**.")
md.append("")
# per-category table
md.append("## Per-category")
md.append("")
md.append("| Category | Upstream | Runnable | Skip-listed | Missing |")
md.append("|---|---:|---:|---:|---:|")
for cat in sorted(cat_stats.keys()):
    s = cat_stats[cat]
    md.append(f"| {cat} | {s['total']} | {s['runnable']} | {s['skip-listed']} | {s['missing']} |")
md.append(f"| **TOTAL** | **{total}** | **{runnable}** | **{skip_listed}** | **{missing}** |")
md.append("")
# unlock table: missing tests by block reason (examples truncated to 70 chars)
md.append("## What unlocks how many — MISSING tests by block_reason")
md.append("")
md.append("| Block reason | Missing | Example | Est. effort |")
md.append("|---|---:|---|---|")
for r, n in missing_by_reason.most_common():
    ex = example_by_reason.get(r, '')
    if len(ex) > 70: ex = ex[:67] + '...'
    md.append(f"| `{r}` | {n} | {ex} | {EFFORT.get(r, 'unknown')} |")
md.append("")
# unlock table: skip-listed tests by block reason
md.append("## Skip-listed tests by block_reason")
md.append("")
md.append("| Block reason | Skipped | Example | Est. effort |")
md.append("|---|---:|---|---|")
for r, n in skip_by_reason.most_common():
    ex = skip_example.get(r, '')
    if len(ex) > 70: ex = ex[:67] + '...'
    md.append(f"| `{r}` | {n} | {ex} | {EFFORT.get(r, 'unknown')} |")
md.append("")
# provenance / regeneration instructions
md.append("## How this was built")
md.append("")
md.append("1. `git clone --depth 1 --branch v0.9.90 https://github.com/bigskysoftware/_hyperscript /tmp/hs-upstream`; `git fetch --unshallow` for dated history.")
md.append("2. Walked `test/` (excluding `vendor/`, `manual/`, `fixtures.js`, `global-*.js`, `entry.js`, `htmx-fixtures.js`, `playwright.config.js`).")
md.append("3. For each `.js` file, a small Python parser finds every `test.describe(...)` block, then every `test(...)` within it — balanced-paren scan that ignores strings, regex literals, line/block comments.")
md.append("4. Category = filename stem (e.g. `add.js` → `add`). Test name = the first string literal argument of `test(...)`.")
md.append("5. Matched each upstream test against `spec/tests/hyperscript-upstream-tests.json` using `(category, name-normalized)` keys (whitespace-collapsed lowercase). Copied `complexity` when found; inferred it otherwise from body content (sinon./script-tag/dialog/Promise/evaluate).")
md.append("6. `status = runnable` if present and name not in generator's `SKIP_TEST_NAMES`; `skip-listed` if present and in that set; `missing` otherwise.")
md.append("7. `block_reason` classified from body content — sinon./script tag/dialog/worker/eventsource/WebSocket/MutationObserver/transition/focus/ResizeObserver patterns or `<sel/>`/`${}`/`[@attr]` HS syntax. Missing tests in files touched between 2026-04-09 and 2026-04-14 (`git log --after --before -- test/`) are tagged `added-post-snapshot`.")
md.append("8. Regenerate: `python3 /tmp/build_manifest.py`.")
md.append("")
# honesty note: 'runnable' still contains empirically-failing tests
md.append("## Untranslated caveat")
md.append("")
md.append("The markdown reports `runnable = present + not skip-listed`. Empirical baseline is 645 pass / 109 fail / 77 skip on 831 present tests. The ~109 failures are in-scope but reveal implementation gaps; they are not statically identifiable from upstream source without running the generator. This manifest therefore does not surface an `untranslated` bucket — treat the 109 empirical fails as the lower bound of that bucket inside the `runnable` count.")
md.append("")

OUT_MD.write_text('\n'.join(md))
print(f"Wrote {OUT_MD}", file=sys.stderr)
|
||||
|
||||
# ---------------------------------------------------------------------------
# Print summary
# ---------------------------------------------------------------------------
# Human-readable recap on stdout (diagnostics above went to stderr).
print()
print(f"TOTAL upstream: {total}")
print(f" runnable: {runnable}")
print(f" skip-listed: {skip_listed}")
print(f" missing: {missing}")
print(f" conformance: {runnable*100/total:.1f}%")
print()
print("Top missing by block_reason:")
for r, n in missing_by_reason.most_common(8):
    print(f" {r:35s} {n:4d} ({example_by_reason.get(r,'')[:50]})")
print()
print("Skip-listed by block_reason:")
for r, n in skip_by_reason.most_common(8):
    print(f" {r:35s} {n:4d}")
|
||||
@@ -299,9 +299,22 @@ def parse_action(action, ref):
|
||||
exprs.append(f'(dom-dispatch {ref(m.group(1))} "click" nil)')
|
||||
continue
|
||||
|
||||
m = re.match(r'(\w+)\.dispatchEvent\(new CustomEvent\("([\w:.-]+)"', part)
|
||||
m = re.match(r'(\w+)\.dispatchEvent\(new CustomEvent\("([\w:.-]+)"\s*(?:,\s*\{(.*)\})?', part)
|
||||
if m:
|
||||
exprs.append(f'(dom-dispatch {ref(m.group(1))} "{m.group(2)}" nil)')
|
||||
detail_expr = 'nil'
|
||||
body = m.group(3)
|
||||
if body:
|
||||
dm = re.search(r'detail:\s*"([^"]*)"', body)
|
||||
if dm:
|
||||
detail_expr = f'"{dm.group(1)}"'
|
||||
else:
|
||||
dm = re.search(r'detail:\s*\{([^}]*)\}', body)
|
||||
if dm:
|
||||
pairs = re.findall(r'(\w+):\s*"([^"]*)"', dm.group(1))
|
||||
if pairs:
|
||||
items = ' '.join(f':{k} "{v}"' for k, v in pairs)
|
||||
detail_expr = '{' + items + '}'
|
||||
exprs.append(f'(dom-dispatch {ref(m.group(1))} "{m.group(2)}" {detail_expr})')
|
||||
continue
|
||||
|
||||
m = re.match(r'(\w+)\.setAttribute\("([\w-]+)",\s*"([^"]*)"\)', part)
|
||||
@@ -844,13 +857,27 @@ def emit_element_setup(lines, elements, var_names, root='(dom-body)', indent='
|
||||
|
||||
|
||||
def emit_skip_test(test):
    # NOTE(review): this span is a rendered diff — the old docstring and old
    # return line (hs-cleanup!) appear interleaved with the new ones (SKIP
    # error); in the committed file only the SKIP variants survive.  As
    # rendered, the second string is a no-op expression statement and the
    # adjacent f-strings in the return concatenate.
    """Emit a trivial passing deftest for tests that depend on unimplemented
    hyperscript features. Keeps coverage in the source JSON but lets the run
    move on."""
    """Emit a deftest that raises a SKIP error for tests depending on
    unimplemented hyperscript features. The test runner records these as
    failures so the pass rate reflects real coverage — grep the run output
    for 'SKIP:' to enumerate them."""
    name = sx_name(test['name'])
    # double quotes would break out of the deftest name literal
    raw = test['name'].replace('"', "'")
    return (
        f' (deftest "{name}"\n'
        f' (hs-cleanup!))'
        f' (error "SKIP (skip-list): {raw}"))'
    )
|
||||
|
||||
|
||||
def emit_untranslatable_test(test):
    """Emit a deftest that raises a SKIP error for tests whose upstream body
    our generator could not translate to SX. Same loud-fail semantics as
    emit_skip_test; different tag so we can tell the two buckets apart."""
    name = sx_name(test['name'])
    # double quotes would break out of the deftest name literal below
    raw = test['name'].replace('"', "'")
    return (
        f' (deftest "{name}"\n'
        f' (error "SKIP (untranslated): {raw}"))'
    )
|
||||
|
||||
|
||||
@@ -1486,10 +1513,13 @@ for cat, tests in categories.items():
|
||||
output.append(sx)
|
||||
total += 1
|
||||
cat_gen += 1
|
||||
# SKIP emissions still go through generate_test() → emit_skip_test;
|
||||
# detect them here so the counter reports real coverage.
|
||||
if 'SKIP (' in sx:
|
||||
cat_stub += 1
|
||||
cat_gen -= 1
|
||||
else:
|
||||
safe_name = t['name'].replace('"', "'")
|
||||
output.append(f' (deftest "{safe_name}"')
|
||||
output.append(f' (hs-cleanup!))')
|
||||
output.append(emit_untranslatable_test(t))
|
||||
total += 1
|
||||
cat_stub += 1
|
||||
|
||||
|
||||
297
tests/playwright/scrape-hs-upstream.py
Normal file
297
tests/playwright/scrape-hs-upstream.py
Normal file
@@ -0,0 +1,297 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Scrape every test from _hyperscript v0.9.90 upstream into our JSON format.
|
||||
|
||||
Walks /tmp/hs-upstream/test/**/*.js, parses `test.describe(...)` and `test(...)`
|
||||
calls with balanced-paren scanning, extracts the arrow function body, and the
|
||||
first html(...) argument. Emits /root/rose-ash/spec/tests/hyperscript-upstream-tests.json
|
||||
in body-style Playwright format (matching existing body entries).
|
||||
"""
|
||||
import json, os, re, sys
|
||||
from collections import Counter
|
||||
from pathlib import Path
|
||||
|
||||
# Input/output locations for the scraper (absolute: one-shot script).
HS_ROOT = Path('/tmp/hs-upstream')    # clone of the _hyperscript v0.9.90 tag
TEST_ROOT = HS_ROOT / 'test'          # upstream Playwright test tree
OUT_JSON = Path('/root/rose-ash/spec/tests/hyperscript-upstream-tests.json')                   # scrape target (overwritten)
BACKUP = Path('/root/rose-ash/spec/tests/hyperscript-upstream-tests.pre-0.9.90.json')          # prior 831-test snapshot backup

# Infrastructure files that define fixtures/config, not test() calls.
SKIP_FILES = {'fixtures.js', 'global-setup.js', 'global-teardown.js',
              'entry.js', 'htmx-fixtures.js', 'playwright.config.js'}
|
||||
|
||||
# --- tokeniser-ish balanced-paren scanner -----------------------------------
|
||||
|
||||
def parse_string_literal(src, i):
    """src[i] must be quote; return (value, next_i).

    Handles template literals: a `${...}` inside a backtick string is kept
    in the value as '${' plus the raw interpolation characters, except that
    nested string literals inside the interpolation are skipped (their
    content is dropped from the value).
    """
    q = src[i]
    n = len(src)
    out = []
    pos = i + 1
    while pos < n:
        c = src[pos]
        if c == q:
            return ''.join(out), pos + 1
        if c == '\\':
            follower = src[pos + 1] if pos + 1 < n else ''
            # decode the common escapes; unknown escapes drop the backslash
            out.append({'n': '\n', 't': '\t', 'r': '\r', '\\': '\\', q: q}.get(follower, follower))
            pos += 2
        elif q == '`' and c == '$' and pos + 1 < n and src[pos + 1] == '{':
            # template interpolation — copy chars through the balanced
            # closing brace, skipping any nested string literals
            out.append('${')
            pos += 2
            depth = 1
            while pos < n and depth > 0:
                cc = src[pos]
                if cc in ('"', "'", '`'):
                    _, pos = parse_string_literal(src, pos)
                    continue
                if cc == '{':
                    depth += 1
                elif cc == '}':
                    depth -= 1
                out.append(cc)
                pos += 1
        else:
            out.append(c)
            pos += 1
    raise ValueError("unterminated string")
|
||||
|
||||
def skip_comment_or_regex(src, i):
    """If src[i:] starts a // comment, /* block */ comment, or a regex
    literal, return the index just past it; otherwise return None.

    Regex detection is heuristic: a '/' opens a regex only when the
    preceding non-space character is operator-ish (a context where a
    division cannot appear); otherwise it is treated as division.
    """
    if src[i] != '/' or i + 1 >= len(src):
        return None
    nxt = src[i + 1]
    if nxt == '/':
        # Line comment: consume through the newline (or to EOF).
        j = src.find('\n', i)
        return len(src) if j == -1 else j + 1
    if nxt == '*':
        # Block comment. Search for the terminator *after* the opener so
        # that '/*/' is not mistaken for a complete comment ('/**/' is the
        # shortest valid block comment).
        j = src.find('*/', i + 2)
        return len(src) if j == -1 else j + 2
    # Regex heuristic: preceding non-space char must be operator-ish.
    k = i - 1
    while k >= 0 and src[k].isspace():
        k -= 1
    prev = src[k] if k >= 0 else ''
    if prev and prev not in '(,;=!?&|:+-*/<>%^~{[\n':
        # Not a regex context — looks like division.
        return None
    # Scan the regex body: honor escapes and character classes, stop at the
    # closing '/', then swallow any trailing flags (g, i, m, ...).
    j = i + 1
    while j < len(src):
        cc = src[j]
        if cc == '\\':
            j += 2
            continue
        if cc == '[':
            j += 1
            while j < len(src) and src[j] != ']':
                if src[j] == '\\':
                    j += 2
                else:
                    j += 1
            if j < len(src):
                j += 1
            continue
        if cc == '/':
            j += 1
            while j < len(src) and src[j].isalpha():
                j += 1
            return j
        if cc == '\n':
            # Regex literals cannot span lines — bail out.
            return None
        j += 1
    return None
|
||||
|
||||
def find_matching(src, start, open_c='(', close_c=')'):
    """Return the index of the close_c balancing the open_c at *start*.

    String literals, comments and regex literals are skipped so delimiters
    inside them never affect the nesting depth. Returns -1 when no match is
    found or when a string literal is unterminated.
    """
    depth = 0
    pos = start
    n = len(src)
    while pos < n:
        ch = src[pos]
        if ch in '"\'`':
            try:
                _, pos = parse_string_literal(src, pos)
            except ValueError:
                return -1
            continue
        skipped = skip_comment_or_regex(src, pos)
        if skipped is not None:
            pos = skipped
            continue
        if ch == open_c:
            depth += 1
        elif ch == close_c:
            depth -= 1
            if depth == 0:
                return pos
        pos += 1
    return -1
|
||||
|
||||
# --- test extraction --------------------------------------------------------
|
||||
|
||||
def extract_arrow_body(call_src):
    """Extract the `{...}` body of the arrow function in a test(...) call.

    *call_src* is the full parenthesised argument list of test(name, fn).
    Returns the text between the braces, or None when no brace-bodied arrow
    function can be located (e.g. an expression-bodied arrow).
    """
    arrow_at = call_src.find('=>')
    if arrow_at == -1:
        return None
    # Step past '=>' and any whitespace to reach the opening brace.
    brace = arrow_at + 2
    while brace < len(call_src) and call_src[brace].isspace():
        brace += 1
    if brace >= len(call_src) or call_src[brace] != '{':
        # No braces after the arrow — nothing to extract.
        return None
    close = find_matching(call_src, brace, '{', '}')
    if close == -1:
        return None
    # Body is returned verbatim (indentation intact).
    return call_src[brace + 1:close]
|
||||
|
||||
def extract_first_html(body):
    """Return the literal string passed to the first html(...) call in *body*.

    Handles html("x"), html(`x`), and concatenations of string literals like
    html("x" + "y"). Returns '' when no html(...) call is found or when its
    argument is not a pure string expression.
    """
    call = re.search(r'\bhtml\s*\(', body)
    if not call:
        return ''
    lparen = call.end() - 1
    rparen = find_matching(body, lparen, '(', ')')
    if rparen == -1:
        return ''
    arg_src = body[lparen + 1:rparen].strip()
    # Accept only string literals joined by '+'; anything else bails out.
    pieces = []
    pos = 0
    while pos < len(arg_src):
        ch = arg_src[pos]
        if ch.isspace() or ch == '+':
            pos += 1
            continue
        if ch not in '"\'`':
            # Not a pure string concatenation — give up.
            return ''
        try:
            piece, pos = parse_string_literal(arg_src, pos)
        except ValueError:
            return ''
        pieces.append(piece)
    return ''.join(pieces)
|
||||
|
||||
def extract_tests_from_file(path, rel_category):
    """Extract every test(...) call in *path* as a record dict.

    Records carry category, name, first html(...) fixture, raw body text,
    an async flag, and a complexity bucket (see classify_complexity).
    Calls whose first argument is not a string literal, or whose callback
    is not a brace-bodied arrow function, are skipped.
    """
    src = path.read_text()
    # Bare `test(` calls only: the lookbehind rejects `test.describe(`,
    # `test.skip(`, member access (`x.test(`) and identifiers merely ending
    # in "test". Compiled once and searched with an explicit start position
    # (Pattern.search(src, i)) so the lookbehind can still inspect the
    # character *before* the match — searching a slice src[i:] would blind
    # it at the slice boundary.
    test_call = re.compile(r'(?<![a-zA-Z0-9_$.])test\s*\(')
    tests = []
    i = 0
    while i < len(src):
        m = test_call.search(src, i)
        if not m:
            break
        abs_paren = m.end() - 1  # index of the opening '('
        # First argument must be a string literal: the test name.
        j = abs_paren + 1
        while j < len(src) and src[j].isspace():
            j += 1
        if j >= len(src) or src[j] not in ('"', "'", '`'):
            i = abs_paren + 1
            continue
        try:
            tname, _ = parse_string_literal(src, j)
        except ValueError:
            i = abs_paren + 1
            continue
        endp = find_matching(src, abs_paren, '(', ')')
        if endp == -1:
            i = abs_paren + 1
            continue
        call_src = src[abs_paren:endp + 1]
        body = extract_arrow_body(call_src)
        if body is None:
            i = endp + 1
            continue
        html = extract_first_html(body)
        tests.append({
            'category': rel_category,
            'name': tname,
            'html': html,
            'body': body,
            'async': True,
            'complexity': classify_complexity(body),
        })
        i = endp + 1
    return tests
|
||||
|
||||
def classify_complexity(body):
    """Bucket a test body by the translation machinery it would need.

    Buckets, checked in priority order: 'sinon' (mock library usage),
    'script-tag' (inline hyperscript/hypertemplate <script> blocks),
    'dialog' (modal dialog usage), 'promise' (explicit Promise plumbing),
    'eval-only' / 'run-eval' (no html() fixture, driven via evaluate()
    or run()), and 'simple' for everything else.
    """
    if 'sinon.' in body:
        return 'sinon'
    script_markers = (
        '<script type="text/hyperscript"',
        "<script type='text/hyperscript'",
        '<script type="text/hypertemplate"',
        "<script type='text/hypertemplate'",
    )
    if any(marker in body for marker in script_markers):
        return 'script-tag'
    if 'showModal' in body or '<dialog' in body.lower():
        return 'dialog'
    if 'new Promise' in body or '.resolves' in body or 'Promise.' in body:
        return 'promise'
    if 'html(' not in body:
        if '_hyperscript.evaluate' in body or re.search(r'\bevaluate\s*\(', body):
            return 'eval-only'
        if re.search(r'\brun\s*\(', body):
            return 'run-eval'
    return 'simple'
|
||||
|
||||
# --- main -------------------------------------------------------------------
|
||||
|
||||
def rel_category(path):
    """Map a test file path under TEST_ROOT to its category key.

    test/commands/foo.js and test/features/foo.js flatten to plain 'foo'
    (those two directories hold the bulk of the suite); everything else
    keeps its directory prefix: test/core/api.js -> 'core/api', with deeper
    nesting joining every directory component plus the stem.
    """
    parts = path.relative_to(TEST_ROOT).parts
    stem = path.stem
    if len(parts) == 1:
        # Top-level file — not expected (tests live in subdirs), but
        # degrade gracefully.
        return stem
    if parts[0] in ('commands', 'features'):
        return stem
    if len(parts) == 2:
        # Single subdir, e.g. core/api.js -> 'core/api'.
        return f'{parts[0]}/{stem}'
    # Deeper nesting: all directory components plus the stem.
    return '/'.join(parts[:-1] + (stem,))
|
||||
|
||||
def main():
    """Scrape the upstream suite and write the body-style JSON manifest."""
    # Preserve the previous snapshot exactly once; never clobber an
    # existing backup.
    if OUT_JSON.exists() and not BACKUP.exists():
        import shutil
        shutil.copy2(OUT_JSON, BACKUP)
        print(f'Backed up existing JSON to {BACKUP}', file=sys.stderr)

    collected = []
    scanned = 0
    excluded_dirs = ('vendor', 'node_modules', 'manual')
    for js_file in sorted(TEST_ROOT.rglob('*.js')):
        if js_file.name in SKIP_FILES:
            continue
        if any(part in excluded_dirs for part in js_file.parts):
            continue
        collected.extend(extract_tests_from_file(js_file, rel_category(js_file)))
        scanned += 1

    # Dedup by (category, name) — first occurrence wins — then sort for a
    # stable output file.
    unique = {}
    for record in collected:
        unique.setdefault((record['category'], record['name']), record)
    records = sorted(unique.values(), key=lambda r: (r['category'], r['name']))

    # Per-category stats on stdout.
    per_category = Counter(r['category'] for r in records)
    print(f'Scanned {scanned} files, extracted {len(collected)} tests ({len(records)} unique)')
    print(f'Categories: {len(per_category)}')
    for cat, n in per_category.most_common():
        print(f'  {cat:40s} {n:4d}')

    with OUT_JSON.open('w') as out:
        json.dump(records, out, indent=2, ensure_ascii=False)
        out.write('\n')
    print(f'\nWrote {OUT_JSON} ({len(records)} tests)')


if __name__ == '__main__':
    main()
|
||||
Reference in New Issue
Block a user