#!/usr/bin/env python3
"""Extract _hyperscript upstream tests into spec/tests/hyperscript-upstream-tests.json.

Walks /tmp/hs-upstream/test/**/*.js, finds every test('name', ...) call, extracts:
  - category from file path (test/core/tokenizer.js -> "core/tokenizer")
  - name from the first argument
  - body from the arrow-function body (between the outer { and })
  - async from whether the arrow function is async
  - complexity heuristic: eval-only / event-driven / dom

Output: spec/tests/hyperscript-upstream-tests.json (merged by default; pass
--replace to discard the existing snapshot and use raw upstream bodies).

Run after:
    cd /tmp && git clone --depth 1 https://github.com/bigskysoftware/_hyperscript hs-upstream
"""
import json
import re
import sys
from pathlib import Path

UPSTREAM = Path('/tmp/hs-upstream/test')
OUT = Path(__file__).parent.parent / 'spec/tests/hyperscript-upstream-tests.json'

# Upstream harness/config files that contain no extractable test() calls.
EXCLUDED_FILES = {
    'fixtures.js', 'entry.js', 'global-setup.js', 'global-teardown.js',
    'htmx-fixtures.js', 'playwright.config.js',
}


def find_matching_brace(src, open_idx):
    """Return the index of the close brace matching the ``{`` at *open_idx*.

    Skips braces inside string literals (single-, double-, and backtick-quoted,
    including nested ``${...}`` template interpolations) and inside line and
    block comments.

    Raises:
        ValueError: if the brace is never balanced before end of input.
    """
    assert src[open_idx] == '{'
    depth = 0
    i = open_idx
    n = len(src)
    while i < n:
        c = src[i]
        if c == '{':
            depth += 1
        elif c == '}':
            depth -= 1
            if depth == 0:
                return i
        elif c == '"' or c == "'" or c == '`':
            # Skip over the entire string literal so braces inside it
            # don't affect the depth count.
            quote = c
            i += 1
            while i < n and src[i] != quote:
                if src[i] == '\\':
                    i += 2  # escaped char: skip both the backslash and it
                    continue
                if quote == '`' and src[i] == '$' and i + 1 < n and src[i+1] == '{':
                    # Template-literal interpolation: recurse to skip its
                    # (possibly nested) braces.
                    nested = find_matching_brace(src, i + 1)
                    i = nested + 1
                    continue
                i += 1
        elif c == '/' and i + 1 < n:
            nxt = src[i+1]
            if nxt == '/':
                # Line comment: skip to end of line.
                while i < n and src[i] != '\n':
                    i += 1
                continue
            elif nxt == '*':
                # Block comment: skip to the closing */.
                i += 2
                while i < n - 1 and not (src[i] == '*' and src[i+1] == '/'):
                    i += 1
                i += 1
        i += 1
    raise ValueError(f"unbalanced brace at {open_idx}")


def extract_tests(src, category):
    """Find ``test('name', async/non-async (args) => { body })`` patterns.

    Args:
        src: raw JS source of one upstream test file.
        category: category string derived from the file path.

    Returns:
        A list of dicts with category/name/html/body/async/complexity keys.
        The ``html`` field is always empty here; curated snapshots fill it in.
    """
    tests = []
    test_re = re.compile(r"\btest\s*\(\s*(['\"])((?:[^\\]|\\.)*?)\1\s*,\s*(async\s+)?(\([^)]*\))\s*=>\s*\{")
    for m in test_re.finditer(src):
        name = m.group(2)
        # Unescape quotes/backslashes in the test name.
        name = name.replace("\\'", "'").replace('\\"', '"').replace('\\\\', '\\')
        is_async = m.group(3) is not None
        # The regex ends on the opening brace, so m.end() - 1 is its index.
        body_open = src.index('{', m.end() - 1)
        try:
            body_close = find_matching_brace(src, body_open)
        except ValueError:
            continue  # unbalanced body: skip this test rather than crash
        body = src[body_open + 1:body_close]
        # Heuristic complexity classification; event-driven overrides dom.
        complexity = 'eval-only'
        if 'html(' in body or 'find(' in body:
            complexity = 'dom'
        if 'click(' in body or 'dispatch' in body:
            complexity = 'event-driven'
        tests.append({
            'category': category,
            'name': name,
            'html': '',
            'body': body,
            'async': is_async,
            'complexity': complexity,
        })
    return tests


def _write_snapshot(tests):
    """Serialize *tests* to OUT.

    ensure_ascii=False keeps literal unicode characters (arrows, em-dashes)
    in test bodies instead of \\uXXXX escapes, so re-running the extractor
    does not churn the snapshot diff.
    """
    OUT.write_text(json.dumps(tests, indent=2, ensure_ascii=False), encoding='utf-8')


def main():
    """Extract upstream tests, report the delta vs the snapshot, write OUT.

    Returns:
        Process exit code: 0 on success, 1 when the upstream clone is missing.
    """
    if not UPSTREAM.exists():
        print(f"ERROR: {UPSTREAM} not found. Clone first:")
        print("  git clone --depth 1 https://github.com/bigskysoftware/_hyperscript /tmp/hs-upstream")
        return 1

    merge_mode = '--replace' not in sys.argv

    all_tests = []
    skipped_files = []
    scanned = 0  # files actually parsed (excludes harness files)

    for path in sorted(UPSTREAM.rglob('*.js')):
        if path.name in EXCLUDED_FILES:
            continue
        scanned += 1

        rel = path.relative_to(UPSTREAM)
        category = str(rel.with_suffix('')).replace('\\', '/')
        # commands/foo and features/foo both collapse to plain "foo".
        for prefix in ('commands/', 'features/'):
            if category.startswith(prefix):
                category = category[len(prefix):]
                break

        try:
            src = path.read_text(encoding='utf-8')
        except Exception as e:
            skipped_files.append((path, str(e)))
            continue

        all_tests.extend(extract_tests(src, category))

    print(f"Extracted {len(all_tests)} tests from {scanned} files")
    if skipped_files:
        print(f"Skipped {len(skipped_files)} files due to errors")

    if not OUT.exists():
        _write_snapshot(all_tests)
        print(f"\nWrote {OUT} (no existing snapshot)")
        return 0

    old = json.loads(OUT.read_text(encoding='utf-8'))
    old_by_key = {(t['category'], t['name']): t for t in old}
    new_keys = {(t['category'], t['name']) for t in all_tests}
    old_keys = set(old_by_key)
    added_keys = new_keys - old_keys
    removed_keys = old_keys - new_keys

    print(f"\nDelta vs existing snapshot ({len(old)} tests):")
    print(f"  +{len(added_keys)} new")
    print(f"  -{len(removed_keys)} removed/renamed")
    if added_keys:
        print("\nNew tests:")
        for cat, name in sorted(added_keys):
            print(f"  [{cat}] {name}")
    if removed_keys:
        print("\nRemoved/renamed tests (first 20):")
        for cat, name in sorted(removed_keys)[:20]:
            print(f"  [{cat}] {name}")

    if merge_mode:
        # Merge mode (default): preserve existing test bodies, only add new tests.
        # The old snapshot's bodies were curated/cleaned — re-extracting from raw
        # upstream JS produces slightly different bodies that may not auto-translate.
        # New tests get the raw extracted body; existing tests keep theirs.
        new_by_key = {(t['category'], t['name']): t for t in all_tests}
        merged = list(old)  # preserves original order
        for k in sorted(added_keys):
            merged.append(new_by_key[k])
        _write_snapshot(merged)
        print(f"\nMerged: {len(merged)} tests ({len(old)} existing + {len(added_keys)} new) → {OUT}")
        print("  (rerun with --replace to discard old bodies and use raw upstream)")
    else:
        _write_snapshot(all_tests)
        print(f"\nReplaced: {len(all_tests)} tests → {OUT}")
    return 0


if __name__ == '__main__':
    raise SystemExit(main())
li').count()).toBe(3)\n\t\t// Verify initial scope: clicking C should show \"2:C\"\n\t\tawait find('[data-live-template] li').last().click()\n\t\tawait expect(find('[data-live-template] li').last()).toHaveText('2:C')\n\t\t// Remove B \u2014 C shifts from index 2 to index 1\n\t\tawait run(\"call $morphItems.splice(1, 1)\")\n\t\tawait expect.poll(() => find('[data-live-template] li').count()).toBe(2)\n\t\t// After morph, C's scope should be refreshed: now \"1:C\"\n\t\tawait find('[data-live-template] li').last().click()\n\t\tawait expect(find('[data-live-template] li').last()).toHaveText('1:C')\n\t", "async": true, "complexity": "simple" }, @@ -1369,7 +1369,7 @@ }, { "category": "core/reactivity", - "name": "NaN → NaN does not retrigger handlers (Object.is semantics)", + "name": "NaN \u2192 NaN does not retrigger handlers (Object.is semantics)", "html": "
", "body": "\n\t\tawait evaluate(() => { window.$rxNanCount = 0; window.$rxNanVal = NaN })\n\t\tawait html(``)\n\t\t// Initial evaluate should not fire handler because NaN is \"null-ish\" in _lastValue init?\n\t\t// It actually DOES fire (initialize sees non-null). Snapshot and compare.\n\t\tvar initial = await evaluate(() => window.$rxNanCount)\n\n\t\tawait run(\"set $rxNanVal to NaN\")\n\t\t// Give the microtask a chance to run\n\t\tawait evaluate(() => new Promise(r => setTimeout(r, 20)))\n\t\texpect(await evaluate(() => window.$rxNanCount)).toBe(initial)\n\n\t\t// But changing to a real number should fire\n\t\tawait run(\"set $rxNanVal to 42\")\n\t\tawait expect.poll(() => evaluate(() => window.$rxNanCount)).toBe(initial + 1)\n\n\t\tawait evaluate(() => { delete window.$rxNanCount; delete window.$rxNanVal })\n\t", "async": true, @@ -1379,7 +1379,7 @@ "category": "core/reactivity", "name": "effect switches its dependencies based on control flow", "html": "", - "body": "\n\t\tawait evaluate(() => {\n\t\t\twindow.$rxCond = true\n\t\t\twindow.$rxA = 'from-a'\n\t\t\twindow.$rxB = 'from-b'\n\t\t})\n\t\tawait html(\n\t\t\t``\n\t\t)\n\t\tawait expect(find('div')).toHaveText('from-a')\n\n\t\t// While cond is true, changing $rxB should NOT retrigger\n\t\tawait run(\"set $rxB to 'ignored'\")\n\t\tawait evaluate(() => new Promise(r => setTimeout(r, 20)))\n\t\tawait expect(find('div')).toHaveText('from-a')\n\n\t\t// Switch cond → effect now depends on $rxB\n\t\tawait run(\"set $rxCond to false\")\n\t\tawait expect.poll(() => find('div').textContent()).toBe('ignored')\n\n\t\t// Now $rxA changes should be ignored, $rxB changes should fire\n\t\tawait run(\"set $rxA to 'a-ignored'\")\n\t\tawait evaluate(() => new Promise(r => setTimeout(r, 20)))\n\t\tawait expect(find('div')).toHaveText('ignored')\n\n\t\tawait run(\"set $rxB to 'new-b'\")\n\t\tawait expect.poll(() => find('div').textContent()).toBe('new-b')\n\n\t\tawait evaluate(() => {\n\t\t\tdelete window.$rxCond; delete 
window.$rxA; delete window.$rxB\n\t\t})\n\t", + "body": "\n\t\tawait evaluate(() => {\n\t\t\twindow.$rxCond = true\n\t\t\twindow.$rxA = 'from-a'\n\t\t\twindow.$rxB = 'from-b'\n\t\t})\n\t\tawait html(\n\t\t\t``\n\t\t)\n\t\tawait expect(find('div')).toHaveText('from-a')\n\n\t\t// While cond is true, changing $rxB should NOT retrigger\n\t\tawait run(\"set $rxB to 'ignored'\")\n\t\tawait evaluate(() => new Promise(r => setTimeout(r, 20)))\n\t\tawait expect(find('div')).toHaveText('from-a')\n\n\t\t// Switch cond \u2192 effect now depends on $rxB\n\t\tawait run(\"set $rxCond to false\")\n\t\tawait expect.poll(() => find('div').textContent()).toBe('ignored')\n\n\t\t// Now $rxA changes should be ignored, $rxB changes should fire\n\t\tawait run(\"set $rxA to 'a-ignored'\")\n\t\tawait evaluate(() => new Promise(r => setTimeout(r, 20)))\n\t\tawait expect(find('div')).toHaveText('ignored')\n\n\t\tawait run(\"set $rxB to 'new-b'\")\n\t\tawait expect.poll(() => find('div').textContent()).toBe('new-b')\n\n\t\tawait evaluate(() => {\n\t\t\tdelete window.$rxCond; delete window.$rxA; delete window.$rxB\n\t\t})\n\t", "async": true, "complexity": "promise" }, @@ -5203,7 +5203,7 @@ "category": "expressions/not", "name": "not has higher precedence than and", "html": "", - "body": "\n\t\t// (not false) and true → true and true → true\n\t\texpect(await run(\"not false and true\")).toBe(true)\n\t\t// (not true) and true → false and true → false\n\t\texpect(await run(\"not true and true\")).toBe(false)\n\t", + "body": "\n\t\t// (not false) and true \u2192 true and true \u2192 true\n\t\texpect(await run(\"not false and true\")).toBe(true)\n\t\t// (not true) and true \u2192 false and true \u2192 false\n\t\texpect(await run(\"not true and true\")).toBe(false)\n\t", "async": true, "complexity": "run-eval" }, @@ -5211,7 +5211,7 @@ "category": "expressions/not", "name": "not has higher precedence than or", "html": "", - "body": "\n\t\t// (not true) or true → false or true → true\n\t\texpect(await 
run(\"not true or true\")).toBe(true)\n\t\t// (not false) or false → true or false → true\n\t\texpect(await run(\"not false or false\")).toBe(true)\n\t", + "body": "\n\t\t// (not true) or true \u2192 false or true \u2192 true\n\t\texpect(await run(\"not true or true\")).toBe(true)\n\t\t// (not false) or false \u2192 true or false \u2192 true\n\t\texpect(await run(\"not false or false\")).toBe(true)\n\t", "async": true, "complexity": "run-eval" }, @@ -11966,5 +11966,149 @@ "body": "\n\t\t// The core bundle only ships a stub; the actual worker plugin is\n\t\t// a separate ext that must be loaded. Without it, parsing should\n\t\t// fail with a message pointing the user to the docs.\n\t\tconst msg = await error(\"worker MyWorker def noop() end end\")\n\t\texpect(msg).toContain('worker plugin')\n\t\texpect(msg).toContain('hyperscript.org/features/worker')\n\t", "async": true, "complexity": "simple" + }, + { + "category": "core/tokenizer", + "name": "clearFollows/restoreFollows round-trip the follow set", + "html": "", + "body": "\n\t\tconst results = await evaluate(() => {\n\t\t\tconst t = _hyperscript.internals.tokenizer;\n\t\t\tconst tokens = t.tokenize(\"and and and\");\n\t\t\ttokens.pushFollow(\"and\");\n\t\t\tconst saved = tokens.clearFollows();\n\t\t\tconst allowedWhileCleared = tokens.matchToken(\"and\")?.value ?? null;\n\t\t\ttokens.restoreFollows(saved);\n\t\t\tconst blockedAfterRestore = tokens.matchToken(\"and\") ?? 
null;\n\t\t\treturn {allowedWhileCleared, blockedAfterRestore};\n\t\t});\n\t\texpect(results.allowedWhileCleared).toBe(\"and\");\n\t\texpect(results.blockedAfterRestore).toBeNull();\n\t", + "async": true, + "complexity": "eval-only" + }, + { + "category": "core/tokenizer", + "name": "consumeUntil collects tokens up to a marker", + "html": "", + "body": "\n\t\tconst results = await evaluate(() => {\n\t\t\tconst t = _hyperscript.internals.tokenizer;\n\t\t\tconst tokens = t.tokenize(\"a b c end d\");\n\t\t\t// consumeUntil collects every intervening token, whitespace included\n\t\t\tconst collected = tokens.consumeUntil(\"end\")\n\t\t\t\t.filter(tok => tok.type !== \"WHITESPACE\")\n\t\t\t\t.map(tok => tok.value);\n\t\t\tconst landed = tokens.currentToken().value;\n\t\t\treturn {collected, landed};\n\t\t});\n\t\texpect(results.collected).toEqual([\"a\", \"b\", \"c\"]);\n\t\texpect(results.landed).toBe(\"end\");\n\t", + "async": true, + "complexity": "eval-only" + }, + { + "category": "core/tokenizer", + "name": "consumeUntilWhitespace stops at first whitespace", + "html": "", + "body": "\n\t\tconst results = await evaluate(() => {\n\t\t\tconst t = _hyperscript.internals.tokenizer;\n\t\t\tconst tokens = t.tokenize(\"foo.bar more\");\n\t\t\tconst collected = tokens.consumeUntilWhitespace().map(tok => tok.value);\n\t\t\tconst landed = tokens.currentToken().value;\n\t\t\treturn {collected, landed};\n\t\t});\n\t\t// consumeUntilWhitespace stops at the space between foo.bar and more\n\t\texpect(results.collected).toEqual([\"foo\", \".\", \"bar\"]);\n\t\texpect(results.landed).toBe(\"more\");\n\t", + "async": true, + "complexity": "eval-only" + }, + { + "category": "core/tokenizer", + "name": "lastMatch returns the last consumed token", + "html": "", + "body": "\n\t\tconst results = await evaluate(() => {\n\t\t\tconst t = _hyperscript.internals.tokenizer;\n\t\t\tconst tokens = t.tokenize(\"foo bar baz\");\n\t\t\tconst r = {};\n\t\t\tr.before = tokens.lastMatch() ?? 
null;\n\t\t\ttokens.consumeToken();\n\t\t\tr.afterFoo = tokens.lastMatch()?.value ?? null;\n\t\t\ttokens.consumeToken();\n\t\t\tr.afterBar = tokens.lastMatch()?.value ?? null;\n\t\t\treturn r;\n\t\t});\n\t\texpect(results.before).toBeNull();\n\t\texpect(results.afterFoo).toBe(\"foo\");\n\t\texpect(results.afterBar).toBe(\"bar\");\n\t", + "async": true, + "complexity": "eval-only" + }, + { + "category": "core/tokenizer", + "name": "lastWhitespace reflects whitespace before the current token", + "html": "", + "body": "\n\t\tconst results = await evaluate(() => {\n\t\t\tconst t = _hyperscript.internals.tokenizer;\n\t\t\tconst tokens = t.tokenize(\"foo bar\\n\\tbaz\");\n\t\t\tconst r = {};\n\t\t\t// Before any consume, no whitespace has been consumed yet\n\t\t\tr.initial = tokens.lastWhitespace();\n\t\t\ttokens.consumeToken(); // foo \u2192 consumes trailing whitespace \" \"\n\t\t\tr.afterFoo = tokens.lastWhitespace();\n\t\t\ttokens.consumeToken(); // bar \u2192 consumes \"\\n\\t\"\n\t\t\tr.afterBar = tokens.lastWhitespace();\n\t\t\treturn r;\n\t\t});\n\t\texpect(results.initial).toBe(\"\");\n\t\texpect(results.afterFoo).toBe(\" \");\n\t\texpect(results.afterBar).toBe(\"\\n\\t\");\n\t", + "async": true, + "complexity": "eval-only" + }, + { + "category": "core/tokenizer", + "name": "matchAnyToken and matchAnyOpToken try each option", + "html": "", + "body": "\n\t\tconst results = await evaluate(() => {\n\t\t\tconst t = _hyperscript.internals.tokenizer;\n\t\t\tconst tokens = t.tokenize(\"bar + baz\");\n\t\t\treturn {\n\t\t\t\tanyTok: tokens.matchAnyToken(\"foo\", \"bar\", \"baz\")?.value ?? null,\n\t\t\t\tanyOp: tokens.matchAnyOpToken(\"-\", \"+\")?.value ?? null,\n\t\t\t\tanyTokMiss: tokens.matchAnyToken(\"foo\", \"quux\") ?? 
null,\n\t\t\t};\n\t\t});\n\t\texpect(results.anyTok).toBe(\"bar\");\n\t\texpect(results.anyOp).toBe(\"+\");\n\t\texpect(results.anyTokMiss).toBeNull();\n\t", + "async": true, + "complexity": "eval-only" + }, + { + "category": "core/tokenizer", + "name": "matchOpToken matches operators by value", + "html": "", + "body": "\n\t\tconst results = await evaluate(() => {\n\t\t\tconst t = _hyperscript.internals.tokenizer;\n\t\t\tconst tokens = t.tokenize(\"+ - *\");\n\t\t\treturn [\n\t\t\t\ttokens.matchOpToken(\"-\") ?? null, // next is +, miss\n\t\t\t\ttokens.matchOpToken(\"+\")?.value ?? null,\n\t\t\t\ttokens.matchOpToken(\"-\")?.value ?? null,\n\t\t\t\ttokens.matchOpToken(\"*\")?.value ?? null,\n\t\t\t];\n\t\t});\n\t\texpect(results[0]).toBeNull();\n\t\texpect(results[1]).toBe(\"+\");\n\t\texpect(results[2]).toBe(\"-\");\n\t\texpect(results[3]).toBe(\"*\");\n\t", + "async": true, + "complexity": "eval-only" + }, + { + "category": "core/tokenizer", + "name": "matchToken consumes and returns on match", + "html": "", + "body": "\n\t\tconst results = await evaluate(() => {\n\t\t\tconst t = _hyperscript.internals.tokenizer;\n\t\t\tconst tokens = t.tokenize(\"foo bar baz\");\n\t\t\tconst r = {};\n\t\t\tr.match = tokens.matchToken(\"foo\")?.value ?? null;\n\t\t\tr.miss = tokens.matchToken(\"baz\") ?? null; // next is \"bar\", miss\n\t\t\tr.next = tokens.currentToken().value;\n\t\t\tr.match2 = tokens.matchToken(\"bar\")?.value ?? 
null;\n\t\t\treturn r;\n\t\t});\n\t\texpect(results.match).toBe(\"foo\");\n\t\texpect(results.miss).toBeNull();\n\t\texpect(results.next).toBe(\"bar\");\n\t\texpect(results.match2).toBe(\"bar\");\n\t", + "async": true, + "complexity": "eval-only" + }, + { + "category": "core/tokenizer", + "name": "matchToken honors the follow set", + "html": "", + "body": "\n\t\tconst results = await evaluate(() => {\n\t\t\tconst t = _hyperscript.internals.tokenizer;\n\t\t\tconst tokens = t.tokenize(\"and then\");\n\t\t\ttokens.pushFollow(\"and\");\n\t\t\tconst blocked = tokens.matchToken(\"and\") ?? null;\n\t\t\ttokens.popFollow();\n\t\t\tconst allowed = tokens.matchToken(\"and\")?.value ?? null;\n\t\t\treturn {blocked, allowed};\n\t\t});\n\t\texpect(results.blocked).toBeNull();\n\t\texpect(results.allowed).toBe(\"and\");\n\t", + "async": true, + "complexity": "eval-only" + }, + { + "category": "core/tokenizer", + "name": "matchTokenType matches by type", + "html": "", + "body": "\n\t\tconst results = await evaluate(() => {\n\t\t\tconst t = _hyperscript.internals.tokenizer;\n\t\t\tconst tokens = t.tokenize(\"foo 42\");\n\t\t\tconst r = {};\n\t\t\tr.ident = tokens.matchTokenType(\"IDENTIFIER\")?.value ?? null;\n\t\t\tr.numMiss = tokens.matchTokenType(\"STRING\") ?? null;\n\t\t\tr.numOneOf = tokens.matchTokenType(\"STRING\", \"NUMBER\")?.value ?? null;\n\t\t\treturn r;\n\t\t});\n\t\texpect(results.ident).toBe(\"foo\");\n\t\texpect(results.numMiss).toBeNull();\n\t\texpect(results.numOneOf).toBe(\"42\");\n\t", + "async": true, + "complexity": "eval-only" + }, + { + "category": "core/tokenizer", + "name": "peekToken skips whitespace when looking ahead", + "html": "", + "body": "\n\t\tconst results = await evaluate(() => {\n\t\t\tconst t = _hyperscript.internals.tokenizer;\n\t\t\tconst r = {};\n\n\t\t\t// for x in items \u2192 tokens are: for, WS, x, WS, in, WS, items\n\t\t\tconst forIn = t.tokenize(\"for x in items\");\n\t\t\tr.peek0 = forIn.peekToken(\"for\", 0)?.value ?? 
null;\n\t\t\tr.peek1 = forIn.peekToken(\"x\", 1)?.value ?? null;\n\t\t\tr.peek2 = forIn.peekToken(\"in\", 2)?.value ?? null;\n\t\t\tr.peek3 = forIn.peekToken(\"items\", 3)?.value ?? null;\n\n\t\t\t// peek that shouldn't match\n\t\t\tr.peekMiss = forIn.peekToken(\"in\", 1) ?? null;\n\n\t\t\t// for 10ms \u2014 \"in\" is never present\n\t\t\tconst forDur = t.tokenize(\"for 10ms\");\n\t\t\tr.durPeek2 = forDur.peekToken(\"in\", 2) ?? null;\n\n\t\t\t// Extra whitespace between tokens is tolerated\n\t\t\tconst extraWs = t.tokenize(\"for x in items\");\n\t\t\tr.extraPeek2 = extraWs.peekToken(\"in\", 2)?.value ?? null;\n\n\t\t\t// Comments between tokens are tolerated\n\t\t\tconst withComment = t.tokenize(\"for -- comment\\nx in items\");\n\t\t\tr.commentPeek2 = withComment.peekToken(\"in\", 2)?.value ?? null;\n\n\t\t\t// Newlines as whitespace\n\t\t\tconst multiline = t.tokenize(\"for\\nx\\nin\\nitems\");\n\t\t\tr.multiPeek2 = multiline.peekToken(\"in\", 2)?.value ?? null;\n\n\t\t\t// Type defaults to IDENTIFIER \u2014 matching against an operator requires explicit type\n\t\t\tconst withOp = t.tokenize(\"a + b\");\n\t\t\tr.opDefault = withOp.peekToken(\"+\", 1) ?? null; // IDENTIFIER type, won't match\n\t\t\tr.opExplicit = withOp.peekToken(\"+\", 1, \"PLUS\")?.value ?? null;\n\n\t\t\t// Lookahead past the end returns undefined\n\t\t\tconst short = t.tokenize(\"foo\");\n\t\t\tr.beyondEnd = short.peekToken(\"anything\", 5) ?? 
null;\n\n\t\t\treturn r;\n\t\t});\n\n\t\texpect(results.peek0).toBe(\"for\");\n\t\texpect(results.peek1).toBe(\"x\");\n\t\texpect(results.peek2).toBe(\"in\");\n\t\texpect(results.peek3).toBe(\"items\");\n\t\texpect(results.peekMiss).toBeNull();\n\t\texpect(results.durPeek2).toBeNull();\n\t\texpect(results.extraPeek2).toBe(\"in\");\n\t\texpect(results.commentPeek2).toBe(\"in\");\n\t\texpect(results.multiPeek2).toBe(\"in\");\n\t\texpect(results.opDefault).toBeNull();\n\t\texpect(results.opExplicit).toBe(\"+\");\n\t\texpect(results.beyondEnd).toBeNull();\n\t", + "async": true, + "complexity": "eval-only" + }, + { + "category": "core/tokenizer", + "name": "pushFollow/popFollow nest follow-set boundaries", + "html": "", + "body": "\n\t\tconst results = await evaluate(() => {\n\t\t\tconst t = _hyperscript.internals.tokenizer;\n\t\t\tconst r = {};\n\t\t\tconst tokens = t.tokenize(\"and or not\");\n\t\t\ttokens.pushFollow(\"and\");\n\t\t\ttokens.pushFollow(\"or\");\n\t\t\tr.andBlocked = tokens.matchToken(\"and\") ?? null;\n\t\t\ttokens.popFollow(); // pops \"or\"\n\t\t\tr.andStillBlocked = tokens.matchToken(\"and\") ?? null;\n\t\t\ttokens.popFollow(); // pops \"and\"\n\t\t\tr.andAllowed = tokens.matchToken(\"and\")?.value ?? null;\n\t\t\treturn r;\n\t\t});\n\t\texpect(results.andBlocked).toBeNull();\n\t\texpect(results.andStillBlocked).toBeNull();\n\t\texpect(results.andAllowed).toBe(\"and\");\n\t", + "async": true, + "complexity": "eval-only" + }, + { + "category": "core/tokenizer", + "name": "pushFollows/popFollows push and pop in bulk", + "html": "", + "body": "\n\t\tconst results = await evaluate(() => {\n\t\t\tconst t = _hyperscript.internals.tokenizer;\n\t\t\tconst tokens = t.tokenize(\"and or\");\n\t\t\tconst count = tokens.pushFollows(\"and\", \"or\");\n\t\t\tconst blocked = tokens.matchToken(\"and\") ?? null;\n\t\t\ttokens.popFollows(count);\n\t\t\tconst allowed = tokens.matchToken(\"and\")?.value ?? 
null;\n\t\t\treturn {count, blocked, allowed};\n\t\t});\n\t\texpect(results.count).toBe(2);\n\t\texpect(results.blocked).toBeNull();\n\t\texpect(results.allowed).toBe(\"and\");\n\t", + "async": true, + "complexity": "eval-only" + }, + { + "category": "ext/component", + "name": "component reads a feature-level set from an enclosing div on first load", + "html": "", + "body": "\n\t\tawait html(`\n\t\t\t\n\t\t\t