Step 17: streaming render — hyperscript enhancements, WASM builds, live server tests

Streaming chunked transfer with shell-first suspense and resolve scripts.
Hyperscript parser/compiler/runtime expanded for conformance. WASM static
assets added to OCaml host. Playwright streaming and page-level test suites.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
2026-04-12 08:41:38 +00:00
parent 7aefe4da8f
commit 6e27442d57
29 changed files with 65959 additions and 628 deletions

View File

@@ -0,0 +1,376 @@
#!/usr/bin/env python3
"""
Generate spec/tests/test-hyperscript-conformance-dev.sx from dev-branch expression tests.
Reads spec/tests/hyperscript-upstream-tests.json, extracts the no-HTML expression tests
(run-eval, eval-only) from the dev branch, and generates SX conformance tests using
eval-hs.
Usage: python3 tests/playwright/generate-sx-conformance-dev.py
"""
import json
import re
import os
from collections import OrderedDict
# Repo root: this script lives in tests/playwright/, so walk up three
# directory levels from the script's absolute path.
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
INPUT = os.path.join(PROJECT_ROOT, 'spec/tests/hyperscript-upstream-tests.json')
OUTPUT = os.path.join(PROJECT_ROOT, 'spec/tests/test-hyperscript-conformance-dev.sx')
with open(INPUT) as f:
    all_tests = json.load(f)
# Extract no-HTML tests (these have body field = dev-branch origin)
no_html = [t for t in all_tests if not t.get('html', '').strip() and t.get('body')]
# ── JS → SX value conversion ─────────────────────────────────────
def parse_js_value(s):
    """Convert a JS literal to its SX literal form.

    Handles booleans, null/undefined (-> nil), numbers, quoted strings,
    and (possibly nested) arrays.  Returns the SX text, or None if the
    literal cannot be converted.
    """
    s = s.strip()
    if s == 'true': return 'true'
    if s == 'false': return 'false'
    if s in ('null', 'undefined'): return 'nil'
    # Number (integer or decimal, optionally negative)
    if re.match(r'^-?\d+(\.\d+)?$', s):
        return s
    # String — single or double quoted.  The backreference requires the SAME
    # quote character on both ends (the old pattern accepted mismatched
    # quotes such as `"abc'`).
    m = re.match(r"^(['\"])(.*)\1$", s)
    if m:
        inner = m.group(2).replace('"', '\\"')
        return f'"{inner}"'
    # Empty array
    if s == '[]':
        return '(list)'
    # Array — delegate element conversion to parse_js_array
    m = re.match(r'^\[(.+)\]$', s, re.DOTALL)
    if m:
        return parse_js_array(m.group(1))
    return None
def parse_js_array(inner):
    """Render JS array contents as an SX ``(list ...)`` form.

    Nested arrays recurse through parse_js_value.  Returns None whenever
    splitting fails or any element cannot be converted.
    """
    pieces = split_js_array(inner)
    if pieces is None:
        return None
    converted = []
    for piece in pieces:
        value = parse_js_value(piece.strip())
        if value is None:
            return None
        converted.append(value)
    joined = ' '.join(converted)
    return f'(list {joined})'
def split_js_array(s):
    """Split JS array contents on top-level commas.

    Respects bracket/paren nesting AND string literals — previously a comma
    inside a quoted string (e.g. ``"a,b"``) incorrectly split the element.
    Backslash escapes inside strings are honoured.  Returns the list of raw
    (unstripped) item strings, or None when the input is empty/blank.
    """
    items = []
    depth = 0
    current = ''
    in_str = None  # active quote character while inside a string literal
    i = 0
    while i < len(s):
        ch = s[i]
        if in_str:
            current += ch
            if ch == '\\' and i + 1 < len(s):
                # Keep the escaped character verbatim (handles \" and \').
                current += s[i + 1]
                i += 1
            elif ch == in_str:
                in_str = None
        elif ch in ('"', "'"):
            in_str = ch
            current += ch
        elif ch in '([':
            depth += 1
            current += ch
        elif ch in ')]':
            depth -= 1
            current += ch
        elif ch == ',' and depth == 0:
            items.append(current)
            current = ''
        else:
            current += ch
        i += 1
    if current.strip():
        items.append(current)
    return items if items else None
def escape_hs(cmd):
    """Escape backslashes and double quotes so *cmd* can be embedded in an
    SX double-quoted string literal."""
    out = []
    for ch in cmd:
        if ch in ('\\', '"'):
            out.append('\\')
        out.append(ch)
    return ''.join(out)
# ── Context parsing ───────────────────────────────────────────────
def parse_js_context(ctx_str):
    """Translate a JS context object like ``{ me: 5 }`` or
    ``{ locals: { x: 5, y: 6 } }`` into an SX ``:ctx`` map expression.

    Returns None when nothing usable is found.
    """
    if not ctx_str or not ctx_str.strip():
        return None
    pieces = []
    # "me: <value>" entry
    me_match = re.search(r'me:\s*([^,}]+)', ctx_str)
    if me_match:
        me_val = parse_js_value(me_match.group(1).strip())
        if me_val:
            pieces.append(f':me {me_val}')
    # "locals: { key: val, ... }" entry
    locals_match = re.search(r'locals:\s*\{([^}]+)\}', ctx_str)
    if locals_match:
        pairs = []
        for kv in re.finditer(r'(\w+):\s*([^,}]+)', locals_match.group(1)):
            converted = parse_js_value(kv.group(2).strip())
            if converted:
                pairs.append(f':{kv.group(1)} {converted}')
        if pairs:
            pieces.append(f':locals {{{" ".join(pairs)}}}')
    if not pieces:
        return None
    return f'{{{" ".join(pieces)}}}'
# ── Body parsing patterns ─────────────────────────────────────────
def try_inline_expects(body):
    """Match one or more inline ``expect(await run("cmd")).toBe(value)``
    assertions, optionally carrying a context object:
    ``expect(await run("cmd", { me: 5 })).toBe(value)``.

    Returns ``[(cmd, expected_sx, ctx_sx_or_None), ...]`` or None when no
    assertion matches or any expected value is unconvertible.
    """
    pattern = (
        r'expect\(await run\((["\x60\'])(.+?)\1'
        r'(?:,\s*(\{[^)]*\}))?\)\)'
        r'\.(toBe|toEqual)\((.+?)\)'
    )
    found = []
    for m in re.finditer(pattern, body):
        expected = parse_js_value(m.group(5).strip())
        if expected is None:
            return None
        ctx_src = m.group(3)
        ctx = parse_js_context(ctx_src) if ctx_src else None
        found.append((m.group(2).strip(), expected, ctx))
    return found or None
def try_run_then_expect_result(body):
    """Pattern: var result = await run("cmd"); expect(result).toBe(value).

    Returns a single-element list ``[(cmd, expected_sx, ctx_sx_or_None)]`` so
    the caller can treat it like try_inline_expects' output, or None when
    either half of the pattern is missing or the value is unconvertible.
    """
    # run("cmd") with optional trailing context object; DOTALL lets the
    # command span multiple source lines (e.g. backtick template literals).
    run_m = re.search(r'await run\([\x60"\'](.*?)[\x60"\']\s*(?:,\s*(\{[^)]*\}))?\)', body, re.DOTALL)
    exp_m = re.search(r'expect\(result\)\.(toBe|toEqual)\((.+?)\)\s*;?', body)
    if run_m and exp_m:
        # Collapse newlines/tabs and whitespace runs to single spaces.
        cmd = run_m.group(1).strip().replace('\n', ' ').replace('\t', ' ')
        cmd = re.sub(r'\s+', ' ', cmd)
        ctx_raw = run_m.group(2)
        expected = parse_js_value(exp_m.group(2).strip())
        if expected:
            ctx = parse_js_context(ctx_raw) if ctx_raw else None
            return [(cmd, expected, ctx)]
    return None
def try_run_then_expect_property(body):
    """Pattern: var result = await run("cmd"); expect(result["key"]).toBe(value)
    or expect(result.key).toBe(value).

    Returns ``(cmd, ctx_sx_or_None, [('get', key, expected_sx), ...])`` or
    None when the run() call or every property assertion fails to match.
    """
    run_m = re.search(r'await run\([\x60"\'](.*?)[\x60"\']\s*(?:,\s*(\{[^)]*\}))?\)', body, re.DOTALL)
    if not run_m:
        return None
    # Normalise multi-line commands to one space-separated string.
    cmd = run_m.group(1).strip().replace('\n', ' ').replace('\t', ' ')
    cmd = re.sub(r'\s+', ' ', cmd)
    ctx_raw = run_m.group(2)
    ctx = parse_js_context(ctx_raw) if ctx_raw else None
    assertions = []
    # Bracket access: result["key"]
    for m in re.finditer(r'expect\(result\["(\w+)"\]\)\.(toBe|toEqual)\((.+?)\)', body):
        expected = parse_js_value(m.group(3).strip())
        if expected:
            assertions.append(('get', m.group(1), expected))
    # Dot access: result.key
    for m in re.finditer(r'expect\(result\.(\w+)\)\.(toBe|toEqual)\((.+?)\)', body):
        prop = m.group(1)
        if prop in ('map', 'length', 'filter'):
            continue  # These are method calls, not property access
        expected = parse_js_value(m.group(3).strip())
        if expected:
            assertions.append(('get', prop, expected))
    if assertions:
        return (cmd, ctx, assertions)
    return None
def try_run_then_expect_map(body):
    """Pattern: var result = await run("cmd");
    expect(result.map(x => x.name)).toEqual([...]).

    Returns ``(cmd, ctx_sx_or_None, prop, expected_sx)`` or None when the
    run() call or the .map(...) assertion does not match.
    """
    run_m = re.search(r'await run\([\x60"\'](.*?)[\x60"\']\s*(?:,\s*(\{[^)]*\}))?\)', body, re.DOTALL)
    if not run_m:
        return None
    # Normalise multi-line commands to one space-separated string.
    cmd = run_m.group(1).strip().replace('\n', ' ').replace('\t', ' ')
    cmd = re.sub(r'\s+', ' ', cmd)
    ctx_raw = run_m.group(2)
    ctx = parse_js_context(ctx_raw) if ctx_raw else None
    # result.map(x => x.prop) — single-property arrow projection only.
    map_m = re.search(r'expect\(result\.map\(\w+\s*=>\s*\w+\.(\w+)\)\)\.(toBe|toEqual)\((.+?)\)', body)
    if map_m:
        prop = map_m.group(1)
        expected = parse_js_value(map_m.group(3).strip())
        if expected:
            return (cmd, ctx, prop, expected)
    return None
def try_eval_statically(body):
    """Match ``expect(await evaluate(() =>
    _hyperscript.parse("expr").evalStatically())).toBe(value)``.

    evalStatically only evaluates literal expressions, which maps directly
    onto eval-hs.  Returns ``[(expr, expected_sx), ...]`` or None.
    """
    pattern = (
        r'expect\(await evaluate\(\(\)\s*=>\s*_hyperscript\.parse\(([\'"])(.+?)\1\)\.evalStatically\(\)\)\)'
        r'\.(toBe|toEqual)\((.+?)\)'
    )
    collected = []
    for m in re.finditer(pattern, body):
        sx_val = parse_js_value(m.group(4).strip())
        if sx_val is None:
            return None
        collected.append((m.group(2), sx_val))
    return collected or None
def try_eval_statically_throws(body):
    """Match ``expect(() => _hyperscript.parse("expr").evalStatically()).toThrow()``.

    Returns the list of expressions that are expected to raise, or None when
    the pattern never appears.
    """
    pattern = (
        r'expect\(.*_hyperscript\.parse\(([\'"])(.+?)\1\)\.evalStatically.*\)\.toThrow\(\)'
    )
    exprs = [m.group(2) for m in re.finditer(pattern, body)]
    return exprs or None
# ── Test generation ───────────────────────────────────────────────
def emit_eval_hs(cmd, ctx):
    """Render an ``(eval-hs "cmd")`` call, appending the SX context map when
    one is supplied."""
    escaped = escape_hs(cmd)
    suffix = f' {ctx}' if ctx else ''
    return f'(eval-hs "{escaped}"{suffix})'
def generate_conformance_test(test):
    """Generate an SX deftest form for one no-HTML test.

    Tries each body-parsing pattern in priority order (static eval, static
    throw, inline expects, result equality, .map projection, property access)
    and emits the matching SX.  Returns None when no pattern applies, in
    which case the caller emits a stub.
    """
    body = test.get('body', '')
    # Double quotes would break the SX string literal wrapping the name.
    name = test['name'].replace('"', "'")
    # evalStatically — literal evaluation
    eval_static = try_eval_statically(body)
    if eval_static:
        lines = [f' (deftest "{name}"']
        for expr, expected in eval_static:
            expr_e = escape_hs(expr)
            lines.append(f' (assert= {expected} (eval-hs "{expr_e}"))')
        lines.append(' )')
        return '\n'.join(lines)
    # evalStatically throws — expect error
    eval_throws = try_eval_statically_throws(body)
    if eval_throws:
        lines = [f' (deftest "{name}"']
        for expr in eval_throws:
            expr_e = escape_hs(expr)
            # No error-assertion primitive yet: record intent as a comment
            # and emit a trivially-true assertion so the suite still runs.
            lines.append(f' ;; Should error: (eval-hs "{expr_e}")')
            lines.append(f' (assert true)')
        lines.append(' )')
        return '\n'.join(lines)
    # Multiple inline expects: expect(await run("...")).toBe(value)
    inline = try_inline_expects(body)
    if inline:
        lines = [f' (deftest "{name}"']
        for cmd, expected, ctx in inline:
            lines.append(f' (assert= {expected} {emit_eval_hs(cmd, ctx)})')
        lines.append(' )')
        return '\n'.join(lines)
    # var result = await run("..."); expect(result).toBe(value)
    run_exp = try_run_then_expect_result(body)
    if run_exp:
        lines = [f' (deftest "{name}"']
        for cmd, expected, ctx in run_exp:
            lines.append(f' (assert= {expected} {emit_eval_hs(cmd, ctx)})')
        lines.append(' )')
        return '\n'.join(lines)
    # var result = await run("..."); expect(result.map(x => x.prop)).toEqual([...])
    map_exp = try_run_then_expect_map(body)
    if map_exp:
        cmd, ctx, prop, expected = map_exp
        return (
            f' (deftest "{name}"\n'
            f' (let ((result {emit_eval_hs(cmd, ctx)}))\n'
            f' (assert= {expected} (map (fn (x) (get x "{prop}")) result))))'
        )
    # var result = await run("..."); expect(result["key"]).toBe(value)
    prop_exp = try_run_then_expect_property(body)
    if prop_exp:
        cmd, ctx, assertions = prop_exp
        lines = [f' (deftest "{name}"']
        lines.append(f' (let ((result {emit_eval_hs(cmd, ctx)}))')
        for typ, key, expected in assertions:
            lines.append(f' (assert= {expected} (get result "{key}"))')
        lines.append(' ))')
        return '\n'.join(lines)
    return None
# ── Output generation ─────────────────────────────────────────────
# Build the output file line-by-line, then write it once at the end.
output = []
output.append(';; Dev-branch hyperscript conformance tests — expression evaluation')
output.append(f';; Source: spec/tests/hyperscript-upstream-tests.json (no-HTML tests from v0.9.90-dev)')
output.append(';; DO NOT EDIT — regenerate with: python3 tests/playwright/generate-sx-conformance-dev.py')
output.append('')
# Group by category (OrderedDict keeps first-seen category order stable).
categories = OrderedDict()
for t in no_html:
    cat = t['category']
    if cat not in categories:
        categories[cat] = []
    categories[cat].append(t)
total = 0
generated = 0
stubbed = 0
for cat, tests in categories.items():
    output.append(f';; ── {cat} ({len(tests)} tests) ──')
    output.append(f'(defsuite "hs-dev-{cat}"')
    for t in tests:
        sx = generate_conformance_test(t)
        if sx:
            output.append(sx)
            generated += 1
        else:
            # Unconvertible test: emit a failing stub so it is visible.
            safe_name = t['name'].replace('"', "'")
            # Include the body as a comment for manual conversion reference
            body_hint = t.get('body', '').split('\n')
            key_lines = [l.strip() for l in body_hint if 'expect' in l or 'run(' in l.lower()]
            hint = key_lines[0][:80] if key_lines else t['complexity']
            output.append(f' (deftest "{safe_name}"')
            output.append(f' ;; {hint}')
            output.append(f' (error "STUB: needs JS bridge — {t["complexity"]}"))')
            stubbed += 1
        total += 1
    output.append(')')
    output.append('')
with open(OUTPUT, 'w') as f:
    f.write('\n'.join(output))
# Summary.  Note: the per-category loop below re-runs the generator for each
# test purely for counting.
print(f'Generated {total} tests ({generated} real, {stubbed} stubs) -> {OUTPUT}')
print(f' Categories: {len(categories)}')
for cat, tests in categories.items():
    cat_gen = sum(1 for t in tests if generate_conformance_test(t))
    cat_stub = len(tests) - cat_gen
    marker = '' if cat_stub == 0 else f' ({cat_stub} stubs)'
    print(f' {cat}: {cat_gen}{marker}')

View File

@@ -0,0 +1,376 @@
// @ts-check
/**
* Generic SX page test runner.
*
* Discovers *.test.sx files next to components, parses defsuite/deftest
* forms, and executes them as Playwright tests against a real server.
*
* SX test format:
*
* (defsuite "name"
* :url "/sx/(geography.(isomorphism.streaming))"
* ;; :stream true — don't wait for data-sx-ready
* ;; :timeout 60000 — suite-level timeout
*
* (deftest "all slots resolve"
* (wait-for "[data-suspense='stream-fast']" :text "Fast source" :timeout 15000)
* (click "button")
* (assert-text "h1" "Streaming")))
*
* Primitives:
* (wait-for <sel> [:text t] [:visible] [:timeout ms] [:count n])
* (click <sel> [:text t] [:nth n])
* (fill <sel> <value>)
* (assert-text <sel> <text> [:timeout ms])
* (assert-not-text <sel> <text>)
* (assert-visible <sel> [:timeout ms])
* (assert-hidden <sel> [:timeout ms])
* (assert-count <sel> <n> [:timeout ms])
* (assert-no-errors)
* (wait <ms>)
* (snapshot <sel>)
* (assert-changed <sel>)
*/
const { test, expect } = require('playwright/test');
const { SiteServer } = require('./site-server');
const fs = require('fs');
const path = require('path');
const PROJECT_ROOT = path.resolve(__dirname, '../..');
// ---------------------------------------------------------------------------
// Discover *.test.sx files
// ---------------------------------------------------------------------------
// Recursively collect every *.test.sx file under `dir` into `acc`.
// Missing directories are tolerated (returns acc unchanged).
function findTestFiles(dir, acc = []) {
  if (!fs.existsSync(dir)) return acc;
  const entries = fs.readdirSync(dir, { withFileTypes: true });
  for (const entry of entries) {
    const childPath = path.join(dir, entry.name);
    if (entry.isDirectory()) {
      findTestFiles(childPath, acc);
    } else if (entry.name.endsWith('.test.sx')) {
      acc.push(childPath);
    }
  }
  return acc;
}
// Component/template roots scanned for *.test.sx specs (relative to repo root).
const SEARCH_DIRS = ['sx/sx', 'shared/sx/templates'];
const testFiles = [];
for (const d of SEARCH_DIRS) findTestFiles(path.join(PROJECT_ROOT, d), testFiles);
// ---------------------------------------------------------------------------
// Minimal SX parser — just enough for test spec structure
// ---------------------------------------------------------------------------
// Parse SX source into nested JS arrays/primitives.
// Atoms: strings -> {t:'s',v}, keywords -> {t:'k',v}, symbols -> {t:'y',v};
// true/false/numbers become native JS values; lists become arrays.
function parseSx(src) {
  let pos = 0;
  // Advance past whitespace and ";" line comments.
  function skip() {
    while (pos < src.length) {
      if (src[pos] === ';') { while (pos < src.length && src[pos] !== '\n') pos++; }
      else if (/\s/.test(src[pos])) pos++;
      else break;
    }
  }
  // Read one form starting at pos; returns null at end of input.
  function read() {
    skip();
    if (pos >= src.length) return null;
    if (src[pos] === '(') {
      pos++;
      const list = [];
      while (true) {
        skip();
        // Tolerates unterminated lists: hitting EOF also closes the list.
        if (pos >= src.length || src[pos] === ')') { pos++; return list; }
        list.push(read());
      }
    }
    if (src[pos] === '"') {
      pos++;
      let s = '';
      while (pos < src.length && src[pos] !== '"') {
        // Backslash escape: keep the escaped character literally.
        if (src[pos] === '\\') { pos++; s += src[pos] || ''; }
        else s += src[pos];
        pos++;
      }
      pos++;
      return { t: 's', v: s };
    }
    // Bare token: runs until whitespace, paren, quote, or comment start.
    let tok = '';
    while (pos < src.length && !/[\s()";]/.test(src[pos])) tok += src[pos++];
    if (tok === 'true') return true;
    if (tok === 'false') return false;
    if (/^-?\d+(\.\d+)?$/.test(tok)) return Number(tok);
    if (tok[0] === ':') return { t: 'k', v: tok.slice(1) };
    return { t: 'y', v: tok };
  }
  // Top level: read forms until the input is exhausted.
  const forms = [];
  while (pos < src.length) {
    skip();
    if (pos < src.length) { const f = read(); if (f !== null) forms.push(f); }
  }
  return forms;
}
// True when `node` is the symbol `name`.
function sym(node, name) {
  return node && node.t === 'y' && node.v === name;
}
// True when `node` is a keyword (":foo") token.
function isKey(node) {
  return node && node.t === 'k';
}
// Unwrap a string node to its raw value; pass anything else through.
function strVal(node) {
  if (node && node.t === 's') return node.v;
  return node;
}
// ---------------------------------------------------------------------------
// Parse test file into suites
// ---------------------------------------------------------------------------
// Parse one *.test.sx file into suite descriptors:
// { name, url, stream, timeout, tests: [{ name, steps }], file }.
// Non-defsuite top-level forms are ignored.
function parseTestFile(filePath) {
  const forms = parseSx(fs.readFileSync(filePath, 'utf8'));
  const suites = [];
  for (const form of forms) {
    if (!Array.isArray(form) || !sym(form[0], 'defsuite')) continue;
    const suite = { name: strVal(form[1]) || path.basename(filePath), url: '', stream: false, timeout: 30000, tests: [], file: filePath };
    let i = 2;
    // keyword args (e.g. :url "..." :stream true :timeout 60000)
    while (i < form.length && isKey(form[i])) {
      const k = form[i].v; i++;
      if (k === 'url') { suite.url = strVal(form[i]); i++; }
      else if (k === 'stream') { suite.stream = form[i] !== false; i++; }
      else if (k === 'timeout') { suite.timeout = form[i]; i++; }
      else i++; // unknown keyword: skip its value
    }
    // deftest forms
    for (; i < form.length; i++) {
      if (!Array.isArray(form[i]) || !sym(form[i][0], 'deftest')) continue;
      const dt = form[i];
      const t = { name: strVal(dt[1]) || `test-${suite.tests.length}`, steps: [] };
      for (let j = 2; j < dt.length; j++) {
        // Every nested list inside a deftest is treated as a step form.
        if (Array.isArray(dt[j])) t.steps.push(parseStep(dt[j]));
      }
      suite.tests.push(t);
    }
    suites.push(suite);
  }
  return suites;
}
// Convert one step form, e.g. (click "button" :nth 2), into
// { cmd, args: [...positional], opts: { keyword: value } }.
function parseStep(form) {
  const step = { cmd: form[0].v, args: [], opts: {} };
  let i = 1;
  while (i < form.length) {
    const node = form[i];
    if (isKey(node)) {
      // A keyword consumes the following form as its value.
      step.opts[node.v] = strVal(form[i + 1]);
      i += 2;
    } else {
      step.args.push(strVal(node));
      i += 1;
    }
  }
  return step;
}
// ---------------------------------------------------------------------------
// Step executor — maps SX primitives to Playwright calls
// ---------------------------------------------------------------------------
// Execute one parsed step against the Playwright page.
// `state` persists across steps within a single test (used by the
// snapshot / assert-changed pair).
async function executeStep(page, step, state) {
  const { cmd, args, opts } = step;
  // Per-step timeout override via :timeout; default 10s.
  const timeout = opts.timeout ? Number(opts.timeout) : 10000;
  switch (cmd) {
    case 'wait-for': {
      const loc = page.locator(args[0]);
      if (opts.text) await expect(loc.first()).toContainText(String(opts.text), { timeout });
      else if (opts.visible) await expect(loc.first()).toBeVisible({ timeout });
      else if (opts.count !== undefined) await expect(loc).toHaveCount(Number(opts.count), { timeout });
      else await loc.first().waitFor({ timeout });
      break;
    }
    case 'click': {
      let loc = page.locator(args[0]);
      if (opts.text) loc = loc.filter({ hasText: String(opts.text) });
      if (opts.nth !== undefined) await loc.nth(Number(opts.nth)).click();
      else if (opts.last) await loc.last().click();
      else await loc.first().click();
      break;
    }
    case 'fill': {
      await page.locator(args[0]).first().fill(String(args[1]));
      break;
    }
    case 'assert-text': {
      await expect(page.locator(args[0]).first()).toContainText(String(args[1]), { timeout });
      break;
    }
    case 'assert-not-text': {
      // Short fixed timeout: absence checks shouldn't stall the suite.
      await expect(page.locator(args[0]).first()).not.toContainText(String(args[1]), { timeout: 3000 });
      break;
    }
    case 'assert-visible': {
      await expect(page.locator(args[0]).first()).toBeVisible({ timeout });
      break;
    }
    case 'assert-hidden': {
      await expect(page.locator(args[0]).first()).toBeHidden({ timeout });
      break;
    }
    case 'assert-count': {
      await expect(page.locator(args[0])).toHaveCount(Number(args[1]), { timeout });
      break;
    }
    case 'assert-no-errors': {
      // Marker — handled by test wrapper
      break;
    }
    case 'wait': {
      await page.waitForTimeout(Number(args[0]));
      break;
    }
    case 'snapshot': {
      // Remember current text keyed by selector for a later assert-changed.
      state[args[0]] = await page.locator(args[0]).first().textContent();
      break;
    }
    case 'assert-changed': {
      const current = await page.locator(args[0]).first().textContent();
      expect(current, `Expected ${args[0]} text to change`).not.toBe(state[args[0]]);
      state[args[0]] = current;
      break;
    }
    default:
      throw new Error(`Unknown page test step: ${cmd}`);
  }
}
// ---------------------------------------------------------------------------
// Shared server — one for all test files
// ---------------------------------------------------------------------------
// When SX_TEST_URL is set, target an externally-managed server instead of
// booting our own SiteServer instance.
const USE_EXTERNAL = !!process.env.SX_TEST_URL;
let server;
if (!USE_EXTERNAL) {
  test.beforeAll(async () => {
    server = new SiteServer();
    await server.start();
  });
  test.afterAll(async () => {
    if (server) server.stop();
  });
}
// Base URL for page.goto — external override or the locally started server.
function baseUrl() {
  return USE_EXTERNAL ? process.env.SX_TEST_URL : server.baseUrl;
}
// ---------------------------------------------------------------------------
// Register discovered tests
// ---------------------------------------------------------------------------
// Placeholder test so the run doesn't fail discovery when no specs exist.
if (testFiles.length === 0) {
  test('no page tests found', () => {
    console.log('No *.test.sx files found in:', SEARCH_DIRS.join(', '));
  });
}
// One Playwright describe-block per suite, one test per deftest.
for (const file of testFiles) {
  const suites = parseTestFile(file);
  const relPath = path.relative(PROJECT_ROOT, file);
  for (const suite of suites) {
    test.describe(`${suite.name} (${relPath})`, () => {
      test.describe.configure({ timeout: suite.timeout });
      for (const t of suite.tests) {
        test(t.name, async ({ page }) => {
          // ── Diagnostics capture ──
          const diag = { console: [], network: [], errors: [] };
          page.on('console', msg => {
            const entry = `[${msg.type()}] ${msg.text()}`;
            diag.console.push(entry);
            if (msg.type() === 'error') diag.errors.push(msg.text());
          });
          page.on('pageerror', e => {
            diag.errors.push('PAGE_ERROR: ' + e.message);
            diag.console.push('[pageerror] ' + e.message);
          });
          page.on('response', res => {
            const url = res.url();
            // Skip data: URLs
            if (!url.startsWith('data:')) {
              diag.network.push(`${res.status()} ${res.request().method()} ${url.replace(baseUrl(), '')}`);
            }
          });
          page.on('requestfailed', req => {
            const url = req.url();
            if (!url.startsWith('data:')) {
              diag.network.push(`FAILED ${req.method()} ${url.replace(baseUrl(), '')} ${req.failure()?.errorText || ''}`);
            }
          });
          // ── Navigate ──
          // Streaming pages: don't wait for load events — the chunked
          // response stays open until all resolve scripts are flushed.
          const waitUntil = suite.stream ? 'commit' : 'domcontentloaded';
          await page.goto(baseUrl() + suite.url, { waitUntil, timeout: 30000 });
          // Wait for hydration on non-streaming pages
          if (!suite.stream) {
            try {
              await page.waitForSelector('html[data-sx-ready]', { timeout: 15000 });
            } catch (_) { /* continue with test steps */ }
          }
          // ── Execute steps, dump diagnostics on failure ──
          const state = {};
          try {
            for (const step of t.steps) {
              await executeStep(page, step, state);
            }
          } catch (err) {
            // Dump diagnostics on step failure
            console.log('\n═══ DIAGNOSTICS ═══');
            console.log('URL:', suite.url);
            console.log('\n── Network (' + diag.network.length + ' requests) ──');
            for (const n of diag.network) console.log(' ' + n);
            console.log('\n── Console (' + diag.console.length + ' entries) ──');
            for (const c of diag.console) console.log(' ' + c);
            // DOM snapshot — first 3000 chars of body
            try {
              const bodySnap = await page.evaluate(() => {
                const body = document.body;
                if (!body) return '(no body)';
                return body.innerHTML.substring(0, 3000);
              });
              console.log('\n── DOM (first 3000 chars) ──');
              console.log(bodySnap);
            } catch (_) {}
            console.log('═══════════════════\n');
            throw err;
          }
          // Auto-check console errors (filter network noise)
          const real = diag.errors.filter(e =>
            !e.includes('net::ERR') &&
            !e.includes('Failed to fetch') &&
            !e.includes('Failed to load resource') &&
            !e.includes('404')
          );
          // Log-only: console errors are reported but do not fail the test.
          if (real.length > 0) {
            console.log('Console errors:', real);
          }
        });
      }
    });
  }
}

View File

@@ -387,6 +387,178 @@ test.describe('Streaming sandbox', () => {
});
});
// =========================================================================
// Chunked transfer test — spins up a real HTTP server with chunked encoding,
// serves the actual page using the real WASM kernel + sx-platform.js +
// component defs. Verifies resolve scripts execute and fill suspense slots.
// =========================================================================
const http = require('http');
// Assemble the pieces of the chunked streaming page: shell HTML with
// suspense placeholders, the resolve bootstrap, staggered resolve payloads,
// and the closing tail.  The "</" sequences in component defs are escaped
// so the inline <script type="text/sx"> block isn't terminated early.
function buildStreamingPage() {
  // Read component defs (same as server sends in <script type="text/sx">)
  // NOTE(review): readFile is a helper defined elsewhere in this file.
  const compFiles = [
    'shared/sx/templates/tw.sx', 'shared/sx/templates/tw-layout.sx',
    'shared/sx/templates/tw-type.sx', 'shared/sx/templates/pages.sx',
    'sx/sx/streaming-demo.sx',
  ];
  const compDefs = compFiles.map(f => readFile(f)).join('\n');
  // Shell body — suspense placeholders with script src tags (like real site)
  const shellBody = `<!doctype html><html><head><meta charset="utf-8"></head><body>
<div id="sx-root">
<h1>Streaming &amp; Suspense Demo</h1>
<div data-suspense="stream-fast" id="sx-suspense-stream-fast" style="display:contents">
<div class="animate-pulse">Loading fast...</div>
</div>
<div data-suspense="stream-medium" id="sx-suspense-stream-medium" style="display:contents">
<div class="animate-pulse">Loading medium...</div>
</div>
<div data-suspense="stream-slow" id="sx-suspense-stream-slow" style="display:contents">
<div class="animate-pulse">Loading slow...</div>
</div>
</div>
<script type="text/sx">${compDefs.replace(/<\//g, '<\\/')}</script>
<script src="/wasm/sx_browser.bc.js"></script>
<script src="/wasm/sx-platform.js"></script>`;
  // Bootstrap (same as _sx_streaming_bootstrap in sx_server.ml):
  // queue resolves until Sx.resolveSuspense is available, then go direct.
  const bootstrap = `<script>window.__sxPending=[];window.__sxResolve=function(i,s){` +
    `if(window.Sx&&Sx.resolveSuspense){Sx.resolveSuspense(i,s)}` +
    `else{window.__sxPending.push({id:i,sx:s})}}</script>`;
  // Resolve scripts (same as sx_streaming_resolve_script produces)
  const resolves = [
    { id: 'stream-fast', sx: '(div "Fast source resolved")', delay: 500 },
    { id: 'stream-medium', sx: '(div "Medium source resolved")', delay: 1000 },
    { id: 'stream-slow', sx: '(div "Slow source resolved")', delay: 1500 },
  ];
  const tail = '\n</body></html>';
  return { shellBody, bootstrap, resolves, tail };
}
// Boot a local HTTP server that serves the streaming demo page with chunked
// transfer encoding plus the WASM kernel/platform static files.  Resolves
// with the listening server (OS-assigned ephemeral port).
function startStreamingServer() {
  const parts = buildStreamingPage();
  // NOTE(review): WASM_DIR comes from earlier in this file.  These two reads
  // are not referenced below — presumably an existence check on the build
  // artifacts before the server starts; TODO confirm.
  const wasmSrc = fs.readFileSync(path.join(WASM_DIR, 'sx_browser.bc.js'), 'utf8');
  const platformSrc = fs.readFileSync(path.join(PROJECT_ROOT, 'shared/static/wasm/sx-platform.js'), 'utf8');
  const server = http.createServer((req, res) => {
    // Serve static files from wasm directory (kernel, platform, .sxbc modules)
    if (req.url.startsWith('/wasm/') || req.url.startsWith('/static/wasm/')) {
      const relPath = req.url.replace('/static', '');
      const filePath = path.join(WASM_DIR, relPath.replace('/wasm/', ''));
      try {
        const data = fs.readFileSync(filePath);
        const ct = filePath.endsWith('.js') ? 'application/javascript'
          : filePath.endsWith('.sx') ? 'text/plain' : 'application/octet-stream';
        res.writeHead(200, { 'Content-Type': ct });
        res.end(data);
        return;
      } catch(e) {
        res.writeHead(404);
        res.end('Not found: ' + filePath);
        return;
      }
    }
    // Streaming page — chunked transfer
    res.writeHead(200, {
      'Content-Type': 'text/html; charset=utf-8',
      'Transfer-Encoding': 'chunked',
    });
    // Chunk 1: shell body (suspense placeholders + script tags for kernel/platform)
    res.write(parts.shellBody);
    // Chunk 2: bootstrap
    res.write(parts.bootstrap);
    // Chunks 3-5: resolve scripts with staggered delays
    let i = 0;
    function sendNext() {
      if (i >= parts.resolves.length) {
        // All resolves flushed — close out the document and the response.
        res.end(parts.tail);
        return;
      }
      const r = parts.resolves[i++];
      setTimeout(() => {
        const script = `<script>window.__sxResolve&&window.__sxResolve(${JSON.stringify(r.id)},${JSON.stringify(r.sx)})</script>`;
        res.write(script);
        sendNext();
      }, r.delay);
    }
    sendNext();
  });
  // listen(0) -> kernel picks a free port; resolve once bound.
  return new Promise(resolve => {
    server.listen(0, () => resolve(server));
  });
}
// End-to-end: real chunked HTTP server + real browser.  Verifies the
// suspense bootstrap queues resolve payloads until the WASM kernel boots,
// then drains them into the placeholder slots.
test.describe('Streaming chunked server', () => {
  test.describe.configure({ timeout: 120000 });
  let server;
  let serverUrl;
  test.beforeAll(async () => {
    server = await startStreamingServer();
    const port = server.address().port;
    serverUrl = `http://localhost:${port}`;
  });
  test.afterAll(async () => {
    if (server) server.close();
  });
  test('suspense slots resolve via chunked transfer', async ({ page }) => {
    const consoleErrors = [];
    page.on('console', msg => { if (msg.type() === 'error') consoleErrors.push(msg.text()); });
    page.on('pageerror', e => consoleErrors.push('PAGE_ERROR: ' + e.message));
    // 'commit' — the chunked response stays open, so load events never fire.
    await page.goto(serverUrl, { waitUntil: 'commit', timeout: 60000 });
    // Wait for WASM kernel + platform to boot
    await page.waitForFunction('!!window.Sx && !!window.Sx.resolveSuspense', { timeout: 60000 });
    // Shell should render with 3 suspense placeholders
    await expect(page.locator('[data-suspense]')).toHaveCount(3);
    // Debug: check state + console errors
    const dbg = await page.evaluate(() => ({
      pending: window.__sxPending,
      fastText: document.querySelector('[data-suspense="stream-fast"]')?.textContent?.substring(0, 40),
    }));
    console.log('CHUNKED DEBUG:', JSON.stringify(dbg), 'errors:', consoleErrors.slice(0, 5));
    // Wait for all resolves (500ms + 1000ms + 1500ms = 3s total, plus boot time)
    await expect(page.locator('[data-suspense="stream-fast"]'))
      .toContainText('Fast source resolved', { timeout: 15000 });
    await expect(page.locator('[data-suspense="stream-medium"]'))
      .toContainText('Medium source resolved', { timeout: 15000 });
    await expect(page.locator('[data-suspense="stream-slow"]'))
      .toContainText('Slow source resolved', { timeout: 15000 });
  });
  test('__sxResolve is defined after boot', async ({ page }) => {
    await page.goto(serverUrl, { waitUntil: 'commit', timeout: 60000 });
    // Wait for full boot (not just Sx.resolveSuspense, which is available eagerly)
    await page.waitForFunction(
      '!!document.documentElement.getAttribute("data-sx-ready")',
      { timeout: 60000 }
    );
    // Collect bootstrap/kernel state in one round-trip for assertions below.
    const state = await page.evaluate(() => ({
      resolveType: typeof window.__sxResolve,
      hasSx: typeof window.Sx,
      sxKeys: window.Sx ? Object.keys(window.Sx).join(',') : 'no Sx',
      sxResolveSuspense: typeof (window.Sx && Sx.resolveSuspense),
      pending: window.__sxPending,
    }));
    expect(state.resolveType).toBe('function');
    expect(state.sxResolveSuspense).toBe('function');
    // Pending should be drained (null) after boot
    expect(state.pending).toBeNull();
  });
});
// =========================================================================
// Live server tests — verify the actual chunked response works end-to-end
// =========================================================================