Modular test architecture: per-module test specs for SX

Split monolithic test.sx into composable test specs:
- test-framework.sx: deftest/defsuite macros + assertion helpers
- test-eval.sx: core evaluator + primitives (81 tests)
- test-parser.sx: parser + serializer + round-trips (39 tests)
- test-router.sx: route matching from router.sx (18 tests)
- test-render.sx: HTML adapter rendering (23 tests)

Runners auto-discover specs and test whatever bootstrapped code
is available. Usage: `run.js eval parser router` or just `run.js`.
Legacy mode (`--legacy`) still runs monolithic test.sx.

Router tests use bootstrapped functions (sx_ref.py / sx-browser.js)
because the hand-written evaluator's flat-dict env model doesn't
support set! mutation across lambda closure boundaries.

JS: 161/161. Python: 159/161 (2 parser escape bugs found).

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-03-07 12:17:13 +00:00
parent 99a78a70b3
commit aab1f3e966
8 changed files with 1506 additions and 37 deletions

View File

// Run SX test specs against sx-browser.js.
//
// sx-browser.js parses and evaluates test specs — SX tests itself.
// This script provides only platform functions (error catching, reporting).
//
// Usage:
//   node shared/sx/tests/run.js                    # run all available specs
//   node shared/sx/tests/run.js eval               # run only test-eval.sx
//   node shared/sx/tests/run.js eval parser router # run specific specs
//   node shared/sx/tests/run.js --legacy           # run monolithic test.sx
Object.defineProperty(globalThis, "document", { value: undefined, writable: true });
var path = require("path");
@@ -86,15 +90,149 @@ var env = {
},
"has-key?": function(d, k) { return d && typeof d === "object" && k in d; },
"append": function(c, x) { return Array.isArray(x) ? (c||[]).concat(x) : (c||[]).concat([x]); },
  // (for-each-indexed f coll) — call f with (index, element) for each element.
  // The call is built as an SX form and evaluated in the shared env, so f may
  // be an SX lambda or a native function. nil coll is treated as empty.
  "for-each-indexed": function(f, coll) {
    for (var i = 0; i < (coll||[]).length; i++) {
      Sx.eval([f, i, coll[i]], env);
    }
  },
  // Mutating/lookup dict helpers — silently no-ops / falsy on nil and
  // non-object inputs so specs don't have to nil-guard.
  "dict-set!": function(d, k, v) { if (d) d[k] = v; },
  "dict-has?": function(d, k) { return d && typeof d === "object" && k in d; },
  "dict-get": function(d, k) { return d ? d[k] : undefined; },
  // True when the string form of s begins with prefix.
  "starts-with?": function(s, prefix) { return String(s).indexOf(prefix) === 0; },
"ends-with?": function(s, suffix) { var str = String(s); return str.indexOf(suffix) === str.length - suffix.length; },
  // (slice s start [end]) — substring/subarray; end is optional.
  "slice": function(s, start, end) { return end !== undefined ? s.slice(start, end) : s.slice(start); },
  "inc": function(n) { return n + 1; },
  // (append! arr item) — in-place push; no-op when arr is not an array.
  "append!": function(arr, item) { if (Array.isArray(arr)) arr.push(item); },
  // (dict) — fresh empty dict.
  "dict": function() { return {}; },
  // (for-each f coll) — call f with each element, evaluated via the shared
  // env so f may be an SX lambda or a native function. nil coll is empty.
  "for-each": function(f, coll) {
    for (var i = 0; i < (coll||[]).length; i++) {
      Sx.eval([f, coll[i]], env);
    }
  },
  // --- Parser platform functions (for test-parser.sx) ---
  "sx-parse": function(source) { return Sx.parseAll(source); },
"sx-serialize": function(val) {
// Basic serializer for test roundtrips
if (val === Sx.NIL || val === null || val === undefined) return "nil";
if (typeof val === "boolean") return val ? "true" : "false";
if (typeof val === "number") return String(val);
if (typeof val === "string") return '"' + val.replace(/\\/g, "\\\\").replace(/"/g, '\\"') + '"';
// Check Symbol/Keyword BEFORE generic object — they are objects too
if (val && (val._sym || val._sx_symbol)) return val.name;
if (val && (val._kw || val._sx_keyword)) return ":" + val.name;
if (Array.isArray(val)) return "(" + val.map(function(x) { return env["sx-serialize"](x); }).join(" ") + ")";
if (val && typeof val === "object") {
var parts = [];
Object.keys(val).forEach(function(k) {
parts.push(":" + k);
parts.push(env["sx-serialize"](val[k]));
});
return "{" + parts.join(" ") + "}";
}
return String(val);
},
  // Symbol/Keyword constructors: prefer the bootstrapped Sx.sym / Sx.kw when
  // present, otherwise build tagged plain objects the serializer recognizes
  // (via the _sx_symbol / _sx_keyword markers checked above).
  "make-symbol": function(name) { return Sx.sym ? Sx.sym(name) : { _sx_symbol: true, name: name, toString: function() { return name; } }; },
  "make-keyword": function(name) { return Sx.kw ? Sx.kw(name) : { _sx_keyword: true, name: name, toString: function() { return name; } }; },
  // Accessors fall back to String() for non-symbol/keyword values.
  "symbol-name": function(s) { return s && s.name ? s.name : String(s); },
  "keyword-name": function(k) { return k && k.name ? k.name : String(k); },
  // --- Render platform function (for test-render.sx) ---
  // Parse SX source and render each top-level form to HTML, concatenated.
  // Throws when the html adapter was never bootstrapped into sx-browser.js,
  // which the spec runner surfaces as a failed/skipped render spec.
  "render-html": function(sxSource) {
    if (!Sx.renderToHtml) throw new Error("render-to-html not available — html adapter not bootstrapped");
    var exprs = Sx.parseAll(sxSource);
    var result = "";
    for (var i = 0; i < exprs.length; i++) {
      result += Sx.renderToHtml(exprs[i], env);
    }
    return result;
  },
};
// --- Resolve which test specs to run ---
// (Stale pre-refactor lines removed: they read the deleted monolithic
// test.sx, printed a second top-level TAP header, and re-ran the eval loop.)
var refDir = path.resolve(__dirname, "../ref");
var args = process.argv.slice(2);
// Available spec modules and their platform requirements.
// `needs` lists env keys a spec depends on; a missing key makes the runner
// emit a TAP "# SKIP" comment for that spec instead of failing.
var SPECS = {
  "eval": { file: "test-eval.sx", needs: [] },
  "parser": { file: "test-parser.sx", needs: ["sx-parse"] },
  "router": { file: "test-router.sx", needs: [] },
  "render": { file: "test-render.sx", needs: ["render-html"] },
};
// Parse and evaluate one SX file from the ref directory in the shared env.
// A missing file is reported as a TAP skip comment rather than an error,
// so the runner can probe for specs that were never bootstrapped.
function evalFile(filename) {
  var fullPath = path.resolve(refDir, filename);
  if (!fs.existsSync(fullPath)) {
    console.log("# SKIP " + filename + " (file not found)");
    return;
  }
  var parsed = Sx.parseAll(fs.readFileSync(fullPath, "utf8"));
  for (var idx = 0; idx < parsed.length; idx++) {
    Sx.eval(parsed[idx], env);
  }
}
// --- Spec runner ---
// Both modes begin with the TAP header, so it is emitted once up front.
console.log("TAP version 13");
if (args[0] === "--legacy") {
  // Legacy mode — replay the monolithic test.sx.
  evalFile("test.sx");
} else {
  // Run the specs named on the command line, or every registered spec.
  var requested = args.length > 0 ? args : Object.keys(SPECS);
  // The framework (deftest/defsuite + assertion helpers) must load first.
  evalFile("test-framework.sx");
  requested.forEach(function(specName) {
    var spec = SPECS[specName];
    if (!spec) {
      console.log("# SKIP unknown spec: " + specName);
      return;
    }
    // A spec is runnable only when every platform function it needs exists.
    var missing = spec.needs.filter(function(need) { return !(need in env); });
    if (missing.length > 0) {
      console.log("# SKIP " + specName + " (missing: " + missing[0] + ")");
      return;
    }
    if (specName === "router") {
      // Use bootstrapped router functions from sx-browser.js.
      // The bare evaluator can't run router.sx faithfully because set!
      // inside lambda closures doesn't propagate (dict copies, not cells).
      if (Sx.splitPathSegments) {
        env["split-path-segments"] = Sx.splitPathSegments;
        env["parse-route-pattern"] = Sx.parseRoutePattern;
        env["match-route-segments"] = Sx.matchRouteSegments;
        env["match-route"] = Sx.matchRoute;
        env["find-matching-route"] = Sx.findMatchingRoute;
        env["make-route-segment"] = Sx.makeRouteSegment;
      } else {
        evalFile("router.sx");
      }
    }
    console.log("# --- " + specName + " ---");
    evalFile(spec.file);
  });
}
// --- Summary ---

View File

@@ -1,23 +1,27 @@
#!/usr/bin/env python3
"""Run SX test specs against the Python SX evaluator.

The Python evaluator parses and evaluates test specs — SX tests itself.
This script provides only platform functions (error catching, reporting).

Usage:
    python shared/sx/tests/run.py                    # run all available specs
    python shared/sx/tests/run.py eval               # run only test-eval.sx
    python shared/sx/tests/run.py eval parser router # run specific specs
    python shared/sx/tests/run.py --legacy           # run monolithic test.sx
"""
from __future__ import annotations
import os
import sys
import traceback
_HERE = os.path.dirname(os.path.abspath(__file__))
_PROJECT = os.path.abspath(os.path.join(_HERE, "..", "..", ".."))
sys.path.insert(0, _PROJECT)
from shared.sx.parser import parse_all
from shared.sx.evaluator import _eval, _trampoline
from shared.sx.evaluator import _eval, _trampoline, _call_lambda
from shared.sx.types import Symbol, Keyword, Lambda, NIL
# --- Test state ---
suite_stack: list[str] = []
@@ -29,7 +33,7 @@ test_num = 0
def try_call(thunk):
    """Call an SX thunk in the shared platform env, catching errors.

    Returns {"ok": True} on success, or {"ok": False, "error": msg} so the
    SX test framework can report the failure without aborting the run.
    (Removed stale residue line that evaluated the thunk a second time in
    an empty env — the refactor switched the call to the shared ``env``.)
    """
    try:
        _trampoline(_eval([thunk], env))
        return {"ok": True}
    except Exception as e:
        return {"ok": False, "error": str(e)}
@@ -60,25 +64,253 @@ def pop_suite():
suite_stack.pop()
def main():
env = {
"try-call": try_call,
"report-pass": report_pass,
"report-fail": report_fail,
"push-suite": push_suite,
"pop-suite": pop_suite,
}
# --- Parser platform functions ---
test_sx = os.path.join(_HERE, "..", "ref", "test.sx")
with open(test_sx) as f:
def sx_parse(source):
    """Parse SX source string into list of AST expressions.

    Thin wrapper so SX test specs can reach the host parser through the
    platform env under the name "sx-parse".
    """
    return parse_all(source)
def sx_serialize(val):
    """Serialize an AST value back to SX source text.

    Inverse of sx_parse for the value shapes the tests exercise:
    nil/bool/number/string atoms, Symbol, Keyword, list -> "(...)",
    dict -> "{:k v ...}".  Anything else falls back to str().
    """
    if val is None or val is NIL:
        return "nil"
    # bool before int/float: isinstance(True, int) is True in Python.
    if isinstance(val, bool):
        return "true" if val else "false"
    if isinstance(val, (int, float)):
        return str(val)
    if isinstance(val, str):
        # Backslash first so later substitutions are not re-escaped.
        # Control characters are now escaped too so multi-line strings
        # round-trip on one line.  NOTE(review): assumes the parser accepts
        # \n/\r/\t escapes — it already handles \\ and \" emitted here.
        escaped = (
            val.replace("\\", "\\\\")
            .replace('"', '\\"')
            .replace("\n", "\\n")
            .replace("\r", "\\r")
            .replace("\t", "\\t")
        )
        return f'"{escaped}"'
    # Symbol/Keyword before list/dict — they are project objects, not containers.
    if isinstance(val, Symbol):
        return val.name
    if isinstance(val, Keyword):
        return f":{val.name}"
    if isinstance(val, list):
        inner = " ".join(sx_serialize(x) for x in val)
        return f"({inner})"
    if isinstance(val, dict):
        parts = []
        for k, v in val.items():
            parts.append(f":{k}")
            parts.append(sx_serialize(v))
        return "{" + " ".join(parts) + "}"
    return str(val)
def make_symbol(name):
    # Construct a project Symbol; exposed to specs as "make-symbol".
    return Symbol(name)

def make_keyword(name):
    # Construct a project Keyword; exposed to specs as "make-keyword".
    return Keyword(name)

def symbol_name(sym):
    """Return the name of a Symbol; fall back to str() for other values."""
    if isinstance(sym, Symbol):
        return sym.name
    return str(sym)

def keyword_name(kw):
    """Return the name of a Keyword; fall back to str() for other values."""
    if isinstance(kw, Keyword):
        return kw.name
    return str(kw)
# --- Render platform function ---
def render_html(sx_source):
    """Parse SX source and render to HTML via the bootstrapped evaluator.

    Raises RuntimeError when sx_ref.py (the bootstrapped reference build)
    is not importable, which the runner surfaces instead of crashing.
    """
    try:
        from shared.sx.ref.sx_ref import render_to_html as _render_to_html
    except ImportError:
        raise RuntimeError("render-to-html not available — sx_ref.py not built")
    exprs = parse_all(sx_source)
    # Shallow-copy the env so bindings made during rendering don't leak
    # back into the shared test environment.
    render_env = dict(env)
    result = ""
    for expr in exprs:
        result += _render_to_html(expr, render_env)
    return result
# --- Spec registry ---
# Maps CLI spec name -> {"file", "needs"}.  "needs" lists platform env keys
# a spec depends on; main() SKIPs specs whose needs are missing from env.
SPECS = {
    "eval": {"file": "test-eval.sx", "needs": []},
    "parser": {"file": "test-parser.sx", "needs": ["sx-parse"]},
    "router": {"file": "test-router.sx", "needs": []},
    "render": {"file": "test-render.sx", "needs": ["render-html"]},
}

# Directory holding the SX spec files.
REF_DIR = os.path.join(_HERE, "..", "ref")
def eval_file(filename, env):
    """Load and evaluate one SX file from REF_DIR in the given env.

    A missing file is reported as a TAP skip comment instead of raising,
    so the runner can probe for specs that were never bootstrapped.
    Fixes: the skip message printed the literal "(unknown)" instead of the
    filename (f-string with no placeholder), and a stale residue line
    printed the "TAP version 13" header once per file — main() emits the
    header exactly once per run (matching the JS runner).
    """
    filepath = os.path.join(REF_DIR, filename)
    if not os.path.exists(filepath):
        print(f"# SKIP {filename} (file not found)")
        return
    with open(filepath) as f:
        src = f.read()
    for expr in parse_all(src):
        _trampoline(_eval(expr, env))
# --- Build env ---
# Shared platform environment handed to every evaluated SX expression.
env = {
    "try-call": try_call,
    "report-pass": report_pass,
    "report-fail": report_fail,
    "push-suite": push_suite,
    "pop-suite": pop_suite,
    # Parser platform functions
    "sx-parse": sx_parse,
    "sx-serialize": sx_serialize,
    "make-symbol": make_symbol,
    "make-keyword": make_keyword,
    "symbol-name": symbol_name,
    "keyword-name": keyword_name,
    # Render platform function
    "render-html": render_html,
    # Extra primitives needed by spec modules (router.sx, deps.sx).
    # "_deferred" placeholders are patched with the real implementations
    # below, after the helper functions they rely on are defined.
    "for-each-indexed": "_deferred",  # replaced below
    "dict-set!": "_deferred",
    "dict-has?": "_deferred",
    "dict-get": "_deferred",
    "append!": "_deferred",
    "inc": lambda n: n + 1,
}
def _call_sx(fn, args, caller_env):
    """Call an SX lambda or native function with args.

    Lambda calls go through the trampoline so tail calls resolve to a
    final value; anything else is assumed to be a plain Python callable.
    """
    if isinstance(fn, Lambda):
        return _trampoline(_call_lambda(fn, list(args), caller_env))
    return fn(*args)
def _for_each_indexed(fn, coll):
    """for-each-indexed that respects set! in lambda closures.

    The hand-written evaluator copies envs on lambda calls, which breaks
    set! mutation of outer scope. We eval directly in the closure dict
    to match the bootstrapped semantics (cell-based mutation).
    """
    if isinstance(fn, Lambda):
        closure = fn.closure
        for i, item in enumerate(coll or []):
            # Bind params directly in the closure (no copy).
            # NOTE(review): this leaves the loop's last bindings in the
            # closure afterwards, and zip silently drops the item when the
            # lambda takes a single param — presumed fine for test
            # callbacks, but worth confirming.
            for p, v in zip(fn.params, [i, item]):
                closure[p] = v
            _trampoline(_eval(fn.body, closure))
    else:
        for i, item in enumerate(coll or []):
            fn(i, item)
    return NIL
def _dict_set(d, k, v):
    # (dict-set! d k v) — in-place store; silently ignores non-dicts.
    if isinstance(d, dict):
        d[k] = v
    return NIL
def _dict_has(d, k):
return isinstance(d, dict) and k in d
def _dict_get(d, k):
    # (dict-get d k) — lookup returning NIL for missing keys or non-dicts.
    if isinstance(d, dict):
        return d.get(k, NIL)
    return NIL

def _append_mut(lst, item):
    # (append! lst item) — in-place push; silently ignores non-lists.
    if isinstance(lst, list):
        lst.append(item)
    return NIL
# Patch the "_deferred" placeholders now that the implementations exist.
env["for-each-indexed"] = _for_each_indexed
env["dict-set!"] = _dict_set
env["dict-has?"] = _dict_has
env["dict-get"] = _dict_get
env["append!"] = _append_mut
def _load_router_from_bootstrap(env):
    """Load router functions from the bootstrapped sx_ref.py.

    The hand-written evaluator can't run router.sx faithfully because
    set! inside lambda closures doesn't propagate to outer scopes
    (the evaluator uses dict copies, not cells). The bootstrapped code
    compiles set! to cell-based mutation, so we import from there.
    """
    try:
        from shared.sx.ref.sx_ref import (
            split_path_segments,
            parse_route_pattern,
            match_route_segments,
            match_route,
            find_matching_route,
            make_route_segment,
        )
        # Expose under the kebab-case names the SX specs expect.
        env["split-path-segments"] = split_path_segments
        env["parse-route-pattern"] = parse_route_pattern
        env["match-route-segments"] = match_route_segments
        env["match-route"] = match_route
        env["find-matching-route"] = find_matching_route
        env["make-route-segment"] = make_route_segment
    except ImportError:
        # Fallback: eval router.sx directly (may fail on set! scoping)
        eval_file("router.sx", env)
def main():
    """CLI entry point.

    `--legacy` replays the monolithic test.sx; otherwise run the specs
    named on the command line (or every registered spec when none are).
    """
    global passed, failed, test_num
    args = sys.argv[1:]

    # Both modes begin with the TAP header, so emit it once up front.
    print("TAP version 13")

    if args and args[0] == "--legacy":
        eval_file("test.sx", env)
    else:
        requested = args if args else list(SPECS.keys())
        # The framework (deftest/defsuite + assertion helpers) must load first.
        eval_file("test-framework.sx", env)
        for name in requested:
            spec = SPECS.get(name)
            if spec is None:
                print(f"# SKIP unknown spec: {name}")
                continue
            # A spec is runnable only when every platform function it
            # needs is present in env.
            unmet = [need for need in spec["needs"] if need not in env]
            if unmet:
                print(f"# SKIP {name} (missing: {unmet[0]})")
                continue
            if name == "router":
                # router.sx defines the functions under test; prefer the
                # bootstrapped versions (see _load_router_from_bootstrap).
                _load_router_from_bootstrap(env)
            print(f"# --- {name} ---")
            eval_file(spec["file"], env)

    # Summary: TAP plan plus totals (counters are updated by the report
    # callbacks during evaluation).
    print()
    print(f"1..{test_num}")
    print(f"# tests {passed + failed}")