Some checks failed
Test, Build, and Deploy / test-build-deploy (push) Failing after 55s
tests/hs-run-batched.js — fresh-kernel-per-batch conformance runner.
Solves the WASM kernel JIT-cache-saturation problem (compiled VmClosures
accumulate over a single process and slow tests at the tail of the run)
by spawning a child Node process per batch. Each batch starts with an
empty cache, so tests at index 1400 perform identically to tests at
index 100. Configurable batch size (HS_BATCH_SIZE, default 150) and
parallelism (HS_PARALLEL, default 1).
This is option 2 from the cache-architecture plan — the lowest-risk fix:
zero kernel changes, deterministic results, runs in the same time as the
single-process version when parallelism matches CPU count.
plans/jit-cache-architecture.md — sketches the SX-wide architectural
fix in three phases:
1. Tiered compilation — call counter on lambdas; only JIT after K
invocations. Filters out one-shot lambdas (test harness, dynamic
eval, REPLs) at the source.
2. LRU eviction — central cache with fixed budget. Predictable memory
ceiling regardless of input pattern.
3. Reset API — jit-reset!, jit-clear-cold!, jit-stats, jit-pin!
primitives for app-driven cache management.
Layer split: cache data structure + LRU in hosts/ocaml/lib/sx_jit_cache.ml
(new), VM integration in sx_vm.ml, primitives registered in
sx_primitives.ml, declarative spec in spec/primitives.sx, and SX-level
ergonomics (with-jit-threshold, with-fresh-jit, jit-report) in lib/jit.sx.
This is host-specific to the OCaml WASM kernel but the SX API surface is
shared across all hosted languages (HS, Common Lisp, Erlang, etc.).
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
152 lines
5.5 KiB
JavaScript
Executable File
152 lines
5.5 KiB
JavaScript
Executable File
#!/usr/bin/env node
|
|
/**
|
|
* Batched HS conformance runner — option 2 (per-process kernel isolation).
|
|
*
|
|
* Each batch spawns a fresh Node process running tests/hs-run-filtered.js
|
|
* with HS_START/HS_END set, so the WASM kernel's JIT cache starts empty.
|
|
* Avoids the cumulative slowdown that hits the 1-process runner around
|
|
* test 500-700 (compiled lambdas accumulate, allocation stalls).
|
|
*
|
|
* Usage:
|
|
* node tests/hs-run-batched.js
|
|
* HS_BATCH_SIZE=100 node tests/hs-run-batched.js
|
|
* HS_PARALLEL=4 node tests/hs-run-batched.js
|
|
*/
|
|
const { spawnSync, spawn } = require('child_process');
|
|
const path = require('path');
|
|
const fs = require('fs');
|
|
|
|
// Path to the single-process filtered runner that each batch child executes.
const FILTERED = path.join(__dirname, 'hs-run-filtered.js');

// Test-range and execution knobs, all overridable via environment variables.
// parseInt is always given an explicit radix of 10.
const TOTAL = parseInt(process.env.HS_TOTAL || '1496', 10);          // total number of conformance tests
const FROM = parseInt(process.env.HS_FROM || '0', 10);               // first test index to run
const BATCH_SIZE = parseInt(process.env.HS_BATCH_SIZE || '150', 10); // tests per child process
const PARALLEL = parseInt(process.env.HS_PARALLEL || '1', 10);       // concurrent child processes
const VERBOSE = !!process.env.HS_VERBOSE;                            // echo each batch's full output
|
|
|
|
/**
 * Partition the test range [FROM, TOTAL) into half-open batches of
 * BATCH_SIZE indices each; the final batch is clamped to TOTAL.
 *
 * @returns {Array<{start: number, end: number}>}
 */
function makeBatches() {
  const span = TOTAL - FROM;
  const count = span > 0 ? Math.ceil(span / BATCH_SIZE) : 0;
  return Array.from({ length: count }, (_, k) => {
    const start = FROM + k * BATCH_SIZE;
    return { start, end: Math.min(start + BATCH_SIZE, TOTAL) };
  });
}
|
|
|
|
/**
 * Run one batch synchronously in a fresh Node child process.
 * The child runs FILTERED with HS_START/HS_END set, so its WASM kernel
 * JIT cache starts empty.
 *
 * @param {{start: number, end: number}} batch - half-open test index range
 * @returns {{start: number, end: number, elapsed: number, out: string, code: number|null}}
 */
function runBatch({ start, end }) {
  const startedAt = Date.now();
  const childEnv = { ...process.env, HS_START: String(start), HS_END: String(end) };
  const result = spawnSync('node', [FILTERED], {
    env: childEnv,
    encoding: 'utf8',
    timeout: 1800_000, // hard cap: 30 minutes per batch
  });
  // Merge both streams; either may be null when the child fails to start.
  const combined = `${result.stdout || ''}${result.stderr || ''}`;
  return {
    start,
    end,
    elapsed: Date.now() - startedAt,
    out: combined,
    code: result.status,
  };
}
|
|
|
|
/**
 * Parse one batch child's combined stdout/stderr into structured results.
 *
 * Recognizes three line formats emitted by hs-run-filtered.js:
 *   "Results: <pass>/<total>"            — summary counts
 *   "[suite] name: error"                — failure detail (after "All failures:")
 *   "SLOW: test N took Mms [suite] name" / "TIMEOUT: test N [suite] name"
 *
 * @param {string} out - combined output of a hs-run-filtered.js run
 * @returns {{pass: number, fail: number,
 *            failures: Array<{suite: string, name: string, err: string}>,
 *            slow: Array<{idx: number, ms: number, suite: string, name: string}>,
 *            timeouts: Array<{idx: number, suite: string, name: string}>}}
 */
function parseBatch(out) {
  const result = { pass: 0, fail: 0, failures: [], slow: [], timeouts: [] };

  // Summary line: "Results: <pass>/<total>". Always pass an explicit radix.
  const m = out.match(/Results:\s+(\d+)\/(\d+)/);
  if (m) {
    result.pass = parseInt(m[1], 10);
    const total = parseInt(m[2], 10);
    result.fail = total - result.pass;
  }

  // Capture each "[suite] name: error" failure line after the marker.
  const failSection = out.split('All failures:')[1] || '';
  for (const line of failSection.split('\n')) {
    const fm = line.match(/^\s*\[([^\]]+)\]\s+(.+?):\s*(.*)$/);
    if (fm) result.failures.push({ suite: fm[1], name: fm[2], err: fm[3] || '(empty)' });
  }

  // SLOW/TIMEOUT markers can appear anywhere in the output, so scan all of it.
  for (const line of out.split('\n')) {
    const sm = line.match(/SLOW: test (\d+) took (\d+)ms \[([^\]]+)\] (.+)$/);
    if (sm) result.slow.push({ idx: +sm[1], ms: +sm[2], suite: sm[3], name: sm[4] });
    const tm = line.match(/TIMEOUT: test (\d+) \[([^\]]+)\] (.+)$/);
    if (tm) result.timeouts.push({ idx: +tm[1], suite: tm[2], name: tm[3] });
  }
  return result;
}
|
|
|
|
/**
 * Format a millisecond duration for humans: "500ms", "12.3s", or "2m5s".
 *
 * @param {number} ms - non-negative duration in milliseconds
 * @returns {string}
 */
function fmtTime(ms) {
  if (ms < 1000) return `${ms}ms`;
  if (ms < 60_000) return `${(ms / 1000).toFixed(1)}s`;
  // Round to whole seconds FIRST, then split into minutes + seconds.
  // The previous per-part rounding (floor minutes, round remainder) could
  // emit impossible values like "1m60s" for 119500ms; this yields "2m0s".
  const totalSeconds = Math.round(ms / 1000);
  return `${Math.floor(totalSeconds / 60)}m${totalSeconds % 60}s`;
}
|
|
|
|
/**
 * Run batches concurrently through a shared work queue.
 *
 * `concurrency` workers each pull the next unclaimed batch index (the
 * claim is synchronous, so there is no await between check and increment),
 * spawn a fresh child process for it, and print a progress line to stderr
 * as each batch finishes. Results land at their original batch index
 * regardless of completion order.
 *
 * @param {Array<{start: number, end: number}>} batches
 * @param {number} concurrency - number of simultaneous child processes
 * @returns {Promise<Array<{start: number, end: number, elapsed: number, out: string, code: number|null}>>}
 */
async function runParallel(batches, concurrency) {
  const results = new Array(batches.length);
  let next = 0;

  // Spawn one child for batch `b`, capturing its interleaved stdout/stderr.
  const execBatch = (b) =>
    new Promise((resolve) => {
      const startedAt = Date.now();
      let captured = '';
      const child = spawn('node', [FILTERED], {
        env: { ...process.env, HS_START: String(b.start), HS_END: String(b.end) },
      });
      const collect = (chunk) => { captured += chunk; };
      child.stdout.on('data', collect);
      child.stderr.on('data', collect);
      child.on('exit', (code) =>
        resolve({ ...b, elapsed: Date.now() - startedAt, out: captured, code }));
    });

  async function worker() {
    while (next < batches.length) {
      const i = next++;
      results[i] = await execBatch(batches[i]);
      const parsed = parseBatch(results[i].out);
      process.stderr.write(` batch ${batches[i].start}-${batches[i].end}: ${parsed.pass}/${parsed.pass + parsed.fail} (${fmtTime(results[i].elapsed)})\n`);
    }
  }

  await Promise.all(Array.from({ length: concurrency }, worker));
  return results;
}
|
|
|
|
// Entry point: plan batches, run them (parallel or sequential), aggregate
// per-batch results, print a summary report, and exit 1 if anything failed.
(async () => {
  const batches = makeBatches();
  const t0 = Date.now();
  // Progress/diagnostics go to stderr; the final report goes to stdout.
  process.stderr.write(`Running ${TOTAL} tests in ${batches.length} batches of ${BATCH_SIZE} (parallelism=${PARALLEL})\n`);

  let results;
  if (PARALLEL > 1) {
    // Worker-pool path: runParallel prints its own per-batch progress lines.
    results = await runParallel(batches, PARALLEL);
  } else {
    // Sequential path: one synchronous child process per batch, in order.
    results = [];
    for (const b of batches) {
      const r = runBatch(b);
      results.push(r);
      const p = parseBatch(r.out);
      process.stderr.write(` batch ${b.start}-${b.end}: ${p.pass}/${p.pass + p.fail} (${fmtTime(r.elapsed)})\n`);
    }
  }

  // Aggregate pass/fail counts and detail lists across all batches.
  // (Each batch's output is re-parsed here; parseBatch is cheap and pure.)
  let totalPass = 0, totalFail = 0;
  const allFailures = [];
  const allTimeouts = [];
  const slowest = [];
  for (const r of results) {
    const p = parseBatch(r.out);
    totalPass += p.pass;
    totalFail += p.fail;
    allFailures.push(...p.failures);
    allTimeouts.push(...p.timeouts);
    slowest.push(...p.slow);
    // HS_VERBOSE: echo each batch's raw child output to stdout.
    if (VERBOSE) process.stdout.write(r.out);
  }

  const totalElapsed = Date.now() - t0;
  process.stdout.write(`\n=== Conformance ===\n`);
  process.stdout.write(`Total: ${totalPass}/${totalPass + totalFail} (${(100 * totalPass / (totalPass + totalFail)).toFixed(2)}%)\n`);
  process.stdout.write(`Wall: ${fmtTime(totalElapsed)} across ${batches.length} batches\n`);

  if (allFailures.length) {
    process.stdout.write(`\nFailures (${allFailures.length}):\n`);
    for (const f of allFailures) process.stdout.write(` [${f.suite}] ${f.name}: ${f.err}\n`);
  }
  // NOTE(review): the length comparison looks like a heuristic to skip the
  // timeout list when every timeout was already reported as a failure —
  // confirm against hs-run-filtered.js's output format.
  if (allTimeouts.length && allTimeouts.length !== allFailures.length) {
    process.stdout.write(`\nTimeouts (${allTimeouts.length}):\n`);
    for (const t of allTimeouts) process.stdout.write(` [${t.suite}] ${t.name}\n`);
  }
  // Report the ten slowest tests, descending by duration.
  slowest.sort((a, b) => b.ms - a.ms);
  if (slowest.length) {
    process.stdout.write(`\nSlowest 10 tests:\n`);
    for (const s of slowest.slice(0, 10)) process.stdout.write(` ${s.ms}ms [${s.suite}] ${s.name}\n`);
  }

  // Non-zero exit so CI marks the run failed when any test failed.
  process.exit(totalFail > 0 ? 1 : 0);
})();
|