Non-blocking batch IO for OCaml kernel + stable component hash

OCaml kernel (sx_server.ml):
- Batch IO mode for aser-slot: batchable helpers (highlight,
  component-source) return placeholders during evaluation instead
  of blocking on stdin. After aser completes, all batched requests
  are flushed to Python at once.
- Python processes them concurrently with asyncio.gather.
- Placeholders (using «IO:N» markers) are replaced with actual
  values in the result string.
- Non-batchable IO (query, action, ctx, request-arg) still uses
  blocking mode — their results drive control flow.

Python bridge (ocaml_bridge.py):
- _read_until_ok handles batched protocol: collects io-request
  lines with numeric IDs, processes on (io-done N) with gather.
- IO result cache for pure helpers — eliminates redundant calls.
- _handle_io_request strips batch ID from request format.

Component caching (jinja_bridge.py):
- Hash computed from FULL component env (all names + bodies),
  not per-page subset. Stable across all pages — browser caches
  once, no re-download on navigation between pages.
- invalidate_component_hash() called on hot-reload.

Tests: 15/15 OCaml helper tests pass (2 new batch IO tests).

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
2026-03-19 16:53:01 +00:00
parent d3b3b4b720
commit 96e7bbbac1
4 changed files with 423 additions and 58 deletions

View File

@@ -139,6 +139,87 @@ class TestHelperInjection(unittest.IsolatedAsyncioTestCase):
self.assertNotIn("~test/code-display", result)
class TestBatchIO(unittest.IsolatedAsyncioTestCase):
    """Test that batchable helper calls are collected and resolved concurrently."""

    @classmethod
    def setUpClass(cls):
        # Skip the whole class when the OCaml kernel binary is not built.
        _skip_if_no_binary()

    async def _send_form(self, form):
        """Send one S-expression to the kernel and drain output until OK.

        Holds the bridge lock for the full round-trip so the send and the
        read cannot interleave with another coroutine's traffic.
        """
        async with self.bridge._lock:
            await self.bridge._send(form)
            await self.bridge._read_until_ok(ctx=None)

    async def _load_file(self, path):
        """Load an .sx file into the kernel via a (load "...") form."""
        await self._send_form(f'(load "{_escape(path)}")')

    async def _load_source(self, sx):
        """Load inline .sx source into the kernel via (load-source "...")."""
        await self._send_form(f'(load-source "{_escape(sx)}")')

    async def asyncSetUp(self):
        """Start a fresh bridge and load the spec + web adapter sources."""
        self.bridge = OcamlBridge()
        await self.bridge.start()
        spec_dir = os.path.join(_project_root, "spec")
        web_dir = os.path.join(_project_root, "web")
        for name in ["parser.sx", "render.sx"]:
            path = os.path.join(spec_dir, name)
            if os.path.isfile(path):
                await self._load_file(path)
        adapter = os.path.join(web_dir, "adapter-sx.sx")
        if os.path.isfile(adapter):
            await self._load_file(adapter)

    async def asyncTearDown(self):
        await self.bridge.stop()

    async def test_batch_highlight_calls(self):
        """Multiple highlight calls in aser_slot are batched, not sequential."""
        # Map highlight to json-encode (available without Quart app)
        sx = '(define highlight (fn (a b) (helper "json-encode" a)))'
        await self._load_source(sx)
        comp = (
            '(defcomp ~test/batch (&key)'
            ' (div (p (highlight "aaa" "x"))'
            ' (p (highlight "bbb" "x"))'
            ' (p (highlight "ccc" "x"))))'
        )
        await self._load_source(comp)
        result = await self.bridge.aser_slot(
            '(~test/batch)', ctx={"_helper_service": "sx"})
        # All 3 values present — placeholders replaced
        self.assertIn("aaa", result)
        self.assertIn("bbb", result)
        self.assertIn("ccc", result)
        # No placeholder markers remaining
        self.assertNotIn("\u00ab", result)  # «
        self.assertNotIn("\u00bb", result)  # »

    async def test_batch_faster_than_sequential(self):
        """Batched IO should be faster than N sequential round-trips."""
        sx = '(define highlight (fn (a b) (helper "json-encode" a)))'
        await self._load_source(sx)
        calls = " ".join(f'(p (highlight "v{i}" "x"))' for i in range(10))
        comp = f'(defcomp ~test/perf (&key) (div {calls}))'
        await self._load_source(comp)
        t0 = time.monotonic()
        result = await self.bridge.aser_slot(
            '(~test/perf)', ctx={"_helper_service": "sx"})
        elapsed = time.monotonic() - t0
        # All 10 values present
        for i in range(10):
            self.assertIn(f"v{i}", result)
        # Should complete in under 2 seconds (batched, not 10 × round-trip)
        self.assertLess(elapsed, 2.0,
            f"10 batched IO calls took {elapsed:.1f}s (target: <2s)")
class TestHelperIOPerformance(unittest.IsolatedAsyncioTestCase):
"""Test that helper IO round-trips are fast enough for production."""