Add generic streaming interpreter with configurable sources/audio

- Add stream_sexp_generic.py: fully generic sexp interpreter
- Add streaming primitives for video sources and audio analysis
- Add config system for external sources and audio files
- Add templates for reusable scans and macros
- Fix video/audio stream mapping in file output
- Add dynamic source cycling based on sources array length
- Remove old Python effect files (migrated to sexp)
- Update sexp effects to use namespaced primitives

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
gilesb
2026-02-02 17:48:04 +00:00
parent d241e2a663
commit 95fcc67dcc
179 changed files with 3935 additions and 8226 deletions

View File

@@ -167,6 +167,8 @@ class FileOutput(Output):
# Add audio input if provided
if audio_source:
cmd.extend(["-i", str(audio_source)])
# Explicitly map: video from input 0 (rawvideo), audio from input 1
cmd.extend(["-map", "0:v", "-map", "1:a"])
cmd.extend([
"-c:v", codec,
@@ -181,10 +183,12 @@ class FileOutput(Output):
cmd.append(str(self.path))
import sys
print(f"FileOutput cmd: {' '.join(cmd)}", file=sys.stderr)
self._process = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stderr=subprocess.DEVNULL,
stderr=None, # Show errors for debugging
)
def write(self, frame: np.ndarray, t: float):
@@ -257,3 +261,109 @@ class NullOutput(Output):
@property
def is_open(self) -> bool:
return self._is_open
class PipeOutput(Output):
    """
    Pipe frames directly to mpv.

    Launches mpv with a rawvideo demuxer on the first write and feeds RGB24
    frames to its stdin. If an audio source is configured, ffplay is started
    alongside for best-effort synchronized playback.
    """

    # Seconds to wait for a child to exit on terminate before force-killing.
    _SHUTDOWN_TIMEOUT = 2.0

    def __init__(self, size: Tuple[int, int], fps: float = 30, audio_source: str = None):
        """
        Args:
            size: Output (width, height) in pixels.
            fps: Frame rate advertised to mpv's rawvideo demuxer.
            audio_source: Optional path to an audio file played via ffplay.
        """
        self.size = size
        self.fps = fps
        self.audio_source = audio_source
        self._is_open = True
        self._process = None        # mpv subprocess
        self._audio_process = None  # ffplay subprocess
        self._started = False       # children are launched lazily on first write

    def _start(self):
        """Start mpv and audio on first frame."""
        if self._started:
            return
        self._started = True
        import sys
        w, h = self.size
        # Start mpv reading raw RGB24 frames from stdin.
        cmd = [
            "mpv", "--no-cache",
            "--demuxer=rawvideo",
            f"--demuxer-rawvideo-w={w}",
            f"--demuxer-rawvideo-h={h}",
            "--demuxer-rawvideo-mp-format=rgb24",
            f"--demuxer-rawvideo-fps={self.fps}",
            "--title=Streaming",
            "-"
        ]
        print(f"Starting mpv: {w}x{h} @ {self.fps}fps", file=sys.stderr)
        self._process = subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
        )
        # Start audio playback (best effort; not sample-accurate sync).
        if self.audio_source:
            audio_cmd = [
                "ffplay", "-nodisp", "-autoexit", "-loglevel", "quiet",
                str(self.audio_source)
            ]
            print(f"Starting audio: {self.audio_source}", file=sys.stderr)
            self._audio_process = subprocess.Popen(
                audio_cmd,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )

    def write(self, frame: np.ndarray, t: float):
        """Write one frame to mpv, resizing/converting to uint8 RGB as needed."""
        if not self._is_open:
            return
        self._start()
        # If mpv exited (e.g. window closed), stop writing.
        if self._process.poll() is not None:
            self._is_open = False
            return
        # Resize if needed
        if frame.shape[1] != self.size[0] or frame.shape[0] != self.size[1]:
            import cv2
            frame = cv2.resize(frame, self.size)
        # Ensure uint8, C-contiguous memory so tobytes() is the raw plane.
        if frame.dtype != np.uint8:
            frame = np.clip(frame, 0, 255).astype(np.uint8)
        if not frame.flags['C_CONTIGUOUS']:
            frame = np.ascontiguousarray(frame)
        try:
            self._process.stdin.write(frame.tobytes())
            self._process.stdin.flush()
        except BrokenPipeError:
            self._is_open = False

    def close(self):
        """Close mpv and audio, reaping both child processes."""
        if self._process:
            try:
                self._process.stdin.close()
            except OSError:
                # Pipe may already be broken if mpv died first; still reap below.
                pass
            self._shutdown(self._process)
        if self._audio_process:
            self._shutdown(self._audio_process)
        self._is_open = False

    @staticmethod
    def _shutdown(process):
        """Terminate a child process, escalating to kill if it hangs."""
        process.terminate()
        try:
            process.wait(timeout=PipeOutput._SHUTDOWN_TIMEOUT)
        except subprocess.TimeoutExpired:
            process.kill()
            process.wait()

    @property
    def is_open(self) -> bool:
        # Reflect mpv's liveness: once it exits, the output counts as closed.
        if self._process and self._process.poll() is not None:
            self._is_open = False
        return self._is_open

846
streaming/pipeline.py Normal file
View File

@@ -0,0 +1,846 @@
"""
Streaming pipeline executor.
Directly executes compiled sexp recipes frame-by-frame.
No adapter layer - frames and analysis flow through the DAG.
"""
import sys
import time
import numpy as np
from pathlib import Path
from typing import Dict, List, Any, Optional
from dataclasses import dataclass, field
from .sources import VideoSource
from .audio import StreamingAudioAnalyzer
from .output import DisplayOutput, FileOutput
from .sexp_interp import SexpInterpreter
@dataclass
class FrameContext:
    """Context passed through the pipeline for each frame."""
    t: float  # Current time
    energy: float = 0.0  # Latest audio energy (0.0 when no analyzer is attached)
    is_beat: bool = False  # True while the analyzer reports a beat at time t
    beat_count: int = 0  # Beat onsets seen so far (incremented on rising edges)
    analysis: Dict[str, Any] = field(default_factory=dict)  # Named analysis streams, e.g. 'live_energy'
class StreamingPipeline:
    """
    Executes a compiled sexp recipe as a streaming pipeline.

    Frames flow through the DAG directly - no adapter needed.
    Each node is evaluated lazily when its output is requested.
    """

    def __init__(self, compiled_recipe, recipe_dir: Path = None, fps: float = 30, seed: int = 42,
                 output_size: tuple = None):
        """
        Args:
            compiled_recipe: Compiled recipe object exposing a ``nodes`` list.
            recipe_dir: Directory source paths are resolved against (default ".").
            fps: Target frame rate for reading sources and rendering.
            seed: Base seed for per-scan RNGs, so runs are reproducible.
            output_size: Optional (width, height); when None it is taken from
                the first opened video source.
        """
        self.recipe = compiled_recipe
        self.recipe_dir = recipe_dir or Path(".")
        self.fps = fps
        self.seed = seed
        # Build node lookup
        self.nodes = {n['id']: n for n in compiled_recipe.nodes}
        # Runtime state
        self.sources: Dict[str, VideoSource] = {}
        self.audio_analyzer: Optional[StreamingAudioAnalyzer] = None
        self.audio_source_path: Optional[str] = None
        # Sexp interpreter for expressions
        self.interp = SexpInterpreter()
        # Scan state (node_id -> current value)
        self.scan_state: Dict[str, Any] = {}
        self.scan_emit: Dict[str, Any] = {}
        # SLICE_ON state
        self.slice_on_acc: Dict[str, Any] = {}
        self.slice_on_result: Dict[str, Any] = {}
        # Frame cache for current timestep (cleared each frame)
        self._frame_cache: Dict[str, np.ndarray] = {}
        # Context for current frame
        self.ctx = FrameContext(t=0.0)
        # Output size (w, h) - set after sources are initialized
        self._output_size = output_size
        # Initialize
        self._init_sources()
        self._init_scans()
        self._init_slice_on()
        # Set output size from first source if not specified
        if self._output_size is None and self.sources:
            first_source = next(iter(self.sources.values()))
            self._output_size = first_source._size
    def _init_sources(self):
        """Initialize video and audio sources.

        Walks the recipe's SOURCE nodes and, based on file extension, opens
        video files as VideoSource objects keyed by node id, or registers a
        single audio file for streaming analysis/playback. Missing files
        produce a stderr warning and are skipped.
        """
        for node in self.recipe.nodes:
            if node.get('type') == 'SOURCE':
                config = node.get('config', {})
                path = config.get('path')
                if path:
                    # Source paths are resolved relative to the recipe directory.
                    full_path = (self.recipe_dir / path).resolve()
                    suffix = full_path.suffix.lower()
                    if suffix in ('.mp4', '.webm', '.mov', '.avi', '.mkv'):
                        if not full_path.exists():
                            print(f"Warning: video not found: {full_path}", file=sys.stderr)
                            continue
                        self.sources[node['id']] = VideoSource(
                            str(full_path),
                            target_fps=self.fps
                        )
                    elif suffix in ('.mp3', '.wav', '.flac', '.ogg', '.m4a', '.aac'):
                        if not full_path.exists():
                            print(f"Warning: audio not found: {full_path}", file=sys.stderr)
                            continue
                        # Only one audio stream is kept; the last audio SOURCE wins.
                        self.audio_source_path = str(full_path)
                        self.audio_analyzer = StreamingAudioAnalyzer(str(full_path))
    def _init_scans(self):
        """Initialize scan nodes with their initial state.

        Each SCAN node gets its own seeded random.Random so runs are
        reproducible; per-scan seeds default to self.seed plus an offset.
        """
        import random
        seed_offset = 0
        for node in self.recipe.nodes:
            if node.get('type') == 'SCAN':
                config = node.get('config', {})
                # Create RNG for this scan
                scan_seed = config.get('seed', self.seed + seed_offset)
                rng = random.Random(scan_seed)
                seed_offset += 1
                # Evaluate initial value
                init_expr = config.get('init', 0)
                init_value = self.interp.eval(init_expr, {})
                self.scan_state[node['id']] = {
                    'value': init_value,
                    'rng': rng,
                    'config': config,
                }
                # Compute initial emit
                self._update_scan_emit(node['id'])

    def _update_scan_emit(self, node_id: str):
        """Update the emit value for a scan.

        With no emit expression the raw state value is emitted. Otherwise the
        expression is evaluated in an env built from the state (spread when a
        dict, else bound as 'acc') plus 'beat_count' and 'time'.
        """
        state = self.scan_state[node_id]
        config = state['config']
        emit_expr = config.get('emit_expr', config.get('emit', None))
        if emit_expr is None:
            # No emit expression - emit the value directly
            self.scan_emit[node_id] = state['value']
            return
        # Build environment from state
        env = {}
        if isinstance(state['value'], dict):
            env.update(state['value'])
        else:
            env['acc'] = state['value']
        env['beat_count'] = self.ctx.beat_count
        env['time'] = self.ctx.t
        # Set RNG for interpreter (so rand forms use this scan's stream)
        self.interp.rng = state['rng']
        self.scan_emit[node_id] = self.interp.eval(emit_expr, env)

    def _step_scan(self, node_id: str):
        """Step a scan forward on beat.

        Evaluates the scan's step expression (if any) against the current
        state, replaces the state value, then refreshes the emit value.
        """
        state = self.scan_state[node_id]
        config = state['config']
        step_expr = config.get('step_expr', config.get('step', None))
        if step_expr is None:
            return
        # Build environment
        env = {}
        if isinstance(state['value'], dict):
            env.update(state['value'])
        else:
            env['acc'] = state['value']
        env['beat_count'] = self.ctx.beat_count
        env['time'] = self.ctx.t
        # Set RNG
        self.interp.rng = state['rng']
        # Evaluate step
        new_value = self.interp.eval(step_expr, env)
        state['value'] = new_value
        # Update emit
        self._update_scan_emit(node_id)
    def _init_slice_on(self):
        """Initialize SLICE_ON nodes with their accumulator state."""
        for node in self.recipe.nodes:
            if node.get('type') == 'SLICE_ON':
                config = node.get('config', {})
                init = config.get('init', {})
                # Copy so the compiled recipe's config is never mutated.
                self.slice_on_acc[node['id']] = dict(init)
                # Evaluate initial state
                self._eval_slice_on(node['id'])

    def _eval_slice_on(self, node_id: str):
        """Evaluate a SLICE_ON node's Lambda.

        Calls the node's 'fn' via eval_slice_on_lambda with the current
        accumulator and beat count, stores the result in slice_on_result,
        and carries forward any updated 'acc'. Errors are reported to
        stderr rather than raised, so a bad lambda cannot kill the stream.
        """
        node = self.nodes[node_id]
        config = node.get('config', {})
        fn = config.get('fn')
        videos = config.get('videos', [])
        if not fn:
            return
        acc = self.slice_on_acc[node_id]
        n_videos = len(videos)
        # Set up environment: expose video indices to the lambda.
        self.interp.globals['videos'] = list(range(n_videos))
        try:
            from .sexp_interp import eval_slice_on_lambda
            result = eval_slice_on_lambda(
                fn, acc, self.ctx.beat_count, 0, 1,
                list(range(n_videos)), self.interp
            )
            self.slice_on_result[node_id] = result
            # Update accumulator
            if 'acc' in result:
                self.slice_on_acc[node_id] = result['acc']
        except Exception as e:
            print(f"SLICE_ON eval error: {e}", file=sys.stderr)
def _on_beat(self):
"""Called when a beat is detected."""
self.ctx.beat_count += 1
# Step all scans
for node_id in self.scan_state:
self._step_scan(node_id)
# Step all SLICE_ON nodes
for node_id in self.slice_on_acc:
self._eval_slice_on(node_id)
    def _get_frame(self, node_id: str) -> Optional[np.ndarray]:
        """
        Get the output frame for a node at current time.
        Recursively evaluates inputs as needed.
        Results are cached for the current timestep.
        """
        if node_id in self._frame_cache:
            return self._frame_cache[node_id]
        node = self.nodes.get(node_id)
        if not node:
            return None
        node_type = node.get('type')
        if node_type == 'SOURCE':
            frame = self._eval_source(node)
        elif node_type == 'SEGMENT':
            frame = self._eval_segment(node)
        elif node_type == 'EFFECT':
            frame = self._eval_effect(node)
        elif node_type == 'SLICE_ON':
            frame = self._eval_slice_on_frame(node)
        else:
            # Unknown node type - try to pass through input
            inputs = node.get('inputs', [])
            frame = self._get_frame(inputs[0]) if inputs else None
        self._frame_cache[node_id] = frame
        return frame

    def _eval_source(self, node: dict) -> Optional[np.ndarray]:
        """Evaluate a SOURCE node: read its frame at the current time.
        Returns None when the source was not opened (missing file, audio)."""
        source = self.sources.get(node['id'])
        if source:
            return source.read_frame(self.ctx.t)
        return None

    def _eval_segment(self, node: dict) -> Optional[np.ndarray]:
        """Evaluate a SEGMENT node (time segment of source).

        Shifts the pipeline clock by the segment's start offset and, when a
        numeric duration is given, loops local time within that duration.
        Both start and duration may be bindings resolved at frame time.
        """
        inputs = node.get('inputs', [])
        if not inputs:
            return None
        config = node.get('config', {})
        start = config.get('start', 0)
        duration = config.get('duration')
        # Resolve any bindings
        if isinstance(start, dict):
            start = self._resolve_binding(start) if start.get('_binding') else 0
        if isinstance(duration, dict):
            duration = self._resolve_binding(duration) if duration.get('_binding') else None
        # Adjust time for segment
        t_local = self.ctx.t + (start if isinstance(start, (int, float)) else 0)
        if duration and isinstance(duration, (int, float)):
            t_local = t_local % duration  # Loop within segment
        # Get source frame at adjusted time
        source_id = inputs[0]
        source = self.sources.get(source_id)
        if source:
            return source.read_frame(t_local)
        # Input is not a raw source; fall back to normal node evaluation
        # (note: that path uses the unshifted frame clock).
        return self._get_frame(source_id)
    def _eval_effect(self, node: dict) -> Optional[np.ndarray]:
        """Evaluate an EFFECT node.

        Supported effects: rotate, zoom, invert, hue_shift, blend,
        blend_multi, ripple. Unknown names pass the first input frame
        through unchanged. Parameter values may be bindings/expressions
        and are resolved against the current frame context first.
        """
        import cv2
        inputs = node.get('inputs', [])
        config = node.get('config', {})
        effect_name = config.get('effect')
        # Get input frame(s)
        input_frames = [self._get_frame(inp) for inp in inputs]
        input_frames = [f for f in input_frames if f is not None]
        if not input_frames:
            return None
        frame = input_frames[0]
        # Resolve bindings in config
        params = self._resolve_config(config)
        # Apply effect based on name. The small thresholds skip effectively
        # no-op amounts to avoid needless warps/copies per frame.
        if effect_name == 'rotate':
            angle = params.get('angle', 0)
            if abs(angle) > 0.5:
                h, w = frame.shape[:2]
                center = (w // 2, h // 2)
                matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
                frame = cv2.warpAffine(frame, matrix, (w, h))
        elif effect_name == 'zoom':
            amount = params.get('amount', 1.0)
            if abs(amount - 1.0) > 0.01:
                frame = self._apply_zoom(frame, amount)
        elif effect_name == 'invert':
            amount = params.get('amount', 0)
            if amount > 0.01:
                inverted = 255 - frame
                frame = cv2.addWeighted(frame, 1 - amount, inverted, amount, 0)
        elif effect_name == 'hue_shift':
            degrees = params.get('degrees', 0)
            if abs(degrees) > 1:
                # OpenCV's hue channel spans 0-179 (i.e. degrees / 2).
                hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
                hsv[:, :, 0] = (hsv[:, :, 0].astype(int) + int(degrees / 2)) % 180
                frame = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
        elif effect_name == 'blend':
            if len(input_frames) >= 2:
                opacity = params.get('opacity', 0.5)
                frame = cv2.addWeighted(input_frames[0], 1 - opacity,
                                        input_frames[1], opacity, 0)
        elif effect_name == 'blend_multi':
            weights = params.get('weights', [])
            if len(input_frames) > 1 and weights:
                # Weighted sum in float32, clipped back to uint8.
                h, w = input_frames[0].shape[:2]
                result = np.zeros((h, w, 3), dtype=np.float32)
                for f, wt in zip(input_frames, weights):
                    if f is not None and wt > 0.001:
                        if f.shape[:2] != (h, w):
                            f = cv2.resize(f, (w, h))
                        result += f.astype(np.float32) * wt
                frame = np.clip(result, 0, 255).astype(np.uint8)
        elif effect_name == 'ripple':
            amp = params.get('amplitude', 0)
            if amp > 1:
                frame = self._apply_ripple(frame, amp,
                                           params.get('center_x', 0.5),
                                           params.get('center_y', 0.5),
                                           params.get('frequency', 8),
                                           params.get('decay', 2),
                                           params.get('speed', 5))
        return frame
    def _eval_slice_on_frame(self, node: dict) -> Optional[np.ndarray]:
        """Evaluate a SLICE_ON node - returns composited frame.

        Uses the node's last lambda result (layers + compose weights) to
        weight-blend the referenced video frames into one output frame.
        Falls back to the first video while no result is available yet.
        """
        import cv2
        config = node.get('config', {})
        video_ids = config.get('videos', [])
        result = self.slice_on_result.get(node['id'], {})
        if not result:
            # No result yet - return first video
            if video_ids:
                return self._get_frame(video_ids[0])
            return None
        # Get layers and compose info
        layers = result.get('layers', [])
        compose = result.get('compose', {})
        weights = compose.get('weights', [])
        if not layers or not weights:
            if video_ids:
                return self._get_frame(video_ids[0])
            return None
        # Get frames for each layer
        frames = []
        for i, layer in enumerate(layers):
            video_idx = layer.get('video', i)
            if video_idx < len(video_ids):
                frame = self._get_frame(video_ids[video_idx])
                # Apply layer effects (zoom)
                effects = layer.get('effects', [])
                for eff in effects:
                    eff_name = eff.get('effect')
                    if hasattr(eff_name, 'name'):
                        # Effect names may arrive as Symbol objects; unwrap.
                        eff_name = eff_name.name
                    if eff_name == 'zoom':
                        zoom_amt = eff.get('amount', 1.0)
                        if frame is not None:
                            frame = self._apply_zoom(frame, zoom_amt)
                frames.append(frame)
            else:
                # Out-of-range video index: keep list aligned with weights.
                frames.append(None)
        # Composite with weights - use consistent output size
        if self._output_size:
            w, h = self._output_size
        else:
            # Fallback to first non-None frame size
            for f in frames:
                if f is not None:
                    h, w = f.shape[:2]
                    break
            else:
                return None
        output = np.zeros((h, w, 3), dtype=np.float32)
        for frame, weight in zip(frames, weights):
            if frame is None or weight < 0.001:
                continue
            # Resize to output size
            if frame.shape[1] != w or frame.shape[0] != h:
                frame = cv2.resize(frame, (w, h))
            output += frame.astype(np.float32) * weight
        # Normalize weights so under/over-weighted blends keep brightness.
        total_weight = sum(wt for wt in weights if wt > 0.001)
        if total_weight > 0 and abs(total_weight - 1.0) > 0.01:
            output /= total_weight
        return np.clip(output, 0, 255).astype(np.uint8)
def _resolve_config(self, config: dict) -> dict:
"""Resolve bindings in effect config to actual values."""
resolved = {}
for key, value in config.items():
if key in ('effect', 'effect_path', 'effect_cid', 'effects_registry',
'analysis_refs', 'inputs', 'cid'):
continue
if isinstance(value, dict) and value.get('_binding'):
resolved[key] = self._resolve_binding(value)
elif isinstance(value, dict) and value.get('_expr'):
resolved[key] = self._resolve_expr(value)
else:
resolved[key] = value
return resolved
def _resolve_binding(self, binding: dict) -> Any:
"""Resolve a binding to its current value."""
source_id = binding.get('source')
feature = binding.get('feature', 'values')
range_map = binding.get('range')
# Get raw value from scan or analysis
if source_id in self.scan_emit:
value = self.scan_emit[source_id]
elif source_id in self.ctx.analysis:
data = self.ctx.analysis[source_id]
value = data.get(feature, data.get('values', [0]))[0] if isinstance(data, dict) else data
else:
# Fallback to energy
value = self.ctx.energy
# Extract feature from dict
if isinstance(value, dict) and feature in value:
value = value[feature]
# Apply range mapping
if range_map and isinstance(value, (int, float)):
lo, hi = range_map
value = lo + value * (hi - lo)
return value
    def _resolve_expr(self, expr: dict) -> Any:
        """Resolve a compiled expression.

        Evaluates the wrapped sexp with an environment exposing the live
        audio energy, beat count, current time, and every scan's emitted
        value keyed by its node id.
        """
        env = {
            'energy': self.ctx.energy,
            'beat_count': self.ctx.beat_count,
            't': self.ctx.t,
        }
        # Add scan values
        for scan_id, value in self.scan_emit.items():
            # Use short form if available
            env[scan_id] = value
        # Extract the actual expression from _expr wrapper
        actual_expr = expr.get('_expr', expr)
        return self.interp.eval(actual_expr, env)
    def _apply_zoom(self, frame: np.ndarray, amount: float) -> np.ndarray:
        """Apply zoom to frame.

        amount > 1 crops the center and scales it back up (zoom in);
        amount < 1 shrinks the frame onto a black canvas (zoom out);
        values within ~1% of 1.0 return the frame unchanged.
        """
        import cv2
        h, w = frame.shape[:2]
        if amount > 1.01:
            # Zoom in: crop center
            new_w, new_h = int(w / amount), int(h / amount)
            if new_w > 0 and new_h > 0:
                x1, y1 = (w - new_w) // 2, (h - new_h) // 2
                cropped = frame[y1:y1+new_h, x1:x1+new_w]
                return cv2.resize(cropped, (w, h))
        elif amount < 0.99:
            # Zoom out: shrink and center
            scaled_w, scaled_h = int(w * amount), int(h * amount)
            if scaled_w > 0 and scaled_h > 0:
                shrunk = cv2.resize(frame, (scaled_w, scaled_h))
                canvas = np.zeros((h, w, 3), dtype=np.uint8)
                x_off, y_off = (w - scaled_w) // 2, (h - scaled_h) // 2
                canvas[y_off:y_off+scaled_h, x_off:x_off+scaled_w] = shrunk
                return canvas
        return frame

    def _apply_ripple(self, frame: np.ndarray, amplitude: float,
                      cx: float, cy: float, frequency: float,
                      decay: float, speed: float) -> np.ndarray:
        """Apply ripple effect.

        Displaces pixels radially from center (cx, cy) - given as 0-1
        fractions of the frame - by a sine wave that travels outward over
        time and is damped with distance. Resampling uses cv2.remap with
        reflected borders so edges stay filled.
        """
        import cv2
        h, w = frame.shape[:2]
        # Create coordinate grids
        y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32)
        # Normalize to center
        center_x, center_y = w * cx, h * cy
        dx = x_coords - center_x
        dy = y_coords - center_y
        dist = np.sqrt(dx**2 + dy**2)
        # Ripple displacement: phase advances with time so waves travel.
        phase = self.ctx.t * speed
        ripple = amplitude * np.sin(dist / frequency - phase) * np.exp(-dist * decay / max(w, h))
        # Displace coordinates along the radial direction.
        angle = np.arctan2(dy, dx)
        map_x = (x_coords + ripple * np.cos(angle)).astype(np.float32)
        map_y = (y_coords + ripple * np.sin(angle)).astype(np.float32)
        return cv2.remap(frame, map_x, map_y, cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)
def _find_output_node(self) -> Optional[str]:
"""Find the final output node (MUX or last EFFECT)."""
# Look for MUX node
for node in self.recipe.nodes:
if node.get('type') == 'MUX':
return node['id']
# Otherwise find last EFFECT after SLICE_ON
last_effect = None
found_slice_on = False
for node in self.recipe.nodes:
if node.get('type') == 'SLICE_ON':
found_slice_on = True
elif node.get('type') == 'EFFECT' and found_slice_on:
last_effect = node['id']
return last_effect
    def render_frame(self, t: float) -> Optional[np.ndarray]:
        """Render a single frame at time t.

        Refreshes audio analysis, fires _on_beat() on a rising beat edge,
        then evaluates the output node's frame and resizes it to the
        configured output size. Returns None when there is no output node
        or it produced no frame.
        """
        # Clear frame cache
        self._frame_cache.clear()
        # Update context
        self.ctx.t = t
        # Update audio analysis
        if self.audio_analyzer:
            self.audio_analyzer.set_time(t)
            energy = self.audio_analyzer.get_energy()
            is_beat = self.audio_analyzer.get_beat()
            # Beat edge detection: trigger once per onset, not every frame
            # the analyzer reports "in beat".
            was_beat = self.ctx.is_beat
            self.ctx.energy = energy
            self.ctx.is_beat = is_beat
            if is_beat and not was_beat:
                self._on_beat()
            # Store in analysis dict
            self.ctx.analysis['live_energy'] = {'values': [energy]}
            self.ctx.analysis['live_beat'] = {'values': [1.0 if is_beat else 0.0]}
        # Find output node and render
        output_node = self._find_output_node()
        if output_node:
            frame = self._get_frame(output_node)
            # Normalize to output size
            if frame is not None and self._output_size:
                w, h = self._output_size
                if frame.shape[1] != w or frame.shape[0] != h:
                    import cv2
                    frame = cv2.resize(frame, (w, h))
            return frame
        return None
    def run(self, output: str = "preview", duration: float = None):
        """
        Run the pipeline.

        Renders frames sequentially, writing each to the chosen output and
        printing progress to stderr every 50 frames. Ctrl-C stops cleanly;
        the output and all video sources are always closed.

        Args:
            output: "preview", filename, or Output object
            duration: Duration in seconds (default: audio duration or 60s)
        """
        # Determine duration
        if duration is None:
            if self.audio_analyzer:
                duration = self.audio_analyzer.duration
            else:
                duration = 60.0
        # Create output
        if output == "preview":
            # Get frame size from first source
            first_source = next(iter(self.sources.values()), None)
            if first_source:
                w, h = first_source._size
            else:
                w, h = 720, 720
            out = DisplayOutput(size=(w, h), fps=self.fps, audio_source=self.audio_source_path)
        elif isinstance(output, str):
            first_source = next(iter(self.sources.values()), None)
            if first_source:
                w, h = first_source._size
            else:
                w, h = 720, 720
            out = FileOutput(output, size=(w, h), fps=self.fps, audio_source=self.audio_source_path)
        else:
            # Caller supplied a ready-made Output object.
            out = output
        frame_time = 1.0 / self.fps
        n_frames = int(duration * self.fps)
        print(f"Streaming: {len(self.sources)} sources -> {output}", file=sys.stderr)
        print(f"Duration: {duration:.1f}s, {n_frames} frames @ {self.fps}fps", file=sys.stderr)
        start_time = time.time()
        frame_count = 0
        try:
            for frame_num in range(n_frames):
                t = frame_num * frame_time
                frame = self.render_frame(t)
                if frame is not None:
                    out.write(frame, t)
                    frame_count += 1
                # Progress
                if frame_num % 50 == 0:
                    elapsed = time.time() - start_time
                    fps = frame_count / elapsed if elapsed > 0 else 0
                    pct = 100 * frame_num / n_frames
                    print(f"\r{pct:5.1f}% | {fps:5.1f} fps | frame {frame_num}/{n_frames}",
                          end="", file=sys.stderr)
        except KeyboardInterrupt:
            print("\nInterrupted", file=sys.stderr)
        finally:
            # Always release the output and every opened video source.
            out.close()
            for src in self.sources.values():
                src.close()
        elapsed = time.time() - start_time
        avg_fps = frame_count / elapsed if elapsed > 0 else 0
        print(f"\nCompleted: {frame_count} frames in {elapsed:.1f}s ({avg_fps:.1f} fps avg)",
              file=sys.stderr)
def run_pipeline(recipe_path: str, output: str = "preview",
                 duration: float = None, fps: float = None):
    """
    Run a recipe through the streaming pipeline.

    No adapter layer - directly executes the compiled recipe.

    Args:
        recipe_path: Path to the .sexp recipe file.
        output: "preview", a filename, or an Output object.
        duration: Seconds to render (default: audio duration or 60s).
        fps: Frame rate override (default: recipe's encoding fps or 30).
    """
    from pathlib import Path
    # Add artdag to path
    import sys
    sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag"))
    from artdag.sexp.compiler import compile_string
    recipe_path = Path(recipe_path)
    recipe_text = recipe_path.read_text()
    compiled = compile_string(recipe_text, {}, recipe_dir=recipe_path.parent)
    pipeline = StreamingPipeline(
        compiled,
        recipe_dir=recipe_path.parent,
        fps=fps or compiled.encoding.get('fps', 30),
    )
    pipeline.run(output=output, duration=duration)
def run_pipeline_piped(recipe_path: str, duration: float = None, fps: float = None):
    """
    Run pipeline and pipe directly to mpv with audio.

    Compiles the recipe, renders frames in a loop, and writes raw RGB24
    bytes to an mpv child process; if the recipe declares audio, it is
    played in parallel via ffplay. Stops early if mpv's window is closed
    or on Ctrl-C. Child processes are always terminated AND reaped.

    Args:
        recipe_path: Path to the .sexp recipe file.
        duration: Seconds to render (default: audio duration, else 60s).
        fps: Frame rate override (default: recipe's encoding fps or 30).
    """
    import subprocess
    from pathlib import Path
    import sys
    sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag"))
    from artdag.sexp.compiler import compile_string
    recipe_path = Path(recipe_path)
    recipe_text = recipe_path.read_text()
    compiled = compile_string(recipe_text, {}, recipe_dir=recipe_path.parent)
    pipeline = StreamingPipeline(
        compiled,
        recipe_dir=recipe_path.parent,
        fps=fps or compiled.encoding.get('fps', 30),
    )
    # Frame size comes from the first video source (fallback: 720x720).
    first_source = next(iter(pipeline.sources.values()), None)
    if first_source:
        w, h = first_source._size
    else:
        w, h = 720, 720
    # Determine duration
    if duration is None:
        if pipeline.audio_analyzer:
            duration = pipeline.audio_analyzer.duration
        else:
            duration = 60.0
    actual_fps = fps or compiled.encoding.get('fps', 30)
    n_frames = int(duration * actual_fps)
    frame_time = 1.0 / actual_fps
    print(f"Streaming {n_frames} frames @ {actual_fps}fps to mpv", file=sys.stderr)
    # Start mpv reading raw RGB24 frames from stdin.
    mpv_cmd = [
        "mpv", "--no-cache",
        "--demuxer=rawvideo",
        f"--demuxer-rawvideo-w={w}",
        f"--demuxer-rawvideo-h={h}",
        "--demuxer-rawvideo-mp-format=rgb24",
        f"--demuxer-rawvideo-fps={actual_fps}",
        "--title=Streaming Pipeline",
        "-"
    ]
    mpv = subprocess.Popen(mpv_cmd, stdin=subprocess.PIPE, stderr=subprocess.DEVNULL)
    # Start audio if available
    audio_proc = None
    if pipeline.audio_source_path:
        audio_cmd = ["ffplay", "-nodisp", "-autoexit", "-loglevel", "quiet",
                     pipeline.audio_source_path]
        audio_proc = subprocess.Popen(audio_cmd, stdout=subprocess.DEVNULL,
                                      stderr=subprocess.DEVNULL)

    def _stop(proc):
        # Terminate and *reap* a child so no zombie process is left behind.
        proc.terminate()
        try:
            proc.wait(timeout=2)
        except subprocess.TimeoutExpired:
            proc.kill()
            proc.wait()

    try:
        import cv2
        for frame_num in range(n_frames):
            if mpv.poll() is not None:
                break  # mpv closed
            t = frame_num * frame_time
            frame = pipeline.render_frame(t)
            if frame is not None:
                # Ensure consistent frame size and contiguous memory.
                if frame.shape[1] != w or frame.shape[0] != h:
                    frame = cv2.resize(frame, (w, h))
                if not frame.flags['C_CONTIGUOUS']:
                    frame = np.ascontiguousarray(frame)
                try:
                    mpv.stdin.write(frame.tobytes())
                    mpv.stdin.flush()
                except BrokenPipeError:
                    break
    except KeyboardInterrupt:
        pass
    finally:
        # Closing a broken pipe can itself raise; never skip the reaping below.
        if mpv.stdin:
            try:
                mpv.stdin.close()
            except OSError:
                pass
        _stop(mpv)
        if audio_proc:
            _stop(audio_proc)
        for src in pipeline.sources.values():
            src.close()
if __name__ == "__main__":
    # CLI entry point: route to mpv-piped playback or file/preview rendering.
    import argparse
    parser = argparse.ArgumentParser(description="Run sexp recipe through streaming pipeline")
    parser.add_argument("recipe", help="Path to .sexp recipe file")
    parser.add_argument("-o", "--output", default="pipe",
                        help="Output: 'pipe' (mpv), 'preview', or filename (default: pipe)")
    parser.add_argument("-d", "--duration", type=float, default=None,
                        help="Duration in seconds (default: audio duration)")
    parser.add_argument("--fps", type=float, default=None,
                        help="Frame rate (default: from recipe)")
    args = parser.parse_args()
    # 'pipe' streams straight to mpv; everything else goes through run_pipeline.
    if args.output == "pipe":
        run_pipeline_piped(args.recipe, duration=args.duration, fps=args.fps)
    else:
        run_pipeline(args.recipe, output=args.output, duration=args.duration, fps=args.fps)

View File

@@ -412,3 +412,59 @@ def run_recipe(
"""
adapter = RecipeAdapter(recipe_path, params=params)
adapter.run(output=output, duration=duration, fps=fps)
def run_recipe_piped(
    recipe_path: str,
    duration: float = None,
    params: Dict = None,
    fps: float = None,
):
    """
    Run recipe and pipe directly to mpv.

    Builds the streaming compositor from the recipe, wraps it in a
    PipeOutput (mpv video + optional ffplay audio), and drives it with a
    SexpStreamingExecutor.

    Args:
        recipe_path: Path to the .sexp recipe file.
        duration: Seconds to run (default: decided by the compositor).
        params: Optional recipe parameter overrides.
        fps: Frame rate override (default: recipe's encoding fps or 30).
    """
    from .output import PipeOutput
    adapter = RecipeAdapter(recipe_path, params=params)
    compositor = adapter.build_compositor(analysis_data={}, fps=fps)
    # Get frame size from the compositor's first source (fallback 720x720).
    if compositor.sources:
        first_source = compositor.sources[0]
        w, h = first_source._size
    else:
        w, h = 720, 720
    actual_fps = fps or adapter.compiled.encoding.get('fps', 30)
    # Create pipe output
    pipe_out = PipeOutput(
        size=(w, h),
        fps=actual_fps,
        audio_source=compositor._audio_source
    )
    # Create executor
    from .sexp_executor import SexpStreamingExecutor
    executor = SexpStreamingExecutor(adapter.compiled, seed=42)
    # Run with pipe output
    compositor.run(output=pipe_out, duration=duration, recipe_executor=executor)
if __name__ == "__main__":
    # CLI entry point: route to mpv-piped playback or file/preview rendering.
    import argparse
    parser = argparse.ArgumentParser(description="Run sexp recipe with streaming compositor")
    parser.add_argument("recipe", help="Path to .sexp recipe file")
    parser.add_argument("-o", "--output", default="pipe",
                        help="Output: 'pipe' (mpv), 'preview', or filename (default: pipe)")
    parser.add_argument("-d", "--duration", type=float, default=None,
                        help="Duration in seconds (default: audio duration)")
    parser.add_argument("--fps", type=float, default=None,
                        help="Frame rate (default: from recipe)")
    args = parser.parse_args()
    # 'pipe' streams straight to mpv; everything else goes through run_recipe.
    if args.output == "pipe":
        run_recipe_piped(args.recipe, duration=args.duration, fps=args.fps)
    else:
        run_recipe(args.recipe, output=args.output, duration=args.duration, fps=args.fps)

1081
streaming/stream_sexp.py Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,859 @@
"""
Fully Generic Streaming S-expression Interpreter.
The interpreter knows NOTHING about video, audio, or any domain.
All domain logic comes from primitives loaded via (require-primitives ...).
Built-in forms:
- Control: if, cond, let, let*, lambda, ->
- Arithmetic: +, -, *, /, mod, map-range
- Comparison: <, >, =, <=, >=, and, or, not
- Data: dict, get, list, nth, len, quote
- Random: rand, rand-int, rand-range
- Scan: bind (access scan state)
Everything else comes from primitives or effects.
Context (ctx) is passed explicitly to frame evaluation:
- ctx.t: current time
- ctx.frame-num: current frame number
- ctx.fps: frames per second
"""
import sys
import time
import json
import hashlib
import math
import numpy as np
from pathlib import Path
from dataclasses import dataclass
from typing import Dict, List, Any, Optional, Tuple
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag"))
from artdag.sexp.parser import parse, parse_all, Symbol, Keyword
@dataclass
class Context:
    """Runtime context passed to frame evaluation."""
    t: float = 0.0  # Current stream time in seconds
    frame_num: int = 0  # Index of the frame being rendered
    fps: float = 30.0  # Target frames per second
class StreamInterpreter:
"""
Fully generic streaming sexp interpreter.
No domain-specific knowledge - just evaluates expressions
and calls primitives.
"""
    def __init__(self, sexp_path: str):
        """Load and parse the stream program at *sexp_path*.

        Only parsing and config extraction happen here; primitives, effects
        and external config files are loaded later (before running).
        """
        self.sexp_path = Path(sexp_path)
        self.sexp_dir = self.sexp_path.parent
        text = self.sexp_path.read_text()
        self.ast = parse(text)
        self.config = self._parse_config()
        # Global environment for def bindings
        self.globals: Dict[str, Any] = {}
        # Scans
        self.scans: Dict[str, dict] = {}
        # Audio playback path (for syncing output)
        self.audio_playback: Optional[str] = None
        # Registries for external definitions
        self.primitives: Dict[str, Any] = {}
        self.effects: Dict[str, dict] = {}
        self.macros: Dict[str, dict] = {}
        # Default location for primitive libraries shipped next to the recipes.
        self.primitive_lib_dir = self.sexp_dir.parent / "sexp_effects" / "primitive_libs"
        self.frame_pipeline = None
        # External config files (set before run())
        self.sources_config: Optional[Path] = None
        self.audio_config: Optional[Path] = None
        import random
        # Seeded RNG so rand-style primitives are reproducible per recipe.
        self.rng = random.Random(self.config.get('seed', 42))
    def _load_config_file(self, config_path):
        """Load a config file and process its definitions.

        Supported top-level forms: ``(require-primitives "lib")``,
        ``(def name expr)`` evaluated into the global env, and
        ``(audio-playback "path")`` which records the audio file to play.
        A missing file only warns; it does not abort the stream.
        """
        config_path = Path(config_path)  # Accept str or Path
        if not config_path.exists():
            print(f"Warning: config file not found: {config_path}", file=sys.stderr)
            return
        text = config_path.read_text()
        ast = parse_all(text)
        for form in ast:
            # Only (symbol ...) lists are meaningful at top level.
            if not isinstance(form, list) or not form:
                continue
            if not isinstance(form[0], Symbol):
                continue
            cmd = form[0].name
            if cmd == 'require-primitives':
                lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"')
                self._load_primitives(lib_name)
            elif cmd == 'def':
                name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
                value = self._eval(form[2], self.globals)
                self.globals[name] = value
                print(f"Config: {name}", file=sys.stderr)
            elif cmd == 'audio-playback':
                # Path relative to working directory (consistent with other paths)
                path = str(form[1]).strip('"')
                self.audio_playback = str(Path(path).resolve())
                print(f"Audio playback: {self.audio_playback}", file=sys.stderr)
def _parse_config(self) -> dict:
"""Parse config from (stream name :key val ...)."""
config = {'fps': 30, 'seed': 42, 'width': 720, 'height': 720}
if not self.ast or not isinstance(self.ast[0], Symbol):
return config
if self.ast[0].name != 'stream':
return config
i = 2
while i < len(self.ast):
if isinstance(self.ast[i], Keyword):
config[self.ast[i].name] = self.ast[i + 1] if i + 1 < len(self.ast) else None
i += 2
elif isinstance(self.ast[i], list):
break
else:
i += 1
return config
    def _load_primitives(self, lib_name: str):
        """Load primitives from a Python library file.

        Searches the known primitive-library directories for
        ``<lib_name>.py``, imports it, and registers callables under
        namespaced dash-case names (e.g. ``geometry:ripple-displace``) from:
        - module functions named ``prim_*`` (the prefix is stripped)
        - an optional module-level ``PRIMITIVES`` dict
        A missing library only warns; nothing is registered.
        """
        import importlib.util
        # Candidate locations, checked in order.
        lib_paths = [
            self.primitive_lib_dir / f"{lib_name}.py",
            self.sexp_dir / "primitive_libs" / f"{lib_name}.py",
            self.sexp_dir.parent / "sexp_effects" / "primitive_libs" / f"{lib_name}.py",
        ]
        lib_path = None
        for p in lib_paths:
            if p.exists():
                lib_path = p
                break
        if not lib_path:
            print(f"Warning: primitive library '{lib_name}' not found", file=sys.stderr)
            return
        # Import the library directly from its file path.
        spec = importlib.util.spec_from_file_location(lib_name, lib_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        count = 0
        for name in dir(module):
            if name.startswith('prim_'):
                func = getattr(module, name)
                prim_name = name[5:]
                dash_name = prim_name.replace('_', '-')
                # Register ONLY with namespace (geometry:ripple-displace)
                self.primitives[f"{lib_name}:{dash_name}"] = func
                count += 1
        if hasattr(module, 'PRIMITIVES'):
            prims = getattr(module, 'PRIMITIVES')
            if isinstance(prims, dict):
                for name, func in prims.items():
                    # Register ONLY with namespace
                    dash_name = name.replace('_', '-')
                    self.primitives[f"{lib_name}:{dash_name}"] = func
                    count += 1
        print(f"Loaded primitives: {lib_name} ({count} functions)", file=sys.stderr)
    def _load_effect(self, effect_path: Path):
        """Load and register an effect from a .sexp file.

        Recognised top-level forms: (require-primitives ...),
        (define-effect ...), (defmacro ...), (effect ... :path ...) and
        (include :path ...) (both recurse into the referenced file),
        and (scan ...).  Unrecognised forms are silently ignored.
        """
        if not effect_path.exists():
            print(f"Warning: effect file not found: {effect_path}", file=sys.stderr)
            return
        text = effect_path.read_text()
        ast = parse_all(text)
        for form in ast:
            # Only (symbol ...) lists are directives; skip everything else.
            if not isinstance(form, list) or not form:
                continue
            if not isinstance(form[0], Symbol):
                continue
            cmd = form[0].name
            if cmd == 'require-primitives':
                # Library name may be a string literal or a bare symbol.
                lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"')
                self._load_primitives(lib_name)
            elif cmd == 'define-effect':
                # (define-effect name :params ((pname :default v ...) ...) body)
                name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
                params = {}
                body = None
                i = 2
                while i < len(form):
                    if isinstance(form[i], Keyword):
                        if form[i].name == 'params' and i + 1 < len(form):
                            # Each param def: (pname :key val ...) -> info dict.
                            for pdef in form[i + 1]:
                                if isinstance(pdef, list) and pdef:
                                    pname = pdef[0].name if isinstance(pdef[0], Symbol) else str(pdef[0])
                                    pinfo = {'default': 0}
                                    j = 1
                                    while j < len(pdef):
                                        if isinstance(pdef[j], Keyword) and j + 1 < len(pdef):
                                            pinfo[pdef[j].name] = pdef[j + 1]
                                            j += 2
                                        else:
                                            j += 1
                                    params[pname] = pinfo
                        i += 2
                    else:
                        # First non-keyword form is the effect body.
                        body = form[i]
                        i += 1
                self.effects[name] = {'params': params, 'body': body}
                print(f"Effect: {name}", file=sys.stderr)
            elif cmd == 'defmacro':
                # (defmacro name (params...) body)
                name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
                params = [p.name if isinstance(p, Symbol) else str(p) for p in form[2]]
                body = form[3]
                self.macros[name] = {'params': params, 'body': body}
            elif cmd == 'effect':
                # Handle (effect name :path "...") in included files - recursive
                i = 2
                while i < len(form):
                    if isinstance(form[i], Keyword) and form[i].name == 'path':
                        path = str(form[i + 1]).strip('"')
                        # Resolve relative to the file being loaded
                        full = (effect_path.parent / path).resolve()
                        self._load_effect(full)
                        i += 2
                    else:
                        i += 1
            elif cmd == 'include':
                # Handle (include :path "...") in included files - recursive
                i = 1
                while i < len(form):
                    if isinstance(form[i], Keyword) and form[i].name == 'path':
                        path = str(form[i + 1]).strip('"')
                        full = (effect_path.parent / path).resolve()
                        self._load_effect(full)
                        i += 2
                    else:
                        i += 1
            elif cmd == 'scan':
                # Handle scans from included files
                # Shape: (scan name trigger-expr :init expr :step expr)
                name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
                trigger_expr = form[2]
                init_val, step_expr = {}, None
                i = 3
                while i < len(form):
                    if isinstance(form[i], Keyword):
                        if form[i].name == 'init' and i + 1 < len(form):
                            # Init value is evaluated once, at load time.
                            init_val = self._eval(form[i + 1], self.globals)
                        elif form[i].name == 'step' and i + 1 < len(form):
                            step_expr = form[i + 1]
                        i += 2
                    else:
                        i += 1
                self.scans[name] = {
                    # Non-dict init values are wrapped under the 'acc' key.
                    'state': dict(init_val) if isinstance(init_val, dict) else {'acc': init_val},
                    'init': init_val,
                    'step': step_expr,
                    'trigger': trigger_expr,
                }
                print(f"Scan: {name}", file=sys.stderr)
    def _init(self):
        """Initialize from sexp - load primitives, effects, defs, scans.

        Walks the recipe's top-level forms once, dispatching on each form's
        head symbol.  External config files are loaded first so that their
        (def ...) and (audio-playback ...) entries win over the recipe's.
        """
        # Load external config files first (they can override recipe definitions)
        if self.sources_config:
            self._load_config_file(self.sources_config)
        if self.audio_config:
            self._load_config_file(self.audio_config)
        for form in self.ast:
            # Only (symbol ...) lists are directives; the stream name,
            # bare keywords, and option values are skipped.
            if not isinstance(form, list) or not form:
                continue
            if not isinstance(form[0], Symbol):
                continue
            cmd = form[0].name
            if cmd == 'require-primitives':
                # Library name may be a string literal or a bare symbol.
                lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"')
                self._load_primitives(lib_name)
            elif cmd == 'effect':
                # (effect name :path "file.sexp") - load definitions from file.
                name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
                i = 2
                while i < len(form):
                    if isinstance(form[i], Keyword) and form[i].name == 'path':
                        path = str(form[i + 1]).strip('"')
                        # Paths resolve relative to the recipe's directory.
                        full = (self.sexp_dir / path).resolve()
                        self._load_effect(full)
                        i += 2
                    else:
                        i += 1
            elif cmd == 'include':
                # (include :path "file.sexp") - like effect, but unnamed.
                i = 1
                while i < len(form):
                    if isinstance(form[i], Keyword) and form[i].name == 'path':
                        path = str(form[i + 1]).strip('"')
                        full = (self.sexp_dir / path).resolve()
                        self._load_effect(full)
                        i += 2
                    else:
                        i += 1
            elif cmd == 'audio-playback':
                # (audio-playback "path") - set audio file for playback sync
                # Skip if already set by config file
                if self.audio_playback is None:
                    path = str(form[1]).strip('"')
                    self.audio_playback = str((self.sexp_dir / path).resolve())
                    print(f"Audio playback: {self.audio_playback}", file=sys.stderr)
            elif cmd == 'def':
                # (def name expr) - evaluate and store in globals
                # Skip if already defined by config file
                name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
                if name in self.globals:
                    print(f"Def: {name} (from config, skipped)", file=sys.stderr)
                    continue
                value = self._eval(form[2], self.globals)
                self.globals[name] = value
                print(f"Def: {name}", file=sys.stderr)
            elif cmd == 'defmacro':
                # (defmacro name (params...) body)
                name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
                params = [p.name if isinstance(p, Symbol) else str(p) for p in form[2]]
                body = form[3]
                self.macros[name] = {'params': params, 'body': body}
            elif cmd == 'scan':
                # (scan name trigger-expr :init expr :step expr) - stateful
                # accumulator stepped each frame when the trigger is truthy.
                name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
                trigger_expr = form[2]
                init_val, step_expr = {}, None
                i = 3
                while i < len(form):
                    if isinstance(form[i], Keyword):
                        if form[i].name == 'init' and i + 1 < len(form):
                            # Init value is evaluated once, at load time.
                            init_val = self._eval(form[i + 1], self.globals)
                        elif form[i].name == 'step' and i + 1 < len(form):
                            step_expr = form[i + 1]
                        i += 2
                    else:
                        i += 1
                self.scans[name] = {
                    # Non-dict init values are wrapped under the 'acc' key.
                    'state': dict(init_val) if isinstance(init_val, dict) else {'acc': init_val},
                    'init': init_val,
                    'step': step_expr,
                    'trigger': trigger_expr,
                }
                print(f"Scan: {name}", file=sys.stderr)
            elif cmd == 'frame':
                # (frame pipeline-expr) - the per-frame render pipeline.
                self.frame_pipeline = form[1] if len(form) > 1 else None
    def _eval(self, expr, env: dict) -> Any:
        """Evaluate an expression.

        Dispatch order is significant: self-evaluating atoms, symbol lookup,
        dict literals, closure calls, special forms, user-defined effects,
        registered primitives, macros, then a dash->underscore primitive
        fallback.  Unknown forms are returned unevaluated.
        """
        # Primitives
        if isinstance(expr, (int, float)):
            return expr
        if isinstance(expr, str):
            return expr
        if isinstance(expr, bool):
            # NOTE(review): unreachable - bool is a subclass of int, so the
            # (int, float) check above already returns booleans.  Harmless.
            return expr
        if isinstance(expr, Symbol):
            name = expr.name
            # Built-in constants
            if name == 'pi':
                return math.pi
            if name == 'true':
                return True
            if name == 'false':
                return False
            if name == 'nil':
                return None
            # Environment lookup
            if name in env:
                return env[name]
            # Global lookup
            if name in self.globals:
                return self.globals[name]
            # Scan state lookup
            if name in self.scans:
                return self.scans[name]['state']
            raise NameError(f"Undefined variable: {name}")
        if isinstance(expr, Keyword):
            # A bare keyword evaluates to its name string.
            return expr.name
        if not isinstance(expr, list) or not expr:
            return expr
        # Dict literal {:key val ...}
        if isinstance(expr[0], Keyword):
            result = {}
            i = 0
            while i < len(expr):
                if isinstance(expr[i], Keyword):
                    result[expr[i].name] = self._eval(expr[i + 1], env) if i + 1 < len(expr) else None
                    i += 2
                else:
                    i += 1
            return result
        head = expr[0]
        if not isinstance(head, Symbol):
            # A list whose head is not a symbol evaluates element-wise.
            return [self._eval(e, env) for e in expr]
        op = head.name
        args = expr[1:]
        # Check for closure call
        if op in env:
            val = env[op]
            if isinstance(val, dict) and val.get('_type') == 'closure':
                closure = val
                closure_env = dict(closure['env'])
                for i, pname in enumerate(closure['params']):
                    # Missing arguments bind to None.
                    closure_env[pname] = self._eval(args[i], env) if i < len(args) else None
                return self._eval(closure['body'], closure_env)
        if op in self.globals:
            val = self.globals[op]
            if isinstance(val, dict) and val.get('_type') == 'closure':
                closure = val
                closure_env = dict(closure['env'])
                for i, pname in enumerate(closure['params']):
                    closure_env[pname] = self._eval(args[i], env) if i < len(args) else None
                return self._eval(closure['body'], closure_env)
        # Threading macro
        if op == '->':
            # (-> x (f a) g) => (g (f x a)): thread result as first argument.
            result = self._eval(args[0], env)
            for form in args[1:]:
                if isinstance(form, list) and form:
                    new_form = [form[0], result] + form[1:]
                    result = self._eval(new_form, env)
                else:
                    result = self._eval([form, result], env)
            return result
        # === Binding ===
        if op == 'bind':
            # (bind scan [:key]) - read a scan's state; 0 if missing.
            scan_name = args[0].name if isinstance(args[0], Symbol) else str(args[0])
            if scan_name in self.scans:
                state = self.scans[scan_name]['state']
                if len(args) > 1:
                    key = args[1].name if isinstance(args[1], Keyword) else str(args[1])
                    return state.get(key, 0)
                return state
            return 0
        # === Arithmetic ===
        if op == '+':
            return sum(self._eval(a, env) for a in args)
        if op == '-':
            # Unary minus negates; otherwise subtract the rest from the first.
            vals = [self._eval(a, env) for a in args]
            return vals[0] - sum(vals[1:]) if len(vals) > 1 else -vals[0]
        if op == '*':
            result = 1
            for a in args:
                result *= self._eval(a, env)
            return result
        if op == '/':
            # Division by zero (and the single-argument form) yields 0.
            vals = [self._eval(a, env) for a in args]
            return vals[0] / vals[1] if len(vals) > 1 and vals[1] != 0 else 0
        if op == 'mod':
            # Modulo by zero (and the single-argument form) yields 0.
            vals = [self._eval(a, env) for a in args]
            return vals[0] % vals[1] if len(vals) > 1 and vals[1] != 0 else 0
        # === Comparison ===
        if op == '<':
            return self._eval(args[0], env) < self._eval(args[1], env)
        if op == '>':
            return self._eval(args[0], env) > self._eval(args[1], env)
        if op == '=':
            return self._eval(args[0], env) == self._eval(args[1], env)
        if op == '<=':
            return self._eval(args[0], env) <= self._eval(args[1], env)
        if op == '>=':
            return self._eval(args[0], env) >= self._eval(args[1], env)
        if op == 'and':
            # Short-circuits; always returns a bool, not the last value.
            for arg in args:
                if not self._eval(arg, env):
                    return False
            return True
        if op == 'or':
            # Short-circuits; returns the first truthy value (Lisp-style).
            result = False
            for arg in args:
                result = self._eval(arg, env)
                if result:
                    return result
            return result
        if op == 'not':
            return not self._eval(args[0], env)
        # === Logic ===
        if op == 'if':
            # A missing else-branch evaluates to None.
            cond = self._eval(args[0], env)
            if cond:
                return self._eval(args[1], env)
            return self._eval(args[2], env) if len(args) > 2 else None
        if op == 'cond':
            # (cond pred1 expr1 pred2 expr2 ...) - first truthy pred wins.
            i = 0
            while i < len(args) - 1:
                pred = self._eval(args[i], env)
                if pred:
                    return self._eval(args[i + 1], env)
                i += 2
            return None
        if op == 'lambda':
            # Captures the defining environment by value (shallow copy).
            params = args[0]
            body = args[1]
            param_names = [p.name if isinstance(p, Symbol) else str(p) for p in params]
            return {'_type': 'closure', 'params': param_names, 'body': body, 'env': dict(env)}
        if op == 'let' or op == 'let*':
            # Both forms bind sequentially, so let behaves like let*.
            bindings = args[0]
            body = args[1]
            new_env = dict(env)
            if bindings and isinstance(bindings[0], list):
                # Paired form: ((name expr) (name expr) ...)
                for binding in bindings:
                    if isinstance(binding, list) and len(binding) >= 2:
                        name = binding[0].name if isinstance(binding[0], Symbol) else str(binding[0])
                        val = self._eval(binding[1], new_env)
                        new_env[name] = val
            else:
                # Flat form: (name expr name expr ...)
                i = 0
                while i < len(bindings):
                    name = bindings[i].name if isinstance(bindings[i], Symbol) else str(bindings[i])
                    val = self._eval(bindings[i + 1], new_env)
                    new_env[name] = val
                    i += 2
            return self._eval(body, new_env)
        # === Dict ===
        if op == 'dict':
            result = {}
            i = 0
            while i < len(args):
                if isinstance(args[i], Keyword):
                    key = args[i].name
                    val = self._eval(args[i + 1], env) if i + 1 < len(args) else None
                    result[key] = val
                    i += 2
                else:
                    i += 1
            return result
        if op == 'get':
            # Missing keys and non-dict objects yield 0.
            obj = self._eval(args[0], env)
            key = args[1].name if isinstance(args[1], Keyword) else self._eval(args[1], env)
            if isinstance(obj, dict):
                return obj.get(key, 0)
            return 0
        # === List ===
        if op == 'list':
            return [self._eval(a, env) for a in args]
        if op == 'quote':
            return args[0] if args else None
        if op == 'nth':
            # Out-of-range indices yield None (no wrapping, no raise).
            lst = self._eval(args[0], env)
            idx = int(self._eval(args[1], env))
            if isinstance(lst, (list, tuple)) and 0 <= idx < len(lst):
                return lst[idx]
            return None
        if op == 'len':
            val = self._eval(args[0], env)
            return len(val) if hasattr(val, '__len__') else 0
        if op == 'map':
            # Note argument order: sequence first, function second.
            seq = self._eval(args[0], env)
            fn = self._eval(args[1], env)
            if not isinstance(seq, (list, tuple)):
                return []
            # Handle closure (lambda from sexp)
            if isinstance(fn, dict) and fn.get('_type') == 'closure':
                results = []
                for item in seq:
                    closure_env = dict(fn['env'])
                    if fn['params']:
                        closure_env[fn['params'][0]] = item
                    results.append(self._eval(fn['body'], closure_env))
                return results
            # Handle Python callable
            if callable(fn):
                return [fn(item) for item in seq]
            return []
        # === Effects ===
        if op in self.effects:
            # Bind declared params to their defaults, override with keyword
            # args; positional arg 0 is the frame, the rest map in order.
            effect = self.effects[op]
            effect_env = dict(env)
            param_names = list(effect['params'].keys())
            for pname, pdef in effect['params'].items():
                effect_env[pname] = pdef.get('default', 0)
            positional_idx = 0
            i = 0
            while i < len(args):
                if isinstance(args[i], Keyword):
                    pname = args[i].name
                    if pname in effect['params'] and i + 1 < len(args):
                        effect_env[pname] = self._eval(args[i + 1], env)
                    i += 2
                else:
                    val = self._eval(args[i], env)
                    if positional_idx == 0:
                        effect_env['frame'] = val
                    elif positional_idx - 1 < len(param_names):
                        effect_env[param_names[positional_idx - 1]] = val
                    positional_idx += 1
                    i += 1
            return self._eval(effect['body'], effect_env)
        # === Primitives ===
        if op in self.primitives:
            # Split sexp args into positionals and keyword args.
            prim_func = self.primitives[op]
            evaluated_args = []
            kwargs = {}
            i = 0
            while i < len(args):
                if isinstance(args[i], Keyword):
                    k = args[i].name
                    v = self._eval(args[i + 1], env) if i + 1 < len(args) else None
                    kwargs[k] = v
                    i += 2
                else:
                    evaluated_args.append(self._eval(args[i], env))
                    i += 1
            try:
                if kwargs:
                    return prim_func(*evaluated_args, **kwargs)
                return prim_func(*evaluated_args)
            except Exception as e:
                # Primitive failures log and return None rather than
                # aborting the frame pipeline.
                print(f"Primitive {op} error: {e}", file=sys.stderr)
                return None
        # === Macros (function-like: args evaluated before binding) ===
        if op in self.macros:
            macro = self.macros[op]
            macro_env = dict(env)
            for i, pname in enumerate(macro['params']):
                # Evaluate args in calling environment before binding
                macro_env[pname] = self._eval(args[i], env) if i < len(args) else None
            return self._eval(macro['body'], macro_env)
        # Underscore variant lookup
        # Fallback: retry primitive lookup mapping dashes to underscores
        # (keyword-arg names are mapped the same way).
        prim_name = op.replace('-', '_')
        if prim_name in self.primitives:
            prim_func = self.primitives[prim_name]
            evaluated_args = []
            kwargs = {}
            i = 0
            while i < len(args):
                if isinstance(args[i], Keyword):
                    k = args[i].name.replace('-', '_')
                    v = self._eval(args[i + 1], env) if i + 1 < len(args) else None
                    kwargs[k] = v
                    i += 2
                else:
                    evaluated_args.append(self._eval(args[i], env))
                    i += 1
            try:
                if kwargs:
                    return prim_func(*evaluated_args, **kwargs)
                return prim_func(*evaluated_args)
            except Exception as e:
                print(f"Primitive {op} error: {e}", file=sys.stderr)
                return None
        # Unknown - return as-is
        return expr
def _step_scans(self, ctx: Context, env: dict):
"""Step scans based on trigger evaluation."""
for name, scan in self.scans.items():
trigger_expr = scan['trigger']
# Evaluate trigger in context
should_step = self._eval(trigger_expr, env)
if should_step:
state = scan['state']
step_env = dict(state)
step_env.update(env)
new_state = self._eval(scan['step'], step_env)
if isinstance(new_state, dict):
scan['state'] = new_state
else:
scan['state'] = {'acc': new_state}
def run(self, duration: float = None, output: str = "pipe"):
"""Run the streaming pipeline."""
# Import output classes - handle both package and direct execution
try:
from .output import PipeOutput, DisplayOutput, FileOutput
except ImportError:
from output import PipeOutput, DisplayOutput, FileOutput
self._init()
if not self.frame_pipeline:
print("Error: no (frame ...) pipeline defined", file=sys.stderr)
return
w = self.config.get('width', 720)
h = self.config.get('height', 720)
fps = self.config.get('fps', 30)
if duration is None:
# Try to get duration from audio if available
for name, val in self.globals.items():
if hasattr(val, 'duration'):
duration = val.duration
print(f"Using audio duration: {duration:.1f}s", file=sys.stderr)
break
else:
duration = 60.0
n_frames = int(duration * fps)
frame_time = 1.0 / fps
print(f"Streaming {n_frames} frames @ {fps}fps", file=sys.stderr)
# Create context
ctx = Context(fps=fps)
# Output (with optional audio sync)
audio = self.audio_playback
if output == "pipe":
out = PipeOutput(size=(w, h), fps=fps, audio_source=audio)
elif output == "preview":
out = DisplayOutput(size=(w, h), fps=fps, audio_source=audio)
else:
out = FileOutput(output, size=(w, h), fps=fps, audio_source=audio)
try:
frame_times = []
for frame_num in range(n_frames):
if not out.is_open:
break
frame_start = time.time()
ctx.t = frame_num * frame_time
ctx.frame_num = frame_num
# Build frame environment with context
frame_env = {
'ctx': {
't': ctx.t,
'frame-num': ctx.frame_num,
'fps': ctx.fps,
},
't': ctx.t, # Also expose t directly for convenience
'frame-num': ctx.frame_num,
}
# Step scans
self._step_scans(ctx, frame_env)
# Evaluate pipeline
result = self._eval(self.frame_pipeline, frame_env)
if result is not None and hasattr(result, 'shape'):
out.write(result, ctx.t)
frame_elapsed = time.time() - frame_start
frame_times.append(frame_elapsed)
# Progress with timing
if frame_num % 30 == 0:
pct = 100 * frame_num / n_frames
avg_ms = 1000 * sum(frame_times[-30:]) / max(1, len(frame_times[-30:]))
target_ms = 1000 * frame_time
print(f"\r{pct:5.1f}% [{avg_ms:.0f}ms/frame, target {target_ms:.0f}ms]", end="", file=sys.stderr, flush=True)
finally:
out.close()
print("\nDone", file=sys.stderr)
def run_stream(sexp_path: str, duration: float = None, output: str = "pipe", fps: float = None,
               sources_config: str = None, audio_config: str = None):
    """Run a streaming sexp.

    Builds a StreamInterpreter for *sexp_path*, applies the optional fps
    override and external source/audio config paths, then runs it.
    """
    interpreter = StreamInterpreter(sexp_path)
    if fps:
        interpreter.config['fps'] = fps
    if sources_config:
        interpreter.sources_config = Path(sources_config)
    if audio_config:
        interpreter.audio_config = Path(audio_config)
    interpreter.run(duration=duration, output=output)
if __name__ == "__main__":
    import argparse

    # CLI wrapper around run_stream(); flags mirror its keyword arguments.
    cli = argparse.ArgumentParser(description="Run streaming sexp (generic interpreter)")
    cli.add_argument("sexp", help="Path to .sexp file")
    cli.add_argument("-d", "--duration", type=float, default=None)
    cli.add_argument("-o", "--output", default="pipe")
    cli.add_argument("--fps", type=float, default=None)
    cli.add_argument("--sources", dest="sources_config", help="Path to sources config .sexp file")
    cli.add_argument("--audio", dest="audio_config", help="Path to audio config .sexp file")
    opts = cli.parse_args()
    run_stream(
        opts.sexp,
        duration=opts.duration,
        output=opts.output,
        fps=opts.fps,
        sources_config=opts.sources_config,
        audio_config=opts.audio_config,
    )