From c590f2e0399296dc026b06d8e0e7b95d99de84da Mon Sep 17 00:00:00 2001 From: giles Date: Tue, 24 Feb 2026 23:10:04 +0000 Subject: [PATCH] Squashed 'test/' content from commit f2edc20 git-subtree-dir: test git-subtree-split: f2edc20cba865a6ef67ca807c2ed6cee8e6c2836 --- .gitignore | 19 + analyze.py | 223 ++ cache.py | 404 +++ configs/audio-dizzy.sexp | 17 + configs/audio-halleluwah.sexp | 17 + configs/sources-default.sexp | 38 + configs/sources-woods-half.sexp | 19 + configs/sources-woods.sexp | 39 + effects/quick_test_explicit.sexp | 150 ++ execute.py | 2368 +++++++++++++++++ plan.py | 415 +++ run-effect.sh | 122 + run-file.sh | 7 + run.py | 127 + run.sh | 7 + run_staged.py | 528 ++++ sexp_effects/__init__.py | 32 + sexp_effects/effects/ascii_art.sexp | 17 + sexp_effects/effects/ascii_art_fx.sexp | 52 + sexp_effects/effects/ascii_fx_zone.sexp | 102 + sexp_effects/effects/ascii_zones.sexp | 30 + sexp_effects/effects/blend.sexp | 31 + sexp_effects/effects/blend_multi.sexp | 58 + sexp_effects/effects/bloom.sexp | 16 + sexp_effects/effects/blur.sexp | 8 + sexp_effects/effects/brightness.sexp | 9 + sexp_effects/effects/color-adjust.sexp | 13 + sexp_effects/effects/color_cycle.sexp | 13 + sexp_effects/effects/contrast.sexp | 9 + sexp_effects/effects/crt.sexp | 30 + sexp_effects/effects/datamosh.sexp | 14 + sexp_effects/effects/echo.sexp | 19 + sexp_effects/effects/edge_detect.sexp | 9 + sexp_effects/effects/emboss.sexp | 13 + sexp_effects/effects/film_grain.sexp | 19 + sexp_effects/effects/fisheye.sexp | 16 + sexp_effects/effects/flip.sexp | 16 + sexp_effects/effects/grayscale.sexp | 7 + sexp_effects/effects/hue_shift.sexp | 12 + sexp_effects/effects/invert.sexp | 9 + sexp_effects/effects/kaleidoscope.sexp | 20 + sexp_effects/effects/layer.sexp | 36 + sexp_effects/effects/mirror.sexp | 33 + sexp_effects/effects/neon_glow.sexp | 23 + sexp_effects/effects/noise.sexp | 8 + sexp_effects/effects/outline.sexp | 24 + sexp_effects/effects/pixelate.sexp | 13 + 
sexp_effects/effects/pixelsort.sexp | 11 + sexp_effects/effects/posterize.sexp | 8 + sexp_effects/effects/resize-frame.sexp | 11 + sexp_effects/effects/rgb_split.sexp | 13 + sexp_effects/effects/ripple.sexp | 19 + sexp_effects/effects/rotate.sexp | 11 + sexp_effects/effects/saturation.sexp | 9 + sexp_effects/effects/scanlines.sexp | 15 + sexp_effects/effects/sepia.sexp | 7 + sexp_effects/effects/sharpen.sexp | 8 + sexp_effects/effects/strobe.sexp | 16 + sexp_effects/effects/swirl.sexp | 17 + sexp_effects/effects/threshold.sexp | 9 + sexp_effects/effects/tile_grid.sexp | 29 + sexp_effects/effects/trails.sexp | 20 + sexp_effects/effects/vignette.sexp | 23 + sexp_effects/effects/wave.sexp | 22 + sexp_effects/effects/zoom.sexp | 8 + sexp_effects/interpreter.py | 1016 ++++++++ sexp_effects/parser.py | 168 ++ sexp_effects/primitive_libs/__init__.py | 102 + sexp_effects/primitive_libs/arrays.py | 196 ++ sexp_effects/primitive_libs/ascii.py | 388 +++ sexp_effects/primitive_libs/blending.py | 116 + sexp_effects/primitive_libs/color.py | 137 + sexp_effects/primitive_libs/color_ops.py | 90 + sexp_effects/primitive_libs/core.py | 271 ++ sexp_effects/primitive_libs/drawing.py | 136 + sexp_effects/primitive_libs/filters.py | 119 + sexp_effects/primitive_libs/geometry.py | 143 + sexp_effects/primitive_libs/image.py | 144 + sexp_effects/primitive_libs/math.py | 164 ++ sexp_effects/primitive_libs/streaming.py | 462 ++++ sexp_effects/primitives.py | 3043 ++++++++++++++++++++++ sexp_effects/test_interpreter.py | 236 ++ streaming/__init__.py | 44 + streaming/audio.py | 486 ++++ streaming/backends.py | 308 +++ streaming/compositor.py | 595 +++++ streaming/demo.py | 125 + streaming/output.py | 369 +++ streaming/pipeline.py | 846 ++++++ streaming/recipe_adapter.py | 470 ++++ streaming/recipe_executor.py | 415 +++ streaming/sexp_executor.py | 678 +++++ streaming/sexp_interp.py | 376 +++ streaming/sources.py | 281 ++ streaming/stream_sexp.py | 1081 ++++++++ streaming/stream_sexp_generic.py 
| 859 ++++++ templates/crossfade-zoom.sexp | 25 + templates/cycle-crossfade.sexp | 65 + templates/process-pair.sexp | 112 + templates/scan-oscillating-spin.sexp | 28 + templates/scan-ripple-drops.sexp | 41 + templates/standard-effects.sexp | 22 + templates/standard-primitives.sexp | 14 + templates/stream-process-pair.sexp | 72 + test_effects_pipeline.py | 258 ++ 105 files changed, 19968 insertions(+) create mode 100644 .gitignore create mode 100644 analyze.py create mode 100644 cache.py create mode 100644 configs/audio-dizzy.sexp create mode 100644 configs/audio-halleluwah.sexp create mode 100644 configs/sources-default.sexp create mode 100644 configs/sources-woods-half.sexp create mode 100644 configs/sources-woods.sexp create mode 100644 effects/quick_test_explicit.sexp create mode 100644 execute.py create mode 100644 plan.py create mode 100644 run-effect.sh create mode 100755 run-file.sh create mode 100755 run.py create mode 100755 run.sh create mode 100644 run_staged.py create mode 100644 sexp_effects/__init__.py create mode 100644 sexp_effects/effects/ascii_art.sexp create mode 100644 sexp_effects/effects/ascii_art_fx.sexp create mode 100644 sexp_effects/effects/ascii_fx_zone.sexp create mode 100644 sexp_effects/effects/ascii_zones.sexp create mode 100644 sexp_effects/effects/blend.sexp create mode 100644 sexp_effects/effects/blend_multi.sexp create mode 100644 sexp_effects/effects/bloom.sexp create mode 100644 sexp_effects/effects/blur.sexp create mode 100644 sexp_effects/effects/brightness.sexp create mode 100644 sexp_effects/effects/color-adjust.sexp create mode 100644 sexp_effects/effects/color_cycle.sexp create mode 100644 sexp_effects/effects/contrast.sexp create mode 100644 sexp_effects/effects/crt.sexp create mode 100644 sexp_effects/effects/datamosh.sexp create mode 100644 sexp_effects/effects/echo.sexp create mode 100644 sexp_effects/effects/edge_detect.sexp create mode 100644 sexp_effects/effects/emboss.sexp create mode 100644 
sexp_effects/effects/film_grain.sexp create mode 100644 sexp_effects/effects/fisheye.sexp create mode 100644 sexp_effects/effects/flip.sexp create mode 100644 sexp_effects/effects/grayscale.sexp create mode 100644 sexp_effects/effects/hue_shift.sexp create mode 100644 sexp_effects/effects/invert.sexp create mode 100644 sexp_effects/effects/kaleidoscope.sexp create mode 100644 sexp_effects/effects/layer.sexp create mode 100644 sexp_effects/effects/mirror.sexp create mode 100644 sexp_effects/effects/neon_glow.sexp create mode 100644 sexp_effects/effects/noise.sexp create mode 100644 sexp_effects/effects/outline.sexp create mode 100644 sexp_effects/effects/pixelate.sexp create mode 100644 sexp_effects/effects/pixelsort.sexp create mode 100644 sexp_effects/effects/posterize.sexp create mode 100644 sexp_effects/effects/resize-frame.sexp create mode 100644 sexp_effects/effects/rgb_split.sexp create mode 100644 sexp_effects/effects/ripple.sexp create mode 100644 sexp_effects/effects/rotate.sexp create mode 100644 sexp_effects/effects/saturation.sexp create mode 100644 sexp_effects/effects/scanlines.sexp create mode 100644 sexp_effects/effects/sepia.sexp create mode 100644 sexp_effects/effects/sharpen.sexp create mode 100644 sexp_effects/effects/strobe.sexp create mode 100644 sexp_effects/effects/swirl.sexp create mode 100644 sexp_effects/effects/threshold.sexp create mode 100644 sexp_effects/effects/tile_grid.sexp create mode 100644 sexp_effects/effects/trails.sexp create mode 100644 sexp_effects/effects/vignette.sexp create mode 100644 sexp_effects/effects/wave.sexp create mode 100644 sexp_effects/effects/zoom.sexp create mode 100644 sexp_effects/interpreter.py create mode 100644 sexp_effects/parser.py create mode 100644 sexp_effects/primitive_libs/__init__.py create mode 100644 sexp_effects/primitive_libs/arrays.py create mode 100644 sexp_effects/primitive_libs/ascii.py create mode 100644 sexp_effects/primitive_libs/blending.py create mode 100644 
sexp_effects/primitive_libs/color.py create mode 100644 sexp_effects/primitive_libs/color_ops.py create mode 100644 sexp_effects/primitive_libs/core.py create mode 100644 sexp_effects/primitive_libs/drawing.py create mode 100644 sexp_effects/primitive_libs/filters.py create mode 100644 sexp_effects/primitive_libs/geometry.py create mode 100644 sexp_effects/primitive_libs/image.py create mode 100644 sexp_effects/primitive_libs/math.py create mode 100644 sexp_effects/primitive_libs/streaming.py create mode 100644 sexp_effects/primitives.py create mode 100644 sexp_effects/test_interpreter.py create mode 100644 streaming/__init__.py create mode 100644 streaming/audio.py create mode 100644 streaming/backends.py create mode 100644 streaming/compositor.py create mode 100644 streaming/demo.py create mode 100644 streaming/output.py create mode 100644 streaming/pipeline.py create mode 100644 streaming/recipe_adapter.py create mode 100644 streaming/recipe_executor.py create mode 100644 streaming/sexp_executor.py create mode 100644 streaming/sexp_interp.py create mode 100644 streaming/sources.py create mode 100644 streaming/stream_sexp.py create mode 100644 streaming/stream_sexp_generic.py create mode 100644 templates/crossfade-zoom.sexp create mode 100644 templates/cycle-crossfade.sexp create mode 100644 templates/process-pair.sexp create mode 100644 templates/scan-oscillating-spin.sexp create mode 100644 templates/scan-ripple-drops.sexp create mode 100644 templates/standard-effects.sexp create mode 100644 templates/standard-primitives.sexp create mode 100644 templates/stream-process-pair.sexp create mode 100644 test_effects_pipeline.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..16ad0a0 --- /dev/null +++ b/.gitignore @@ -0,0 +1,19 @@ +# Python cache +__pycache__/ +*.pyc + +# Media files +*.mp4 +*.mkv +*.webm +*.mp3 + +# Output files +*.json + +# Cache directories +.cache/ +.stage_cache/ +effects/.stage_cache/ +local_server/.cache/ 
+local_server/.data/ diff --git a/analyze.py b/analyze.py new file mode 100644 index 0000000..1affa85 --- /dev/null +++ b/analyze.py @@ -0,0 +1,223 @@ +#!/usr/bin/env python3 +""" +Run analyzers from a recipe and output analysis data as S-expressions. + +Usage: + analyze.py recipe.sexp [-o analysis.sexp] + +Output format: + (analysis + (beats-data + :tempo 120.5 + :times (0.0 0.5 1.0 1.5 ...) + :duration 10.0) + (bass-data + :times (0.0 0.1 0.2 ...) + :values (0.5 0.8 0.3 ...))) +""" + +import sys +import tempfile +import subprocess +import importlib.util +from pathlib import Path + +# Add artdag to path +sys.path.insert(0, str(Path(__file__).parent.parent / "artdag")) + +from artdag.sexp import compile_string, parse +from artdag.sexp.parser import Symbol, Keyword, serialize + + +def load_analyzer(analyzer_path: Path): + """Load an analyzer module from file path.""" + spec = importlib.util.spec_from_file_location("analyzer", analyzer_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module + + +def run_analyzer(analyzer_path: Path, input_path: Path, params: dict) -> dict: + """Run an analyzer and return results.""" + analyzer = load_analyzer(analyzer_path) + return analyzer.analyze(input_path, params) + + +def pre_execute_segment(source_path: Path, start: float, duration: float, work_dir: Path) -> Path: + """Pre-execute a segment to get audio for analysis.""" + suffix = source_path.suffix.lower() + is_audio = suffix in ('.mp3', '.wav', '.flac', '.aac', '.ogg', '.m4a') + + output_ext = ".m4a" if is_audio else ".mp4" + output_path = work_dir / f"segment{output_ext}" + + cmd = ["ffmpeg", "-y", "-i", str(source_path)] + if start: + cmd.extend(["-ss", str(start)]) + if duration: + cmd.extend(["-t", str(duration)]) + + if is_audio: + cmd.extend(["-c:a", "aac", str(output_path)]) + else: + cmd.extend(["-c:v", "libx264", "-preset", "fast", "-crf", "18", + "-c:a", "aac", str(output_path)]) + + subprocess.run(cmd, check=True, 
capture_output=True) + return output_path + + +def to_sexp(value, indent=0): + """Convert a Python value to S-expression string.""" + if isinstance(value, dict): + if not value: + return "()" + items = [] + for k, v in value.items(): + key = k.replace('_', '-') + items.append(f":{key} {to_sexp(v)}") + return "(" + " ".join(items) + ")" + elif isinstance(value, list): + if not value: + return "()" + items = [to_sexp(v) for v in value] + return "(" + " ".join(items) + ")" + elif isinstance(value, str): + return f'"{value}"' + elif isinstance(value, bool): + return "true" if value else "false" + elif value is None: + return "nil" + elif isinstance(value, float): + return f"{value:.6g}" + else: + return str(value) + + +def analyze_recipe(recipe_path: Path, output_file: Path = None): + """Run all analyzers in a recipe and output S-expression analysis data.""" + + recipe_text = recipe_path.read_text() + recipe_dir = recipe_path.parent + + print(f"Compiling: {recipe_path}", file=sys.stderr) + compiled = compile_string(recipe_text) + print(f"Recipe: {compiled.name} v{compiled.version}", file=sys.stderr) + + # Find all ANALYZE nodes and their dependencies + nodes_by_id = {n["id"]: n for n in compiled.nodes} + + # Track source paths and segment outputs + source_paths = {} + segment_outputs = {} + analysis_results = {} + + work_dir = Path(tempfile.mkdtemp(prefix="artdag_analyze_")) + + # Process nodes in dependency order + def get_input_path(node_id: str) -> Path: + """Resolve the input path for a node.""" + if node_id in segment_outputs: + return segment_outputs[node_id] + if node_id in source_paths: + return source_paths[node_id] + + node = nodes_by_id.get(node_id) + if not node: + return None + + if node["type"] == "SOURCE": + path = recipe_dir / node["config"].get("path", "") + source_paths[node_id] = path.resolve() + return source_paths[node_id] + + if node["type"] == "SEGMENT": + inputs = node.get("inputs", []) + if inputs: + input_path = get_input_path(inputs[0]) + if 
input_path: + config = node.get("config", {}) + start = config.get("start", 0) + duration = config.get("duration") + output = pre_execute_segment(input_path, start, duration, work_dir) + segment_outputs[node_id] = output + return output + + return None + + # Find and run all analyzers + for node in compiled.nodes: + if node["type"] == "ANALYZE": + config = node.get("config", {}) + analyzer_name = config.get("analyzer", "unknown") + analyzer_path = config.get("analyzer_path") + + if not analyzer_path: + print(f" Skipping {analyzer_name}: no path", file=sys.stderr) + continue + + # Get input + inputs = node.get("inputs", []) + if not inputs: + print(f" Skipping {analyzer_name}: no inputs", file=sys.stderr) + continue + + input_path = get_input_path(inputs[0]) + if not input_path or not input_path.exists(): + print(f" Skipping {analyzer_name}: input not found", file=sys.stderr) + continue + + # Run analyzer + full_path = recipe_dir / analyzer_path + params = {k: v for k, v in config.items() + if k not in ("analyzer", "analyzer_path", "cid")} + + print(f" Running analyzer: {analyzer_name}", file=sys.stderr) + results = run_analyzer(full_path, input_path, params) + + # Store by node ID for uniqueness (multiple analyzers may have same type) + node_id = node.get("id") + analysis_results[node_id] = results + + times = results.get("times", []) + print(f" {len(times)} times @ {results.get('tempo', 0):.1f} BPM", file=sys.stderr) + + # Generate S-expression output + lines = ["(analysis"] + + for name, data in analysis_results.items(): + # Quote node IDs to prevent parser treating hex like "0e42..." 
as scientific notation + lines.append(f' ("{name}"') + for key, value in data.items(): + sexp_key = key.replace('_', '-') + sexp_value = to_sexp(value) + lines.append(f" :{sexp_key} {sexp_value}") + lines.append(" )") + + lines.append(")") + + output = "\n".join(lines) + + if output_file: + output_file.write_text(output) + print(f"\nAnalysis written to: {output_file}", file=sys.stderr) + else: + print(output) + + print(f"Debug: temp files in {work_dir}", file=sys.stderr) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="Run analyzers from recipe") + parser.add_argument("recipe", type=Path, help="Recipe file (.sexp)") + parser.add_argument("-o", "--output", type=Path, help="Output file (default: stdout)") + + args = parser.parse_args() + + if not args.recipe.exists(): + print(f"Recipe not found: {args.recipe}", file=sys.stderr) + sys.exit(1) + + analyze_recipe(args.recipe, args.output) diff --git a/cache.py b/cache.py new file mode 100644 index 0000000..fdb9a7e --- /dev/null +++ b/cache.py @@ -0,0 +1,404 @@ +#!/usr/bin/env python3 +""" +Unified content cache for artdag. + +Design: + - IPNS (cache_id) = computation hash, known BEFORE execution + "What would be the result of running X with inputs Y?" + + - CID = content hash, known AFTER execution + "What is this actual content?" 
+ +Structure: + .cache/ + refs/ # IPNS → CID mappings (computation → result) + {cache_id} # Text file containing the CID of the result + content/ # Content-addressed storage + {cid[:2]}/{cid} # Actual content by CID +""" + +import hashlib +import json +import os +from pathlib import Path +from typing import Optional, Dict, Any, Tuple + +# Default cache location - can be overridden via ARTDAG_CACHE env var +DEFAULT_CACHE_DIR = Path(__file__).parent / ".cache" + + +def get_cache_dir() -> Path: + """Get the cache directory, creating if needed.""" + cache_dir = Path(os.environ.get("ARTDAG_CACHE", DEFAULT_CACHE_DIR)) + cache_dir.mkdir(parents=True, exist_ok=True) + return cache_dir + + +def get_refs_dir() -> Path: + """Get the refs directory (IPNS → CID mappings).""" + refs_dir = get_cache_dir() / "refs" + refs_dir.mkdir(parents=True, exist_ok=True) + return refs_dir + + +def get_content_dir() -> Path: + """Get the content directory (CID → content).""" + content_dir = get_cache_dir() / "content" + content_dir.mkdir(parents=True, exist_ok=True) + return content_dir + + +# ============================================================================= +# CID (Content Hash) Operations +# ============================================================================= + +def compute_cid(content: bytes) -> str: + """Compute content ID (SHA256 hash) for bytes.""" + return hashlib.sha256(content).hexdigest() + + +def compute_file_cid(file_path: Path) -> str: + """Compute content ID for a file.""" + with open(file_path, 'rb') as f: + return compute_cid(f.read()) + + +def compute_string_cid(text: str) -> str: + """Compute content ID for a string.""" + return compute_cid(text.encode('utf-8')) + + +# ============================================================================= +# Content Storage (by CID) +# ============================================================================= + +def _content_path(cid: str) -> Path: + """Get path for content by CID.""" + return get_content_dir() 
/ cid[:2] / cid + + +def content_exists_by_cid(cid: str) -> Optional[Path]: + """Check if content exists by CID.""" + path = _content_path(cid) + if path.exists() and path.stat().st_size > 0: + return path + return None + + +def content_store_by_cid(cid: str, content: bytes) -> Path: + """Store content by its CID.""" + path = _content_path(cid) + path.parent.mkdir(parents=True, exist_ok=True) + path.write_bytes(content) + return path + + +def content_store_file(file_path: Path) -> Tuple[str, Path]: + """Store a file by its content hash. Returns (cid, path).""" + content = file_path.read_bytes() + cid = compute_cid(content) + path = content_store_by_cid(cid, content) + return cid, path + + +def content_store_string(text: str) -> Tuple[str, Path]: + """Store a string by its content hash. Returns (cid, path).""" + content = text.encode('utf-8') + cid = compute_cid(content) + path = content_store_by_cid(cid, content) + return cid, path + + +def content_get(cid: str) -> Optional[bytes]: + """Get content by CID.""" + path = content_exists_by_cid(cid) + if path: + return path.read_bytes() + return None + + +def content_get_string(cid: str) -> Optional[str]: + """Get string content by CID.""" + content = content_get(cid) + if content: + return content.decode('utf-8') + return None + + +# ============================================================================= +# Refs (IPNS → CID mappings) +# ============================================================================= + +def _ref_path(cache_id: str) -> Path: + """Get path for a ref by cache_id.""" + return get_refs_dir() / cache_id + + +def ref_exists(cache_id: str) -> Optional[str]: + """Check if a ref exists. 
Returns CID if found.""" + path = _ref_path(cache_id) + if path.exists(): + return path.read_text().strip() + return None + + +def ref_set(cache_id: str, cid: str) -> Path: + """Set a ref (IPNS → CID mapping).""" + path = _ref_path(cache_id) + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(cid) + return path + + +def ref_get_content(cache_id: str) -> Optional[bytes]: + """Get content by cache_id (looks up ref, then fetches content).""" + cid = ref_exists(cache_id) + if cid: + return content_get(cid) + return None + + +def ref_get_string(cache_id: str) -> Optional[str]: + """Get string content by cache_id.""" + content = ref_get_content(cache_id) + if content: + return content.decode('utf-8') + return None + + +# ============================================================================= +# High-level Cache Operations +# ============================================================================= + +def cache_store(cache_id: str, content: bytes) -> Tuple[str, Path]: + """ + Store content with IPNS→CID indirection. 
+ + Args: + cache_id: Computation hash (IPNS address) + content: Content to store + + Returns: + (cid, path) tuple + """ + cid = compute_cid(content) + path = content_store_by_cid(cid, content) + ref_set(cache_id, cid) + return cid, path + + +def cache_store_file(cache_id: str, file_path: Path) -> Tuple[str, Path]: + """Store a file with IPNS→CID indirection.""" + content = file_path.read_bytes() + return cache_store(cache_id, content) + + +def cache_store_string(cache_id: str, text: str) -> Tuple[str, Path]: + """Store a string with IPNS→CID indirection.""" + return cache_store(cache_id, text.encode('utf-8')) + + +def cache_store_json(cache_id: str, data: Any) -> Tuple[str, Path]: + """Store JSON data with IPNS→CID indirection.""" + text = json.dumps(data, indent=2) + return cache_store_string(cache_id, text) + + +def cache_exists(cache_id: str) -> Optional[Path]: + """Check if cached content exists for a computation.""" + cid = ref_exists(cache_id) + if cid: + return content_exists_by_cid(cid) + return None + + +def cache_get(cache_id: str) -> Optional[bytes]: + """Get cached content by computation hash.""" + return ref_get_content(cache_id) + + +def cache_get_string(cache_id: str) -> Optional[str]: + """Get cached string by computation hash.""" + return ref_get_string(cache_id) + + +def cache_get_json(cache_id: str) -> Optional[Any]: + """Get cached JSON by computation hash.""" + text = cache_get_string(cache_id) + if text: + return json.loads(text) + return None + + +def cache_get_path(cache_id: str) -> Optional[Path]: + """Get path to cached content by computation hash.""" + cid = ref_exists(cache_id) + if cid: + return content_exists_by_cid(cid) + return None + + +# ============================================================================= +# Plan Cache (convenience wrappers) +# ============================================================================= + +def _stable_hash_params(params: Dict[str, Any]) -> str: + """Compute stable hash of params using 
JSON + SHA256 (consistent with CID).""" + params_str = json.dumps(params, sort_keys=True, default=str) + return hashlib.sha256(params_str.encode()).hexdigest() + + +def plan_cache_id(source_cid: str, params: Dict[str, Any] = None) -> str: + """ + Compute the cache_id (IPNS address) for a plan. + + Based on source CID + params. Name/version are just metadata. + """ + key = f"plan:{source_cid}" + if params: + params_hash = _stable_hash_params(params) + key = f"{key}:{params_hash}" + return hashlib.sha256(key.encode()).hexdigest() + + +def plan_exists(source_cid: str, params: Dict[str, Any] = None) -> Optional[str]: + """Check if a cached plan exists. Returns CID if found.""" + cache_id = plan_cache_id(source_cid, params) + return ref_exists(cache_id) + + +def plan_store(source_cid: str, params: Dict[str, Any], content: str) -> Tuple[str, str, Path]: + """ + Store a plan in the cache. + + Returns: + (cache_id, cid, path) tuple + """ + cache_id = plan_cache_id(source_cid, params) + cid, path = cache_store_string(cache_id, content) + return cache_id, cid, path + + +def plan_load(source_cid: str, params: Dict[str, Any] = None) -> Optional[str]: + """Load a plan from cache. 
Returns plan content string.""" + cache_id = plan_cache_id(source_cid, params) + return cache_get_string(cache_id) + + +def plan_get_path(source_cid: str, params: Dict[str, Any] = None) -> Optional[Path]: + """Get path to cached plan.""" + cache_id = plan_cache_id(source_cid, params) + return cache_get_path(cache_id) + + +# ============================================================================= +# Cache Listing +# ============================================================================= + +def list_cache(verbose: bool = False) -> Dict[str, Any]: + """List all cached items.""" + from datetime import datetime + + cache_dir = get_cache_dir() + refs_dir = get_refs_dir() + content_dir = get_content_dir() + + def format_size(size): + if size >= 1_000_000_000: + return f"{size / 1_000_000_000:.1f}GB" + elif size >= 1_000_000: + return f"{size / 1_000_000:.1f}MB" + elif size >= 1000: + return f"{size / 1000:.1f}KB" + else: + return f"{size}B" + + def get_file_info(path: Path) -> Dict: + stat = path.stat() + return { + "path": path, + "name": path.name, + "size": stat.st_size, + "size_str": format_size(stat.st_size), + "mtime": datetime.fromtimestamp(stat.st_mtime), + } + + result = { + "refs": [], + "content": [], + "summary": {"total_items": 0, "total_size": 0}, + } + + # Refs + if refs_dir.exists(): + for f in sorted(refs_dir.iterdir()): + if f.is_file(): + info = get_file_info(f) + info["cache_id"] = f.name + info["cid"] = f.read_text().strip() + # Try to determine type from content + cid = info["cid"] + content_path = content_exists_by_cid(cid) + if content_path: + info["content_size"] = content_path.stat().st_size + info["content_size_str"] = format_size(info["content_size"]) + result["refs"].append(info) + + # Content + if content_dir.exists(): + for subdir in sorted(content_dir.iterdir()): + if subdir.is_dir(): + for f in sorted(subdir.iterdir()): + if f.is_file(): + info = get_file_info(f) + info["cid"] = f.name + result["content"].append(info) + + # 
Summary + result["summary"]["total_refs"] = len(result["refs"]) + result["summary"]["total_content"] = len(result["content"]) + result["summary"]["total_size"] = sum(i["size"] for i in result["content"]) + result["summary"]["total_size_str"] = format_size(result["summary"]["total_size"]) + + return result + + +def print_cache_listing(verbose: bool = False): + """Print cache listing to stdout.""" + info = list_cache(verbose) + cache_dir = get_cache_dir() + + print(f"\nCache directory: {cache_dir}\n") + + # Refs summary + if info["refs"]: + print(f"=== Refs ({len(info['refs'])}) ===") + for ref in info["refs"][:20]: # Show first 20 + content_info = f" → {ref.get('content_size_str', '?')}" if 'content_size_str' in ref else "" + print(f" {ref['cache_id'][:16]}... → {ref['cid'][:16]}...{content_info}") + if len(info["refs"]) > 20: + print(f" ... and {len(info['refs']) - 20} more") + print() + + # Content by type + if info["content"]: + # Group by first 2 chars (subdirectory) + print(f"=== Content ({len(info['content'])} items, {info['summary']['total_size_str']}) ===") + for item in info["content"][:20]: + print(f" {item['cid'][:16]}... {item['size_str']:>8} {item['mtime'].strftime('%Y-%m-%d %H:%M')}") + if len(info["content"]) > 20: + print(f" ... and {len(info['content']) - 20} more") + print() + + print(f"=== Summary ===") + print(f" Refs: {info['summary']['total_refs']}") + print(f" Content: {info['summary']['total_content']} ({info['summary']['total_size_str']})") + + if verbose: + print(f"\nTo clear cache: rm -rf {cache_dir}/*") + + +if __name__ == "__main__": + import sys + verbose = "-v" in sys.argv or "--verbose" in sys.argv + print_cache_listing(verbose) diff --git a/configs/audio-dizzy.sexp b/configs/audio-dizzy.sexp new file mode 100644 index 0000000..dc16087 --- /dev/null +++ b/configs/audio-dizzy.sexp @@ -0,0 +1,17 @@ +;; Audio Configuration - dizzy.mp3 +;; +;; Defines audio analyzer and playback for a recipe. 
+;; Pass to recipe with: --audio configs/audio-dizzy.sexp +;; +;; Provides: +;; - music: audio analyzer for beat/energy detection +;; - audio-playback: path for synchronized playback + +(require-primitives "streaming") + +;; Audio analyzer (provides beat detection and energy levels) +;; Paths relative to working directory (project root) +(def music (streaming:make-audio-analyzer "dizzy.mp3")) + +;; Audio playback path (for sync with video output) +(audio-playback "dizzy.mp3") diff --git a/configs/audio-halleluwah.sexp b/configs/audio-halleluwah.sexp new file mode 100644 index 0000000..5e4b812 --- /dev/null +++ b/configs/audio-halleluwah.sexp @@ -0,0 +1,17 @@ +;; Audio Configuration - halleluwah.webm +;; +;; Defines audio analyzer and playback for a recipe. +;; Pass to recipe with: --audio configs/audio-halleluwah.sexp +;; +;; Provides: +;; - music: audio analyzer for beat/energy detection +;; - audio-playback: path for synchronized playback + +(require-primitives "streaming") + +;; Audio analyzer (provides beat detection and energy levels) +;; Paths relative to working directory (project root) +(def music (streaming:make-audio-analyzer "woods_half/halleluwah.webm")) + +;; Audio playback path (for sync with video output) +(audio-playback "woods_half/halleluwah.webm") diff --git a/configs/sources-default.sexp b/configs/sources-default.sexp new file mode 100644 index 0000000..754bd92 --- /dev/null +++ b/configs/sources-default.sexp @@ -0,0 +1,38 @@ +;; Default Sources Configuration +;; +;; Defines video sources and per-pair effect configurations. 
+;; Pass to recipe with: --sources configs/sources-default.sexp +;; +;; Required by recipes using process-pair macro: +;; - sources: array of video sources +;; - pair-configs: array of effect configurations per source + +(require-primitives "streaming") + +;; Video sources array +;; Paths relative to working directory (project root) +(def sources [ + (streaming:make-video-source "monday.webm" 30) + (streaming:make-video-source "escher.webm" 30) + (streaming:make-video-source "2.webm" 30) + (streaming:make-video-source "disruptors.webm" 30) + (streaming:make-video-source "4.mp4" 30) + (streaming:make-video-source "ecstacy.mp4" 30) + (streaming:make-video-source "dopple.webm" 30) + (streaming:make-video-source "5.mp4" 30) +]) + +;; Per-pair effect config: rotation direction, rotation ranges, zoom ranges +;; :dir = rotation direction (1 or -1) +;; :rot-a, :rot-b = max rotation angles for clip A and B +;; :zoom-a, :zoom-b = max zoom amounts for clip A and B +(def pair-configs [ + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 0: monday + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 1: escher + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 2: vid2 + {:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 3: disruptors (reversed) + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 4: vid4 + {:dir 1 :rot-a 30 :rot-b -30 :zoom-a 1.3 :zoom-b 0.7} ;; 5: ecstacy (smaller) + {:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 6: dopple (reversed) + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 7: vid5 +]) diff --git a/configs/sources-woods-half.sexp b/configs/sources-woods-half.sexp new file mode 100644 index 0000000..d2feff8 --- /dev/null +++ b/configs/sources-woods-half.sexp @@ -0,0 +1,19 @@ +;; Half-resolution Woods Sources (960x540) +;; +;; Pass to recipe with: --sources configs/sources-woods-half.sexp + +(require-primitives "streaming") + +(def sources [ + (streaming:make-video-source "woods_half/1.webm" 30) + 
(streaming:make-video-source "woods_half/2.webm" 30) + (streaming:make-video-source "woods_half/3.webm" 30) + (streaming:make-video-source "woods_half/4.webm" 30) +]) + +(def pair-configs [ + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} + {:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} +]) diff --git a/configs/sources-woods.sexp b/configs/sources-woods.sexp new file mode 100644 index 0000000..717bfd9 --- /dev/null +++ b/configs/sources-woods.sexp @@ -0,0 +1,39 @@ +;; Default Sources Configuration +;; +;; Defines video sources and per-pair effect configurations. +;; Pass to recipe with: --sources configs/sources-default.sexp +;; +;; Required by recipes using process-pair macro: +;; - sources: array of video sources +;; - pair-configs: array of effect configurations per source + +(require-primitives "streaming") + +;; Video sources array +;; Paths relative to working directory (project root) +(def sources [ + (streaming:make-video-source "woods/1.webm" 10) + (streaming:make-video-source "woods/2.webm" 10) + (streaming:make-video-source "woods/3.webm" 10) + (streaming:make-video-source "woods/4.webm" 10) + (streaming:make-video-source "woods/5.webm" 10) + (streaming:make-video-source "woods/6.webm" 10) + (streaming:make-video-source "woods/7.webm" 10) + (streaming:make-video-source "woods/8.webm" 10) +]) + +;; Per-pair effect config: rotation direction, rotation ranges, zoom ranges +;; :dir = rotation direction (1 or -1) +;; :rot-a, :rot-b = max rotation angles for clip A and B +;; :zoom-a, :zoom-b = max zoom amounts for clip A and B +(def pair-configs [ + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 0: monday + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 1: escher + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 2: vid2 + {:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 3: disruptors (reversed) + 
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 0: monday + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 1: escher + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 0: monday + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 1: escher + +]) diff --git a/effects/quick_test_explicit.sexp b/effects/quick_test_explicit.sexp new file mode 100644 index 0000000..0a3698b --- /dev/null +++ b/effects/quick_test_explicit.sexp @@ -0,0 +1,150 @@ +;; Quick Test - Fully Explicit Streaming Version +;; +;; The interpreter is completely generic - knows nothing about video/audio. +;; All domain logic is explicit via primitives. +;; +;; Run with built-in sources/audio: +;; python3 -m streaming.stream_sexp_generic effects/quick_test_explicit.sexp --fps 30 +;; +;; Run with external config files: +;; python3 -m streaming.stream_sexp_generic effects/quick_test_explicit.sexp \ +;; --sources configs/sources-default.sexp \ +;; --audio configs/audio-dizzy.sexp \ +;; --fps 30 + +(stream "quick_test_explicit" + :fps 30 + :width 1920 + :height 1080 + :seed 42 + + ;; Load standard primitives and effects + (include :path "../templates/standard-primitives.sexp") + (include :path "../templates/standard-effects.sexp") + + ;; Load reusable templates + (include :path "../templates/stream-process-pair.sexp") + (include :path "../templates/crossfade-zoom.sexp") + + ;; === SOURCES AS ARRAY === + (def sources [ + (streaming:make-video-source "monday.webm" 30) + (streaming:make-video-source "escher.webm" 30) + (streaming:make-video-source "2.webm" 30) + (streaming:make-video-source "disruptors.webm" 30) + (streaming:make-video-source "4.mp4" 30) + (streaming:make-video-source "ecstacy.mp4" 30) + (streaming:make-video-source "dopple.webm" 30) + (streaming:make-video-source "5.mp4" 30) + ]) + + ;; Per-pair config: [rot-dir, rot-a-max, rot-b-max, zoom-a-max, zoom-b-max] + ;; Pairs 3,6: reversed (negative rot-a, positive rot-b, shrink zoom-a, grow zoom-b) + ;; Pair 5: 
smaller ranges + (def pair-configs [ + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 0: monday + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 1: escher + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 2: vid2 + {:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 3: disruptors (reversed) + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 4: vid4 + {:dir 1 :rot-a 30 :rot-b -30 :zoom-a 1.3 :zoom-b 0.7} ;; 5: ecstacy (smaller) + {:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 6: dopple (reversed) + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 7: vid5 + ]) + + ;; Audio analyzer + (def music (streaming:make-audio-analyzer "dizzy.mp3")) + + ;; Audio playback + (audio-playback "../dizzy.mp3") + + ;; === GLOBAL SCANS === + + ;; Cycle state: which source is active (recipe-specific) + ;; clen = beats per source (8-24 beats = ~4-12 seconds) + (scan cycle (streaming:audio-beat music t) + :init {:active 0 :beat 0 :clen 16} + :step (if (< (+ beat 1) clen) + (dict :active active :beat (+ beat 1) :clen clen) + (dict :active (mod (+ active 1) (len sources)) :beat 0 + :clen (+ 8 (mod (* (streaming:audio-beat-count music t) 7) 17))))) + + ;; Reusable scans from templates (require 'music' to be defined) + (include :path "../templates/scan-oscillating-spin.sexp") + (include :path "../templates/scan-ripple-drops.sexp") + + ;; === PER-PAIR STATE (dynamically sized based on sources) === + ;; Each pair has: inv-a, inv-b, hue-a, hue-b, mix, rot-angle + (scan pairs (streaming:audio-beat music t) + :init {:states (map (core:range (len sources)) (lambda (_) + {:inv-a 0 :inv-b 0 :hue-a 0 :hue-b 0 :hue-a-val 0 :hue-b-val 0 :mix 0.5 :mix-rem 5 :angle 0 :rot-beat 0 :rot-clen 25}))} + :step (dict :states (map states (lambda (p) + (let [;; Invert toggles (10% chance, lasts 1-4 beats) + new-inv-a (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- (get p :inv-a) 1))) + new-inv-b (if (< (core:rand) 0.1) (+ 1 
(core:rand-int 1 4)) (core:max 0 (- (get p :inv-b) 1))) + ;; Hue shifts (10% chance, lasts 1-4 beats) - use countdown like invert + old-hue-a (get p :hue-a) + old-hue-b (get p :hue-b) + new-hue-a (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- old-hue-a 1))) + new-hue-b (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- old-hue-b 1))) + ;; Pick random hue value when triggering (stored separately) + new-hue-a-val (if (> new-hue-a old-hue-a) (+ 30 (* (core:rand) 300)) (get p :hue-a-val)) + new-hue-b-val (if (> new-hue-b old-hue-b) (+ 30 (* (core:rand) 300)) (get p :hue-b-val)) + ;; Mix (holds for 1-10 beats, then picks 0, 0.5, or 1) + mix-rem (get p :mix-rem) + old-mix (get p :mix) + new-mix-rem (if (> mix-rem 0) (- mix-rem 1) (+ 1 (core:rand-int 1 10))) + new-mix (if (> mix-rem 0) old-mix (* (core:rand-int 0 2) 0.5)) + ;; Rotation (accumulates, reverses direction when cycle completes) + rot-beat (get p :rot-beat) + rot-clen (get p :rot-clen) + old-angle (get p :angle) + ;; Note: dir comes from pair-configs, but we store rotation state here + new-rot-beat (if (< (+ rot-beat 1) rot-clen) (+ rot-beat 1) 0) + new-rot-clen (if (< (+ rot-beat 1) rot-clen) rot-clen (+ 20 (core:rand-int 0 10))) + new-angle (+ old-angle (/ 360 rot-clen))] + (dict :inv-a new-inv-a :inv-b new-inv-b + :hue-a new-hue-a :hue-b new-hue-b + :hue-a-val new-hue-a-val :hue-b-val new-hue-b-val + :mix new-mix :mix-rem new-mix-rem + :angle new-angle :rot-beat new-rot-beat :rot-clen new-rot-clen)))))) + + ;; === FRAME PIPELINE === + (frame + (let [now t + e (streaming:audio-energy music now) + + ;; Get cycle state + active (bind cycle :active) + beat-pos (bind cycle :beat) + clen (bind cycle :clen) + + ;; Transition logic: last third of cycle crossfades to next + phase3 (* beat-pos 3) + fading (and (>= phase3 (* clen 2)) (< phase3 (* clen 3))) + fade-amt (if fading (/ (- phase3 (* clen 2)) clen) 0) + next-idx (mod (+ active 1) (len sources)) + + ;; Get pair states array 
(required by process-pair macro) + pair-states (bind pairs :states) + + ;; Process active pair using macro from template + active-frame (process-pair active) + + ;; Crossfade with zoom during transition (using macro) + result (if fading + (crossfade-zoom active-frame (process-pair next-idx) fade-amt) + active-frame) + + ;; Final: global spin + ripple + spun (rotate result :angle (bind spin :angle)) + rip-gate (bind ripple-state :gate) + rip-amp (* rip-gate (core:map-range e 0 1 5 50))] + + (ripple spun + :amplitude rip-amp + :center_x (bind ripple-state :cx) + :center_y (bind ripple-state :cy) + :frequency 8 + :decay 2 + :speed 5)))) diff --git a/execute.py b/execute.py new file mode 100644 index 0000000..34d94a2 --- /dev/null +++ b/execute.py @@ -0,0 +1,2368 @@ +#!/usr/bin/env python3 +""" +Execute a pre-computed plan. + +Takes a plan file (S-expression) and executes primitive operations, +storing artifacts by their content hash. + +Usage: + analyze.py recipe.sexp > analysis.sexp + plan.py recipe.sexp --analysis analysis.sexp --sexp > plan.sexp + execute.py plan.sexp --analysis analysis.sexp +""" + +import json +import shutil +import subprocess +import sys +import tempfile +import importlib.util +from pathlib import Path +from typing import List + +# Add artdag to path +sys.path.insert(0, str(Path(__file__).parent.parent / "artdag")) + +from artdag.sexp import parse +from artdag.sexp.parser import Symbol, Keyword +import time +import os +import threading +import concurrent.futures +from itertools import groupby + + +# Limit concurrent raw-video pipelines to prevent memory exhaustion. +# Each pipeline holds raw frames in memory (e.g. ~6MB per 1080p frame) +# and spawns 2+ ffmpeg subprocesses. When the ThreadPoolExecutor runs +# many EFFECT steps in parallel the combined load can freeze the system. +# Default: 1 concurrent pipeline; override with ARTDAG_VIDEO_PIPELINES. 
+_MAX_VIDEO_PIPELINES = int(os.environ.get("ARTDAG_VIDEO_PIPELINES", 1)) +_video_pipeline_sem = threading.Semaphore(_MAX_VIDEO_PIPELINES) + + +def set_max_video_pipelines(n: int): + """Reconfigure the video-pipeline concurrency limit at runtime.""" + global _video_pipeline_sem, _MAX_VIDEO_PIPELINES + _MAX_VIDEO_PIPELINES = n + _video_pipeline_sem = threading.Semaphore(n) + + +def _video_pipeline_guard(fn): + """Decorator: acquire the video-pipeline semaphore for the call's duration.""" + from functools import wraps + @wraps(fn) + def _guarded(*args, **kwargs): + _video_pipeline_sem.acquire() + try: + return fn(*args, **kwargs) + finally: + _video_pipeline_sem.release() + return _guarded + + +class ProgressBar: + """Simple console progress bar with ETA.""" + + def __init__(self, total: int, desc: str = "", width: int = 30, update_interval: int = 30): + self.total = total + self.desc = desc + self.width = width + self.current = 0 + self.start_time = time.time() + self.update_interval = update_interval + self._last_render = 0 + + def update(self, n: int = 1): + self.current += n + if self.current - self._last_render >= self.update_interval: + self._render() + self._last_render = self.current + + def set(self, n: int): + self.current = n + if self.current - self._last_render >= self.update_interval: + self._render() + self._last_render = self.current + + def _render(self): + elapsed = time.time() - self.start_time + + if self.total == 0: + # Unknown total - just show count + line = f"\r {self.desc} {self.current} frames ({elapsed:.1f}s)" + print(line, end="", file=sys.stderr, flush=True) + return + + pct = self.current / self.total + filled = int(self.width * pct) + bar = "█" * filled + "░" * (self.width - filled) + + if self.current > 0 and pct < 1.0: + eta = elapsed / pct - elapsed + eta_str = f"ETA {eta:.0f}s" + elif pct >= 1.0: + eta_str = f"done in {elapsed:.1f}s" + else: + eta_str = "..." 
+ + line = f"\r {self.desc} |{bar}| {self.current}/{self.total} ({pct*100:.0f}%) {eta_str}" + print(line, end="", file=sys.stderr, flush=True) + + def finish(self): + self._render() + print(file=sys.stderr) # newline + + +def check_cache(cache_dir: Path, cache_id: str, extensions: list) -> Path: + """Check if a cached result exists for a step using IPNS/CID lookup. + + Args: + cache_dir: Cache directory (used for unified cache) + cache_id: IPNS address (computation hash, known before execution) + extensions: List of possible file extensions (for legacy compatibility) + + Returns: + Path to cached content file if found, None otherwise + """ + import cache as unified_cache + + # Look up IPNS → CID mapping + cached_path = unified_cache.cache_exists(cache_id) + if cached_path: + return cached_path + return None + + +def save_to_cache(cache_dir: Path, cache_id: str, source_path: Path) -> Path: + """Save a result to cache using IPNS/CID structure. + + Args: + cache_dir: Cache directory (used for unified cache) + cache_id: IPNS address (computation hash, known before execution) + source_path: Path to the file to cache + + Returns: + Path to the cached content file + """ + import cache as unified_cache + + # Store content by CID, create IPNS → CID ref + cid, cached_path = unified_cache.cache_store_file(cache_id, source_path) + return cached_path + + +def extract_segment_with_loop(input_path: Path, output_path: Path, start: float, duration: float, encoding: dict) -> Path: + """Extract a segment from a video, looping the source if needed to reach requested duration. 
+ + Args: + input_path: Source video file + output_path: Output segment file + start: Start time in seconds + duration: Requested duration in seconds + encoding: Encoding settings dict + + Returns: + Path to the output segment + """ + enc = encoding + fps = enc.get("fps", 30) + + # First attempt without looping + cmd = ["ffmpeg", "-y", "-i", str(input_path)] + if start: + cmd.extend(["-ss", str(start)]) + if duration: + cmd.extend(["-t", str(duration)]) + cmd.extend(["-r", str(fps), + "-c:v", enc["codec"], "-preset", enc["preset"], + "-crf", str(enc["crf"]), "-pix_fmt", "yuv420p", + "-c:a", enc.get("audio_codec", "aac"), + str(output_path)]) + + print(f" Extracting segment: start={start}, duration={duration}", file=sys.stderr) + result = subprocess.run(cmd, capture_output=True, text=True) + + # Check if we need to loop + needs_loop = False + if result.returncode == 0 and duration: + probe_cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", + "-show_format", str(output_path)] + probe_result = subprocess.run(probe_cmd, capture_output=True, text=True) + if probe_result.returncode == 0: + probe_data = json.loads(probe_result.stdout) + output_duration = float(probe_data.get("format", {}).get("duration", 0)) + if output_duration < duration - 1.0: # 1 second tolerance + needs_loop = True + print(f" Output {output_duration:.1f}s < requested {duration:.1f}s, will loop", file=sys.stderr) + + if needs_loop or result.returncode != 0: + # Get source duration for wrapping + probe_cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", + "-show_format", str(input_path)] + probe_result = subprocess.run(probe_cmd, capture_output=True, text=True) + probe_data = json.loads(probe_result.stdout) + src_duration = float(probe_data.get("format", {}).get("duration", 0)) + + if src_duration > 0: + wrapped_start = start % src_duration if start else 0 + print(f" Looping source ({src_duration:.1f}s) to reach {duration:.1f}s", file=sys.stderr) + + # Re-run with stream_loop + cmd = 
["ffmpeg", "-y", "-stream_loop", "-1", "-i", str(input_path)] + cmd.extend(["-ss", str(wrapped_start)]) + if duration: + cmd.extend(["-t", str(duration)]) + cmd.extend(["-r", str(fps), + "-c:v", enc["codec"], "-preset", enc["preset"], + "-crf", str(enc["crf"]), "-pix_fmt", "yuv420p", + "-c:a", enc.get("audio_codec", "aac"), + str(output_path)]) + + result = subprocess.run(cmd, capture_output=True, text=True) + if result.returncode != 0: + print(f" FFmpeg loop error: {result.stderr[:200]}", file=sys.stderr) + raise ValueError(f"FFmpeg segment extraction with loop failed") + + if not output_path.exists() or output_path.stat().st_size == 0: + raise ValueError(f"Segment output invalid: {output_path}") + + print(f" Segment: {output_path.stat().st_size / 1024 / 1024:.1f}MB", file=sys.stderr) + return output_path + + +def clean_nil_symbols(obj): + """Recursively convert Symbol('nil') to None and filter out None values from dicts.""" + if isinstance(obj, Symbol): + if obj.name == 'nil': + return None + return obj + elif isinstance(obj, dict): + result = {} + for k, v in obj.items(): + cleaned = clean_nil_symbols(v) + # Skip None values (they were nil) + if cleaned is not None: + result[k] = cleaned + return result + elif isinstance(obj, list): + return [clean_nil_symbols(v) for v in obj] + return obj + + +def parse_analysis_sexp(content: str) -> dict: + """Parse analysis S-expression into dict.""" + sexp = parse(content) + if isinstance(sexp, list) and len(sexp) == 1: + sexp = sexp[0] + + if not isinstance(sexp, list) or not sexp: + raise ValueError("Invalid analysis S-expression") + + # Should be (analysis (name ...) (name ...) ...) + if not isinstance(sexp[0], Symbol) or sexp[0].name != "analysis": + raise ValueError("Expected (analysis ...) 
S-expression") + + result = {} + for item in sexp[1:]: + if isinstance(item, list) and item and isinstance(item[0], Symbol): + name = item[0].name + data = {} + + i = 1 + while i < len(item): + if isinstance(item[i], Keyword): + key = item[i].name.replace("-", "_") + i += 1 + if i < len(item): + data[key] = item[i] + i += 1 + else: + i += 1 + + result[name] = data + + return result + + +def sexp_to_plan(sexp) -> dict: + """Convert a parsed S-expression plan to a dict.""" + if not isinstance(sexp, list) or not sexp: + raise ValueError("Invalid plan S-expression") + + # Skip 'plan' symbol and name + plan = { + "steps": [], + "analysis": {}, + } + + i = 0 + if isinstance(sexp[0], Symbol) and sexp[0].name == "plan": + i = 1 + + # Parse keywords and steps + while i < len(sexp): + item = sexp[i] + + if isinstance(item, Keyword): + key = item.name.replace("-", "_") + i += 1 + if i < len(sexp): + value = sexp[i] + if key == "encoding" and isinstance(value, list): + # Parse encoding dict from sexp + plan["encoding"] = sexp_to_dict(value) + elif key == "output": + # Map :output to output_step_id + plan["output_step_id"] = value + elif key == "id": + # Map :id to plan_id + plan["plan_id"] = value + elif key == "source_cid": + # Map :source-cid to source_hash + plan["source_hash"] = value + else: + plan[key] = value + i += 1 + elif isinstance(item, list) and item and isinstance(item[0], Symbol): + if item[0].name == "step": + # Parse step + step = parse_step_sexp(item) + plan["steps"].append(step) + elif item[0].name == "analysis": + # Parse analysis data + plan["analysis"] = parse_analysis_sexp(item) + elif item[0].name == "effects-registry": + # Parse effects registry + plan["effects_registry"] = parse_effects_registry_sexp(item) + i += 1 + else: + i += 1 + + return plan + + +def parse_analysis_sexp(sexp) -> dict: + """Parse analysis S-expression: (analysis (bass :times [...] :values [...]) ...) + + Handles both inline data (:times [...] 
:values [...]) and cache-id refs (:cache-id "..."). + """ + analysis = {} + for item in sexp[1:]: # Skip 'analysis' symbol + if isinstance(item, list) and item and isinstance(item[0], Symbol): + name = item[0].name + data = {} + j = 1 + while j < len(item): + if isinstance(item[j], Keyword): + key = item[j].name + j += 1 + if j < len(item): + data[key] = item[j] + j += 1 + else: + j += 1 + # Normalize: parser gives "cache-id", internal code expects "_cache_id" + if "cache-id" in data: + data["_cache_id"] = data.pop("cache-id") + analysis[name] = data + return analysis + + +def parse_effects_registry_sexp(sexp) -> dict: + """Parse effects-registry S-expression: (effects-registry (rotate :path "...") (blur :path "..."))""" + registry = {} + for item in sexp[1:]: # Skip 'effects-registry' symbol + if isinstance(item, list) and item and isinstance(item[0], Symbol): + name = item[0].name + data = {} + j = 1 + while j < len(item): + if isinstance(item[j], Keyword): + key = item[j].name + j += 1 + if j < len(item): + data[key] = item[j] + j += 1 + else: + j += 1 + registry[name] = data + return registry + + +def parse_bind_sexp(sexp) -> dict: + """Parse a bind S-expression: (bind analysis-ref :range [min max] :offset 60 :transform sqrt)""" + if not isinstance(sexp, list) or len(sexp) < 2: + return None + if not isinstance(sexp[0], Symbol) or sexp[0].name != "bind": + return None + + bind = { + "_bind": sexp[1] if isinstance(sexp[1], str) else sexp[1].name if isinstance(sexp[1], Symbol) else str(sexp[1]), + "range_min": 0.0, + "range_max": 1.0, + "transform": None, + "offset": 0.0, + } + + i = 2 + while i < len(sexp): + if isinstance(sexp[i], Keyword): + kw = sexp[i].name + if kw == "range": + i += 1 + if i < len(sexp) and isinstance(sexp[i], list) and len(sexp[i]) >= 2: + bind["range_min"] = float(sexp[i][0]) + bind["range_max"] = float(sexp[i][1]) + elif kw == "offset": + i += 1 + if i < len(sexp): + bind["offset"] = float(sexp[i]) + elif kw == "transform": + i += 1 + if 
i < len(sexp): + t = sexp[i] + if isinstance(t, Symbol): + bind["transform"] = t.name + elif isinstance(t, str): + bind["transform"] = t + i += 1 + + return bind + + +def sexp_to_dict(sexp) -> dict: + """Convert S-expression key-value pairs to dict.""" + result = {} + i = 0 + while i < len(sexp): + if isinstance(sexp[i], Keyword): + key = sexp[i].name.replace("-", "_") + i += 1 + if i < len(sexp): + value = sexp[i] + # Check for bind expression and convert to dict format + if isinstance(value, list) and value and isinstance(value[0], Symbol) and value[0].name == "bind": + value = parse_bind_sexp(value) + result[key] = value + i += 1 + else: + i += 1 + return result + + +def parse_step_sexp(sexp) -> dict: + """Parse a step S-expression. + + Supports two formats: + 1. (step "id" :cache-id "..." :type "SOURCE" :path "..." :inputs [...]) + 2. (step "id" :cache-id "..." :level 1 (source :path "..." :inputs [...])) + """ + step = { + "inputs": [], + "config": {}, + } + + i = 1 # Skip 'step' symbol + if i < len(sexp) and isinstance(sexp[i], str): + step["step_id"] = sexp[i] + i += 1 + + while i < len(sexp): + item = sexp[i] + + if isinstance(item, Keyword): + key = item.name.replace("-", "_") + i += 1 + if i < len(sexp): + value = sexp[i] + if key == "type": + step["node_type"] = value if isinstance(value, str) else value.name + elif key == "inputs": + step["inputs"] = value if isinstance(value, list) else [value] + elif key in ("level", "cache", "cache_id"): + if key == "cache": + key = "cache_id" + step[key] = value + else: + # Check for bind expression + if isinstance(value, list) and value and isinstance(value[0], Symbol) and value[0].name == "bind": + value = parse_bind_sexp(value) + # Config value + step["config"][key] = value + i += 1 + elif isinstance(item, list) and item and isinstance(item[0], Symbol): + # Nested node expression: (source :path "..." 
:inputs [...]) + node_type = item[0].name.upper() + step["node_type"] = node_type + + # Parse node config + j = 1 + while j < len(item): + if isinstance(item[j], Keyword): + key = item[j].name.replace("-", "_") + j += 1 + if j < len(item): + value = item[j] + if key == "inputs": + step["inputs"] = value if isinstance(value, list) else [value] + else: + # Check for bind expression + if isinstance(value, list) and value and isinstance(value[0], Symbol) and value[0].name == "bind": + value = parse_bind_sexp(value) + step["config"][key] = value + j += 1 + else: + j += 1 + i += 1 + else: + i += 1 + + return step + + +def parse_plan_input(content: str) -> dict: + """Parse plan from JSON or S-expression string.""" + content = content.strip() + if content.startswith("{"): + return json.loads(content) + elif content.startswith("("): + sexp = parse(content) + return sexp_to_plan(sexp[0] if isinstance(sexp, list) and len(sexp) == 1 else sexp) + else: + raise ValueError("Plan must be JSON (starting with '{') or S-expression (starting with '(')") + + +# Default encoding settings +DEFAULT_ENCODING = { + "codec": "libx264", + "preset": "fast", + "crf": 18, + "audio_codec": "aac", + "fps": 30, +} + + +def get_encoding(recipe_encoding: dict, step_config: dict) -> dict: + """Merge encoding settings: defaults < recipe < step overrides.""" + encoding = {**DEFAULT_ENCODING} + encoding.update(recipe_encoding) + if "encoding" in step_config: + encoding.update(step_config["encoding"]) + return encoding + + +class SexpEffectModule: + """Wrapper for S-expression effects to provide process_frame interface.""" + + def __init__(self, effect_path: Path, effects_registry: dict = None, recipe_dir: Path = None, minimal_primitives: bool = False): + from sexp_effects import get_interpreter + self.interp = get_interpreter(minimal_primitives=minimal_primitives) + + # Load only explicitly declared effects from the recipe's registry + # No auto-loading from directory - everything must be explicit + if 
effects_registry: + base_dir = recipe_dir or effect_path.parent.parent # Resolve relative paths + for effect_name, effect_info in effects_registry.items(): + effect_rel_path = effect_info.get("path") + if effect_rel_path: + full_path = (base_dir / effect_rel_path).resolve() + if full_path.exists() and effect_name not in self.interp.effects: + self.interp.load_effect(str(full_path)) + + # Load the specific effect if not already loaded + self.interp.load_effect(str(effect_path)) + self.effect_name = effect_path.stem + + def process_frame(self, frame, params, state): + return self.interp.run_effect(self.effect_name, frame, params, state or {}) + + +def load_effect(effect_path: Path, effects_registry: dict = None, recipe_dir: Path = None, minimal_primitives: bool = False): + """Load an effect module from a local path (.py or .sexp).""" + if effect_path.suffix == ".sexp": + return SexpEffectModule(effect_path, effects_registry, recipe_dir, minimal_primitives) + + spec = importlib.util.spec_from_file_location("effect", effect_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module + + +def interpolate_analysis(times: list, values: list, t: float) -> float: + """Interpolate analysis value at time t.""" + if not times or not values: + return 0.0 + if t <= times[0]: + return values[0] + if t >= times[-1]: + return values[-1] + + # Binary search for surrounding times + lo, hi = 0, len(times) - 1 + while lo < hi - 1: + mid = (lo + hi) // 2 + if times[mid] <= t: + lo = mid + else: + hi = mid + + # Linear interpolation + t0, t1 = times[lo], times[hi] + v0, v1 = values[lo], values[hi] + if t1 == t0: + return v0 + alpha = (t - t0) / (t1 - t0) + return v0 + alpha * (v1 - v0) + + +def apply_transform(value: float, transform: str) -> float: + """Apply a transform function to a value (0-1 range).""" + if transform is None: + return value + if transform == "sqrt": + return value ** 0.5 + elif transform == "pow2": + return value ** 2 + 
elif transform == "pow3": + return value ** 3 + elif transform == "log": + # Logarithmic scale: log(1 + 9*x) / log(10) maps 0-1 to 0-1 with log curve + import math + return math.log(1 + 9 * value) / math.log(10) if value > 0 else 0 + elif transform == "exp": + # Exponential scale: (10^x - 1) / 9 maps 0-1 to 0-1 with exp curve + return (10 ** value - 1) / 9 + elif transform == "inv": + return 1 - value + else: + return value + + +def eval_expr(value, frame_time: float, frame_num: int, analysis_data: dict) -> float: + """ + Evaluate a runtime expression. + + Supports: + - Literals (int, float) + - Bindings: {"_binding": True, "source": ..., "feature": ...} + - Math expressions: {"_expr": True, "op": "+", "args": [...]} + - Time/frame: {"_expr": True, "op": "time"} or {"_expr": True, "op": "frame"} + """ + import math + + # Literal values + if isinstance(value, (int, float)): + return float(value) + + if not isinstance(value, dict): + return 0.0 # Unknown type + + # Handle bindings + if "_bind" in value or "_binding" in value: + if "_bind" in value: + ref = value["_bind"] + range_min = value.get("range_min", 0.0) + range_max = value.get("range_max", 1.0) + else: + ref = value.get("source", "") + range_val = value.get("range", [0.0, 1.0]) + range_min = range_val[0] if isinstance(range_val, list) else 0.0 + range_max = range_val[1] if isinstance(range_val, list) and len(range_val) > 1 else 1.0 + + transform = value.get("transform") + bind_offset = value.get("offset", 0.0) + + track = analysis_data.get(ref, {}) + times = track.get("times", []) + values = track.get("values", []) + + lookup_time = frame_time + bind_offset + raw = interpolate_analysis(times, values, lookup_time) + transformed = apply_transform(raw, transform) + + return range_min + transformed * (range_max - range_min) + + # Handle expressions + if "_expr" in value: + op = value.get("op") + args = value.get("args", []) + + # Special ops without args + if op == "time": + return frame_time + if op == "frame": 
+ return float(frame_num) + + # Lazy-evaluated ops (don't evaluate all branches) + if op == "if": + cond = eval_expr(args[0], frame_time, frame_num, analysis_data) if args else 0.0 + if cond: + return eval_expr(args[1], frame_time, frame_num, analysis_data) if len(args) > 1 else 0.0 + return eval_expr(args[2], frame_time, frame_num, analysis_data) if len(args) > 2 else 0.0 + + # Evaluate arguments recursively + evaluated = [eval_expr(arg, frame_time, frame_num, analysis_data) for arg in args] + + # Comparison operations + if op == "<" and len(evaluated) >= 2: + return 1.0 if evaluated[0] < evaluated[1] else 0.0 + if op == ">" and len(evaluated) >= 2: + return 1.0 if evaluated[0] > evaluated[1] else 0.0 + if op == "<=" and len(evaluated) >= 2: + return 1.0 if evaluated[0] <= evaluated[1] else 0.0 + if op == ">=" and len(evaluated) >= 2: + return 1.0 if evaluated[0] >= evaluated[1] else 0.0 + if op == "=" and len(evaluated) >= 2: + return 1.0 if evaluated[0] == evaluated[1] else 0.0 + + # Math operations + if op == "+" and len(evaluated) >= 2: + return evaluated[0] + evaluated[1] + if op == "-" and len(evaluated) >= 2: + return evaluated[0] - evaluated[1] + if op == "*" and len(evaluated) >= 2: + return evaluated[0] * evaluated[1] + if op == "/" and len(evaluated) >= 2: + return evaluated[0] / evaluated[1] if evaluated[1] != 0 else 0.0 + if op == "mod" and len(evaluated) >= 2: + return evaluated[0] % evaluated[1] if evaluated[1] != 0 else 0.0 + if op == "min" and len(evaluated) >= 2: + return min(evaluated[0], evaluated[1]) + if op == "max" and len(evaluated) >= 2: + return max(evaluated[0], evaluated[1]) + if op == "abs" and len(evaluated) >= 1: + return abs(evaluated[0]) + if op == "sin" and len(evaluated) >= 1: + return math.sin(evaluated[0]) + if op == "cos" and len(evaluated) >= 1: + return math.cos(evaluated[0]) + if op == "floor" and len(evaluated) >= 1: + return float(math.floor(evaluated[0])) + if op == "ceil" and len(evaluated) >= 1: + return 
def eval_scan_expr(value, rng, variables):
    """Evaluate a compiled scan expression against a seeded RNG and bindings.

    Literals (numbers, strings) and any value that is not a dict tagged
    with "_expr" pass through unchanged.  Random ops consume state from
    *rng*, so evaluation order is deterministic for a given seed.

    Args:
        value: Compiled expression (literal, or dict carrying "_expr").
        rng: random.Random instance (seeded; advances state per call).
        variables: Dict of variable bindings (acc, rem, hue, etc.)

    Returns:
        Evaluated value (number or dict).
    """
    import math

    # Plain literals pass straight through.
    if isinstance(value, (int, float, str)):
        return value
    # Anything that is not an expression dict is returned untouched.
    if not (isinstance(value, dict) and "_expr" in value):
        return value

    op = value.get("op")
    args = value.get("args", [])

    def ev(node):
        # Recursive evaluation under the same RNG and bindings.
        return eval_scan_expr(node, rng, variables)

    # Variable reference (missing names resolve to 0).
    if op == "var":
        return variables.get(value.get("name", ""), 0)

    # Dict constructor: keys are literal, values are evaluated in order.
    if op == "dict":
        return dict(zip(value.get("keys", []), [ev(a) for a in args]))

    # Random ops — each advances the RNG exactly once.
    if op == "rand":
        return rng.random()
    if op == "rand-int":
        return rng.randint(int(ev(args[0])), int(ev(args[1])))
    if op == "rand-range":
        return rng.uniform(float(ev(args[0])), float(ev(args[1])))

    # Conditional — lazy: only the taken branch is evaluated.
    if op == "if":
        taken = 1 if (ev(args[0]) if args else 0) else 2
        return ev(args[taken]) if len(args) > taken else 0

    # Comparisons — evaluate at most the first two operands.
    if op in ("<", ">", "<=", ">=", "="):
        left = ev(args[0]) if args else 0
        right = ev(args[1]) if len(args) > 1 else 0
        if op == "<":
            hit = left < right
        elif op == ">":
            hit = left > right
        elif op == "<=":
            hit = left <= right
        elif op == ">=":
            hit = left >= right
        else:  # "="
            hit = left == right
        return 1 if hit else 0

    # Everything else evaluates all arguments eagerly.
    operands = [ev(a) for a in args]
    count = len(operands)

    if count >= 2:
        a, b = operands[0], operands[1]
        if op == "+":
            return a + b
        if op == "-":
            return a - b
        if op == "*":
            return a * b
        if op == "/":
            return a / b if b != 0 else 0  # divide-by-zero yields 0
        if op == "mod":
            return a % b if b != 0 else 0
        if op == "min":
            return min(a, b)
        if op == "max":
            return max(a, b)
        if op == "nth":
            idx = int(b)
            if isinstance(a, (list, tuple)) and 0 <= idx < len(a):
                return a[idx]
            return 0  # out-of-range / non-sequence lookup yields 0

    # Unary minus.
    if count == 1 and op == "-":
        return -operands[0]

    if count >= 1:
        x = operands[0]
        if op == "abs":
            return abs(x)
        if op == "sin":
            return math.sin(x)
        if op == "cos":
            return math.cos(x)
        if op == "floor":
            return math.floor(x)
        if op == "ceil":
            return math.ceil(x)

    return 0  # Unknown op / arity: fall back to 0


def _is_binding(value):
    """Return True for a binding/expression dict needing per-frame resolution."""
    if not isinstance(value, dict):
        return False
    return any(marker in value for marker in ("_bind", "_binding", "_expr"))


def _check_has_bindings(params: dict) -> bool:
    """Return True if any param value (top level or inside a list) is a binding."""
    return any(
        _is_binding(v) or (isinstance(v, list) and any(_is_binding(item) for item in v))
        for v in params.values()
    )

def resolve_params(params: dict, frame_time: float, analysis_data: dict, frame_num: int = 0) -> dict:
    """Resolve binding/expression params against analysis data at frame_time.

    Bindings are recognized both as top-level values and as elements inside
    list values (e.g. blend_multi weights); everything else passes through
    unchanged.
    """

    def _needs_eval(v):
        # A binding is a dict carrying one of the binding/expression markers.
        return isinstance(v, dict) and ("_bind" in v or "_binding" in v or "_expr" in v)

    out = {}
    for name, raw in params.items():
        if _needs_eval(raw):
            out[name] = eval_expr(raw, frame_time, frame_num, analysis_data)
        elif isinstance(raw, list):
            out[name] = [
                eval_expr(elem, frame_time, frame_num, analysis_data)
                if _needs_eval(elem) else elem
                for elem in raw
            ]
        else:
            out[name] = raw
    return out


def resolve_scalar_binding(value, analysis_data: dict):
    """Resolve a scalar binding (like duration) from analysis data.

    Non-binding values are returned unchanged.  For the scalar 'duration'
    feature the value is read straight off the analysis track; for any
    time-varying feature the function returns None, signalling that
    resolve_params must handle it frame by frame.

    Returns:
        float when the binding resolves to a scalar, None when it cannot,
        or the original value when it is not a binding at all.
    """
    is_binding = isinstance(value, dict) and ("_bind" in value or "_binding" in value)
    if not is_binding:
        return value

    # Old-style "_bind" carries only the source ref; feature defaults to 'values'.
    if "_bind" in value:
        ref, feature = value["_bind"], "values"
    else:
        ref, feature = value.get("source", ""), value.get("feature", "values")

    if feature != "duration":
        # Time-varying feature: cannot be collapsed to a single scalar here.
        return None

    duration = analysis_data.get(ref, {}).get("duration")
    return float(duration) if duration is not None else None

@_video_pipeline_guard
def run_effect(effect_module, input_path: Path, output_path: Path, params: dict, encoding: dict, analysis_data: dict = None, time_offset: float = 0.0, max_duration: float = None):
    """Run a single effect over every frame of a video file.

    Decodes the input with one ffmpeg process to raw RGB24, pushes each
    frame through effect_module.process_frame, and re-encodes with a
    second ffmpeg process.  The audio track is mapped through from the
    original input.  The writer is only started after the first frame has
    been processed, because an effect may change the frame dimensions.

    Args:
        effect_module: Module exposing process_frame(frame, params, state).
        input_path: Source video file.
        output_path: Destination video file.
        params: Effect parameters; values may be binding/expression dicts.
        encoding: Encoder settings (codec, preset, crf, audio_codec, fps).
        analysis_data: Analysis tracks for per-frame binding resolution.
        time_offset: Time offset in seconds for resolving bindings
            (e.g., segment start time in audio).
        max_duration: Maximum duration in seconds to process (stops after
            this many seconds of frames).

    Raises:
        ValueError: If the input has no video stream, contains no frames,
            or a binding references a missing analysis track.
    """
    import numpy as np

    # Clean nil Symbols from params
    params = clean_nil_symbols(params)

    # Get video info including duration
    probe_cmd = [
        "ffprobe", "-v", "quiet", "-print_format", "json",
        "-show_streams", "-show_format", str(input_path)
    ]
    probe_result = subprocess.run(probe_cmd, capture_output=True, text=True)
    probe_data = json.loads(probe_result.stdout)

    # Find video stream
    video_stream = None
    for stream in probe_data.get("streams", []):
        if stream.get("codec_type") == "video":
            video_stream = stream
            break

    if not video_stream:
        raise ValueError("No video stream found")

    in_width = int(video_stream["width"])
    in_height = int(video_stream["height"])

    # Get framerate — ffprobe reports it as a rational string like "30000/1001".
    fps_str = video_stream.get("r_frame_rate", "30/1")
    if "/" in fps_str:
        num, den = fps_str.split("/")
        fps = float(num) / float(den)
    else:
        fps = float(fps_str)

    # Get duration for progress bar
    duration = None
    if "format" in probe_data and "duration" in probe_data["format"]:
        duration = float(probe_data["format"]["duration"])

    # Read frames with ffmpeg (raw RGB24 on stdout)
    read_cmd = [
        "ffmpeg", "-i", str(input_path),
        "-f", "rawvideo", "-pix_fmt", "rgb24", "-"
    ]
    read_proc = subprocess.Popen(read_cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)

    # Check if we have any bindings that need per-frame resolution
    has_bindings = _check_has_bindings(params)
    analysis_data = analysis_data or {}

    # Debug: print bindings and analysis info once
    if has_bindings:
        print(f" BINDINGS DEBUG: time_offset={time_offset:.2f}", file=sys.stderr)
        for k, v in params.items():
            if isinstance(v, dict) and ("_bind" in v or "_binding" in v):
                ref = v.get("_bind") or v.get("source")
                bind_offset = float(v.get("offset", 0.0))
                track = analysis_data.get(ref, {})
                times = track.get("times", [])
                values = track.get("values", [])
                if times and values:
                    # Find first non-zero value (helps diagnose silent tracks)
                    first_nonzero_idx = next((i for i, v in enumerate(values) if v > 0.01), -1)
                    first_nonzero_time = times[first_nonzero_idx] if first_nonzero_idx >= 0 else -1
                    print(f" param {k}: ref='{ref}' bind_offset={bind_offset} time_range=[{min(times):.2f}, {max(times):.2f}]", file=sys.stderr)
                    print(f" first_nonzero at t={first_nonzero_time:.2f} max_value={max(values):.4f}", file=sys.stderr)
                else:
                    # Fail fast: a binding without data would silently resolve to 0 every frame.
                    raise ValueError(f"Binding for param '{k}' references '{ref}' but no analysis data found. Available: {list(analysis_data.keys())}")

    # Process first frame to detect output dimensions
    in_frame_size = in_width * in_height * 3
    frame_data = read_proc.stdout.read(in_frame_size)
    if len(frame_data) < in_frame_size:
        read_proc.stdout.close()
        read_proc.wait()
        raise ValueError("No frames in input video")

    frame = np.frombuffer(frame_data, dtype=np.uint8).reshape((in_height, in_width, 3))

    # Resolve params for first frame
    if has_bindings:
        frame_params = resolve_params(params, time_offset, analysis_data, frame_num=0)
    else:
        frame_params = params

    # Apply single effect with mix bypass: mix=0 → passthrough (effect not
    # invoked at all), mix=1 → full effect, in between → linear blend.
    def apply_effect(frame, frame_params, state):
        mix_val = float(frame_params.get('mix', 1.0))
        if mix_val <= 0:
            return frame, state
        result, state = effect_module.process_frame(frame, frame_params, state)
        if mix_val < 1.0:
            result = np.clip(
                frame.astype(np.float32) * (1.0 - mix_val) +
                result.astype(np.float32) * mix_val,
                0, 255
            ).astype(np.uint8)
        return result, state

    state = None
    processed, state = apply_effect(frame, frame_params, state)

    # Get output dimensions from processed frame
    out_height, out_width = processed.shape[:2]
    if out_width != in_width or out_height != in_height:
        print(f" Effect resizes: {in_width}x{in_height} -> {out_width}x{out_height}", file=sys.stderr)

    # Now start write process with correct output dimensions
    write_cmd = [
        "ffmpeg", "-y",
        "-f", "rawvideo", "-pix_fmt", "rgb24",
        "-s", f"{out_width}x{out_height}", "-r", str(encoding.get("fps", 30)),
        "-i", "-",
        "-i", str(input_path),  # For audio
        "-map", "0:v", "-map", "1:a?",
        "-c:v", encoding["codec"], "-preset", encoding["preset"], "-crf", str(encoding["crf"]),
        "-pix_fmt", "yuv420p",
        "-c:a", encoding["audio_codec"],
        str(output_path)
    ]
    write_proc = subprocess.Popen(write_cmd, stdin=subprocess.PIPE, stderr=subprocess.DEVNULL)

    # Write first processed frame
    write_proc.stdin.write(processed.tobytes())
    frame_count = 1

    # Calculate max frames and total for progress bar
    max_frames = None
    total_frames = 0
    if max_duration:
        max_frames = int(max_duration * fps)
        total_frames = max_frames
    elif duration:
        total_frames = int(duration * fps)

    # Create progress bar
    effect_name = getattr(effect_module, 'effect_name', 'effect')
    pbar = ProgressBar(total_frames, desc=effect_name)
    pbar.set(1)  # First frame already processed

    # Process remaining frames
    while True:
        # Stop if we've reached the frame limit
        if max_frames and frame_count >= max_frames:
            break

        frame_data = read_proc.stdout.read(in_frame_size)
        if len(frame_data) < in_frame_size:
            break

        frame = np.frombuffer(frame_data, dtype=np.uint8).reshape((in_height, in_width, 3))

        # Resolve params for this frame
        if has_bindings:
            frame_time = time_offset + frame_count / fps
            frame_params = resolve_params(params, frame_time, analysis_data, frame_num=frame_count)
        else:
            frame_params = params

        processed, state = apply_effect(frame, frame_params, state)
        write_proc.stdin.write(processed.tobytes())
        frame_count += 1
        pbar.set(frame_count)

    read_proc.stdout.close()
    write_proc.stdin.close()
    read_proc.wait()
    write_proc.wait()

    pbar.finish()

@_video_pipeline_guard
def run_multi_effect(effect_module, input_paths: List[Path], output_path: Path, params: dict, encoding: dict, analysis_data: dict = None, time_offset: float = 0.0, max_duration: float = None):
    """Run a multi-input effect on multiple video files.

    Each input is decoded by its own ffmpeg process at its original
    dimensions; the effect receives a list of frames (one per input) each
    iteration.  Framerate, duration, and the audio track are all taken
    from the first input.  Processing stops as soon as any input runs out
    of frames.

    Args:
        effect_module: Module exposing process_frame(frames, params, state)
            where frames is a list of ndarrays.
        input_paths: Source video files (at least 2 required).
        output_path: Destination video file.
        params: Effect parameters; values may be binding/expression dicts.
        encoding: Encoder settings (codec, preset, crf, audio_codec, fps).
        analysis_data: Analysis tracks for per-frame binding resolution.
        time_offset: Time offset in seconds for resolving bindings (e.g., segment start time in audio)
        max_duration: Maximum duration in seconds to process (stops after this many seconds of frames)

    Raises:
        ValueError: If fewer than 2 inputs are given, an input has no
            video stream, or an input contains no frames.
    """
    import numpy as np

    # Clean nil Symbols from params
    params = clean_nil_symbols(params)

    if len(input_paths) < 2:
        raise ValueError("Multi-input effect requires at least 2 inputs")

    # Get video info for each input (preserve original dimensions)
    input_infos = []
    for input_path in input_paths:
        probe_cmd = [
            "ffprobe", "-v", "quiet", "-print_format", "json",
            "-show_streams", str(input_path)
        ]
        probe_result = subprocess.run(probe_cmd, capture_output=True, text=True)
        probe_data = json.loads(probe_result.stdout)

        video_stream = None
        for stream in probe_data.get("streams", []):
            if stream.get("codec_type") == "video":
                video_stream = stream
                break

        if not video_stream:
            raise ValueError(f"No video stream found in {input_path}")

        w = int(video_stream["width"])
        h = int(video_stream["height"])
        input_infos.append({"width": w, "height": h, "path": input_path})
        print(f" Input: {input_path.name} ({w}x{h})", file=sys.stderr)

    # Get framerate and duration from first input
    probe_cmd = [
        "ffprobe", "-v", "quiet", "-print_format", "json",
        "-show_streams", "-show_format", str(input_paths[0])
    ]
    probe_result = subprocess.run(probe_cmd, capture_output=True, text=True)
    probe_data = json.loads(probe_result.stdout)
    video_stream = next(s for s in probe_data.get("streams", []) if s.get("codec_type") == "video")
    # r_frame_rate is a rational string like "30000/1001".
    fps_str = video_stream.get("r_frame_rate", "30/1")
    if "/" in fps_str:
        num, den = fps_str.split("/")
        fps = float(num) / float(den)
    else:
        fps = float(fps_str)

    # Get duration for progress bar
    duration = None
    if "format" in probe_data and "duration" in probe_data["format"]:
        duration = float(probe_data["format"]["duration"])

    # Open read processes for all inputs - preserve original dimensions
    read_procs = []
    for info in input_infos:
        read_cmd = [
            "ffmpeg", "-i", str(info["path"]),
            "-f", "rawvideo", "-pix_fmt", "rgb24",
            "-"  # Don't scale - keep original dimensions
        ]
        proc = subprocess.Popen(read_cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
        read_procs.append(proc)

    analysis_data = analysis_data or {}
    state = None

    # Process first frame to detect output dimensions
    frames = []
    for i, (proc, info) in enumerate(zip(read_procs, input_infos)):
        frame_size = info["width"] * info["height"] * 3
        frame_data = proc.stdout.read(frame_size)
        if len(frame_data) < frame_size:
            # Cleanup all readers before bailing out
            for p in read_procs:
                p.stdout.close()
                p.wait()
            raise ValueError(f"No frames in input {i}")
        frame = np.frombuffer(frame_data, dtype=np.uint8).reshape((info["height"], info["width"], 3))
        frames.append(frame)

    # Check if we have any bindings that need per-frame resolution
    has_bindings = _check_has_bindings(params)

    # Resolve params for first frame
    if has_bindings:
        frame_params = resolve_params(params, time_offset, analysis_data, frame_num=0)
    else:
        frame_params = params

    processed, state = effect_module.process_frame(frames, frame_params, state)
    out_height, out_width = processed.shape[:2]
    print(f" Output dimensions: {out_width}x{out_height}", file=sys.stderr)

    # Now start write process with correct output dimensions
    write_cmd = [
        "ffmpeg", "-y",
        "-f", "rawvideo", "-pix_fmt", "rgb24",
        "-s", f"{out_width}x{out_height}", "-r", str(encoding.get("fps", 30)),
        "-i", "-",
        "-i", str(input_paths[0]),  # For audio from first input
        "-map", "0:v", "-map", "1:a?",
        "-c:v", encoding["codec"], "-preset", encoding["preset"], "-crf", str(encoding["crf"]),
        "-pix_fmt", "yuv420p",
        "-c:a", encoding["audio_codec"],
        str(output_path)
    ]
    write_proc = subprocess.Popen(write_cmd, stdin=subprocess.PIPE, stderr=subprocess.DEVNULL)

    # Write first processed frame
    write_proc.stdin.write(processed.tobytes())
    frame_count = 1

    # Calculate max frames and total for progress bar
    max_frames = None
    total_frames = 0
    if max_duration:
        max_frames = int(max_duration * fps)
        total_frames = max_frames
    elif duration:
        total_frames = int(duration * fps)

    # Create progress bar
    effect_name = getattr(effect_module, 'effect_name', 'blend')
    pbar = ProgressBar(total_frames, desc=effect_name)
    pbar.set(1)  # First frame already processed

    # Process remaining frames
    while True:
        # Stop if we've reached the frame limit
        if max_frames and frame_count >= max_frames:
            break

        # Read frame from each input (each may have different dimensions)
        frames = []
        all_valid = True
        for i, (proc, info) in enumerate(zip(read_procs, input_infos)):
            frame_size = info["width"] * info["height"] * 3
            frame_data = proc.stdout.read(frame_size)
            if len(frame_data) < frame_size:
                all_valid = False
                break
            frame = np.frombuffer(frame_data, dtype=np.uint8).reshape((info["height"], info["width"], 3))
            frames.append(frame)

        if not all_valid:
            # Shortest input exhausted — stop the whole composite.
            break

        # Resolve params for this frame
        if has_bindings:
            frame_time = time_offset + frame_count / fps
            frame_params = resolve_params(params, frame_time, analysis_data, frame_num=frame_count)
        else:
            frame_params = params

        # Pass list of frames to effect
        processed, state = effect_module.process_frame(frames, frame_params, state)
        write_proc.stdin.write(processed.tobytes())
        frame_count += 1
        pbar.set(frame_count)

    # Cleanup
    for proc in read_procs:
        proc.stdout.close()
        proc.wait()
    write_proc.stdin.close()
    write_proc.wait()

    pbar.finish()

@_video_pipeline_guard
def run_effect_chain(effect_modules, input_path: Path, output_path: Path,
                     params_list: list, encoding: dict,
                     analysis_data=None, time_offset: float = 0.0,
                     max_duration: float = None):
    """Run multiple effects as a single-pass fused chain: one decode, one encode, no intermediates.

    Args:
        effect_modules: List of effect modules (each has process_frame)
        input_path: Input video file
        output_path: Output video file
        params_list: List of param dicts, one per effect
        encoding: Encoding settings
        analysis_data: Optional analysis data for binding resolution
        time_offset: Time offset for resolving bindings
        max_duration: Maximum duration in seconds to process

    Raises:
        ValueError: If the input has no video stream or no frames.
    """
    import numpy as np

    # Clean nil Symbols from each params dict
    params_list = [clean_nil_symbols(p) for p in params_list]

    # Probe input for dimensions/fps/duration
    probe_cmd = [
        "ffprobe", "-v", "quiet", "-print_format", "json",
        "-show_streams", "-show_format", str(input_path)
    ]
    probe_result = subprocess.run(probe_cmd, capture_output=True, text=True)
    probe_data = json.loads(probe_result.stdout)

    video_stream = None
    for stream in probe_data.get("streams", []):
        if stream.get("codec_type") == "video":
            video_stream = stream
            break
    if not video_stream:
        raise ValueError("No video stream found")

    in_width = int(video_stream["width"])
    in_height = int(video_stream["height"])

    # r_frame_rate is a rational string like "30000/1001".
    fps_str = video_stream.get("r_frame_rate", "30/1")
    if "/" in fps_str:
        num, den = fps_str.split("/")
        fps = float(num) / float(den)
    else:
        fps = float(fps_str)

    duration = None
    if "format" in probe_data and "duration" in probe_data["format"]:
        duration = float(probe_data["format"]["duration"])

    # Pre-compute per-effect binding flags.
    # FIX: use _check_has_bindings (as run_effect/run_multi_effect do) instead
    # of an inline any() over top-level values only — the inline check missed
    # bindings nested inside list params (e.g. blend_multi weights), so those
    # were never resolved per frame in a fused chain.
    analysis_data = analysis_data or {}
    bindings_flags = [_check_has_bindings(params) for params in params_list]

    # Open single ffmpeg reader (raw RGB24 on stdout)
    read_cmd = [
        "ffmpeg", "-i", str(input_path),
        "-f", "rawvideo", "-pix_fmt", "rgb24", "-"
    ]
    read_proc = subprocess.Popen(read_cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)

    # Read first frame
    in_frame_size = in_width * in_height * 3
    frame_data = read_proc.stdout.read(in_frame_size)
    if len(frame_data) < in_frame_size:
        read_proc.stdout.close()
        read_proc.wait()
        raise ValueError("No frames in input video")

    frame = np.frombuffer(frame_data, dtype=np.uint8).reshape((in_height, in_width, 3))

    # Apply effect chain to a frame, respecting per-effect mix bypass.
    # mix=0 → skip the effect entirely (zero cost), mix=1 → full effect,
    # in between → linear blend of input and effect output.
    def apply_chain(frame, states, frame_num, frame_time):
        processed = frame
        for idx, (module, params, has_b) in enumerate(zip(effect_modules, params_list, bindings_flags)):
            if has_b:
                fp = resolve_params(params, frame_time, analysis_data, frame_num=frame_num)
            else:
                fp = params
            mix_val = float(fp.get('mix', 1.0))
            if mix_val <= 0:
                continue
            result, states[idx] = module.process_frame(processed, fp, states[idx])
            if mix_val < 1.0:
                processed = np.clip(
                    processed.astype(np.float32) * (1.0 - mix_val) +
                    result.astype(np.float32) * mix_val,
                    0, 255
                ).astype(np.uint8)
            else:
                processed = result
        return processed, states

    # Push first frame through all effects to discover final output dimensions
    states = [None] * len(effect_modules)
    processed, states = apply_chain(frame, states, 0, time_offset)

    out_height, out_width = processed.shape[:2]
    if out_width != in_width or out_height != in_height:
        print(f" Chain resizes: {in_width}x{in_height} -> {out_width}x{out_height}", file=sys.stderr)

    # Open single ffmpeg writer with final output dimensions
    write_cmd = [
        "ffmpeg", "-y",
        "-f", "rawvideo", "-pix_fmt", "rgb24",
        "-s", f"{out_width}x{out_height}", "-r", str(encoding.get("fps", 30)),
        "-i", "-",
        "-i", str(input_path),  # For audio
        "-map", "0:v", "-map", "1:a?",
        "-c:v", encoding["codec"], "-preset", encoding["preset"], "-crf", str(encoding["crf"]),
        "-pix_fmt", "yuv420p",
        "-c:a", encoding["audio_codec"],
        str(output_path)
    ]
    write_proc = subprocess.Popen(write_cmd, stdin=subprocess.PIPE, stderr=subprocess.DEVNULL)

    # Write first processed frame
    write_proc.stdin.write(processed.tobytes())
    frame_count = 1

    # Calculate max frames and total for progress bar
    max_frames = None
    total_frames = 0
    if max_duration:
        max_frames = int(max_duration * fps)
        total_frames = max_frames
    elif duration:
        total_frames = int(duration * fps)

    effect_names = [getattr(m, 'effect_name', '?') for m in effect_modules]
    pbar = ProgressBar(total_frames, desc='+'.join(effect_names))
    pbar.set(1)

    # Frame loop: read -> apply chain -> write
    while True:
        if max_frames and frame_count >= max_frames:
            break

        frame_data = read_proc.stdout.read(in_frame_size)
        if len(frame_data) < in_frame_size:
            break

        frame = np.frombuffer(frame_data, dtype=np.uint8).reshape((in_height, in_width, 3))

        frame_time = time_offset + frame_count / fps
        processed, states = apply_chain(frame, states, frame_count, frame_time)

        write_proc.stdin.write(processed.tobytes())
        frame_count += 1
        pbar.set(frame_count)

    read_proc.stdout.close()
    write_proc.stdin.close()
    read_proc.wait()
    write_proc.wait()

    pbar.finish()


def get_video_dimensions(file_path: Path) -> tuple:
    """Get video dimensions using ffprobe.

    Returns:
        (width, height) of the first video stream, or (None, None) when
        the file has no video stream or cannot be probed.
    """
    cmd = [
        "ffprobe", "-v", "quiet", "-print_format", "json",
        "-show_streams", str(file_path)
    ]
    result = subprocess.run(cmd, capture_output=True, text=True)
    data = json.loads(result.stdout)

    for stream in data.get("streams", []):
        if stream.get("codec_type") == "video":
            return int(stream["width"]), int(stream["height"])

    return None, None

def normalize_video(
    input_path: Path,
    output_path: Path,
    target_width: int,
    target_height: int,
    resize_mode: str,
    priority: str = None,
    pad_color: str = "black",
    crop_gravity: str = "center",
    encoding: dict = None,
) -> Path:
    """
    Normalize video to target dimensions.

    resize_mode:
      - stretch: force to exact size (distorts)
      - crop: scale to fill, crop overflow
      - fit: scale to fit, pad remainder
      - cover: scale to cover, crop minimally

    priority: width | height (which dimension to match exactly for fit/crop)

    Note: crop_gravity is accepted for API compatibility but is not
    currently used by any branch below (crops are centered by ffmpeg's
    default).  Unknown resize modes and unprobeable inputs fall back to
    a plain copy.
    """
    enc = encoding or {}
    src_width, src_height = get_video_dimensions(input_path)

    if src_width is None:
        # Can't determine dimensions, just copy
        shutil.copy(input_path, output_path)
        return output_path

    # Already correct size?
    if src_width == target_width and src_height == target_height:
        shutil.copy(input_path, output_path)
        return output_path

    src_aspect = src_width / src_height
    target_aspect = target_width / target_height

    if resize_mode == "stretch":
        # Force exact size
        vf = f"scale={target_width}:{target_height}"

    elif resize_mode == "fit":
        # Scale to fit within bounds, pad remainder
        if priority == "width":
            # Match width exactly, pad height
            vf = f"scale={target_width}:-1,pad={target_width}:{target_height}:(ow-iw)/2:(oh-ih)/2:{pad_color}"
        elif priority == "height":
            # Match height exactly, pad width
            vf = f"scale=-1:{target_height},pad={target_width}:{target_height}:(ow-iw)/2:(oh-ih)/2:{pad_color}"
        else:
            # Auto: fit within bounds (may pad both)
            if src_aspect > target_aspect:
                # Source is wider, fit to width
                vf = f"scale={target_width}:-1,pad={target_width}:{target_height}:(ow-iw)/2:(oh-ih)/2:{pad_color}"
            else:
                # Source is taller, fit to height
                vf = f"scale=-1:{target_height},pad={target_width}:{target_height}:(ow-iw)/2:(oh-ih)/2:{pad_color}"

    elif resize_mode == "crop":
        # Scale to fill, crop overflow
        if priority == "width":
            # Match width, crop height
            vf = f"scale={target_width}:-1,crop={target_width}:{target_height}"
        elif priority == "height":
            # Match height, crop width
            vf = f"scale=-1:{target_height},crop={target_width}:{target_height}"
        else:
            # Auto: fill bounds, crop minimally
            if src_aspect > target_aspect:
                # Source is wider, match height and crop width
                vf = f"scale=-1:{target_height},crop={target_width}:{target_height}"
            else:
                # Source is taller, match width and crop height
                vf = f"scale={target_width}:-1,crop={target_width}:{target_height}"

    elif resize_mode == "cover":
        # Scale to cover target, crop to exact size
        if src_aspect > target_aspect:
            vf = f"scale=-1:{target_height},crop={target_width}:{target_height}"
        else:
            vf = f"scale={target_width}:-1,crop={target_width}:{target_height}"

    else:
        # Unknown mode, just copy
        shutil.copy(input_path, output_path)
        return output_path

    cmd = [
        "ffmpeg", "-y", "-i", str(input_path),
        "-vf", vf,
        "-r", str(enc.get("fps", 30)),  # Normalize framerate for concat compatibility
        "-c:v", enc.get("codec", "libx264"),
        "-preset", enc.get("preset", "fast"),
        "-crf", str(enc.get("crf", 18)),
        "-pix_fmt", "yuv420p",  # Normalize pixel format for concat compatibility
        "-c:a", enc.get("audio_codec", "aac"),
        str(output_path)
    ]
    subprocess.run(cmd, check=True, capture_output=True)
    return output_path


def tree_concat(files: list, work_dir: Path, prefix: str = "concat") -> Path:
    """Concatenate files using a binary tree approach.

    Pairs of files are concatenated level by level with ffmpeg's concat
    demuxer (stream copy, no re-encode); an odd file at the end of a level
    is carried up unchanged.  Intermediate files are written to work_dir.

    Raises:
        ValueError: If files is empty.
    """
    # FIX: an empty list previously fell through both the len==1 guard and
    # the (never-entered) while loop to current_files[0] -> IndexError.
    if not files:
        raise ValueError("tree_concat requires at least one file")
    if len(files) == 1:
        return files[0]

    level = 0
    current_files = list(files)
    print(f" Tree concat: {len(current_files)} files", file=sys.stderr)
    for i, f in enumerate(current_files):
        print(f" [{i}] {f}", file=sys.stderr)

    while len(current_files) > 1:
        next_files = []
        pairs = (len(current_files) + 1) // 2
        print(f" Level {level}: {len(current_files)} -> {pairs} pairs", file=sys.stderr)

        for i in range(0, len(current_files), 2):
            if i + 1 < len(current_files):
                concat_file = work_dir / f"{prefix}_L{level}_{i}.txt"
                output_file = work_dir / f"{prefix}_L{level}_{i}.mp4"

                # FIX: renamed the file handle from 'f' to avoid shadowing
                # the loop variable used in the debug print above.
                with open(concat_file, "w") as fh:
                    fh.write(f"file '{current_files[i]}'\n")
                    fh.write(f"file '{current_files[i+1]}'\n")

                cmd = ["ffmpeg", "-y", "-f", "concat", "-safe", "0",
                       "-i", str(concat_file), "-c", "copy", str(output_file)]
                subprocess.run(cmd, capture_output=True)
                next_files.append(output_file)
            else:
                # Odd file out: carry it up to the next level unchanged.
                next_files.append(current_files[i])

        current_files = next_files
        level += 1

    return current_files[0]

str(concat_file), "-c", "copy", str(output_file)] + subprocess.run(cmd, capture_output=True) + next_files.append(output_file) + else: + next_files.append(current_files[i]) + + current_files = next_files + level += 1 + + return current_files[0] + + +def execute_plan(plan_path: Path = None, output_path: Path = None, recipe_dir: Path = None, plan_data: dict = None, external_analysis: dict = None, cache_dir: Path = None): + """Execute a plan file (S-expression) or plan dict. + + Args: + cache_dir: Directory to cache intermediate results. If provided, steps will + check for cached outputs before recomputing. + """ + + # Load plan from file, stdin, or dict + if plan_data: + plan = plan_data + elif plan_path and str(plan_path) != "-": + content = plan_path.read_text() + plan = parse_plan_input(content) + else: + # Read from stdin + content = sys.stdin.read() + plan = parse_plan_input(content) + + print(f"Executing plan: {plan['plan_id'][:16]}...", file=sys.stderr) + print(f"Source CID: {plan.get('source_hash', 'unknown')[:16]}...", file=sys.stderr) + print(f"Steps: {len(plan['steps'])}", file=sys.stderr) + + recipe_encoding = plan.get("encoding", {}) + + # Merge plan's embedded analysis (includes synthetic tracks from composition + # merging) with external analysis (fresh ANALYZE step outputs). + # External analysis takes priority for tracks that exist in both. 
+ analysis_data = dict(plan.get("analysis", {})) + if external_analysis: + analysis_data.update(external_analysis) + + # Resolve cache-id refs from plan + for name, data in list(analysis_data.items()): + if isinstance(data, dict) and "_cache_id" in data: + try: + from cache import cache_get_json + loaded = cache_get_json(data["_cache_id"]) + if loaded: + analysis_data[name] = loaded + except ImportError: + pass # standalone mode, no cache available + if recipe_dir is None: + recipe_dir = plan_path.parent if plan_path else Path(".") + + if analysis_data: + print(f"Analysis tracks: {list(analysis_data.keys())}", file=sys.stderr) + + # Get effects registry for loading explicitly declared effects + effects_registry = plan.get("effects_registry", {}) + if effects_registry: + print(f"Effects registry: {list(effects_registry.keys())}", file=sys.stderr) + + # Check for minimal primitives mode + minimal_primitives = plan.get("minimal_primitives", False) + if minimal_primitives: + print(f"Minimal primitives mode: enabled", file=sys.stderr) + + # Execute steps + results = {} # step_id -> output_path + work_dir = Path(tempfile.mkdtemp(prefix="artdag_exec_")) + + # Sort steps by level first (respecting dependencies), then by type within each level + # Type priority within same level: SOURCE/SEGMENT first, then ANALYZE, then EFFECT + steps = plan["steps"] + def step_sort_key(s): + node_type = s.get("node_type") or "UNKNOWN" + # Handle node_type being a Symbol + if hasattr(node_type, 'name'): + node_type = node_type.name + level = s.get("level", 0) + # Ensure level is an int (could be Symbol or None) + if not isinstance(level, int): + level = 0 + # Type priority (tiebreaker within same level): SOURCE=0, SEGMENT=1, ANALYZE=2, others=3 + if node_type == "SOURCE": + type_priority = 0 + elif node_type == "SEGMENT": + type_priority = 1 + elif node_type in ("ANALYZE", "SCAN"): + type_priority = 2 + else: + type_priority = 3 + # Sort by level FIRST, then type priority as tiebreaker + 
return (level, type_priority) + ordered_steps = sorted(steps, key=step_sort_key) + + try: + def _run_step(step): + step_id = step["step_id"] + node_type = step["node_type"] + config = step["config"] + inputs = step.get("inputs", []) + cache_id = step.get("cache_id", step_id) # IPNS address for caching + + print(f"\n[{step.get('level', 0)}] {node_type}: {step_id[:16]}...", file=sys.stderr) + + if node_type == "SOURCE": + if "path" in config: + src_path = (recipe_dir / config["path"]).resolve() + if not src_path.exists(): + raise FileNotFoundError(f"Source not found: {src_path}") + results[step_id] = src_path + print(f" -> {src_path}", file=sys.stderr) + + elif node_type == "SEGMENT": + is_audio = str(results[inputs[0]]).lower().endswith( + ('.mp3', '.wav', '.flac', '.aac', '.ogg', '.m4a')) + + input_path = results[inputs[0]] + start = config.get("start", 0) + duration = config.get("duration") + end = config.get("end") + + # Resolve any bindings to scalar values + start = resolve_scalar_binding(start, analysis_data) if start else 0 + duration = resolve_scalar_binding(duration, analysis_data) if duration else None + end = resolve_scalar_binding(end, analysis_data) if end else None + + # Check cache + cached = check_cache(cache_dir, cache_id, ['.m4a'] if is_audio else ['.mp4']) + if cached: + results[step_id] = cached + print(f" -> {cached} (cached)", file=sys.stderr) + return + + print(f" Resolved: start={start}, duration={duration}", file=sys.stderr) + + enc = get_encoding(recipe_encoding, config) + + if is_audio: + output_file = work_dir / f"segment_{step_id}.m4a" + cmd = ["ffmpeg", "-y", "-i", str(input_path)] + if start: + cmd.extend(["-ss", str(start)]) + if duration: + cmd.extend(["-t", str(duration)]) + cmd.extend(["-c:a", enc["audio_codec"], str(output_file)]) + else: + output_file = work_dir / f"segment_{step_id}.mp4" + cmd = ["ffmpeg", "-y", "-i", str(input_path)] + if start: + cmd.extend(["-ss", str(start)]) + if duration: + cmd.extend(["-t", 
str(duration)]) + elif end: + cmd.extend(["-t", str(end - start)]) + cmd.extend(["-r", str(enc["fps"]), # Normalize frame rate + "-c:v", enc["codec"], "-preset", enc["preset"], + "-crf", str(enc["crf"]), "-c:a", enc["audio_codec"], + str(output_file)]) + + result = subprocess.run(cmd, capture_output=True, text=True) + + # Check if segment has video content AND correct duration, if not try with looping + needs_loop = False + if not is_audio and result.returncode == 0: + probe_cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", + "-show_streams", "-show_format", str(output_file)] + probe_result = subprocess.run(probe_cmd, capture_output=True, text=True) + probe_data = json.loads(probe_result.stdout) + has_video = any(s.get("codec_type") == "video" for s in probe_data.get("streams", [])) + if not has_video: + needs_loop = True + # Also check if output duration matches requested duration + elif duration: + output_duration = float(probe_data.get("format", {}).get("duration", 0)) + # If output is significantly shorter than requested, need to loop + if output_duration < duration - 1.0: # 1 second tolerance + needs_loop = True + print(f" Output {output_duration:.1f}s < requested {duration:.1f}s, will loop", file=sys.stderr) + + if needs_loop or result.returncode != 0: + # Get source duration and loop the input + probe_cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", + "-show_format", str(input_path)] + probe_result = subprocess.run(probe_cmd, capture_output=True, text=True) + probe_data = json.loads(probe_result.stdout) + src_duration = float(probe_data.get("format", {}).get("duration", 0)) + + if src_duration > 0: + # Wrap start time to source duration + wrapped_start = start % src_duration if start else 0 + seg_duration = duration if duration else (end - start if end else None) + + print(f" Wrapping segment: {start:.2f}s -> {wrapped_start:.2f}s (source={src_duration:.2f}s)", file=sys.stderr) + + # Use stream_loop for seamless looping if segment spans wrap 
point + if wrapped_start + (seg_duration or 0) > src_duration: + # Need to loop - use concat filter + cmd = ["ffmpeg", "-y", "-stream_loop", "-1", "-i", str(input_path)] + cmd.extend(["-ss", str(wrapped_start)]) + if seg_duration: + cmd.extend(["-t", str(seg_duration)]) + cmd.extend(["-r", str(enc["fps"]), + "-c:v", enc["codec"], "-preset", enc["preset"], + "-crf", str(enc["crf"]), "-c:a", enc["audio_codec"], + str(output_file)]) + else: + cmd = ["ffmpeg", "-y", "-i", str(input_path)] + cmd.extend(["-ss", str(wrapped_start)]) + if seg_duration: + cmd.extend(["-t", str(seg_duration)]) + cmd.extend(["-r", str(enc["fps"]), + "-c:v", enc["codec"], "-preset", enc["preset"], + "-crf", str(enc["crf"]), "-c:a", enc["audio_codec"], + str(output_file)]) + + subprocess.run(cmd, check=True, capture_output=True) + else: + raise ValueError(f"Cannot determine source duration for looping") + + results[step_id] = save_to_cache(cache_dir, cache_id, output_file) or output_file + print(f" -> {output_file}", file=sys.stderr) + + elif node_type == "EFFECT": + # Check cache + cached = check_cache(cache_dir, cache_id, ['.mp4']) + if cached: + results[step_id] = cached + print(f" -> {cached} (cached)", file=sys.stderr) + return + + effect_name = config.get("effect", "unknown") + effect_path = config.get("effect_path") + is_multi_input = config.get("multi_input", False) + + output_file = work_dir / f"effect_{step_id}.mp4" + enc = get_encoding(recipe_encoding, config) + + if effect_path: + full_path = recipe_dir / effect_path + effect_module = load_effect(full_path, effects_registry, recipe_dir, minimal_primitives) + params = {k: v for k, v in config.items() + if k not in ("effect", "effect_path", "cid", "encoding", "multi_input")} + print(f" Effect: {effect_name}", file=sys.stderr) + + # Get timing offset and duration for bindings + effect_time_offset = config.get("start", config.get("segment_start", 0)) + effect_duration = config.get("duration") + + if is_multi_input and len(inputs) > 1: + 
# Multi-input effect (blend, layer, etc.) + input_paths = [results[inp] for inp in inputs] + run_multi_effect(effect_module, input_paths, output_file, params, enc, analysis_data, time_offset=effect_time_offset, max_duration=effect_duration) + else: + # Single-input effect + input_path = results[inputs[0]] + run_effect(effect_module, input_path, output_file, params, enc, analysis_data, time_offset=effect_time_offset, max_duration=effect_duration) + else: + input_path = results[inputs[0]] + shutil.copy(input_path, output_file) + + results[step_id] = save_to_cache(cache_dir, cache_id, output_file) or output_file + print(f" -> {output_file}", file=sys.stderr) + + elif node_type == "SEQUENCE": + # Check cache first + cached = check_cache(cache_dir, cache_id, ['.mp4']) + if cached: + results[step_id] = cached + print(f" -> {cached} (cached)", file=sys.stderr) + return + + if len(inputs) < 2: + results[step_id] = results[inputs[0]] + return + + input_files = [results[inp] for inp in inputs] + enc = get_encoding(recipe_encoding, config) + + # Check for normalization config + resize_mode = config.get("resize_mode") + if resize_mode: + # Determine target dimensions + target_width = config.get("target_width") or enc.get("width") + target_height = config.get("target_height") or enc.get("height") + + # If no explicit target, use first input's dimensions + if not target_width or not target_height: + first_w, first_h = get_video_dimensions(input_files[0]) + target_width = target_width or first_w + target_height = target_height or first_h + + if target_width and target_height: + print(f" Normalizing {len(input_files)} inputs to {target_width}x{target_height} ({resize_mode})", file=sys.stderr) + normalized_files = [] + for i, inp_file in enumerate(input_files): + norm_file = work_dir / f"norm_{step_id[:8]}_{i:04d}.mp4" + normalize_video( + inp_file, norm_file, + target_width, target_height, + resize_mode, + priority=config.get("priority"), + pad_color=config.get("pad_color", 
"black"), + crop_gravity=config.get("crop_gravity", "center"), + encoding=enc, + ) + normalized_files.append(norm_file) + input_files = normalized_files + + # Use tree concat for efficiency + output_file = tree_concat(input_files, work_dir, f"seq_{step_id[:8]}") + results[step_id] = save_to_cache(cache_dir, cache_id, output_file) or output_file + print(f" -> {output_file}", file=sys.stderr) + + elif node_type == "MUX": + # Check cache + cached = check_cache(cache_dir, cache_id, ['.mp4']) + if cached: + results[step_id] = cached + print(f" -> {cached} (cached)", file=sys.stderr) + return + + video_path = results[inputs[0]] + audio_path = results[inputs[1]] + enc = get_encoding(recipe_encoding, config) + + output_file = work_dir / f"mux_{step_id}.mp4" + + # Get duration for progress bar + probe_cmd = [ + "ffprobe", "-v", "quiet", "-print_format", "json", + "-show_format", str(video_path) + ] + probe_result = subprocess.run(probe_cmd, capture_output=True, text=True) + mux_duration = None + if probe_result.returncode == 0: + probe_data = json.loads(probe_result.stdout) + mux_duration = float(probe_data.get("format", {}).get("duration", 0)) + + cmd = ["ffmpeg", "-y", + "-i", str(video_path), "-i", str(audio_path), + "-map", "0:v", "-map", "1:a", + "-c:v", enc["codec"], "-preset", enc["preset"], + "-crf", str(enc["crf"]), "-c:a", enc["audio_codec"], + "-shortest", str(output_file)] + + import re + mux_proc = subprocess.Popen(cmd, stderr=subprocess.PIPE, text=True) + pbar = ProgressBar(int(mux_duration * 1000) if mux_duration else 0, desc="mux") + for line in mux_proc.stderr: + m = re.search(r"time=(\d+):(\d+):(\d+)\.(\d+)", line) + if m: + h, mi, s, cs = int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4)) + ms = h * 3600000 + mi * 60000 + s * 1000 + cs * 10 + pbar.set(ms) + pbar.finish() + mux_proc.wait() + if mux_proc.returncode != 0: + raise RuntimeError("MUX ffmpeg failed") + results[step_id] = save_to_cache(cache_dir, cache_id, output_file) or 
output_file + print(f" -> {output_file}", file=sys.stderr) + + elif node_type == "ANALYZE": + # Check cache first + cached = check_cache(cache_dir, cache_id, ['.json']) + if cached: + with open(cached) as f: + analysis_data[step_id] = json.load(f) + results[step_id] = cached + print(f" -> {cached} (cached)", file=sys.stderr) + return + + output_file = work_dir / f"analysis_{step_id}.json" + + if "analysis_results" in config: + # Analysis was done during planning + with open(output_file, "w") as f: + json.dump(config["analysis_results"], f) + analysis_data[step_id] = config["analysis_results"] + print(f" -> {output_file} (from plan)", file=sys.stderr) + else: + # Run analyzer now + analyzer_path = config.get("analyzer_path") + if analyzer_path: + analyzer_path = (recipe_dir / analyzer_path).resolve() + input_path = results[inputs[0]] + + # Load and run analyzer + import importlib.util + spec = importlib.util.spec_from_file_location("analyzer", analyzer_path) + analyzer_module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(analyzer_module) + + # Run analysis + analyzer_params = {k: v for k, v in config.items() + if k not in ("analyzer", "analyzer_path", "cid")} + analysis_result = analyzer_module.analyze(input_path, analyzer_params) + + # Save and store results + with open(output_file, "w") as f: + json.dump(analysis_result, f) + analysis_data[step_id] = analysis_result + print(f" -> {output_file} (ran analyzer: {len(analysis_result.get('times', []))} pts)", file=sys.stderr) + else: + print(f" -> no analyzer path!", file=sys.stderr) + + results[step_id] = save_to_cache(cache_dir, cache_id, output_file) or output_file + + elif node_type == "SCAN": + # Check cache first + cached = check_cache(cache_dir, cache_id, ['.json']) + if cached: + with open(cached) as f: + scan_result = json.load(f) + analysis_data[step_id] = scan_result + results[step_id] = cached + print(f" -> {cached} (cached)", file=sys.stderr) + return + + import random + + # Load source 
analysis data + source_id = inputs[0] + source_data = analysis_data.get(source_id, {}) + event_times = source_data.get("times", []) + duration = source_data.get("duration", event_times[-1] if event_times else 0) + + seed = config.get("seed", 0) + init_expr = config.get("init", 0) + step_expr = config.get("step_expr") + emit_expr = config.get("emit_expr") + + # Initialize RNG and accumulator + rng = random.Random(seed) + acc = eval_scan_expr(init_expr, rng, {}) + + # Process each event + event_values = [] # (time, emitted_value) pairs + + for t in event_times: + # Build variable bindings from accumulator + if isinstance(acc, dict): + variables = dict(acc) + variables["acc"] = acc + else: + variables = {"acc": acc} + + # Step: update accumulator + acc = eval_scan_expr(step_expr, rng, variables) + + # Rebind after step + if isinstance(acc, dict): + variables = dict(acc) + variables["acc"] = acc + else: + variables = {"acc": acc} + + # Emit: produce output value + emit_val = eval_scan_expr(emit_expr, rng, variables) + if isinstance(emit_val, (int, float)): + event_values.append((t, float(emit_val))) + else: + event_values.append((t, 0.0)) + + # Generate high-resolution time-series with step-held interpolation + resolution = 100 # points per second + hi_res_times = [] + hi_res_values = [] + + current_val = 0.0 + event_idx = 0 + num_points = int(duration * resolution) + 1 + + for i in range(num_points): + t = i / resolution + + # Advance to the latest event at or before time t + while event_idx < len(event_values) and event_values[event_idx][0] <= t: + current_val = event_values[event_idx][1] + event_idx += 1 + + hi_res_times.append(round(t, 4)) + hi_res_values.append(current_val) + + scan_result = { + "times": hi_res_times, + "values": hi_res_values, + "duration": duration, + } + + analysis_data[step_id] = scan_result + + # Save to cache + output_file = work_dir / f"scan_{step_id}.json" + with open(output_file, "w") as f: + json.dump(scan_result, f) + results[step_id] = 
save_to_cache(cache_dir, cache_id, output_file) or output_file + + print(f" SCAN: {len(event_times)} events -> {len(hi_res_times)} points ({duration:.1f}s)", file=sys.stderr) + print(f" -> {output_file}", file=sys.stderr) + + elif node_type == "COMPOUND": + # Check cache first + cached = check_cache(cache_dir, cache_id, ['.mp4']) + if cached: + results[step_id] = cached + print(f" -> {cached} (cached)", file=sys.stderr) + return + + # Collapsed effect chains - compile to single FFmpeg command with sendcmd + filter_chain_raw = config.get("filter_chain", []) + if not filter_chain_raw: + raise ValueError("COMPOUND step has empty filter_chain") + + # Get effects registry for this compound step (use different name + # to avoid shadowing the outer effects_registry in nested function) + step_effects_registry = config.get("effects_registry", {}) + + # Convert filter_chain items from S-expression lists to dicts + # and clean nil Symbols from configs + filter_chain = [] + for item in filter_chain_raw: + if isinstance(item, dict): + # Clean nil Symbols from the config + cleaned_item = clean_nil_symbols(item) + filter_chain.append(cleaned_item) + elif isinstance(item, list) and item: + item_dict = sexp_to_dict(item) + ftype = item_dict.get("type", "UNKNOWN") + if isinstance(ftype, Symbol): + ftype = ftype.name + fconfig_raw = item_dict.get("config", {}) + if isinstance(fconfig_raw, list): + fconfig = sexp_to_dict(fconfig_raw) + elif isinstance(fconfig_raw, dict): + fconfig = fconfig_raw + else: + fconfig = {} + # Clean nil Symbols from config + fconfig = clean_nil_symbols(fconfig) + filter_chain.append({"type": ftype, "config": fconfig}) + else: + filter_chain.append({"type": "UNKNOWN", "config": {}}) + + input_path = results[inputs[0]] + # Debug: verify input exists and has content + if not input_path.exists(): + raise ValueError(f"COMPOUND input does not exist: {input_path}") + if input_path.stat().st_size == 0: + raise ValueError(f"COMPOUND input is empty: {input_path}") + 
print(f" COMPOUND input: {input_path} ({input_path.stat().st_size} bytes)", file=sys.stderr) + enc = get_encoding(recipe_encoding, config) + output_file = work_dir / f"compound_{step_id}.mp4" + + # Extract segment timing and effects + segment_start = 0 + segment_duration = None + effects = [] + + for filter_item in filter_chain: + filter_type = filter_item.get("type", "") + filter_config = filter_item.get("config", {}) + + if filter_type == "SEGMENT": + segment_start = filter_config.get("start", 0) + segment_duration = filter_config.get("duration") + if not segment_duration and filter_config.get("end"): + segment_duration = filter_config["end"] - segment_start + elif filter_type == "EFFECT": + effects.append(filter_config) + + # Try to compile effects to FFmpeg filters + from artdag.sexp.ffmpeg_compiler import FFmpegCompiler, generate_sendcmd_filter + compiler = FFmpegCompiler() + + # Check if any effect has bindings - these need Python path for per-frame resolution + any_has_bindings = any(_check_has_bindings(e) for e in effects) + + # Check if all effects have FFmpeg mappings + all_have_mappings = all( + compiler.get_mapping(e.get("effect", "")) is not None + for e in effects + ) + + # Use FFmpeg only for static effects (no bindings) + # Effects with bindings use Python path for proper per-frame binding resolution + if all_have_mappings and effects and not any_has_bindings: + # Compile to FFmpeg with sendcmd for dynamic params + ffmpeg_filters, sendcmd_path = generate_sendcmd_filter( + effects, + analysis_data, + segment_start, + segment_duration or 1.0, + ) + + # First extract segment with looping if needed + ffmpeg_input = input_path + if segment_start or segment_duration: + seg_temp = work_dir / f"compound_{step_id}_seg_temp.mp4" + extract_segment_with_loop(input_path, seg_temp, segment_start or 0, segment_duration, enc) + ffmpeg_input = seg_temp + + # Build FFmpeg command (segment already extracted, just apply filters) + cmd = ["ffmpeg", "-y", "-i", 
str(ffmpeg_input)] + + if ffmpeg_filters: + cmd.extend(["-vf", ffmpeg_filters]) + + cmd.extend(["-r", str(enc.get("fps", 30)), + "-c:v", enc["codec"], "-preset", enc["preset"], + "-crf", str(enc["crf"]), "-pix_fmt", "yuv420p", + "-c:a", enc["audio_codec"], + str(output_file)]) + + effect_names = [e.get("effect", "?") for e in effects] + print(f" COMPOUND (FFmpeg): {', '.join(effect_names)}", file=sys.stderr) + print(f" filters: {ffmpeg_filters[:80]}{'...' if len(ffmpeg_filters) > 80 else ''}", file=sys.stderr) + + result = subprocess.run(cmd, capture_output=True, text=True) + if result.returncode != 0: + print(f" FFmpeg error: {result.stderr[:200]}", file=sys.stderr) + raise RuntimeError(f"FFmpeg failed: {result.stderr}") + + # Clean up sendcmd file + if sendcmd_path and sendcmd_path.exists(): + sendcmd_path.unlink() + else: + # Fall back to sequential processing for effects without FFmpeg mappings + current_input = input_path + + # First handle segment (with looping if source is shorter than requested) + for filter_item in filter_chain: + if filter_item.get("type") == "SEGMENT": + filter_config = filter_item.get("config", {}) + start = filter_config.get("start", 0) or 0 + duration = filter_config.get("duration") + + if start or duration: + seg_output = work_dir / f"compound_{step_id}_seg.mp4" + extract_segment_with_loop(current_input, seg_output, start, duration, enc) + current_input = seg_output + break + + # Load all effect modules and params for fused single-pass execution + effect_modules = [] + chain_params_list = [] + for effect_config in effects: + effect_name = effect_config.get("effect", "unknown") + effect_path = effect_config.get("effect_path") + + if not effect_path: + for effects_dir in ["effects", "sexp_effects/effects"]: + for ext in [".py", ".sexp"]: + candidate = recipe_dir / effects_dir / f"{effect_name}{ext}" + if candidate.exists(): + effect_path = str(candidate.relative_to(recipe_dir)) + break + if effect_path: + break + + if not effect_path: 
+ raise ValueError(f"COMPOUND EFFECT '{effect_name}' has no effect_path or FFmpeg mapping") + + full_path = recipe_dir / effect_path + effect_modules.append(load_effect(full_path, step_effects_registry or effects_registry, recipe_dir, minimal_primitives)) + chain_params_list.append({k: v for k, v in effect_config.items() + if k not in ("effect", "effect_path", "cid", "encoding", "type")}) + + effect_names = [e.get("effect", "?") for e in effects] + print(f" COMPOUND (fused): {', '.join(effect_names)}", file=sys.stderr) + + run_effect_chain(effect_modules, current_input, output_file, + chain_params_list, enc, analysis_data, + time_offset=segment_start, + max_duration=segment_duration) + + results[step_id] = save_to_cache(cache_dir, cache_id, output_file) or output_file + print(f" -> {output_file}", file=sys.stderr) + + else: + raise ValueError(f"Unknown node type: {node_type}") + + # Group steps by level for parallel execution. + # Default to 4 workers to avoid overwhelming the system with + # CPU-intensive effects (ascii_art, ripple, etc.) running in parallel. 
+ max_workers = int(os.environ.get("ARTDAG_WORKERS", 4)) + level_groups = [] + for k, g in groupby(ordered_steps, key=lambda s: s.get("level", 0)): + level_groups.append((k, list(g))) + + for level_num, level_steps in level_groups: + if len(level_steps) == 1: + _run_step(level_steps[0]) + else: + types = [s.get("node_type", "?") for s in level_steps] + types = [t.name if hasattr(t, 'name') else str(t) for t in types] + type_counts = {} + for t in types: + type_counts[t] = type_counts.get(t, 0) + 1 + type_summary = ", ".join(f"{v}x {k}" for k, v in type_counts.items()) + print(f"\n >> Level {level_num}: {len(level_steps)} steps in parallel ({type_summary})", file=sys.stderr) + with concurrent.futures.ThreadPoolExecutor(max_workers=min(len(level_steps), max_workers)) as pool: + futures = [pool.submit(_run_step, s) for s in level_steps] + for f in concurrent.futures.as_completed(futures): + f.result() # re-raises exceptions from threads + + # Get final output + final_output = results[plan["output_step_id"]] + print(f"\n--- Output ---", file=sys.stderr) + print(f"Final: {final_output}", file=sys.stderr) + + if output_path: + # Handle stdout specially - remux to streamable format + if str(output_path) in ("/dev/stdout", "-"): + # MP4 isn't streamable, use matroska which is + cmd = [ + "ffmpeg", "-y", "-i", str(final_output), + "-c", "copy", "-f", "matroska", "pipe:1" + ] + subprocess.run(cmd, stdout=sys.stdout.buffer, stderr=subprocess.DEVNULL) + return output_path + else: + shutil.copy(final_output, output_path) + print(f"Copied to: {output_path}", file=sys.stderr) + # Print path to stdout for piping + print(output_path) + return output_path + else: + # Use truncated source CID for output filename + source_cid = plan.get('source_hash', 'output')[:16] + out = recipe_dir / f"{source_cid}-output.mp4" + shutil.copy(final_output, out) + print(f"Copied to: {out}", file=sys.stderr) + # Print path to stdout for piping + print(out) + return out + + finally: + print(f"Debug: 
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Execute a plan")
    parser.add_argument("plan", nargs="?", default="-", help="Plan file (- for stdin)")
    parser.add_argument("-o", "--output", type=Path, help="Output file")
    parser.add_argument("-d", "--dir", type=Path, default=Path("."), help="Recipe directory for resolving paths")
    parser.add_argument("-a", "--analysis", type=Path, help="Analysis file (.sexp)")

    args = parser.parse_args()

    # "-" means read the plan from stdin (plan_path stays None in that case).
    plan_path = None if args.plan == "-" else Path(args.plan)
    if plan_path and not plan_path.exists():
        # Diagnostics belong on stderr: stdout is reserved for the output
        # path that downstream tools consume via pipes (see execute_plan).
        print(f"Plan not found: {plan_path}", file=sys.stderr)
        sys.exit(1)

    # Load external analysis if provided
    external_analysis = None
    if args.analysis:
        if not args.analysis.exists():
            print(f"Analysis file not found: {args.analysis}", file=sys.stderr)
            sys.exit(1)
        external_analysis = parse_analysis_sexp(args.analysis.read_text())

    execute_plan(plan_path, args.output, args.dir, external_analysis=external_analysis)
def parse_analysis_sexp(content: str) -> dict:
    """Parse an (analysis ...) S-expression into {track-name: {key: value}}."""
    tree = parse(content)
    # parse() may wrap a single top-level form in a list; unwrap it.
    if isinstance(tree, list) and len(tree) == 1:
        tree = tree[0]

    if not isinstance(tree, list) or not tree:
        raise ValueError("Invalid analysis S-expression")

    # The document must have the shape (analysis (name ...) (name ...) ...)
    head = tree[0]
    if not (isinstance(head, Symbol) and head.name == "analysis"):
        raise ValueError("Expected (analysis ...) S-expression")

    tracks = {}
    for entry in tree[1:]:
        if not (isinstance(entry, list) and entry):
            continue
        # Track labels may be bare symbols or quoted strings (node IDs).
        label = entry[0]
        if isinstance(label, Symbol):
            name = label.name
        elif isinstance(label, str):
            name = label
        else:
            continue  # malformed entry header — skip it

        # Collect :keyword value pairs; a trailing keyword with no value
        # following it is dropped, and stray non-keyword tokens are skipped.
        fields = {}
        pos = 1
        while pos < len(entry):
            token = entry[pos]
            if isinstance(token, Keyword):
                key = token.name.replace("-", "_")
                pos += 1
                if pos < len(entry):
                    fields[key] = entry[pos]
                    pos += 1
            else:
                pos += 1

        tracks[name] = fields

    return tracks
def to_sexp(value, indent=0):
    """Convert a Python value to an S-expression string.

    Handles plan-specific objects (Binding, Symbol, Keyword, Lambda, and
    "_binding"-marked dicts produced by the compiler) as well as plain
    dicts, lists, strings, numbers, booleans and None.

    Args:
        value: Value to serialize.
        indent: Indentation level (accepted for API compatibility; the
            current renderer emits single-line forms and does not use it).

    Returns:
        S-expression text for the value.
    """
    from artdag.sexp.parser import Lambda

    # Binding objects: serialize as (bind <ref> :range [...] [:transform ...])
    if isinstance(value, Binding):
        # analysis_ref can be a string, node ID, or dict - serialize it properly
        if isinstance(value.analysis_ref, str):
            ref_str = f'"{value.analysis_ref}"'
        else:
            ref_str = to_sexp(value.analysis_ref, 0)
        s = f'(bind {ref_str} :range [{value.range_min} {value.range_max}]'
        if value.transform:
            s += f' :transform {value.transform}'
        return s + ')'

    # Binding dicts from the compiler (convert to bind sexp format)
    if isinstance(value, dict) and value.get("_binding"):
        source = value.get("source", "")
        range_val = value.get("range", [0.0, 1.0])
        # Guard both ends against a non-list or empty range. Previously only
        # range_max checked the length, so range=[] raised IndexError here.
        range_min = range_val[0] if isinstance(range_val, list) and range_val else 0.0
        range_max = range_val[1] if isinstance(range_val, list) and len(range_val) > 1 else 1.0
        transform = value.get("transform")
        offset = value.get("offset")
        s = f'(bind "{source}" :range [{range_min} {range_max}]'
        if offset:
            s += f' :offset {offset}'
        if transform:
            s += f' :transform {transform}'
        return s + ')'

    # Symbol - serialize as a bare identifier
    if isinstance(value, Symbol):
        return value.name

    # Keyword - serialize with colon prefix
    if isinstance(value, Keyword):
        return f':{value.name}'

    # Lambda - (fn [params] body)
    if isinstance(value, Lambda):
        params = " ".join(value.params)
        body = to_sexp(value.body, 0)
        return f'(fn [{params}] {body})'

    if isinstance(value, dict):
        if not value:
            return "()"
        items = []
        for k, v in value.items():
            if isinstance(k, str):
                # Keys starting with _ are internal markers - keep underscore to avoid :-foo
                if k.startswith('_'):
                    key_str = k  # Keep as-is: _binding -> :_binding
                else:
                    key_str = k.replace('_', '-')
            else:
                key_str = str(k)
            items.append(f":{key_str} {to_sexp(v, 0)}")
        return "(" + " ".join(items) + ")"
    elif isinstance(value, list):
        if not value:
            return "()"
        items = [to_sexp(v, 0) for v in value]
        return "(" + " ".join(items) + ")"
    elif isinstance(value, str):
        # Escape special characters in strings
        escaped = value.replace('\\', '\\\\').replace('"', '\\"').replace('\n', '\\n')
        return f'"{escaped}"'
    elif isinstance(value, bool):
        # Must be checked before (int, float): bool is a subclass of int.
        return "true" if value else "false"
    elif isinstance(value, (int, float)):
        return str(value)
    elif value is None:
        return "nil"
    else:
        # For any unknown type, convert to string and quote it
        return f'"{str(value)}"'
def plan_recipe(recipe_path: Path, output_format: str = "text", output_file: Path = None, analysis_path: Path = None, params: dict = None):
    """Compile a recipe, expand dynamic nodes using analysis, and emit a plan.

    Args:
        recipe_path: Path to the recipe file.
        output_format: Output format ("text", "json", or "sexp").
        output_file: Optional output file path (stdout when omitted).
        analysis_path: Optional pre-computed analysis file ("-" reads stdin).
        params: Optional dict of name -> value bindings injected into compilation.
    """

    source_text = recipe_path.read_text()
    recipe_dir = recipe_path.parent

    print(f"Compiling: {recipe_path}", file=sys.stderr)
    if params:
        print(f"Parameters: {params}", file=sys.stderr)
    compiled = compile_string(source_text, params)
    print(f"Recipe: {compiled.name} v{compiled.version}", file=sys.stderr)
    print(f"Nodes: {len(compiled.nodes)}", file=sys.stderr)

    # Pre-computed analysis may come from a file or from stdin ("-").
    pre_analysis = None
    if analysis_path:
        if str(analysis_path) == "-":
            print(f"Loading analysis: stdin", file=sys.stderr)
            analysis_text = sys.stdin.read()
        else:
            print(f"Loading analysis: {analysis_path}", file=sys.stderr)
            analysis_text = analysis_path.read_text()
        pre_analysis = parse_analysis_sexp(analysis_text)
        print(f" Tracks: {list(pre_analysis.keys())}", file=sys.stderr)

    # Analyzer results are collected here so they can be embedded in the plan.
    analysis_data = {}

    def on_analysis(node_id, results):
        analysis_data[node_id] = results
        times = results.get("times", [])
        print(f" Analysis complete: {len(times)} beat times", file=sys.stderr)

    # Create plan (uses pre_analysis or runs analyzers, expands SLICE_ON)
    print("\n--- Planning ---", file=sys.stderr)
    plan = create_plan(
        compiled,
        inputs={},
        recipe_dir=recipe_dir,
        on_analysis=on_analysis,
        pre_analysis=pre_analysis,
    )

    print(f"\nPlan ID: {plan.plan_id[:16]}...", file=sys.stderr)
    print(f"Steps: {len(plan.steps)}", file=sys.stderr)

    # Render the plan in the requested format.
    if output_format == "sexp":
        rendered = generate_sexp_output(compiled, plan, analysis_data)
    elif output_format == "json":
        rendered = generate_json_output(compiled, plan, analysis_data)
    else:
        rendered = generate_text_output(compiled, plan, analysis_data)

    # Write to the requested file, or echo to stdout for piping.
    if output_file:
        output_file.write_text(rendered)
        print(f"\nPlan written to: {output_file}", file=sys.stderr)
    else:
        print(rendered)
class PlanJSONEncoder(json.JSONEncoder):
    """JSON encoder aware of plan objects.

    Bindings, Symbols and Keywords are emitted as tagged dicts whose
    "_type" field lets a consumer reconstruct the original object.
    """

    def default(self, obj):
        if isinstance(obj, Binding):
            payload = {"_type": "binding"}
            for attr in ("analysis_ref", "track", "range_min", "range_max", "transform"):
                payload[attr] = getattr(obj, attr)
            return payload
        for cls, tag in ((Symbol, "symbol"), (Keyword, "keyword")):
            if isinstance(obj, cls):
                return {"_type": tag, "name": obj.name}
        # Anything else is handed to the base class, which raises TypeError.
        return super().default(obj)
def generate_sexp_output(compiled, plan, analysis_data):
    """Render the plan as an S-expression document."""
    out = [
        f'(plan "{compiled.name}"',
        f' :version "{compiled.version}"',
        f' :plan-id "{plan.plan_id}"',
    ]

    if compiled.encoding:
        out.append(f' :encoding {to_sexp(compiled.encoding)}')

    # Analysis tracks are embedded in full so effect parameter bindings can
    # be resolved at execution time.
    if plan.analysis:
        out += ['', ' (analysis']
        for name, data in plan.analysis.items():
            times_str = " ".join(str(t) for t in data.get("times", []))
            values_str = " ".join(str(v) for v in data.get("values", []))
            out += [
                f' ({name}',
                f' :times ({times_str})',
                f' :values ({values_str}))',
            ]
        out.append(' )')

    out.append('')

    # One (step ...) form per plan step, config keys kebab-cased.
    for step in plan.steps:
        out.append(f' (step "{step.step_id}"')
        out.append(f' :type {step.node_type}')
        out.append(f' :level {step.level}')
        out.append(f' :cache "{step.cache_id}"')
        if step.inputs:
            joined = " ".join(f'"{i}"' for i in step.inputs)
            out.append(f' :inputs ({joined})')
        for key, value in step.config.items():
            out.append(f' :{key.replace("_", "-")} {to_sexp(value)}')
        out.append(' )')

    out += ['', f' :output "{plan.output_step_id}")']

    return '\n'.join(out)
def parse_param(param_str: str) -> tuple:
    """Parse a "key=value" parameter string into a (key, value) tuple.

    Both sides are whitespace-stripped. The value is coerced to int when
    possible, then float; otherwise it is returned as the raw string.

    Args:
        param_str: String in format "key=value".

    Returns:
        Tuple of (key, parsed_value).

    Raises:
        ValueError: If the string contains no "=".
    """
    if "=" not in param_str:
        raise ValueError(f"Invalid parameter format: {param_str} (expected key=value)")

    key, _, raw = param_str.partition("=")
    key = key.strip()
    raw = raw.strip()

    # Narrowest numeric type first: int, then float, then plain string.
    for caster in (int, float):
        try:
            return (key, caster(raw))
        except ValueError:
            pass

    return (key, raw)
args.params: + try: + key, value = parse_param(param_str) + params[key] = value + except ValueError as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + if args.json: + fmt = "json" + elif args.text: + fmt = "text" + else: + fmt = "sexp" + + plan_recipe(args.recipe, fmt, args.output, args.analysis, params or None) diff --git a/run-effect.sh b/run-effect.sh new file mode 100644 index 0000000..1c7114a --- /dev/null +++ b/run-effect.sh @@ -0,0 +1,122 @@ +#!/bin/bash +# Run a single effect by number (0-42) +# Usage: ./run-effect.sh +# +# Note: For effects with simple numeric params, you can also use native params: +# python3 plan.py recipe-parametric.sexp -p strength=5 -p amount=30 | python3 execute.py - -d . -o output.mp4 + +EFFECT_NUM=${1:-0} + +# Effect definitions array +EFFECTS=( + "(effect invert)" + "(effect grayscale)" + "(effect sepia)" + "(effect brightness :amount 30)" + "(effect contrast :amount 1.5)" + "(effect saturation :amount 2.0)" + "(effect hue_shift :degrees 90)" + "(effect color_cycle :speed 2)" + "(effect threshold :level 128)" + "(effect posterize :levels 6)" + "(effect blur :radius 10)" + "(effect sharpen :amount 2)" + "(effect bloom :intensity 0.6 :radius 20)" + "(effect color-adjust :brightness 20 :contrast 1.2)" + "(effect swirl :strength 3)" + "(effect fisheye :strength 0.5)" + "(effect wave :amplitude 30 :wavelength 60)" + "(effect ripple :amplitude 20 :frequency 6)" + "(effect kaleidoscope :segments 6 :rotation_speed 30)" + "(effect zoom :factor 1.2)" + "(effect rotate :angle 15)" + "(effect mirror :direction \"horizontal\")" + "(effect pixelate :block_size 16)" + "(effect ascii_art :char_size 8 :color_mode \"color\")" + "(effect ascii_zones :char_size 10)" + "(effect edge_detect :low 50 :high 150)" + "(effect emboss :strength 1.5)" + "(effect outline :thickness 2)" + "(effect neon_glow :glow_radius 20 :glow_intensity 2)" + "(effect crt :line_spacing 3 :vignette_amount 0.3)" + "(effect scanlines :spacing 3 :intensity 0.4)" + 
"(effect film_grain :intensity 0.25)" + "(effect vignette :strength 0.6)" + "(effect noise :amount 40)" + "(effect rgb_split :offset_x 20)" + "(effect echo :num_echoes 4 :decay 0.5)" + "(effect trails :persistence 0.7)" + "(effect strobe :frequency 4)" + "(effect flip :direction \"horizontal\")" + "(effect tile_grid :rows 2 :cols 2)" + "(effect pixelsort :threshold_low 30 :threshold_high 220)" + "(effect datamosh :corruption 0.5 :block_size 24)" +) + +if [ "$EFFECT_NUM" -lt 0 ] || [ "$EFFECT_NUM" -ge ${#EFFECTS[@]} ]; then + echo "Effect number must be 0-$((${#EFFECTS[@]}-1))" + exit 1 +fi + +EFFECT="${EFFECTS[$EFFECT_NUM]}" +echo "Running effect $EFFECT_NUM: $EFFECT" + +# Create temp recipe with selected effect +cat > /tmp/recipe-temp.sexp << EOF +(recipe "effect-test" + :version "1.0" + :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) + + (effect ascii_art :path "sexp_effects/effects/ascii_art.sexp") + (effect ascii_zones :path "sexp_effects/effects/ascii_zones.sexp") + (effect bloom :path "sexp_effects/effects/bloom.sexp") + (effect blur :path "sexp_effects/effects/blur.sexp") + (effect brightness :path "sexp_effects/effects/brightness.sexp") + (effect color-adjust :path "sexp_effects/effects/color-adjust.sexp") + (effect color_cycle :path "sexp_effects/effects/color_cycle.sexp") + (effect contrast :path "sexp_effects/effects/contrast.sexp") + (effect crt :path "sexp_effects/effects/crt.sexp") + (effect datamosh :path "sexp_effects/effects/datamosh.sexp") + (effect echo :path "sexp_effects/effects/echo.sexp") + (effect edge_detect :path "sexp_effects/effects/edge_detect.sexp") + (effect emboss :path "sexp_effects/effects/emboss.sexp") + (effect film_grain :path "sexp_effects/effects/film_grain.sexp") + (effect fisheye :path "sexp_effects/effects/fisheye.sexp") + (effect flip :path "sexp_effects/effects/flip.sexp") + (effect grayscale :path "sexp_effects/effects/grayscale.sexp") + (effect hue_shift :path 
"sexp_effects/effects/hue_shift.sexp") + (effect invert :path "sexp_effects/effects/invert.sexp") + (effect kaleidoscope :path "sexp_effects/effects/kaleidoscope.sexp") + (effect mirror :path "sexp_effects/effects/mirror.sexp") + (effect neon_glow :path "sexp_effects/effects/neon_glow.sexp") + (effect noise :path "sexp_effects/effects/noise.sexp") + (effect outline :path "sexp_effects/effects/outline.sexp") + (effect pixelate :path "sexp_effects/effects/pixelate.sexp") + (effect pixelsort :path "sexp_effects/effects/pixelsort.sexp") + (effect posterize :path "sexp_effects/effects/posterize.sexp") + (effect rgb_split :path "sexp_effects/effects/rgb_split.sexp") + (effect ripple :path "sexp_effects/effects/ripple.sexp") + (effect rotate :path "sexp_effects/effects/rotate.sexp") + (effect saturation :path "sexp_effects/effects/saturation.sexp") + (effect scanlines :path "sexp_effects/effects/scanlines.sexp") + (effect sepia :path "sexp_effects/effects/sepia.sexp") + (effect sharpen :path "sexp_effects/effects/sharpen.sexp") + (effect strobe :path "sexp_effects/effects/strobe.sexp") + (effect swirl :path "sexp_effects/effects/swirl.sexp") + (effect threshold :path "sexp_effects/effects/threshold.sexp") + (effect tile_grid :path "sexp_effects/effects/tile_grid.sexp") + (effect trails :path "sexp_effects/effects/trails.sexp") + (effect vignette :path "sexp_effects/effects/vignette.sexp") + (effect wave :path "sexp_effects/effects/wave.sexp") + (effect zoom :path "sexp_effects/effects/zoom.sexp") + + (def video (source :path "monday.webm")) + (def audio (source :path "dizzy.mp3")) + (def clip (-> video (segment :start 0 :duration 10))) + (def audio-clip (-> audio (segment :start 0 :duration 10))) + (def result (-> clip $EFFECT)) + (mux result audio-clip)) +EOF + +python3 plan.py /tmp/recipe-temp.sexp | python3 execute.py - -d . 
-o "effect-${EFFECT_NUM}.mp4" +echo "Output: effect-${EFFECT_NUM}.mp4" diff --git a/run-file.sh b/run-file.sh new file mode 100755 index 0000000..adacb4b --- /dev/null +++ b/run-file.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# Run the full pipeline: analyze -> plan -> execute -> play +# Usage: ./run.sh recipe.sexp + +RECIPE="${1:-recipe-bound.sexp}" + +python analyze.py "$RECIPE" | python plan.py "$RECIPE" -a - | python execute.py - -d "$(dirname "$RECIPE")" -o output.mp4 diff --git a/run.py b/run.py new file mode 100755 index 0000000..23703c7 --- /dev/null +++ b/run.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python3 +""" +Run a recipe: plan then execute. + +This is a convenience wrapper that: +1. Generates a plan (runs analyzers, expands SLICE_ON) +2. Executes the plan (produces video output) +""" + +import json +import sys +import tempfile +from pathlib import Path + +# Add artdag to path +sys.path.insert(0, str(Path(__file__).parent.parent / "artdag")) + +from artdag.sexp import compile_string +from artdag.sexp.planner import create_plan +from artdag.sexp.parser import Binding + +# Import execute functionality +from execute import execute_plan + + +class PlanEncoder(json.JSONEncoder): + """JSON encoder that handles Binding objects.""" + def default(self, obj): + if isinstance(obj, Binding): + return { + "_bind": obj.analysis_ref, + "range_min": obj.range_min, + "range_max": obj.range_max, + } + return super().default(obj) + + +def run_recipe(recipe_path: Path, output_path: Path = None): + """Run a recipe file: plan then execute.""" + + recipe_text = recipe_path.read_text() + recipe_dir = recipe_path.parent + + print(f"=== COMPILE ===") + print(f"Recipe: {recipe_path}") + compiled = compile_string(recipe_text) + print(f"Name: {compiled.name} v{compiled.version}") + print(f"Nodes: {len(compiled.nodes)}") + + # Track analysis results + analysis_data = {} + + def on_analysis(node_id, results): + analysis_data[node_id] = results + times = results.get("times", []) + print(f" 
Analysis: {len(times)} beat times @ {results.get('tempo', 0):.1f} BPM") + + # Generate plan + print(f"\n=== PLAN ===") + plan = create_plan( + compiled, + inputs={}, + recipe_dir=recipe_dir, + on_analysis=on_analysis, + ) + + print(f"Plan ID: {plan.plan_id[:16]}...") + print(f"Steps: {len(plan.steps)}") + + # Write plan to temp file for execute + plan_dict = { + "plan_id": plan.plan_id, + "recipe_id": compiled.name, + "recipe_hash": plan.recipe_hash, + "encoding": compiled.encoding, + "output_step_id": plan.output_step_id, + "steps": [], + } + + for step in plan.steps: + step_dict = { + "step_id": step.step_id, + "node_type": step.node_type, + "config": step.config, + "inputs": step.inputs, + "level": step.level, + "cache_id": step.cache_id, + } + if step.node_type == "ANALYZE" and step.step_id in analysis_data: + step_dict["config"]["analysis_results"] = analysis_data[step.step_id] + plan_dict["steps"].append(step_dict) + + # Save plan + work_dir = Path(tempfile.mkdtemp(prefix="artdag_run_")) + plan_file = work_dir / "plan.json" + with open(plan_file, "w") as f: + json.dump(plan_dict, f, indent=2, cls=PlanEncoder) + + print(f"Plan saved: {plan_file}") + + # Execute plan + print(f"\n=== EXECUTE ===") + result = execute_plan(plan_file, output_path, recipe_dir) + + print(f"\n=== DONE ===") + print(f"Output: {result}") + return result + + +if __name__ == "__main__": + if len(sys.argv) < 2: + print("Usage: run.py [output.mp4]") + print() + print("Commands:") + print(" run.py - Plan and execute recipe") + print(" plan.py - Generate plan only") + print(" execute.py - Execute pre-generated plan") + sys.exit(1) + + recipe_path = Path(sys.argv[1]) + output_path = Path(sys.argv[2]) if len(sys.argv) > 2 else None + + if not recipe_path.exists(): + print(f"Recipe not found: {recipe_path}") + sys.exit(1) + + run_recipe(recipe_path, output_path) diff --git a/run.sh b/run.sh new file mode 100755 index 0000000..b65e5f4 --- /dev/null +++ b/run.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# Run 
the full pipeline: analyze -> plan -> execute -> play +# Usage: ./run.sh recipe.sexp + +RECIPE="${1:-recipe-bound.sexp}" + +python3 analyze.py "$RECIPE" | python plan.py "$RECIPE" -a - | python execute.py - -d "$(dirname "$RECIPE")" | xargs mpv --fs diff --git a/run_staged.py b/run_staged.py new file mode 100644 index 0000000..597aacb --- /dev/null +++ b/run_staged.py @@ -0,0 +1,528 @@ +#!/usr/bin/env python3 +""" +Run a staged recipe through analyze -> plan -> execute pipeline. + +This script demonstrates stage-level caching: analysis stages can be +skipped on re-run if the inputs haven't changed. + +Usage: + python3 run_staged.py recipe.sexp [-o output.mp4] + python3 run_staged.py effects/ascii_art_staged.sexp -o ascii_out.mp4 + +The script: +1. Compiles the recipe and extracts stage information +2. For each stage in topological order: + - Check stage cache (skip if hit) + - Run stage (analyze, plan, execute) + - Cache stage outputs +3. Produce final output +""" + +import os +import sys +import json +import tempfile +import shutil +import subprocess +from pathlib import Path +from typing import Dict, List, Optional, Any + +# Add artdag to path +sys.path.insert(0, str(Path(__file__).parent.parent / "artdag")) + +from artdag.sexp import compile_string, parse +from artdag.sexp.parser import Symbol, Keyword, serialize +from artdag.sexp.planner import create_plan + +# Import unified cache +import cache as unified_cache + +import hashlib + + +def _cache_analysis_tracks(plan): + """Cache each analysis track individually, replace data with cache-id refs.""" + import json as _json + for name, data in plan.analysis.items(): + json_str = _json.dumps(data, sort_keys=True) + content_cid = hashlib.sha256(json_str.encode()).hexdigest() + unified_cache.cache_store_json(content_cid, data) + plan.analysis[name] = {"_cache_id": content_cid} + + +def _resolve_analysis_refs(analysis_dict): + """Resolve cache-id refs back to full analysis data.""" + resolved = {} + for name, data in 
analysis_dict.items(): + if isinstance(data, dict) and "_cache_id" in data: + loaded = unified_cache.cache_get_json(data["_cache_id"]) + if loaded: + resolved[name] = loaded + else: + resolved[name] = data + return resolved + + +def run_staged_recipe( + recipe_path: Path, + output_path: Optional[Path] = None, + cache_dir: Optional[Path] = None, + params: Optional[Dict[str, Any]] = None, + verbose: bool = True, + force_replan: bool = False, +) -> Path: + """ + Run a staged recipe with stage-level caching. + + Args: + recipe_path: Path to the .sexp recipe file + output_path: Optional output file path + cache_dir: Optional cache directory for stage results + params: Optional parameter overrides + verbose: Print progress information + + Returns: + Path to the final output file + """ + recipe_text = recipe_path.read_text() + recipe_dir = recipe_path.parent + + # Use unified cache + content_cache_dir = unified_cache.get_content_dir() + + def log(msg: str): + if verbose: + print(msg, file=sys.stderr) + + # Store recipe source by CID + recipe_cid, _ = unified_cache.content_store_string(recipe_text) + log(f"Recipe CID: {recipe_cid[:16]}...") + + # Compile recipe + log(f"Compiling: {recipe_path}") + compiled = compile_string(recipe_text, params, recipe_dir=recipe_dir) + log(f"Recipe: {compiled.name} v{compiled.version}") + log(f"Nodes: {len(compiled.nodes)}") + + # Store effects by CID + for effect_name, effect_info in compiled.registry.get("effects", {}).items(): + effect_path = effect_info.get("path") + effect_cid = effect_info.get("cid") + if effect_path and effect_cid: + effect_file = Path(effect_path) + if effect_file.exists(): + stored_cid, _ = unified_cache.content_store_file(effect_file) + if stored_cid == effect_cid: + log(f"Effect '{effect_name}' CID: {effect_cid[:16]}...") + else: + log(f"Warning: Effect '{effect_name}' CID mismatch") + + # Store analyzers by CID + for analyzer_name, analyzer_info in compiled.registry.get("analyzers", {}).items(): + analyzer_path 
= analyzer_info.get("path") + analyzer_cid = analyzer_info.get("cid") + if analyzer_path: + analyzer_file = Path(analyzer_path) if Path(analyzer_path).is_absolute() else recipe_dir / analyzer_path + if analyzer_file.exists(): + stored_cid, _ = unified_cache.content_store_file(analyzer_file) + log(f"Analyzer '{analyzer_name}' CID: {stored_cid[:16]}...") + + # Store included files by CID + for include_path, include_cid in compiled.registry.get("includes", {}).items(): + include_file = Path(include_path) + if include_file.exists(): + stored_cid, _ = unified_cache.content_store_file(include_file) + if stored_cid == include_cid: + log(f"Include '{include_file.name}' CID: {include_cid[:16]}...") + else: + log(f"Warning: Include '{include_file.name}' CID mismatch") + + # Check for stages + if not compiled.stages: + log("No stages found - running as regular recipe") + return _run_non_staged(compiled, recipe_dir, output_path, verbose) + + log(f"\nStages: {len(compiled.stages)}") + log(f"Stage order: {compiled.stage_order}") + + # Display stage info + for stage in compiled.stages: + log(f"\n Stage: {stage.name}") + log(f" Requires: {stage.requires or '(none)'}") + log(f" Inputs: {stage.inputs or '(none)'}") + log(f" Outputs: {stage.outputs}") + + # Create plan with analysis + log("\n--- Planning ---") + analysis_data = {} + + def on_analysis(node_id: str, results: dict): + analysis_data[node_id] = results + times = results.get("times", []) + log(f" Analysis complete: {node_id[:16]}... 
({len(times)} times)") + + # Check for cached plan using unified cache + plan_cid = unified_cache.plan_exists(recipe_cid, params) + + if plan_cid and not force_replan: + plan_cache_path = unified_cache.plan_get_path(recipe_cid, params) + log(f"\nFound cached plan: {plan_cid[:16]}...") + plan_sexp_str = unified_cache.plan_load(recipe_cid, params) + + # Parse the cached plan + from execute import parse_plan_input + plan_dict = parse_plan_input(plan_sexp_str) + + # Resolve cache-id refs in plan's embedded analysis + if "analysis" in plan_dict: + plan_dict["analysis"] = _resolve_analysis_refs(plan_dict["analysis"]) + + # Load analysis data from unified cache + analysis_data = {} + for step in plan_dict.get("steps", []): + if step.get("node_type") == "ANALYZE": + step_id = step.get("step_id") + cached_analysis = unified_cache.cache_get_json(step_id) + if cached_analysis: + analysis_data[step_id] = cached_analysis + log(f" Loaded analysis: {step_id[:16]}...") + + log(f"Plan ID: {plan_dict.get('plan_id', 'unknown')[:16]}...") + log(f"Steps: {len(plan_dict.get('steps', []))}") + log(f"Analysis tracks: {list(analysis_data.keys())}") + + # Execute directly from cached plan + log("\n--- Execution (from cached plan) ---") + from execute import execute_plan + + result_path = execute_plan( + plan_path=plan_cache_path, + output_path=output_path, + recipe_dir=recipe_dir, + external_analysis=analysis_data, + cache_dir=content_cache_dir, + ) + + log(f"\n--- Complete ---") + log(f"Output: {result_path}") + return result_path + + # No cached plan - create new one + plan = create_plan( + compiled, + inputs={}, + recipe_dir=recipe_dir, + on_analysis=on_analysis, + ) + + log(f"\nPlan ID: {plan.plan_id[:16]}...") + log(f"Steps: {len(plan.steps)}") + log(f"Analysis tracks: {list(analysis_data.keys())}") + + # Cache analysis tracks individually and replace with cache-id refs + _cache_analysis_tracks(plan) + + # Save plan to unified cache + plan_sexp_str = plan.to_string(pretty=True) + 
plan_cache_id, plan_cid, plan_cache_path = unified_cache.plan_store(recipe_cid, params, plan_sexp_str) + log(f"Saved plan: {plan_cache_id[:16]}... → {plan_cid[:16]}...") + + # Execute the plan using execute.py logic + log("\n--- Execution ---") + from execute import execute_plan + + # Resolve cache-id refs back to full data for execution + resolved_analysis = _resolve_analysis_refs(plan.analysis) + + plan_dict = { + "plan_id": plan.plan_id, + "source_hash": plan.source_hash, + "encoding": compiled.encoding, + "output_step_id": plan.output_step_id, + "analysis": {**resolved_analysis, **analysis_data}, + "effects_registry": plan.effects_registry, + "minimal_primitives": plan.minimal_primitives, + "steps": [], + } + + for step in plan.steps: + step_dict = { + "step_id": step.step_id, + "node_type": step.node_type, + "config": step.config, + "inputs": step.inputs, + "level": step.level, + "cache_id": step.cache_id, + } + # Tag with stage info if present + if step.stage: + step_dict["stage"] = step.stage + plan_dict["steps"].append(step_dict) + + # Execute using unified cache + result_path = execute_plan( + plan_path=None, + output_path=output_path, + recipe_dir=recipe_dir, + plan_data=plan_dict, + external_analysis=analysis_data, + cache_dir=content_cache_dir, + ) + + log(f"\n--- Complete ---") + log(f"Output: {result_path}") + + return result_path + + +def _run_non_staged(compiled, recipe_dir: Path, output_path: Optional[Path], verbose: bool) -> Path: + """Run a non-staged recipe using the standard pipeline.""" + from execute import execute_plan + from plan import plan_recipe + + # This is a fallback for recipes without stages + # Just run through regular plan -> execute + raise NotImplementedError("Non-staged recipes should use plan.py | execute.py") + + +def list_cache(verbose: bool = False): + """List all cached items using the unified cache.""" + unified_cache.print_cache_listing(verbose) + + +def list_params(recipe_path: Path): + """List available parameters for 
a recipe and its effects.""" + from artdag.sexp import parse + from artdag.sexp.parser import Symbol, Keyword + from artdag.sexp.compiler import _parse_params + from artdag.sexp.effect_loader import load_sexp_effect_file + + recipe_text = recipe_path.read_text() + sexp = parse(recipe_text) + + if isinstance(sexp, list) and len(sexp) == 1: + sexp = sexp[0] + + # Find recipe name + recipe_name = sexp[1] if len(sexp) > 1 and isinstance(sexp[1], str) else recipe_path.stem + + # Find :params block and effect declarations + recipe_params = [] + effect_declarations = {} # name -> path + + i = 2 + while i < len(sexp): + item = sexp[i] + if isinstance(item, Keyword) and item.name == "params": + if i + 1 < len(sexp): + recipe_params = _parse_params(sexp[i + 1]) + i += 2 + elif isinstance(item, list) and item: + # Check for effect declaration: (effect name :path "...") + if isinstance(item[0], Symbol) and item[0].name == "effect": + if len(item) >= 2: + effect_name = item[1].name if isinstance(item[1], Symbol) else item[1] + # Find :path + j = 2 + while j < len(item): + if isinstance(item[j], Keyword) and item[j].name == "path": + if j + 1 < len(item): + effect_declarations[effect_name] = item[j + 1] + break + j += 1 + i += 1 + else: + i += 1 + + # Load effect params + effect_params = {} # effect_name -> list of ParamDef + recipe_dir = recipe_path.parent + + for effect_name, effect_rel_path in effect_declarations.items(): + effect_path = recipe_dir / effect_rel_path + if effect_path.exists() and effect_path.suffix == ".sexp": + try: + _, _, _, param_defs = load_sexp_effect_file(effect_path) + if param_defs: + effect_params[effect_name] = param_defs + except Exception as e: + print(f"Warning: Could not load params from effect {effect_name}: {e}", file=sys.stderr) + + # Print results + def print_params(params, header_prefix=""): + print(f"{header_prefix}{'Name':<20} {'Type':<8} {'Default':<12} {'Range/Choices':<20} Description") + print(f"{header_prefix}{'-' * 88}") + for p in 
params: + range_str = "" + if p.range_min is not None and p.range_max is not None: + range_str = f"[{p.range_min}, {p.range_max}]" + elif p.choices: + range_str = ", ".join(p.choices[:3]) + if len(p.choices) > 3: + range_str += "..." + + default_str = str(p.default) if p.default is not None else "-" + if len(default_str) > 10: + default_str = default_str[:9] + "…" + + print(f"{header_prefix}{p.name:<20} {p.param_type:<8} {default_str:<12} {range_str:<20} {p.description}") + + if recipe_params: + print(f"\nRecipe parameters for '{recipe_name}':\n") + print_params(recipe_params) + else: + print(f"\nRecipe '{recipe_name}' has no declared parameters.") + + if effect_params: + for effect_name, params in effect_params.items(): + print(f"\n\nEffect '{effect_name}' parameters:\n") + print_params(params) + + if not recipe_params and not effect_params: + print("\nParameters can be declared using :params block:") + print(""" + :params ( + (color_mode :type string :default "color" :desc "Character color") + (char_size :type int :default 12 :range [4 32] :desc "Cell size") + ) +""") + return + + print("\n\nUsage:") + print(f" python3 run_staged.py {recipe_path} -p = [-p = ...]") + print(f"\nExample:") + all_params = recipe_params + [p for params in effect_params.values() for p in params] + if all_params: + p = all_params[0] + example_val = p.default if p.default else ("value" if p.param_type == "string" else "1") + print(f" python3 run_staged.py {recipe_path} -p {p.name}={example_val}") + + +def main(): + import argparse + + parser = argparse.ArgumentParser( + description="Run a staged recipe with stage-level caching", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + python3 run_staged.py effects/ascii_art_fx_staged.sexp --list-params + python3 run_staged.py effects/ascii_art_fx_staged.sexp -o output.mp4 + python3 run_staged.py recipe.sexp -p color_mode=lime -p char_jitter=5 + """ + ) + parser.add_argument("recipe", type=Path, nargs="?", 
help="Recipe file (.sexp)") + parser.add_argument("-o", "--output", type=Path, help="Output file path") + parser.add_argument("-p", "--param", action="append", dest="params", + metavar="KEY=VALUE", help="Set recipe parameter") + parser.add_argument("-q", "--quiet", action="store_true", help="Suppress progress output") + parser.add_argument("--list-params", action="store_true", help="List available parameters and exit") + parser.add_argument("--list-cache", action="store_true", help="List cached items and exit") + parser.add_argument("--no-cache", action="store_true", help="Ignore cached plan, force re-planning") + parser.add_argument("--show-plan", action="store_true", help="Show the plan S-expression and exit (don't execute)") + parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output") + parser.add_argument("-j", "--jobs", type=int, default=None, + help="Max parallel workers (default: 4, or ARTDAG_WORKERS env)") + parser.add_argument("--pipelines", type=int, default=None, + help="Max concurrent video pipelines (default: 1, or ARTDAG_VIDEO_PIPELINES env)") + + args = parser.parse_args() + + # Apply concurrency limits before any execution + if args.jobs is not None: + os.environ["ARTDAG_WORKERS"] = str(args.jobs) + if args.pipelines is not None: + os.environ["ARTDAG_VIDEO_PIPELINES"] = str(args.pipelines) + from execute import set_max_video_pipelines + set_max_video_pipelines(args.pipelines) + + # List cache mode - doesn't require recipe + if args.list_cache: + list_cache(verbose=args.verbose) + sys.exit(0) + + # All other modes require a recipe + if not args.recipe: + print("Error: recipe file required", file=sys.stderr) + sys.exit(1) + + if not args.recipe.exists(): + print(f"Recipe not found: {args.recipe}", file=sys.stderr) + sys.exit(1) + + # List params mode + if args.list_params: + list_params(args.recipe) + sys.exit(0) + + # Parse parameters + params = {} + if args.params: + for param_str in args.params: + if "=" not in param_str: + 
print(f"Invalid parameter format: {param_str}", file=sys.stderr) + sys.exit(1) + key, value = param_str.split("=", 1) + # Try to parse as number + try: + value = int(value) + except ValueError: + try: + value = float(value) + except ValueError: + pass # Keep as string + params[key] = value + + # Show plan mode - generate plan and display without executing + if args.show_plan: + recipe_text = args.recipe.read_text() + recipe_dir = args.recipe.parent + + # Compute recipe CID (content hash) + recipe_cid, _ = unified_cache.content_store_string(recipe_text) + + compiled = compile_string(recipe_text, params if params else None, recipe_dir=recipe_dir) + + # Check for cached plan using unified cache (keyed by source CID + params) + plan_cid = unified_cache.plan_exists(recipe_cid, params if params else None) + + if plan_cid and not args.no_cache: + print(f";; Cached plan CID: {plan_cid}", file=sys.stderr) + plan_sexp_str = unified_cache.plan_load(recipe_cid, params if params else None) + print(plan_sexp_str) + else: + print(f";; Generating new plan...", file=sys.stderr) + analysis_data = {} + def on_analysis(node_id: str, results: dict): + analysis_data[node_id] = results + + plan = create_plan( + compiled, + inputs={}, + recipe_dir=recipe_dir, + on_analysis=on_analysis, + ) + # Cache analysis tracks individually before serialization + _cache_analysis_tracks(plan) + plan_sexp_str = plan.to_string(pretty=True) + + # Save to unified cache + cache_id, plan_cid, plan_path = unified_cache.plan_store(recipe_cid, params if params else None, plan_sexp_str) + print(f";; Saved: {cache_id[:16]}... 
→ {plan_cid}", file=sys.stderr) + print(plan_sexp_str) + sys.exit(0) + + result = run_staged_recipe( + recipe_path=args.recipe, + output_path=args.output, + params=params if params else None, + verbose=not args.quiet, + force_replan=args.no_cache, + ) + + # Print final output path + print(result) + + +if __name__ == "__main__": + main() diff --git a/sexp_effects/__init__.py b/sexp_effects/__init__.py new file mode 100644 index 0000000..b001c71 --- /dev/null +++ b/sexp_effects/__init__.py @@ -0,0 +1,32 @@ +""" +S-Expression Effects System + +Safe, shareable effects defined in S-expressions. +""" + +from .parser import parse, parse_file, Symbol, Keyword +from .interpreter import ( + Interpreter, + get_interpreter, + load_effect, + load_effects_dir, + run_effect, + list_effects, + make_process_frame, +) +from .primitives import PRIMITIVES + +__all__ = [ + 'parse', + 'parse_file', + 'Symbol', + 'Keyword', + 'Interpreter', + 'get_interpreter', + 'load_effect', + 'load_effects_dir', + 'run_effect', + 'list_effects', + 'make_process_frame', + 'PRIMITIVES', +] diff --git a/sexp_effects/effects/ascii_art.sexp b/sexp_effects/effects/ascii_art.sexp new file mode 100644 index 0000000..5565872 --- /dev/null +++ b/sexp_effects/effects/ascii_art.sexp @@ -0,0 +1,17 @@ +;; ASCII Art effect - converts image to ASCII characters +(require-primitives "ascii") + +(define-effect ascii_art + :params ( + (char_size :type int :default 8 :range [4 32]) + (alphabet :type string :default "standard") + (color_mode :type string :default "color" :desc ""color", "mono", "invert", or any color name/hex") + (background_color :type string :default "black" :desc "background color name/hex") + (invert_colors :type int :default 0 :desc "swap foreground and background colors") + (contrast :type float :default 1.5 :range [1 3]) + ) + (let* ((sample (cell-sample frame char_size)) + (colors (nth sample 0)) + (luminances (nth sample 1)) + (chars (luminance-to-chars luminances alphabet contrast))) + 
(render-char-grid frame chars colors char_size color_mode background_color invert_colors))) diff --git a/sexp_effects/effects/ascii_art_fx.sexp b/sexp_effects/effects/ascii_art_fx.sexp new file mode 100644 index 0000000..2bb14be --- /dev/null +++ b/sexp_effects/effects/ascii_art_fx.sexp @@ -0,0 +1,52 @@ +;; ASCII Art FX - converts image to ASCII characters with per-character effects +(require-primitives "ascii") + +(define-effect ascii_art_fx + :params ( + ;; Basic parameters + (char_size :type int :default 8 :range [4 32] + :desc "Size of each character cell in pixels") + (alphabet :type string :default "standard" + :desc "Character set to use") + (color_mode :type string :default "color" + :choices [color mono invert] + :desc "Color mode: color, mono, invert, or any color name/hex") + (background_color :type string :default "black" + :desc "Background color name or hex value") + (invert_colors :type int :default 0 :range [0 1] + :desc "Swap foreground and background colors (0/1)") + (contrast :type float :default 1.5 :range [1 3] + :desc "Character selection contrast") + + ;; Per-character effects + (char_jitter :type float :default 0 :range [0 20] + :desc "Position jitter amount in pixels") + (char_scale :type float :default 1.0 :range [0.5 2.0] + :desc "Character scale factor") + (char_rotation :type float :default 0 :range [0 180] + :desc "Rotation amount in degrees") + (char_hue_shift :type float :default 0 :range [0 360] + :desc "Hue shift in degrees") + + ;; Modulation sources + (jitter_source :type string :default "none" + :choices [none luminance inv_luminance saturation position_x position_y position_diag random center_dist] + :desc "What drives jitter modulation") + (scale_source :type string :default "none" + :choices [none luminance inv_luminance saturation position_x position_y position_diag random center_dist] + :desc "What drives scale modulation") + (rotation_source :type string :default "none" + :choices [none luminance inv_luminance saturation 
position_x position_y position_diag random center_dist] + :desc "What drives rotation modulation") + (hue_source :type string :default "none" + :choices [none luminance inv_luminance saturation position_x position_y position_diag random center_dist] + :desc "What drives hue shift modulation") + ) + (let* ((sample (cell-sample frame char_size)) + (colors (nth sample 0)) + (luminances (nth sample 1)) + (chars (luminance-to-chars luminances alphabet contrast))) + (render-char-grid-fx frame chars colors luminances char_size + color_mode background_color invert_colors + char_jitter char_scale char_rotation char_hue_shift + jitter_source scale_source rotation_source hue_source))) diff --git a/sexp_effects/effects/ascii_fx_zone.sexp b/sexp_effects/effects/ascii_fx_zone.sexp new file mode 100644 index 0000000..69e5340 --- /dev/null +++ b/sexp_effects/effects/ascii_fx_zone.sexp @@ -0,0 +1,102 @@ +;; Composable ASCII Art with Per-Zone Expression-Driven Effects +;; Requires ascii primitive library for the ascii-fx-zone primitive + +(require-primitives "ascii") + +;; Two modes of operation: +;; +;; 1. EXPRESSION MODE: Use zone-* variables in expression parameters +;; Zone variables available: +;; zone-row, zone-col: Grid position (integers) +;; zone-row-norm, zone-col-norm: Normalized position (0-1) +;; zone-lum: Cell luminance (0-1) +;; zone-sat: Cell saturation (0-1) +;; zone-hue: Cell hue (0-360) +;; zone-r, zone-g, zone-b: RGB components (0-1) +;; +;; Example: +;; (ascii-fx-zone frame +;; :cols 80 +;; :char_hue (* zone-lum 180) +;; :char_rotation (* zone-col-norm 30)) +;; +;; 2. CELL EFFECT MODE: Pass a lambda to apply arbitrary effects per-cell +;; The lambda receives (cell-image zone-dict) and returns modified cell. +;; Zone dict contains: row, col, row-norm, col-norm, lum, sat, hue, r, g, b, +;; char, color, cell_size, plus any bound analysis values. 
+;; +;; Any loaded sexp effect can be called on cells - each cell is just a small frame: +;; (blur cell radius) - Gaussian blur +;; (rotate cell angle) - Rotate by angle degrees +;; (brightness cell factor) - Adjust brightness +;; (contrast cell factor) - Adjust contrast +;; (saturation cell factor) - Adjust saturation +;; (hue_shift cell degrees) - Shift hue +;; (rgb_split cell offset_x offset_y) - RGB channel split +;; (invert cell) - Invert colors +;; (pixelate cell block_size) - Pixelate +;; (wave cell amplitude freq) - Wave distortion +;; ... and any other loaded effect +;; +;; Example: +;; (ascii-fx-zone frame +;; :cols 60 +;; :cell_effect (lambda [cell zone] +;; (blur (rotate cell (* (get zone "energy") 45)) +;; (if (> (get zone "lum") 0.5) 3 0)))) + +(define-effect ascii_fx_zone + :params ( + (cols :type int :default 80 :range [20 200] + :desc "Number of character columns") + (char_size :type int :default nil :range [4 32] + :desc "Character cell size in pixels (overrides cols if set)") + (alphabet :type string :default "standard" + :desc "Character set: standard, blocks, simple, digits, or custom string") + (color_mode :type string :default "color" + :desc "Color mode: color, mono, invert, or any color name/hex") + (background :type string :default "black" + :desc "Background color name or hex value") + (contrast :type float :default 1.5 :range [0.5 3.0] + :desc "Contrast for character selection") + (char_hue :type any :default nil + :desc "Hue shift expression (evaluated per-zone with zone-* vars)") + (char_saturation :type any :default nil + :desc "Saturation multiplier expression (1.0 = unchanged)") + (char_brightness :type any :default nil + :desc "Brightness multiplier expression (1.0 = unchanged)") + (char_scale :type any :default nil + :desc "Character scale expression (1.0 = normal size)") + (char_rotation :type any :default nil + :desc "Character rotation expression (degrees)") + (char_jitter :type any :default nil + :desc "Position jitter 
expression (pixels)") + (cell_effect :type any :default nil + :desc "Lambda (cell zone) -> cell for arbitrary per-cell effects") + ;; Convenience params for staged recipes (avoids compile-time expression issues) + (energy :type float :default nil + :desc "Energy multiplier (0-1) from audio analysis bind") + (rotation_scale :type float :default 0 + :desc "Max rotation at top-right when energy=1 (degrees)") + ) + ;; The ascii-fx-zone special form handles expression params + ;; If energy + rotation_scale provided, it builds: energy * scale * position_factor + ;; where position_factor = 0 at bottom-left, 3 at top-right + ;; If cell_effect provided, each character is rendered to a cell image, + ;; passed to the lambda, and the result composited back + (ascii-fx-zone frame + :cols cols + :char_size char_size + :alphabet alphabet + :color_mode color_mode + :background background + :contrast contrast + :char_hue char_hue + :char_saturation char_saturation + :char_brightness char_brightness + :char_scale char_scale + :char_rotation char_rotation + :char_jitter char_jitter + :cell_effect cell_effect + :energy energy + :rotation_scale rotation_scale)) diff --git a/sexp_effects/effects/ascii_zones.sexp b/sexp_effects/effects/ascii_zones.sexp new file mode 100644 index 0000000..6bc441c --- /dev/null +++ b/sexp_effects/effects/ascii_zones.sexp @@ -0,0 +1,30 @@ +;; ASCII Zones effect - different character sets for different brightness zones +;; Dark areas use simple chars, mid uses standard, bright uses blocks +(require-primitives "ascii") + +(define-effect ascii_zones + :params ( + (char_size :type int :default 8 :range [4 32]) + (dark_threshold :type int :default 80 :range [0 128]) + (bright_threshold :type int :default 180 :range [128 255]) + (color_mode :type string :default "color") + ) + (let* ((sample (cell-sample frame char_size)) + (colors (nth sample 0)) + (luminances (nth sample 1)) + ;; Start with simple chars as base + (base-chars (luminance-to-chars luminances 
"simple" 1.2)) + ;; Map each cell to appropriate alphabet based on brightness zone + (zoned-chars (map-char-grid base-chars luminances + (lambda (r c ch lum) + (cond + ;; Bright zones: use block characters + ((> lum bright_threshold) + (alphabet-char "blocks" (floor (/ (- lum bright_threshold) 15)))) + ;; Dark zones: use simple sparse chars + ((< lum dark_threshold) + (alphabet-char " .-" (floor (/ lum 30)))) + ;; Mid zones: use standard ASCII + (else + (alphabet-char "standard" (floor (/ lum 4))))))))) + (render-char-grid frame zoned-chars colors char_size color_mode (list 0 0 0)))) diff --git a/sexp_effects/effects/blend.sexp b/sexp_effects/effects/blend.sexp new file mode 100644 index 0000000..bf7fefd --- /dev/null +++ b/sexp_effects/effects/blend.sexp @@ -0,0 +1,31 @@ +;; Blend effect - combines two video frames +;; Streaming-compatible: frame is background, overlay is second frame +;; Usage: (blend background overlay :opacity 0.5 :mode "alpha") +;; +;; Params: +;; mode - blend mode (add, multiply, screen, overlay, difference, lighten, darken, alpha) +;; opacity - blend amount (0-1) + +(require-primitives "image" "blending" "core") + +(define-effect blend + :params ( + (overlay :type frame :default nil) + (mode :type string :default "alpha") + (opacity :type float :default 0.5) + ) + (if (core:is-nil overlay) + frame + (let [a frame + b overlay + a-h (image:height a) + a-w (image:width a) + b-h (image:height b) + b-w (image:width b) + ;; Resize b to match a if needed + b-sized (if (and (= a-w b-w) (= a-h b-h)) + b + (image:resize b a-w a-h "linear"))] + (if (= mode "alpha") + (blending:blend-images a b-sized opacity) + (blending:blend-images a (blending:blend-mode a b-sized mode) opacity))))) diff --git a/sexp_effects/effects/blend_multi.sexp b/sexp_effects/effects/blend_multi.sexp new file mode 100644 index 0000000..1ee160f --- /dev/null +++ b/sexp_effects/effects/blend_multi.sexp @@ -0,0 +1,58 @@ +;; N-way weighted blend effect +;; Streaming-compatible: pass 
inputs as a list of frames +;; Usage: (blend_multi :inputs [(read a) (read b) (read c)] :weights [0.3 0.4 0.3]) +;; +;; Parameters: +;; inputs - list of N frames to blend +;; weights - list of N floats, one per input (resolved per-frame) +;; mode - blend mode applied when folding each frame in: +;; "alpha" — pure weighted average (default) +;; "multiply" — darken by multiplication +;; "screen" — lighten (inverse multiply) +;; "overlay" — contrast-boosting midtone blend +;; "soft-light" — gentle dodge/burn +;; "hard-light" — strong dodge/burn +;; "color-dodge" — brightens towards white +;; "color-burn" — darkens towards black +;; "difference" — absolute pixel difference +;; "exclusion" — softer difference +;; "add" — additive (clamped) +;; "subtract" — subtractive (clamped) +;; "darken" — per-pixel minimum +;; "lighten" — per-pixel maximum +;; resize_mode - how to match frame dimensions (fit, crop, stretch) +;; +;; Uses a left-fold over inputs[1..N-1]. At each step the running +;; opacity is: w[i] / (w[0] + w[1] + ... + w[i]) +;; which produces the correct normalised weighted result. 
+ +(require-primitives "image" "blending") + +(define-effect blend_multi + :params ( + (inputs :type list :default []) + (weights :type list :default []) + (mode :type string :default "alpha") + (resize_mode :type string :default "fit") + ) + (let [n (len inputs) + ;; Target dimensions from first frame + target-w (image:width (nth inputs 0)) + target-h (image:height (nth inputs 0)) + ;; Fold over indices 1..n-1 + ;; Accumulator is (list blended-frame running-weight-sum) + seed (list (nth inputs 0) (nth weights 0)) + result (reduce (range 1 n) seed + (lambda (pair i) + (let [acc (nth pair 0) + running (nth pair 1) + w (nth weights i) + new-running (+ running w) + opacity (/ w (max new-running 0.001)) + f (image:resize (nth inputs i) target-w target-h "linear") + ;; Apply blend mode then mix with opacity + blended (if (= mode "alpha") + (blending:blend-images acc f opacity) + (blending:blend-images acc (blending:blend-mode acc f mode) opacity))] + (list blended new-running))))] + (nth result 0))) diff --git a/sexp_effects/effects/bloom.sexp b/sexp_effects/effects/bloom.sexp new file mode 100644 index 0000000..3524d01 --- /dev/null +++ b/sexp_effects/effects/bloom.sexp @@ -0,0 +1,16 @@ +;; Bloom effect - glow on bright areas +(require-primitives "image" "blending") + +(define-effect bloom + :params ( + (intensity :type float :default 0.5 :range [0 2]) + (threshold :type int :default 200 :range [0 255]) + (radius :type int :default 15 :range [1 50]) + ) + (let* ((bright (map-pixels frame + (lambda (x y c) + (if (> (luminance c) threshold) + c + (rgb 0 0 0))))) + (blurred (image:blur bright radius))) + (blending:blend-mode frame blurred "add"))) diff --git a/sexp_effects/effects/blur.sexp b/sexp_effects/effects/blur.sexp new file mode 100644 index 0000000..b71a55a --- /dev/null +++ b/sexp_effects/effects/blur.sexp @@ -0,0 +1,8 @@ +;; Blur effect - gaussian blur +(require-primitives "image") + +(define-effect blur + :params ( + (radius :type int :default 5 :range [1 50]) 
+ ) + (image:blur frame (max 1 radius))) diff --git a/sexp_effects/effects/brightness.sexp b/sexp_effects/effects/brightness.sexp new file mode 100644 index 0000000..4af53a7 --- /dev/null +++ b/sexp_effects/effects/brightness.sexp @@ -0,0 +1,9 @@ +;; Brightness effect - adjusts overall brightness +;; Uses vectorized adjust primitive for fast processing +(require-primitives "color_ops") + +(define-effect brightness + :params ( + (amount :type int :default 0 :range [-255 255]) + ) + (color_ops:adjust-brightness frame amount)) diff --git a/sexp_effects/effects/color-adjust.sexp b/sexp_effects/effects/color-adjust.sexp new file mode 100644 index 0000000..5318bdd --- /dev/null +++ b/sexp_effects/effects/color-adjust.sexp @@ -0,0 +1,13 @@ +;; Color adjustment effect - replaces TRANSFORM node +(require-primitives "color_ops") + +(define-effect color-adjust + :params ( + (brightness :type int :default 0 :range [-255 255] :desc "Brightness adjustment") + (contrast :type float :default 1 :range [0 3] :desc "Contrast multiplier") + (saturation :type float :default 1 :range [0 2] :desc "Saturation multiplier") + ) + (-> frame + (color_ops:adjust-brightness brightness) + (color_ops:adjust-contrast contrast) + (color_ops:adjust-saturation saturation))) diff --git a/sexp_effects/effects/color_cycle.sexp b/sexp_effects/effects/color_cycle.sexp new file mode 100644 index 0000000..e08dbb6 --- /dev/null +++ b/sexp_effects/effects/color_cycle.sexp @@ -0,0 +1,13 @@ +;; Color Cycle effect - animated hue rotation +(require-primitives "color_ops") + +(define-effect color_cycle + :params ( + (speed :type int :default 1 :range [0 10]) + ) + (let ((shift (* t speed 360))) + (map-pixels frame + (lambda (x y c) + (let* ((hsv (rgb->hsv c)) + (new-h (mod (+ (first hsv) shift) 360))) + (hsv->rgb (list new-h (nth hsv 1) (nth hsv 2)))))))) diff --git a/sexp_effects/effects/contrast.sexp b/sexp_effects/effects/contrast.sexp new file mode 100644 index 0000000..660661d --- /dev/null +++ 
b/sexp_effects/effects/contrast.sexp @@ -0,0 +1,9 @@ +;; Contrast effect - adjusts image contrast +;; Uses vectorized adjust primitive for fast processing +(require-primitives "color_ops") + +(define-effect contrast + :params ( + (amount :type int :default 1 :range [0.5 3]) + ) + (color_ops:adjust-contrast frame amount)) diff --git a/sexp_effects/effects/crt.sexp b/sexp_effects/effects/crt.sexp new file mode 100644 index 0000000..097eaf9 --- /dev/null +++ b/sexp_effects/effects/crt.sexp @@ -0,0 +1,30 @@ +;; CRT effect - old monitor simulation +(require-primitives "image") + +(define-effect crt + :params ( + (line_spacing :type int :default 2 :range [1 10]) + (line_opacity :type float :default 0.3 :range [0 1]) + (vignette_amount :type float :default 0.2) + ) + (let* ((w (image:width frame)) + (h (image:height frame)) + (cx (/ w 2)) + (cy (/ h 2)) + (max-dist (sqrt (+ (* cx cx) (* cy cy))))) + (map-pixels frame + (lambda (x y c) + (let* (;; Scanline darkening + (scanline-factor (if (= 0 (mod y line_spacing)) + (- 1 line_opacity) + 1)) + ;; Vignette + (dx (- x cx)) + (dy (- y cy)) + (dist (sqrt (+ (* dx dx) (* dy dy)))) + (vignette-factor (- 1 (* (/ dist max-dist) vignette_amount))) + ;; Combined + (factor (* scanline-factor vignette-factor))) + (rgb (* (red c) factor) + (* (green c) factor) + (* (blue c) factor))))))) diff --git a/sexp_effects/effects/datamosh.sexp b/sexp_effects/effects/datamosh.sexp new file mode 100644 index 0000000..60cec66 --- /dev/null +++ b/sexp_effects/effects/datamosh.sexp @@ -0,0 +1,14 @@ +;; Datamosh effect - glitch block corruption + +(define-effect datamosh + :params ( + (block_size :type int :default 32 :range [8 128]) + (corruption :type float :default 0.3 :range [0 1]) + (max_offset :type int :default 50 :range [0 200]) + (color_corrupt :type bool :default true) + ) + ;; Get previous frame from state, or use current frame if none + (let ((prev (state-get "prev_frame" frame))) + (begin + (state-set "prev_frame" (copy frame)) + 
(datamosh frame prev block_size corruption max_offset color_corrupt)))) diff --git a/sexp_effects/effects/echo.sexp b/sexp_effects/effects/echo.sexp new file mode 100644 index 0000000..2aa2287 --- /dev/null +++ b/sexp_effects/effects/echo.sexp @@ -0,0 +1,19 @@ +;; Echo effect - motion trails using frame buffer +(require-primitives "blending") + +(define-effect echo + :params ( + (num_echoes :type int :default 4 :range [1 20]) + (decay :type float :default 0.5 :range [0 1]) + ) + (let* ((buffer (state-get 'buffer (list))) + (new-buffer (take (cons frame buffer) (+ num_echoes 1)))) + (begin + (state-set 'buffer new-buffer) + ;; Blend frames with decay + (if (< (length new-buffer) 2) + frame + (let ((result (copy frame))) + ;; Simple blend of first two frames for now + ;; Full version would fold over all frames + (blending:blend-images frame (nth new-buffer 1) (* decay 0.5))))))) diff --git a/sexp_effects/effects/edge_detect.sexp b/sexp_effects/effects/edge_detect.sexp new file mode 100644 index 0000000..170befb --- /dev/null +++ b/sexp_effects/effects/edge_detect.sexp @@ -0,0 +1,9 @@ +;; Edge detection effect - highlights edges +(require-primitives "image") + +(define-effect edge_detect + :params ( + (low :type int :default 50 :range [10 100]) + (high :type int :default 150 :range [50 300]) + ) + (image:edge-detect frame low high)) diff --git a/sexp_effects/effects/emboss.sexp b/sexp_effects/effects/emboss.sexp new file mode 100644 index 0000000..1eac3ce --- /dev/null +++ b/sexp_effects/effects/emboss.sexp @@ -0,0 +1,13 @@ +;; Emboss effect - creates raised/3D appearance +(require-primitives "blending") + +(define-effect emboss + :params ( + (strength :type int :default 1 :range [0.5 3]) + (blend :type float :default 0.3 :range [0 1]) + ) + (let* ((kernel (list (list (- strength) (- strength) 0) + (list (- strength) 1 strength) + (list 0 strength strength))) + (embossed (convolve frame kernel))) + (blending:blend-images embossed frame blend))) diff --git 
a/sexp_effects/effects/film_grain.sexp b/sexp_effects/effects/film_grain.sexp new file mode 100644 index 0000000..29bdd75 --- /dev/null +++ b/sexp_effects/effects/film_grain.sexp @@ -0,0 +1,19 @@ +;; Film Grain effect - adds film grain texture +(require-primitives "core") + +(define-effect film_grain + :params ( + (intensity :type float :default 0.2 :range [0 1]) + (colored :type bool :default false) + ) + (let ((grain-amount (* intensity 50))) + (map-pixels frame + (lambda (x y c) + (if colored + (rgb (clamp (+ (red c) (gaussian 0 grain-amount)) 0 255) + (clamp (+ (green c) (gaussian 0 grain-amount)) 0 255) + (clamp (+ (blue c) (gaussian 0 grain-amount)) 0 255)) + (let ((n (gaussian 0 grain-amount))) + (rgb (clamp (+ (red c) n) 0 255) + (clamp (+ (green c) n) 0 255) + (clamp (+ (blue c) n) 0 255)))))))) diff --git a/sexp_effects/effects/fisheye.sexp b/sexp_effects/effects/fisheye.sexp new file mode 100644 index 0000000..37750a7 --- /dev/null +++ b/sexp_effects/effects/fisheye.sexp @@ -0,0 +1,16 @@ +;; Fisheye effect - barrel/pincushion lens distortion +(require-primitives "geometry" "image") + +(define-effect fisheye + :params ( + (strength :type float :default 0.3 :range [-1 1]) + (center_x :type float :default 0.5 :range [0 1]) + (center_y :type float :default 0.5 :range [0 1]) + (zoom_correct :type bool :default true) + ) + (let* ((w (image:width frame)) + (h (image:height frame)) + (cx (* w center_x)) + (cy (* h center_y)) + (coords (geometry:fisheye-coords w h strength cx cy zoom_correct))) + (geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords)))) diff --git a/sexp_effects/effects/flip.sexp b/sexp_effects/effects/flip.sexp new file mode 100644 index 0000000..977e1e1 --- /dev/null +++ b/sexp_effects/effects/flip.sexp @@ -0,0 +1,16 @@ +;; Flip effect - flips image horizontally or vertically +(require-primitives "geometry") + +(define-effect flip + :params ( + (horizontal :type bool :default true) + (vertical :type bool :default false) + ) 
+ (let ((result frame)) + (if horizontal + (set! result (geometry:flip-img result "horizontal")) + nil) + (if vertical + (set! result (geometry:flip-img result "vertical")) + nil) + result)) diff --git a/sexp_effects/effects/grayscale.sexp b/sexp_effects/effects/grayscale.sexp new file mode 100644 index 0000000..848f8a7 --- /dev/null +++ b/sexp_effects/effects/grayscale.sexp @@ -0,0 +1,7 @@ +;; Grayscale effect - converts to grayscale +;; Uses vectorized mix-gray primitive for fast processing +(require-primitives "image") + +(define-effect grayscale + :params () + (image:grayscale frame)) diff --git a/sexp_effects/effects/hue_shift.sexp b/sexp_effects/effects/hue_shift.sexp new file mode 100644 index 0000000..ab61bd6 --- /dev/null +++ b/sexp_effects/effects/hue_shift.sexp @@ -0,0 +1,12 @@ +;; Hue shift effect - rotates hue values +;; Uses vectorized shift-hsv primitive for fast processing + +(require-primitives "color_ops") + +(define-effect hue_shift + :params ( + (degrees :type int :default 0 :range [0 360]) + (speed :type int :default 0 :desc "rotation per second") + ) + (let ((shift (+ degrees (* speed t)))) + (color_ops:shift-hsv frame shift 1 1))) diff --git a/sexp_effects/effects/invert.sexp b/sexp_effects/effects/invert.sexp new file mode 100644 index 0000000..34936da --- /dev/null +++ b/sexp_effects/effects/invert.sexp @@ -0,0 +1,9 @@ +;; Invert effect - inverts all colors +;; Uses vectorized invert-img primitive for fast processing +;; amount param: 0 = no invert, 1 = full invert (threshold at 0.5) + +(require-primitives "color_ops") + +(define-effect invert + :params ((amount :type float :default 1 :range [0 1])) + (if (> amount 0.5) (color_ops:invert-img frame) frame)) diff --git a/sexp_effects/effects/kaleidoscope.sexp b/sexp_effects/effects/kaleidoscope.sexp new file mode 100644 index 0000000..9487ae2 --- /dev/null +++ b/sexp_effects/effects/kaleidoscope.sexp @@ -0,0 +1,20 @@ +;; Kaleidoscope effect - mandala-like symmetry patterns 
+(require-primitives "geometry" "image") + +(define-effect kaleidoscope + :params ( + (segments :type int :default 6 :range [3 16]) + (rotation :type int :default 0 :range [0 360]) + (rotation_speed :type int :default 0 :range [-180 180]) + (center_x :type float :default 0.5 :range [0 1]) + (center_y :type float :default 0.5 :range [0 1]) + (zoom :type int :default 1 :range [0.5 3]) + ) + (let* ((w (image:width frame)) + (h (image:height frame)) + (cx (* w center_x)) + (cy (* h center_y)) + ;; Total rotation including time-based animation + (total_rot (+ rotation (* rotation_speed (or _time 0)))) + (coords (geometry:kaleidoscope-coords w h segments total_rot cx cy zoom))) + (geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords)))) diff --git a/sexp_effects/effects/layer.sexp b/sexp_effects/effects/layer.sexp new file mode 100644 index 0000000..e57d627 --- /dev/null +++ b/sexp_effects/effects/layer.sexp @@ -0,0 +1,36 @@ +;; Layer effect - composite overlay over background at position +;; Streaming-compatible: frame is background, overlay is foreground +;; Usage: (layer background overlay :x 10 :y 20 :opacity 0.8) +;; +;; Params: +;; overlay - frame to composite on top +;; x, y - position to place overlay +;; opacity - blend amount (0-1) +;; mode - blend mode (alpha, multiply, screen, etc.) 
+ +(require-primitives "image" "blending" "core") + +(define-effect layer + :params ( + (overlay :type frame :default nil) + (x :type int :default 0) + (y :type int :default 0) + (opacity :type float :default 1.0) + (mode :type string :default "alpha") + ) + (if (core:is-nil overlay) + frame + (let [bg (copy frame) + fg overlay + fg-w (image:width fg) + fg-h (image:height fg)] + (if (= opacity 1.0) + ;; Simple paste + (paste bg fg x y) + ;; Blend with opacity + (let [blended (if (= mode "alpha") + (blending:blend-images (image:crop bg x y fg-w fg-h) fg opacity) + (blending:blend-images (image:crop bg x y fg-w fg-h) + (blending:blend-mode (image:crop bg x y fg-w fg-h) fg mode) + opacity))] + (paste bg blended x y)))))) diff --git a/sexp_effects/effects/mirror.sexp b/sexp_effects/effects/mirror.sexp new file mode 100644 index 0000000..a450cb6 --- /dev/null +++ b/sexp_effects/effects/mirror.sexp @@ -0,0 +1,33 @@ +;; Mirror effect - mirrors half of image +(require-primitives "geometry" "image") + +(define-effect mirror + :params ( + (mode :type string :default "left_right") + ) + (let* ((w (image:width frame)) + (h (image:height frame)) + (hw (floor (/ w 2))) + (hh (floor (/ h 2)))) + (cond + ((= mode "left_right") + (let ((left (image:crop frame 0 0 hw h)) + (result (copy frame))) + (paste result (geometry:flip-img left "horizontal") hw 0))) + + ((= mode "right_left") + (let ((right (image:crop frame hw 0 hw h)) + (result (copy frame))) + (paste result (geometry:flip-img right "horizontal") 0 0))) + + ((= mode "top_bottom") + (let ((top (image:crop frame 0 0 w hh)) + (result (copy frame))) + (paste result (geometry:flip-img top "vertical") 0 hh))) + + ((= mode "bottom_top") + (let ((bottom (image:crop frame 0 hh w hh)) + (result (copy frame))) + (paste result (geometry:flip-img bottom "vertical") 0 0))) + + (else frame)))) diff --git a/sexp_effects/effects/neon_glow.sexp b/sexp_effects/effects/neon_glow.sexp new file mode 100644 index 0000000..39245ab --- /dev/null 
+++ b/sexp_effects/effects/neon_glow.sexp @@ -0,0 +1,23 @@ +;; Neon Glow effect - glowing edge effect +(require-primitives "image" "blending") + +(define-effect neon_glow + :params ( + (edge_low :type int :default 50 :range [10 200]) + (edge_high :type int :default 150 :range [50 300]) + (glow_radius :type int :default 15 :range [1 50]) + (glow_intensity :type int :default 2 :range [0.5 5]) + (background :type float :default 0.3 :range [0 1]) + ) + (let* ((edge-img (image:edge-detect frame edge_low edge_high)) + (glow (image:blur edge-img glow_radius)) + ;; Intensify the glow + (bright-glow (map-pixels glow + (lambda (x y c) + (rgb (clamp (* (red c) glow_intensity) 0 255) + (clamp (* (green c) glow_intensity) 0 255) + (clamp (* (blue c) glow_intensity) 0 255)))))) + (blending:blend-mode (blending:blend-images frame (make-image (image:width frame) (image:height frame) (list 0 0 0)) + (- 1 background)) + bright-glow + "screen"))) diff --git a/sexp_effects/effects/noise.sexp b/sexp_effects/effects/noise.sexp new file mode 100644 index 0000000..4da8298 --- /dev/null +++ b/sexp_effects/effects/noise.sexp @@ -0,0 +1,8 @@ +;; Noise effect - adds random noise +;; Uses vectorized add-noise primitive for fast processing + +(define-effect noise + :params ( + (amount :type int :default 20 :range [0 100]) + ) + (add-noise frame amount)) diff --git a/sexp_effects/effects/outline.sexp b/sexp_effects/effects/outline.sexp new file mode 100644 index 0000000..276f891 --- /dev/null +++ b/sexp_effects/effects/outline.sexp @@ -0,0 +1,24 @@ +;; Outline effect - shows only edges +(require-primitives "image") + +(define-effect outline + :params ( + (thickness :type int :default 2 :range [1 10]) + (threshold :type int :default 100 :range [20 300]) + (color :type list :default (list 0 0 0) + ) + (fill_mode "original")) + (let* ((edge-img (image:edge-detect frame (/ threshold 2) threshold)) + (dilated (if (> thickness 1) + (dilate edge-img thickness) + edge-img)) + (base (cond + ((= fill_mode 
"original") (copy frame)) + ((= fill_mode "white") (make-image (image:width frame) (image:height frame) (list 255 255 255))) + (else (make-image (image:width frame) (image:height frame) (list 0 0 0)))))) + (map-pixels base + (lambda (x y c) + (let ((edge-val (luminance (pixel dilated x y)))) + (if (> edge-val 128) + color + c)))))) diff --git a/sexp_effects/effects/pixelate.sexp b/sexp_effects/effects/pixelate.sexp new file mode 100644 index 0000000..3d28ce1 --- /dev/null +++ b/sexp_effects/effects/pixelate.sexp @@ -0,0 +1,13 @@ +;; Pixelate effect - creates blocky pixels +(require-primitives "image") + +(define-effect pixelate + :params ( + (block_size :type int :default 8 :range [2 64]) + ) + (let* ((w (image:width frame)) + (h (image:height frame)) + (small-w (max 1 (floor (/ w block_size)))) + (small-h (max 1 (floor (/ h block_size)))) + (small (image:resize frame small-w small-h "area"))) + (image:resize small w h "nearest"))) diff --git a/sexp_effects/effects/pixelsort.sexp b/sexp_effects/effects/pixelsort.sexp new file mode 100644 index 0000000..155ac13 --- /dev/null +++ b/sexp_effects/effects/pixelsort.sexp @@ -0,0 +1,11 @@ +;; Pixelsort effect - glitch art pixel sorting + +(define-effect pixelsort + :params ( + (sort_by :type string :default "lightness") + (threshold_low :type int :default 50 :range [0 255]) + (threshold_high :type int :default 200 :range [0 255]) + (angle :type int :default 0 :range [0 180]) + (reverse :type bool :default false) + ) + (pixelsort frame sort_by threshold_low threshold_high angle reverse)) diff --git a/sexp_effects/effects/posterize.sexp b/sexp_effects/effects/posterize.sexp new file mode 100644 index 0000000..7052ed3 --- /dev/null +++ b/sexp_effects/effects/posterize.sexp @@ -0,0 +1,8 @@ +;; Posterize effect - reduces color levels +(require-primitives "color_ops") + +(define-effect posterize + :params ( + (levels :type int :default 8 :range [2 32]) + ) + (color_ops:posterize frame levels)) diff --git 
a/sexp_effects/effects/resize-frame.sexp b/sexp_effects/effects/resize-frame.sexp new file mode 100644 index 0000000..a1cce27 --- /dev/null +++ b/sexp_effects/effects/resize-frame.sexp @@ -0,0 +1,11 @@ +;; Resize effect - replaces RESIZE node +;; Note: uses target-w/target-h to avoid conflict with width/height primitives +(require-primitives "image") + +(define-effect resize-frame + :params ( + (target-w :type int :default 640 :desc "Target width in pixels") + (target-h :type int :default 480 :desc "Target height in pixels") + (mode :type string :default "linear" :choices [linear nearest area] :desc "Interpolation mode") + ) + (image:resize frame target-w target-h mode)) diff --git a/sexp_effects/effects/rgb_split.sexp b/sexp_effects/effects/rgb_split.sexp new file mode 100644 index 0000000..4582701 --- /dev/null +++ b/sexp_effects/effects/rgb_split.sexp @@ -0,0 +1,13 @@ +;; RGB Split effect - chromatic aberration + +(define-effect rgb_split + :params ( + (offset_x :type int :default 10 :range [-50 50]) + (offset_y :type int :default 0 :range [-50 50]) + ) + (let* ((r (channel frame 0)) + (g (channel frame 1)) + (b (channel frame 2)) + (r-shifted (translate (merge-channels r r r) offset_x offset_y)) + (b-shifted (translate (merge-channels b b b) (- offset_x) (- offset_y)))) + (merge-channels (channel r-shifted 0) g (channel b-shifted 0)))) diff --git a/sexp_effects/effects/ripple.sexp b/sexp_effects/effects/ripple.sexp new file mode 100644 index 0000000..0bb7a8d --- /dev/null +++ b/sexp_effects/effects/ripple.sexp @@ -0,0 +1,19 @@ +;; Ripple effect - radial wave distortion from center +(require-primitives "geometry" "image" "math") + +(define-effect ripple + :params ( + (frequency :type int :default 5 :range [1 20]) + (amplitude :type int :default 10 :range [0 50]) + (center_x :type float :default 0.5 :range [0 1]) + (center_y :type float :default 0.5 :range [0 1]) + (decay :type int :default 1 :range [0 5]) + (speed :type int :default 1 :range [0 10]) + ) + (let* 
((w (image:width frame)) + (h (image:height frame)) + (cx (* w center_x)) + (cy (* h center_y)) + (phase (* (or t 0) speed 2 pi)) + (coords (geometry:ripple-displace w h frequency amplitude cx cy decay phase))) + (geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords)))) diff --git a/sexp_effects/effects/rotate.sexp b/sexp_effects/effects/rotate.sexp new file mode 100644 index 0000000..d06c2f7 --- /dev/null +++ b/sexp_effects/effects/rotate.sexp @@ -0,0 +1,11 @@ +;; Rotate effect - rotates image + +(require-primitives "geometry") + +(define-effect rotate + :params ( + (angle :type int :default 0 :range [-360 360]) + (speed :type int :default 0 :desc "rotation per second") + ) + (let ((total-angle (+ angle (* speed t)))) + (geometry:rotate-img frame total-angle))) diff --git a/sexp_effects/effects/saturation.sexp b/sexp_effects/effects/saturation.sexp new file mode 100644 index 0000000..9852dc7 --- /dev/null +++ b/sexp_effects/effects/saturation.sexp @@ -0,0 +1,9 @@ +;; Saturation effect - adjusts color saturation +;; Uses vectorized shift-hsv primitive for fast processing +(require-primitives "color_ops") + +(define-effect saturation + :params ( + (amount :type int :default 1 :range [0 3]) + ) + (color_ops:adjust-saturation frame amount)) diff --git a/sexp_effects/effects/scanlines.sexp b/sexp_effects/effects/scanlines.sexp new file mode 100644 index 0000000..ddfcf44 --- /dev/null +++ b/sexp_effects/effects/scanlines.sexp @@ -0,0 +1,15 @@ +;; Scanlines effect - VHS-style horizontal line shifting +(require-primitives "core") + +(define-effect scanlines + :params ( + (amplitude :type int :default 10 :range [0 100]) + (frequency :type int :default 10 :range [1 100]) + (randomness :type float :default 0.5 :range [0 1]) + ) + (map-rows frame + (lambda (y row) + (let* ((sine-shift (* amplitude (sin (/ (* y 6.28) (max 1 frequency))))) + (rand-shift (core:rand-range (- amplitude) amplitude)) + (shift (floor (lerp sine-shift rand-shift randomness)))) + 
(roll row shift 0)))))

;; =================================================================
;; sexp_effects/effects/sepia.sexp
;; Sepia effect - applies sepia tone
;; Classic warm vintage look
;; =================================================================
(require-primitives "color_ops")

(define-effect sepia
  :params ()
  (color_ops:sepia frame))

;; =================================================================
;; sexp_effects/effects/sharpen.sexp
;; Sharpen effect - sharpens edges
;; =================================================================
(require-primitives "image")

(define-effect sharpen
  :params (
    (amount :type int :default 1 :range [0 5])
  )
  (image:sharpen frame amount))

;; =================================================================
;; sexp_effects/effects/strobe.sexp
;; Strobe effect - holds frames for choppy look
;; =================================================================
(require-primitives "core")

(define-effect strobe
  :params (
    (frame_rate :type int :default 12 :range [1 60])
  )
  ;; Hold a copy of the current frame until its hold window
  ;; (1/frame_rate seconds) expires, then capture a fresh one.
  (let* ((held (state-get 'held nil))
         (held-until (state-get 'held-until 0))
         (frame-duration (/ 1 frame_rate)))
    (if (or (core:is-nil held) (>= t held-until))
        (begin
          (state-set 'held (copy frame))
          (state-set 'held-until (+ t frame-duration))
          frame)
        held)))

;; =================================================================
;; sexp_effects/effects/swirl.sexp
;; Swirl effect - spiral vortex distortion
;; =================================================================
(require-primitives "geometry" "image")

(define-effect swirl
  :params (
    (strength :type int :default 1 :range [-10 10])
    (radius :type float :default 0.5 :range [0.1 2])
    (center_x :type float :default 0.5 :range [0 1])
    (center_y :type float :default 0.5 :range [0 1])
    (falloff :type string :default "quadratic")
  )
  (let* ((w (image:width frame))
         (h (image:height frame))
         (cx (* w center_x))
         (cy (* h center_y))
         (coords (geometry:swirl-coords w h strength radius cx cy falloff)))
    (geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords))))

;; =================================================================
;; sexp_effects/effects/threshold.sexp
;; Threshold effect - converts to black and white
;; =================================================================
(require-primitives "color_ops")

(define-effect threshold
  :params (
    (level :type int :default 128 :range [0 255])
    (invert :type bool :default false)
  )
  (color_ops:threshold frame level invert))

;; =================================================================
;; sexp_effects/effects/tile_grid.sexp
;; Tile Grid effect - tiles image in grid
;; =================================================================
(require-primitives "geometry" "image")

(define-effect tile_grid
  :params (
    (rows :type int :default 2 :range [1 10])
    (cols :type int :default 2 :range [1 10])
    (gap :type int :default 0 :range [0 50])
  )
  (let* ((w (image:width frame))
         (h (image:height frame))
         (tile-w (floor (/ (- w (* gap (- cols 1))) cols)))
         (tile-h (floor (/ (- h (* gap (- rows 1))) rows)))
         (tile (image:resize frame tile-w tile-h "area"))
         (result (make-image w h (list 0 0 0))))
    (begin
      ;; Manually place tiles using nested iteration
      ;; This is a simplified version - full version would loop
      (paste result tile 0 0)
      (if (> cols 1)
          (paste result tile (+ tile-w gap) 0)
          nil)
      (if (> rows 1)
          (paste result tile 0 (+ tile-h gap))
          nil)
      (if (and (> cols 1) (> rows 1))
          (paste result tile (+ tile-w gap) (+ tile-h gap))
          nil)
      result)))

;; =================================================================
;; sexp_effects/effects/trails.sexp
;; Trails effect - persistent motion trails
;; =================================================================
(require-primitives "image" "blending" "core")

(define-effect trails
  :params (
    (persistence :type float :default 0.8 :range [0 0.99])
  )
  (let* ((buffer (state-get 'buffer nil))
         (current frame))
    ;; FIX: use core:is-nil rather than (= buffer nil) — comparing an
    ;; image array to nil with `=` is unreliable; this matches the
    ;; idiom used by the strobe effect.
    (if (core:is-nil buffer)
        (begin
          (state-set 'buffer (copy frame))
          frame)
        (let* ((faded (blending:blend-images buffer
                        (make-image (image:width frame) (image:height frame) (list 0 0 0))
                        (- 1 persistence)))
               (result (blending:blend-mode faded current "lighten")))
          (begin
            (state-set 'buffer result)
            result)))))

;; =================================================================
;; sexp_effects/effects/vignette.sexp
;; Vignette effect - darkens corners
;; =================================================================
(require-primitives "image")

(define-effect vignette
  :params (
    (strength :type float :default 0.5 :range [0 1])
    ;; FIX: radius was declared :type int with a float range [0.5 2];
    ;; it is a fractional multiplier, so it must be a float.
    (radius :type float :default 1.0 :range [0.5 2])
  )
  (let* ((w (image:width frame))
         (h (image:height frame))
         (cx (/ w 2))
         (cy (/ h 2))
         (max-dist (* (sqrt (+ (* cx cx) (* cy cy))) radius)))
    (map-pixels frame
      (lambda (x y c)
        (let* ((dx (- x cx))
               (dy (- y cy))
               (dist (sqrt (+ (* dx dx) (* dy dy))))
               (factor (- 1 (* (/ dist max-dist) strength)))
               (factor (clamp factor 0 1)))
          (rgb (* (red c) factor)
               (* (green c) factor)
               (* (blue c) factor)))))))

;; =================================================================
;; sexp_effects/effects/wave.sexp
;; Wave effect - sine wave displacement distortion
;; =================================================================
(require-primitives "geometry" "image")

(define-effect wave
  :params (
    (amplitude :type int :default 10 :range [0 100])
    (wavelength :type int :default 50 :range [10 500])
    (speed :type int :default 1 :range [0 10])
    (direction :type string :default "horizontal")
  )
  (let* ((w (image:width frame))
         (h (image:height frame))
         ;; Use _time for animation phase
         (phase (* (or _time 0) speed 2 pi))
         ;; Calculate frequency: waves per dimension
         (freq (/ (if (= direction "vertical") w h) wavelength))
         (axis (cond
                 ((= direction "horizontal") "x")
                 ((= direction "vertical") "y")
                 (else "both")))
         (coords (geometry:wave-coords w h axis freq amplitude phase)))
    (geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords))))

;; =================================================================
;; sexp_effects/effects/zoom.sexp
;; Zoom effect - zooms in/out from center
;; =================================================================
(require-primitives "geometry")

(define-effect zoom
  :params (
    ;; FIX: amount was declared :type int with a float range [0.1 5];
    ;; fractional zoom factors (e.g. 0.5) must be representable.
    (amount :type float :default 1.0 :range [0.1 5])
  )
  (geometry:scale-img frame amount amount))
+""" + +import numpy as np +from typing import Any, Dict, List, Optional, Callable +from pathlib import Path + +from .parser import Symbol, Keyword, parse, parse_file +from .primitives import PRIMITIVES, reset_rng + + +def _is_symbol(x) -> bool: + """Check if x is a Symbol (duck typing to support multiple Symbol classes).""" + return hasattr(x, 'name') and type(x).__name__ == 'Symbol' + + +def _is_keyword(x) -> bool: + """Check if x is a Keyword (duck typing to support multiple Keyword classes).""" + return hasattr(x, 'name') and type(x).__name__ == 'Keyword' + + +def _symbol_name(x) -> str: + """Get the name from a Symbol.""" + return x.name if hasattr(x, 'name') else str(x) + + +class Environment: + """Lexical environment for variable bindings.""" + + def __init__(self, parent: 'Environment' = None): + self.bindings: Dict[str, Any] = {} + self.parent = parent + + def get(self, name: str) -> Any: + if name in self.bindings: + return self.bindings[name] + if self.parent: + return self.parent.get(name) + raise NameError(f"Undefined variable: {name}") + + def set(self, name: str, value: Any): + self.bindings[name] = value + + def has(self, name: str) -> bool: + if name in self.bindings: + return True + if self.parent: + return self.parent.has(name) + return False + + +class Lambda: + """A user-defined function (lambda).""" + + def __init__(self, params: List[str], body: Any, env: Environment): + self.params = params + self.body = body + self.env = env # Closure environment + + def __repr__(self): + return f"" + + +class EffectDefinition: + """A parsed effect definition.""" + + def __init__(self, name: str, params: Dict[str, Any], body: Any): + self.name = name + self.params = params # {name: (type, default)} + self.body = body + + def __repr__(self): + return f"" + + +class Interpreter: + """ + S-Expression interpreter for effects. + + Provides a safe execution environment where only + whitelisted primitives can be called. 
+ + Args: + minimal_primitives: If True, only load core primitives (arithmetic, comparison, + basic data access). Additional primitives must be loaded with + (require-primitives) or (with-primitives). + If False (default), load all legacy primitives for backward compatibility. + """ + + def __init__(self, minimal_primitives: bool = False): + # Base environment with primitives + self.global_env = Environment() + self.minimal_primitives = minimal_primitives + + if minimal_primitives: + # Load only core primitives + from .primitive_libs.core import PRIMITIVES as CORE_PRIMITIVES + for name, fn in CORE_PRIMITIVES.items(): + self.global_env.set(name, fn) + else: + # Load all legacy primitives for backward compatibility + for name, fn in PRIMITIVES.items(): + self.global_env.set(name, fn) + + # Special values + self.global_env.set('true', True) + self.global_env.set('false', False) + self.global_env.set('nil', None) + + # Loaded effect definitions + self.effects: Dict[str, EffectDefinition] = {} + + def eval(self, expr: Any, env: Environment = None) -> Any: + """Evaluate an S-expression.""" + if env is None: + env = self.global_env + + # Atoms + if isinstance(expr, (int, float, str, bool)): + return expr + + if expr is None: + return None + + # Handle Symbol (duck typing to support both sexp_effects.parser.Symbol and artdag.sexp.parser.Symbol) + if _is_symbol(expr): + return env.get(expr.name) + + # Handle Keyword (duck typing) + if _is_keyword(expr): + return expr # Keywords evaluate to themselves + + if isinstance(expr, np.ndarray): + return expr # Images pass through + + # Lists (function calls / special forms) + if isinstance(expr, list): + if not expr: + return [] + + head = expr[0] + + # Special forms + if _is_symbol(head): + form = head.name + + # Quote + if form == 'quote': + return expr[1] + + # Define + if form == 'define': + name = expr[1] + if _is_symbol(name): + value = self.eval(expr[2], env) + self.global_env.set(name.name, value) + return value + else: + 
raise SyntaxError(f"define requires symbol, got {name}") + + # Define-effect + if form == 'define-effect': + return self._define_effect(expr, env) + + # Lambda + if form == 'lambda' or form == 'λ': + params = [p.name if _is_symbol(p) else p for p in expr[1]] + body = expr[2] + return Lambda(params, body, env) + + # Let + if form == 'let': + return self._eval_let(expr, env) + + # Let* + if form == 'let*': + return self._eval_let_star(expr, env) + + # If + if form == 'if': + cond = self.eval(expr[1], env) + if cond: + return self.eval(expr[2], env) + elif len(expr) > 3: + return self.eval(expr[3], env) + return None + + # Cond + if form == 'cond': + return self._eval_cond(expr, env) + + # And + if form == 'and': + result = True + for e in expr[1:]: + result = self.eval(e, env) + if not result: + return False + return result + + # Or + if form == 'or': + for e in expr[1:]: + result = self.eval(e, env) + if result: + return result + return False + + # Not + if form == 'not': + return not self.eval(expr[1], env) + + # Begin (sequence) + if form == 'begin': + result = None + for e in expr[1:]: + result = self.eval(e, env) + return result + + # Thread-first macro: (-> x (f a) (g b)) => (g (f x a) b) + if form == '->': + result = self.eval(expr[1], env) + for form_expr in expr[2:]: + if isinstance(form_expr, list): + # Insert result as first arg: (f a b) => (f result a b) + result = self.eval([form_expr[0], result] + form_expr[1:], env) + else: + # Just a symbol: f => (f result) + result = self.eval([form_expr, result], env) + return result + + # Set! 
(mutation) + if form == 'set!': + name = expr[1].name if _is_symbol(expr[1]) else expr[1] + value = self.eval(expr[2], env) + # Find and update in appropriate scope + scope = env + while scope: + if name in scope.bindings: + scope.bindings[name] = value + return value + scope = scope.parent + raise NameError(f"Cannot set undefined variable: {name}") + + # State-get / state-set (for effect state) + if form == 'state-get': + state = env.get('__state__') + key = self.eval(expr[1], env) + if _is_symbol(key): + key = key.name + default = self.eval(expr[2], env) if len(expr) > 2 else None + return state.get(key, default) + + if form == 'state-set': + state = env.get('__state__') + key = self.eval(expr[1], env) + if _is_symbol(key): + key = key.name + value = self.eval(expr[2], env) + state[key] = value + return value + + # ascii-fx-zone special form - delays evaluation of expression parameters + if form == 'ascii-fx-zone': + return self._eval_ascii_fx_zone(expr, env) + + # with-primitives - load primitive library and scope to body + if form == 'with-primitives': + return self._eval_with_primitives(expr, env) + + # require-primitives - load primitive library into current scope + if form == 'require-primitives': + return self._eval_require_primitives(expr, env) + + # Function call + fn = self.eval(head, env) + args = [self.eval(arg, env) for arg in expr[1:]] + + # Handle keyword arguments + pos_args = [] + kw_args = {} + i = 0 + while i < len(args): + if _is_keyword(args[i]): + kw_args[args[i].name] = args[i + 1] if i + 1 < len(args) else None + i += 2 + else: + pos_args.append(args[i]) + i += 1 + + return self._apply(fn, pos_args, kw_args, env) + + raise TypeError(f"Cannot evaluate: {expr}") + + def _wrap_lambda(self, lam: 'Lambda') -> Callable: + """Wrap a Lambda in a Python callable for use by primitives.""" + def wrapper(*args): + new_env = Environment(lam.env) + for i, param in enumerate(lam.params): + if i < len(args): + new_env.set(param, args[i]) + else: + 
new_env.set(param, None) + return self.eval(lam.body, new_env) + return wrapper + + def _apply(self, fn: Any, args: List[Any], kwargs: Dict[str, Any], env: Environment) -> Any: + """Apply a function to arguments.""" + if isinstance(fn, Lambda): + # User-defined function + new_env = Environment(fn.env) + for i, param in enumerate(fn.params): + if i < len(args): + new_env.set(param, args[i]) + else: + new_env.set(param, None) + return self.eval(fn.body, new_env) + + elif callable(fn): + # Wrap any Lambda arguments so primitives can call them + wrapped_args = [] + for arg in args: + if isinstance(arg, Lambda): + wrapped_args.append(self._wrap_lambda(arg)) + else: + wrapped_args.append(arg) + + # Inject _interp and _env for primitives that need them + import inspect + try: + sig = inspect.signature(fn) + params = sig.parameters + if '_interp' in params and '_interp' not in kwargs: + kwargs['_interp'] = self + if '_env' in params and '_env' not in kwargs: + kwargs['_env'] = env + except (ValueError, TypeError): + # Some built-in functions don't have inspectable signatures + pass + + # Primitive function + if kwargs: + return fn(*wrapped_args, **kwargs) + return fn(*wrapped_args) + + else: + raise TypeError(f"Cannot call: {fn}") + + def _parse_bindings(self, bindings: list) -> list: + """Parse bindings in either Scheme or Clojure style. 
+ + Scheme: ((x 1) (y 2)) -> [(x, 1), (y, 2)] + Clojure: [x 1 y 2] -> [(x, 1), (y, 2)] + """ + if not bindings: + return [] + + # Check if Clojure style (flat list with symbols and values alternating) + if _is_symbol(bindings[0]): + # Clojure style: [x 1 y 2] + pairs = [] + i = 0 + while i < len(bindings) - 1: + name = bindings[i].name if _is_symbol(bindings[i]) else bindings[i] + value = bindings[i + 1] + pairs.append((name, value)) + i += 2 + return pairs + else: + # Scheme style: ((x 1) (y 2)) + pairs = [] + for binding in bindings: + name = binding[0].name if _is_symbol(binding[0]) else binding[0] + value = binding[1] + pairs.append((name, value)) + return pairs + + def _eval_let(self, expr: Any, env: Environment) -> Any: + """Evaluate let expression: (let ((x 1) (y 2)) body) or (let [x 1 y 2] body) + + Note: Uses sequential binding (like Clojure let / Scheme let*) so each + binding can reference previous bindings. + """ + bindings = expr[1] + body = expr[2] + + new_env = Environment(env) + for name, value_expr in self._parse_bindings(bindings): + value = self.eval(value_expr, new_env) # Sequential: can see previous bindings + new_env.set(name, value) + + return self.eval(body, new_env) + + def _eval_let_star(self, expr: Any, env: Environment) -> Any: + """Evaluate let* expression: sequential bindings.""" + bindings = expr[1] + body = expr[2] + + new_env = Environment(env) + for name, value_expr in self._parse_bindings(bindings): + value = self.eval(value_expr, new_env) # Evaluate in current env + new_env.set(name, value) + + return self.eval(body, new_env) + + def _eval_cond(self, expr: Any, env: Environment) -> Any: + """Evaluate cond expression.""" + for clause in expr[1:]: + test = clause[0] + if _is_symbol(test) and test.name == 'else': + return self.eval(clause[1], env) + if self.eval(test, env): + return self.eval(clause[1], env) + return None + + def _eval_with_primitives(self, expr: Any, env: Environment) -> Any: + """ + Evaluate with-primitives: 
scoped primitive library loading. + + Syntax: + (with-primitives "math" + (sin (* x pi))) + + (with-primitives "math" :path "custom/math.py" + body) + + The primitives from the library are only available within the body. + """ + # Parse library name and optional path + lib_name = expr[1] + if _is_symbol(lib_name): + lib_name = lib_name.name + + path = None + body_start = 2 + + # Check for :path keyword + if len(expr) > 2 and _is_keyword(expr[2]) and expr[2].name == 'path': + path = expr[3] + body_start = 4 + + # Load the primitive library + primitives = self.load_primitive_library(lib_name, path) + + # Create new environment with primitives + new_env = Environment(env) + for name, fn in primitives.items(): + new_env.set(name, fn) + + # Evaluate body in new environment + result = None + for e in expr[body_start:]: + result = self.eval(e, new_env) + return result + + def _eval_require_primitives(self, expr: Any, env: Environment) -> Any: + """ + Evaluate require-primitives: load primitives into current scope. + + Syntax: + (require-primitives "math" "color" "filters") + + Unlike with-primitives, this loads into the current environment + (typically used at top-level to set up an effect's dependencies). + """ + for lib_expr in expr[1:]: + if _is_symbol(lib_expr): + lib_name = lib_expr.name + else: + lib_name = lib_expr + + primitives = self.load_primitive_library(lib_name) + for name, fn in primitives.items(): + env.set(name, fn) + + return None + + def load_primitive_library(self, name: str, path: str = None) -> dict: + """ + Load a primitive library by name or path. + + Returns dict of {name: function}. + """ + from .primitive_libs import load_primitive_library + return load_primitive_library(name, path) + + def _eval_ascii_fx_zone(self, expr: Any, env: Environment) -> Any: + """ + Evaluate ascii-fx-zone special form. 
+ + Syntax: + (ascii-fx-zone frame + :cols 80 + :alphabet "standard" + :color_mode "color" + :background "black" + :contrast 1.5 + :char_hue ;; NOT evaluated - passed to primitive + :char_saturation + :char_brightness + :char_scale + :char_rotation + :char_jitter ) + + The expression parameters (:char_hue, etc.) are NOT pre-evaluated. + They are passed as raw S-expressions to the primitive which + evaluates them per-zone with zone context variables injected. + + Requires: (require-primitives "ascii") + """ + # Look up ascii-fx-zone primitive from environment + # It must be loaded via (require-primitives "ascii") + try: + prim_ascii_fx_zone = env.get('ascii-fx-zone') + except NameError: + raise NameError( + "ascii-fx-zone primitive not found. " + "Add (require-primitives \"ascii\") to your effect file." + ) + + # Expression parameter names that should NOT be evaluated + expr_params = {'char_hue', 'char_saturation', 'char_brightness', + 'char_scale', 'char_rotation', 'char_jitter', 'cell_effect'} + + # Parse arguments + frame = self.eval(expr[1], env) # First arg is always the frame + + # Defaults + cols = 80 + char_size = None # If set, overrides cols + alphabet = "standard" + color_mode = "color" + background = "black" + contrast = 1.5 + char_hue = None + char_saturation = None + char_brightness = None + char_scale = None + char_rotation = None + char_jitter = None + cell_effect = None # Lambda for arbitrary per-cell effects + # Convenience params for staged recipes + energy = None + rotation_scale = 0 + # Extra params to pass to zone dict for lambdas + extra_params = {} + + # Parse keyword arguments + i = 2 + while i < len(expr): + item = expr[i] + if _is_keyword(item): + if i + 1 >= len(expr): + break + value_expr = expr[i + 1] + kw_name = item.name + + if kw_name in expr_params: + # Resolve symbol references but don't evaluate expressions + # This handles the case where effect definition passes a param like :char_hue char_hue + resolved = value_expr + if 
_is_symbol(value_expr): + try: + resolved = env.get(value_expr.name) + except NameError: + resolved = value_expr # Keep as symbol if not found + + if kw_name == 'char_hue': + char_hue = resolved + elif kw_name == 'char_saturation': + char_saturation = resolved + elif kw_name == 'char_brightness': + char_brightness = resolved + elif kw_name == 'char_scale': + char_scale = resolved + elif kw_name == 'char_rotation': + char_rotation = resolved + elif kw_name == 'char_jitter': + char_jitter = resolved + elif kw_name == 'cell_effect': + cell_effect = resolved + else: + # Evaluate normally + value = self.eval(value_expr, env) + if kw_name == 'cols': + cols = int(value) + elif kw_name == 'char_size': + # Handle nil/None values + if value is None or (_is_symbol(value) and value.name == 'nil'): + char_size = None + else: + char_size = int(value) + elif kw_name == 'alphabet': + alphabet = str(value) + elif kw_name == 'color_mode': + color_mode = str(value) + elif kw_name == 'background': + background = str(value) + elif kw_name == 'contrast': + contrast = float(value) + elif kw_name == 'energy': + if value is None or (_is_symbol(value) and value.name == 'nil'): + energy = None + else: + energy = float(value) + elif kw_name == 'rotation_scale': + rotation_scale = float(value) + else: + # Store any other params for lambdas to access + extra_params[kw_name] = value + i += 2 + else: + i += 1 + + # If energy and rotation_scale provided, build rotation expression + # rotation = energy * rotation_scale * position_factor + # position_factor: bottom-left=0, top-right=3 + # Formula: 1.5 * (zone-col-norm + (1 - zone-row-norm)) + if energy is not None and rotation_scale > 0: + # Build expression as S-expression list that will be evaluated per-zone + # (* (* energy rotation_scale) (* 1.5 (+ zone-col-norm (- 1 zone-row-norm)))) + energy_times_scale = energy * rotation_scale + # The position part uses zone variables, so we build it as an expression + char_rotation = [ + Symbol('*'), + 
energy_times_scale, + [Symbol('*'), 1.5, + [Symbol('+'), Symbol('zone-col-norm'), + [Symbol('-'), 1, Symbol('zone-row-norm')]]] + ] + + # Pull any extra params from environment that aren't standard params + # These are typically passed from recipes for use in cell_effect lambdas + standard_params = { + 'cols', 'char_size', 'alphabet', 'color_mode', 'background', 'contrast', + 'char_hue', 'char_saturation', 'char_brightness', 'char_scale', + 'char_rotation', 'char_jitter', 'cell_effect', 'energy', 'rotation_scale', + 'frame', 't', '_time', '__state__', '__interp__', 'true', 'false', 'nil' + } + # Check environment for extra bindings + current_env = env + while current_env is not None: + for k, v in current_env.bindings.items(): + if k not in standard_params and k not in extra_params and not callable(v): + # Add non-standard, non-callable bindings to extra_params + if isinstance(v, (int, float, str, bool)) or v is None: + extra_params[k] = v + current_env = current_env.parent + + # Call the primitive with interpreter and env for expression evaluation + return prim_ascii_fx_zone( + frame, + cols=cols, + char_size=char_size, + alphabet=alphabet, + color_mode=color_mode, + background=background, + contrast=contrast, + char_hue=char_hue, + char_saturation=char_saturation, + char_brightness=char_brightness, + char_scale=char_scale, + char_rotation=char_rotation, + char_jitter=char_jitter, + cell_effect=cell_effect, + energy=energy, + rotation_scale=rotation_scale, + _interp=self, + _env=env, + **extra_params + ) + + def _define_effect(self, expr: Any, env: Environment) -> EffectDefinition: + """ + Parse effect definition. + + Required syntax: + (define-effect name + :params ( + (param1 :type int :default 8 :desc "description") + ) + body) + + Effects MUST use :params syntax. Legacy ((param default) ...) is not supported. 
+ """ + name = expr[1].name if _is_symbol(expr[1]) else expr[1] + + params = {} + body = None + found_params = False + + # Parse :params and body + i = 2 + while i < len(expr): + item = expr[i] + if _is_keyword(item) and item.name == "params": + # :params syntax + if i + 1 >= len(expr): + raise SyntaxError(f"Effect '{name}': Missing params list after :params keyword") + params_list = expr[i + 1] + params = self._parse_params_block(params_list) + found_params = True + i += 2 + elif _is_keyword(item): + # Skip other keywords (like :desc) + i += 2 + elif body is None: + # First non-keyword item is the body + if isinstance(item, list) and item: + first_elem = item[0] + # Check for legacy syntax and reject it + if isinstance(first_elem, list) and len(first_elem) >= 2: + raise SyntaxError( + f"Effect '{name}': Legacy parameter syntax ((name default) ...) is not supported. " + f"Use :params block instead." + ) + body = item + i += 1 + else: + i += 1 + + if body is None: + raise SyntaxError(f"Effect '{name}': No body found") + + if not found_params: + raise SyntaxError( + f"Effect '{name}': Missing :params block. 
" + f"For effects with no parameters, use empty :params ()" + ) + + effect = EffectDefinition(name, params, body) + self.effects[name] = effect + return effect + + def _parse_params_block(self, params_list: list) -> Dict[str, Any]: + """ + Parse :params block syntax: + ( + (param_name :type int :default 8 :range [4 32] :desc "description") + ) + """ + params = {} + for param_def in params_list: + if not isinstance(param_def, list) or len(param_def) < 1: + continue + + # First element is the parameter name + first = param_def[0] + if _is_symbol(first): + param_name = first.name + elif isinstance(first, str): + param_name = first + else: + continue + + # Parse keyword arguments + default = None + i = 1 + while i < len(param_def): + item = param_def[i] + if _is_keyword(item): + if i + 1 >= len(param_def): + break + kw_value = param_def[i + 1] + + if item.name == "default": + default = kw_value + i += 2 + else: + i += 1 + + params[param_name] = default + + return params + + def load_effect(self, path: str) -> EffectDefinition: + """Load an effect definition from a .sexp file.""" + expr = parse_file(path) + + # Handle multiple top-level expressions + if isinstance(expr, list) and expr and isinstance(expr[0], list): + for e in expr: + self.eval(e) + else: + self.eval(expr) + + # Return the last defined effect + if self.effects: + return list(self.effects.values())[-1] + return None + + def load_effect_from_string(self, sexp_content: str, effect_name: str = None) -> EffectDefinition: + """Load an effect definition from an S-expression string. 
+ + Args: + sexp_content: The S-expression content as a string + effect_name: Optional name hint (used if effect doesn't define its own name) + + Returns: + The loaded EffectDefinition + """ + expr = parse(sexp_content) + + # Handle multiple top-level expressions + if isinstance(expr, list) and expr and isinstance(expr[0], list): + for e in expr: + self.eval(e) + else: + self.eval(expr) + + # Return the effect if we can find it by name + if effect_name and effect_name in self.effects: + return self.effects[effect_name] + + # Return the most recently loaded effect + if self.effects: + return list(self.effects.values())[-1] + + return None + + def run_effect(self, name: str, frame, params: Dict[str, Any], + state: Dict[str, Any]) -> tuple: + """ + Run an effect on frame(s). + + Args: + name: Effect name + frame: Input frame (H, W, 3) RGB uint8, or list of frames for multi-input + params: Effect parameters (overrides defaults) + state: Persistent state dict + + Returns: + (output_frame, new_state) + """ + if name not in self.effects: + raise ValueError(f"Unknown effect: {name}") + + effect = self.effects[name] + + # Create environment for this run + env = Environment(self.global_env) + + # Bind frame(s) - support both single frame and list of frames + if isinstance(frame, list): + # Multi-input effect + frames = frame + env.set('frame', frames[0] if frames else None) # Backwards compat + env.set('inputs', frames) + # Named frame bindings + for i, f in enumerate(frames): + env.set(f'frame-{chr(ord("a") + i)}', f) # frame-a, frame-b, etc. 
+ else: + # Single-input effect + env.set('frame', frame) + + # Bind state + if state is None: + state = {} + env.set('__state__', state) + + # Validate that all provided params are known (except internal params) + # Extra params are allowed and will be passed through to cell_effect lambdas + known_params = set(effect.params.keys()) + internal_params = {'_time', 'seed', '_binding', 'effect', 'cid', 'hash', 'effect_path'} + extra_effect_params = {} # Unknown params passed through for cell_effect lambdas + for k in params.keys(): + if k not in known_params and k not in internal_params: + # Allow unknown params - they'll be passed to cell_effect lambdas via zone dict + extra_effect_params[k] = params[k] + + # Bind parameters (defaults + overrides) + for pname, pdefault in effect.params.items(): + value = params.get(pname) + if value is None: + # Evaluate default if it's an expression (list) + if isinstance(pdefault, list): + value = self.eval(pdefault, env) + else: + value = pdefault + env.set(pname, value) + + # Bind extra params (unknown params passed through for cell_effect lambdas) + for k, v in extra_effect_params.items(): + env.set(k, v) + + # Reset RNG with seed if provided + seed = params.get('seed', 42) + reset_rng(int(seed)) + + # Bind time if provided + time_val = params.get('_time', 0) + env.set('t', time_val) + env.set('_time', time_val) + + # Evaluate body + result = self.eval(effect.body, env) + + # Ensure result is an image + if not isinstance(result, np.ndarray): + result = frame + + return result, state + + def eval_with_zone(self, expr, env: Environment, zone) -> Any: + """ + Evaluate expression with zone-* variables injected. 
+ + Args: + expr: Expression to evaluate (S-expression) + env: Parent environment with bound values + zone: ZoneContext object with cell data + + Zone variables injected: + zone-row, zone-col: Grid position (integers) + zone-row-norm, zone-col-norm: Normalized position (0-1) + zone-lum: Cell luminance (0-1) + zone-sat: Cell saturation (0-1) + zone-hue: Cell hue (0-360) + zone-r, zone-g, zone-b: RGB components (0-1) + + Returns: + Evaluated result (typically a number) + """ + # Create child environment with zone variables + zone_env = Environment(env) + zone_env.set('zone-row', zone.row) + zone_env.set('zone-col', zone.col) + zone_env.set('zone-row-norm', zone.row_norm) + zone_env.set('zone-col-norm', zone.col_norm) + zone_env.set('zone-lum', zone.luminance) + zone_env.set('zone-sat', zone.saturation) + zone_env.set('zone-hue', zone.hue) + zone_env.set('zone-r', zone.r) + zone_env.set('zone-g', zone.g) + zone_env.set('zone-b', zone.b) + + return self.eval(expr, zone_env) + + +# ============================================================================= +# Convenience Functions +# ============================================================================= + +_interpreter = None +_interpreter_minimal = None + + +def get_interpreter(minimal_primitives: bool = False) -> Interpreter: + """Get or create the global interpreter. + + Args: + minimal_primitives: If True, return interpreter with only core primitives. + Additional primitives must be loaded with require-primitives or with-primitives. 
+ """ + global _interpreter, _interpreter_minimal + + if minimal_primitives: + if _interpreter_minimal is None: + _interpreter_minimal = Interpreter(minimal_primitives=True) + return _interpreter_minimal + else: + if _interpreter is None: + _interpreter = Interpreter(minimal_primitives=False) + return _interpreter + + +def load_effect(path: str) -> EffectDefinition: + """Load an effect from a .sexp file.""" + return get_interpreter().load_effect(path) + + +def load_effects_dir(directory: str): + """Load all .sexp effects from a directory.""" + interp = get_interpreter() + dir_path = Path(directory) + for path in dir_path.glob('*.sexp'): + try: + interp.load_effect(str(path)) + except Exception as e: + print(f"Warning: Failed to load {path}: {e}") + + +def run_effect(name: str, frame: np.ndarray, params: Dict[str, Any], + state: Dict[str, Any] = None) -> tuple: + """Run an effect.""" + return get_interpreter().run_effect(name, frame, params, state or {}) + + +def list_effects() -> List[str]: + """List loaded effect names.""" + return list(get_interpreter().effects.keys()) + + +# ============================================================================= +# Adapter for existing effect system +# ============================================================================= + +def make_process_frame(effect_path: str) -> Callable: + """ + Create a process_frame function from a .sexp effect. + + This allows S-expression effects to be used with the existing + effect system. 
+ """ + interp = get_interpreter() + interp.load_effect(effect_path) + effect_name = Path(effect_path).stem + + def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: + return interp.run_effect(effect_name, frame, params, state) + + return process_frame diff --git a/sexp_effects/parser.py b/sexp_effects/parser.py new file mode 100644 index 0000000..12bedfd --- /dev/null +++ b/sexp_effects/parser.py @@ -0,0 +1,168 @@ +""" +S-Expression Parser + +Parses S-expressions into Python data structures: +- Lists become Python lists +- Symbols become Symbol objects +- Numbers become int/float +- Strings become str +- Keywords (:foo) become Keyword objects +""" + +import re +from dataclasses import dataclass +from typing import Any, List, Union + + +@dataclass(frozen=True) +class Symbol: + """A symbol (identifier) in the S-expression.""" + name: str + + def __repr__(self): + return self.name + + +@dataclass(frozen=True) +class Keyword: + """A keyword like :foo in the S-expression.""" + name: str + + def __repr__(self): + return f":{self.name}" + + +# Token patterns +TOKEN_PATTERNS = [ + (r'\s+', None), # Whitespace (skip) + (r';[^\n]*', None), # Comments (skip) + (r'\(', 'LPAREN'), + (r'\)', 'RPAREN'), + (r'\[', 'LBRACKET'), + (r'\]', 'RBRACKET'), + (r"'", 'QUOTE'), + (r'"([^"\\]|\\.)*"', 'STRING'), + (r':[a-zA-Z_][a-zA-Z0-9_\-]*', 'KEYWORD'), + (r'-?[0-9]+\.[0-9]+', 'FLOAT'), + (r'-?[0-9]+', 'INT'), + (r'#t|#f|true|false', 'BOOL'), + (r'[a-zA-Z_+\-*/<>=!?][a-zA-Z0-9_+\-*/<>=!?]*', 'SYMBOL'), +] + +TOKEN_REGEX = '|'.join(f'(?P<{name}>{pattern})' if name else f'(?:{pattern})' + for pattern, name in TOKEN_PATTERNS) + + +def tokenize(source: str) -> List[tuple]: + """Tokenize S-expression source code.""" + tokens = [] + for match in re.finditer(TOKEN_REGEX, source): + kind = match.lastgroup + value = match.group() + if kind: + tokens.append((kind, value)) + return tokens + + +def parse(source: str) -> Any: + """Parse S-expression source into Python data 
structures.""" + tokens = tokenize(source) + pos = [0] # Use list for mutability in nested function + + def parse_expr(): + if pos[0] >= len(tokens): + raise SyntaxError("Unexpected end of input") + + kind, value = tokens[pos[0]] + + if kind == 'LPAREN': + pos[0] += 1 + items = [] + while pos[0] < len(tokens) and tokens[pos[0]][0] != 'RPAREN': + items.append(parse_expr()) + if pos[0] >= len(tokens): + raise SyntaxError("Missing closing parenthesis") + pos[0] += 1 # Skip RPAREN + return items + + if kind == 'LBRACKET': + pos[0] += 1 + items = [] + while pos[0] < len(tokens) and tokens[pos[0]][0] != 'RBRACKET': + items.append(parse_expr()) + if pos[0] >= len(tokens): + raise SyntaxError("Missing closing bracket") + pos[0] += 1 # Skip RBRACKET + return items + + elif kind == 'RPAREN': + raise SyntaxError("Unexpected closing parenthesis") + + elif kind == 'QUOTE': + pos[0] += 1 + return [Symbol('quote'), parse_expr()] + + elif kind == 'STRING': + pos[0] += 1 + # Remove quotes and unescape + return value[1:-1].replace('\\"', '"').replace('\\n', '\n') + + elif kind == 'INT': + pos[0] += 1 + return int(value) + + elif kind == 'FLOAT': + pos[0] += 1 + return float(value) + + elif kind == 'BOOL': + pos[0] += 1 + return value in ('#t', 'true') + + elif kind == 'KEYWORD': + pos[0] += 1 + return Keyword(value[1:]) # Remove leading : + + elif kind == 'SYMBOL': + pos[0] += 1 + return Symbol(value) + + else: + raise SyntaxError(f"Unknown token: {kind} {value}") + + result = parse_expr() + + # Check for multiple top-level expressions + if pos[0] < len(tokens): + # Allow multiple top-level expressions, return as list + results = [result] + while pos[0] < len(tokens): + results.append(parse_expr()) + return results + + return result + + +def parse_file(path: str) -> Any: + """Parse an S-expression file.""" + with open(path, 'r') as f: + return parse(f.read()) + + +# Convenience for pretty-printing +def to_sexp(obj: Any) -> str: + """Convert Python object back to S-expression 
string.""" + if isinstance(obj, list): + return '(' + ' '.join(to_sexp(x) for x in obj) + ')' + elif isinstance(obj, Symbol): + return obj.name + elif isinstance(obj, Keyword): + return f':{obj.name}' + elif isinstance(obj, str): + return f'"{obj}"' + elif isinstance(obj, bool): + return '#t' if obj else '#f' + elif isinstance(obj, (int, float)): + return str(obj) + else: + return repr(obj) diff --git a/sexp_effects/primitive_libs/__init__.py b/sexp_effects/primitive_libs/__init__.py new file mode 100644 index 0000000..47ee174 --- /dev/null +++ b/sexp_effects/primitive_libs/__init__.py @@ -0,0 +1,102 @@ +""" +Primitive Libraries System + +Provides modular loading of primitives. Core primitives are always available, +additional primitive libraries can be loaded on-demand with scoped availability. + +Usage in sexp: + ;; Load at recipe level - available throughout + (primitives math :path "primitive_libs/math.py") + + ;; Or use with-primitives for scoped access + (with-primitives "image" + (blur frame 3)) ;; blur only available inside + + ;; Nested scopes work + (with-primitives "math" + (with-primitives "color" + (hue-shift frame (* (sin t) 30)))) + +Library file format (primitive_libs/math.py): + import math + + def prim_sin(x): return math.sin(x) + def prim_cos(x): return math.cos(x) + + PRIMITIVES = { + 'sin': prim_sin, + 'cos': prim_cos, + } +""" + +import importlib.util +from pathlib import Path +from typing import Dict, Callable, Any, Optional + +# Cache of loaded primitive libraries +_library_cache: Dict[str, Dict[str, Any]] = {} + +# Core primitives - always available, cannot be overridden +CORE_PRIMITIVES: Dict[str, Any] = {} + + +def register_core_primitive(name: str, fn: Callable): + """Register a core primitive that's always available.""" + CORE_PRIMITIVES[name] = fn + + +def load_primitive_library(name: str, path: Optional[str] = None) -> Dict[str, Any]: + """ + Load a primitive library by name or path. 
+ + Args: + name: Library name (e.g., "math", "image", "color") + path: Optional explicit path to library file + + Returns: + Dict of primitive name -> function + """ + # Check cache first + cache_key = path or name + if cache_key in _library_cache: + return _library_cache[cache_key] + + # Find library file + if path: + lib_path = Path(path) + else: + # Look in standard locations + lib_dir = Path(__file__).parent + lib_path = lib_dir / f"{name}.py" + + if not lib_path.exists(): + raise ValueError(f"Primitive library '{name}' not found at {lib_path}") + + if not lib_path.exists(): + raise ValueError(f"Primitive library file not found: {lib_path}") + + # Load the module + spec = importlib.util.spec_from_file_location(f"prim_lib_{name}", lib_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + + # Get PRIMITIVES dict from module + if not hasattr(module, 'PRIMITIVES'): + raise ValueError(f"Primitive library '{name}' missing PRIMITIVES dict") + + primitives = module.PRIMITIVES + + # Cache and return + _library_cache[cache_key] = primitives + return primitives + + +def get_library_names() -> list: + """Get names of available primitive libraries.""" + lib_dir = Path(__file__).parent + return [p.stem for p in lib_dir.glob("*.py") if p.stem != "__init__"] + + +def clear_cache(): + """Clear the library cache (useful for testing).""" + _library_cache.clear() diff --git a/sexp_effects/primitive_libs/arrays.py b/sexp_effects/primitive_libs/arrays.py new file mode 100644 index 0000000..61da196 --- /dev/null +++ b/sexp_effects/primitive_libs/arrays.py @@ -0,0 +1,196 @@ +""" +Array Primitives Library + +Vectorized operations on numpy arrays for coordinate transformations. 
"""
import numpy as np


# Arithmetic -- thin wrappers over numpy ufuncs, so they broadcast over
# scalars and arrays alike.
def prim_arr_add(a, b):
    return np.add(a, b)


def prim_arr_sub(a, b):
    return np.subtract(a, b)


def prim_arr_mul(a, b):
    return np.multiply(a, b)


def prim_arr_div(a, b):
    # NOTE(review): no zero-divisor guard here (unlike sqrt/log below) --
    # division by zero follows numpy semantics (inf/nan + warning); confirm
    # callers expect that.
    return np.divide(a, b)


def prim_arr_mod(a, b):
    return np.mod(a, b)


def prim_arr_neg(a):
    return np.negative(a)


# Math functions
def prim_arr_sin(a):
    return np.sin(a)


def prim_arr_cos(a):
    return np.cos(a)


def prim_arr_tan(a):
    return np.tan(a)


def prim_arr_sqrt(a):
    # Clamp negatives to 0 so sqrt never yields NaN.
    return np.sqrt(np.maximum(a, 0))


def prim_arr_pow(a, b):
    return np.power(a, b)


def prim_arr_abs(a):
    return np.abs(a)


def prim_arr_exp(a):
    return np.exp(a)


def prim_arr_log(a):
    # Clamp to a small positive epsilon so log never yields -inf/NaN.
    return np.log(np.maximum(a, 1e-10))


def prim_arr_atan2(y, x):
    return np.arctan2(y, x)


# Comparison / selection
def prim_arr_min(a, b):
    return np.minimum(a, b)


def prim_arr_max(a, b):
    return np.maximum(a, b)


def prim_arr_clip(a, lo, hi):
    return np.clip(a, lo, hi)


def prim_arr_where(cond, a, b):
    return np.where(cond, a, b)


def prim_arr_floor(a):
    return np.floor(a)


def prim_arr_ceil(a):
    return np.ceil(a)


def prim_arr_round(a):
    return np.round(a)


# Interpolation
def prim_arr_lerp(a, b, t):
    # Linear interpolation; t is not clamped, so t outside [0,1] extrapolates.
    return a + (b - a) * t


def prim_arr_smoothstep(edge0, edge1, x):
    # GLSL-style smoothstep: clamp then apply the cubic 3t^2 - 2t^3.
    t = prim_arr_clip((x - edge0) / (edge1 - edge0), 0.0, 1.0)
    return t * t * (3 - 2 * t)


# Creation -- all creation helpers produce float32 arrays.
def prim_arr_zeros(shape):
    return np.zeros(shape, dtype=np.float32)


def prim_arr_ones(shape):
    return np.ones(shape, dtype=np.float32)


def prim_arr_full(shape, value):
    return np.full(shape, value, dtype=np.float32)


def prim_arr_arange(start, stop, step=1):
    return np.arange(start, stop, step, dtype=np.float32)


def prim_arr_linspace(start, stop, num):
    return np.linspace(start, stop, num, dtype=np.float32)


def prim_arr_meshgrid(x, y):
    return np.meshgrid(x, y)


# Coordinate transforms
def prim_polar_from_center(map_x, map_y, cx, cy):
    """Convert Cartesian to polar coordinates centered at (cx, cy)."""
    dx = map_x - cx
    dy = map_y - cy
    r = np.sqrt(dx**2 + dy**2)
    theta = np.arctan2(dy, dx)
    return (r, theta)


def prim_cart_from_polar(r, theta, cx, cy):
    """Convert polar to Cartesian, adding center offset."""
    x = r * np.cos(theta) + cx
    y = r * np.sin(theta) + cy
    return (x, y)


# Export table: maps sexp-level primitive names to implementations.
PRIMITIVES = {
    # Arithmetic
    'arr+': prim_arr_add,
    'arr-': prim_arr_sub,
    'arr*': prim_arr_mul,
    'arr/': prim_arr_div,
    'arr-mod': prim_arr_mod,
    'arr-neg': prim_arr_neg,

    # Math
    'arr-sin': prim_arr_sin,
    'arr-cos': prim_arr_cos,
    'arr-tan': prim_arr_tan,
    'arr-sqrt': prim_arr_sqrt,
    'arr-pow': prim_arr_pow,
    'arr-abs': prim_arr_abs,
    'arr-exp': prim_arr_exp,
    'arr-log': prim_arr_log,
    'arr-atan2': prim_arr_atan2,

    # Selection
    'arr-min': prim_arr_min,
    'arr-max': prim_arr_max,
    'arr-clip': prim_arr_clip,
    'arr-where': prim_arr_where,
    'arr-floor': prim_arr_floor,
    'arr-ceil': prim_arr_ceil,
    'arr-round': prim_arr_round,

    # Interpolation
    'arr-lerp': prim_arr_lerp,
    'arr-smoothstep': prim_arr_smoothstep,

    # Creation
    'arr-zeros': prim_arr_zeros,
    'arr-ones': prim_arr_ones,
    'arr-full': prim_arr_full,
    'arr-arange': prim_arr_arange,
    'arr-linspace': prim_arr_linspace,
    'arr-meshgrid': prim_arr_meshgrid,

    # Coordinates
    'polar-from-center': prim_polar_from_center,
    'cart-from-polar': prim_cart_from_polar,
}
diff --git a/sexp_effects/primitive_libs/ascii.py b/sexp_effects/primitive_libs/ascii.py
new file mode 100644
index 0000000..858f010
--- /dev/null
+++ b/sexp_effects/primitive_libs/ascii.py
@@ -0,0 +1,388 @@
"""
ASCII Art Primitives Library

ASCII art rendering with per-zone expression evaluation and cell effects.
+""" +import numpy as np +import cv2 +from PIL import Image, ImageDraw, ImageFont +from typing import Any, Dict, List, Optional, Callable +import colorsys + + +# Character sets +CHAR_SETS = { + "standard": " .:-=+*#%@", + "blocks": " ░▒▓█", + "simple": " .:oO@", + "digits": "0123456789", + "binary": "01", + "ascii": " `.-':_,^=;><+!rc*/z?sLTv)J7(|Fi{C}fI31tlu[neoZ5Yxjya]2ESwqkP6h9d4VpOGbUAKXHm8RD#$Bg0MNWQ%&@", +} + +# Default font +_default_font = None + + +def _get_font(size: int): + """Get monospace font at given size.""" + global _default_font + try: + return ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", size) + except: + return ImageFont.load_default() + + +def _parse_color(color_str: str) -> tuple: + """Parse color string to RGB tuple.""" + if color_str.startswith('#'): + hex_color = color_str[1:] + if len(hex_color) == 3: + hex_color = ''.join(c*2 for c in hex_color) + return tuple(int(hex_color[i:i+2], 16) for i in (0, 2, 4)) + + colors = { + 'black': (0, 0, 0), 'white': (255, 255, 255), + 'red': (255, 0, 0), 'green': (0, 255, 0), 'blue': (0, 0, 255), + 'yellow': (255, 255, 0), 'cyan': (0, 255, 255), 'magenta': (255, 0, 255), + 'gray': (128, 128, 128), 'grey': (128, 128, 128), + } + return colors.get(color_str.lower(), (0, 0, 0)) + + +def _cell_sample(frame: np.ndarray, cell_size: int): + """Sample frame into cells, returning colors and luminances. + + Uses cv2.resize with INTER_AREA (pixel-area averaging) which is + ~25x faster than numpy reshape+mean for block downsampling. + """ + h, w = frame.shape[:2] + rows = h // cell_size + cols = w // cell_size + + # Crop to exact grid then block-average via cv2 area interpolation. 
+ cropped = frame[:rows * cell_size, :cols * cell_size] + colors = cv2.resize(cropped, (cols, rows), interpolation=cv2.INTER_AREA) + + luminances = ((0.299 * colors[:, :, 0] + + 0.587 * colors[:, :, 1] + + 0.114 * colors[:, :, 2]) / 255.0).astype(np.float32) + + return colors, luminances + + +def _luminance_to_char(lum: float, alphabet: str, contrast: float) -> str: + """Map luminance to character.""" + chars = CHAR_SETS.get(alphabet, alphabet) + lum = ((lum - 0.5) * contrast + 0.5) + lum = max(0, min(1, lum)) + idx = int(lum * (len(chars) - 1)) + return chars[idx] + + +def _render_char_cell(char: str, cell_size: int, color: tuple, bg_color: tuple) -> np.ndarray: + """Render a single character to a cell image.""" + img = Image.new('RGB', (cell_size, cell_size), bg_color) + draw = ImageDraw.Draw(img) + font = _get_font(cell_size) + + # Center the character + bbox = draw.textbbox((0, 0), char, font=font) + text_w = bbox[2] - bbox[0] + text_h = bbox[3] - bbox[1] + x = (cell_size - text_w) // 2 + y = (cell_size - text_h) // 2 - bbox[1] + + draw.text((x, y), char, fill=color, font=font) + return np.array(img) + + +def prim_ascii_fx_zone( + frame: np.ndarray, + cols: int = 80, + char_size: int = None, + alphabet: str = "standard", + color_mode: str = "color", + background: str = "black", + contrast: float = 1.5, + char_hue = None, + char_saturation = None, + char_brightness = None, + char_scale = None, + char_rotation = None, + char_jitter = None, + cell_effect = None, + energy: float = None, + rotation_scale: float = 0, + _interp = None, + _env = None, + **extra_params +) -> np.ndarray: + """ + Render frame as ASCII art with per-zone effects. 
    Args:
        frame: Input image
        cols: Number of character columns
        char_size: Cell size in pixels (overrides cols if set)
        alphabet: Character set name or custom string
        color_mode: "color", "mono", "invert", or color name
        background: Background color name or hex
        contrast: Contrast for character selection
        char_hue/saturation/brightness/scale/rotation/jitter: Per-zone expressions
        cell_effect: Lambda (cell, zone) -> cell for per-cell effects
        energy: Energy value from audio analysis
        rotation_scale: Max rotation degrees
        _interp: Interpreter (auto-injected)
        _env: Environment (auto-injected)
        **extra_params: Additional params passed to zone dict
    """
    h, w = frame.shape[:2]

    # Calculate cell size: either derived from the requested column count or
    # taken directly from char_size; never below 4px so text stays legible.
    if char_size is None or char_size == 0:
        cell_size = max(4, w // cols)
    else:
        cell_size = max(4, int(char_size))

    # Sample cells
    colors, luminances = _cell_sample(frame, cell_size)
    rows, cols_actual = luminances.shape

    # Parse background color
    bg_color = _parse_color(background)

    # Create output image
    out_h = rows * cell_size
    out_w = cols_actual * cell_size
    output = np.full((out_h, out_w, 3), bg_color, dtype=np.uint8)

    # Check if we have cell_effect
    has_cell_effect = cell_effect is not None

    # Process each cell
    for r in range(rows):
        for c in range(cols_actual):
            lum = luminances[r, c]
            cell_color = tuple(colors[r, c])

            # Build zone context: everything a per-cell expression can see.
            zone = {
                'row': r,
                'col': c,
                # Normalized position in [0,1]; max(1, ...) guards the
                # single-row/column case against division by zero.
                'row-norm': r / max(1, rows - 1),
                'col-norm': c / max(1, cols_actual - 1),
                'lum': float(lum),
                'r': cell_color[0] / 255,
                'g': cell_color[1] / 255,
                'b': cell_color[2] / 255,
                'cell_size': cell_size,
            }

            # Add HSV
            r_f, g_f, b_f = cell_color[0]/255, cell_color[1]/255, cell_color[2]/255
            hsv = colorsys.rgb_to_hsv(r_f, g_f, b_f)
            zone['hue'] = hsv[0] * 360
            zone['sat'] = hsv[1]

            # Add energy and rotation_scale
            if energy is not None:
                zone['energy'] = energy
            zone['rotation_scale'] = rotation_scale

            # Add extra params -- only plain scalar values are forwarded so
            # the zone dict stays cheap to copy and safe to expose.
            for k, v in extra_params.items():
                if isinstance(v, (int, float, str, bool)) or v is None:
                    zone[k] = v

            # Get character
            char = _luminance_to_char(lum, alphabet, contrast)
            zone['char'] = char

            # Determine cell color based on mode
            if color_mode == "mono":
                render_color = (255, 255, 255)
            elif color_mode == "invert":
                render_color = tuple(255 - c for c in cell_color)
            elif color_mode == "color":
                render_color = cell_color
            else:
                # Any other value is treated as a fixed color name/hex.
                render_color = _parse_color(color_mode)

            zone['color'] = render_color

            # Render character to cell
            cell_img = _render_char_cell(char, cell_size, render_color, bg_color)

            # Apply cell_effect if provided (requires the interpreter to
            # evaluate the effect expression).
            if has_cell_effect and _interp is not None:
                cell_img = _apply_cell_effect(cell_img, zone, cell_effect, _interp, _env, extra_params)

            # Paste cell to output
            y1, y2 = r * cell_size, (r + 1) * cell_size
            x1, x2 = c * cell_size, (c + 1) * cell_size
            output[y1:y2, x1:x2] = cell_img

    # Resize to match input dimensions
    if output.shape[:2] != frame.shape[:2]:
        output = cv2.resize(output, (w, h), interpolation=cv2.INTER_LINEAR)

    return output


def _apply_cell_effect(cell_img, zone, cell_effect, interp, env, extra_params):
    """Apply cell_effect lambda to a cell image.

    cell_effect is a Lambda object with params and body.
    We create a child environment with zone variables and cell,
    then evaluate the lambda body.
    """
    # Get Environment class from the interpreter's module
    # (avoids importing it here and keeps this library decoupled).
    Environment = type(env)

    # Create child environment with zone variables
    cell_env = Environment(env)

    # Bind zone variables
    for k, v in zone.items():
        cell_env.set(k, v)

    # Also bind with zone- prefix for consistency
    # (matches the names used by the interpreter's eval_zone_expr).
    cell_env.set('zone-row', zone.get('row', 0))
    cell_env.set('zone-col', zone.get('col', 0))
    cell_env.set('zone-row-norm', zone.get('row-norm', 0))
    cell_env.set('zone-col-norm', zone.get('col-norm', 0))
    cell_env.set('zone-lum', zone.get('lum', 0))
    cell_env.set('zone-sat', zone.get('sat', 0))
    cell_env.set('zone-hue', zone.get('hue', 0))
    cell_env.set('zone-r', zone.get('r', 0))
    cell_env.set('zone-g', zone.get('g', 0))
    cell_env.set('zone-b', zone.get('b', 0))

    # Inject loaded effects as callable functions so a cell_effect can call
    # e.g. (blur cell 3). The positional-arg -> param-name mapping below is
    # a hard-coded table for the known effects; unknown effects get no
    # positional params.
    if hasattr(interp, 'effects'):
        for effect_name in interp.effects:
            def make_effect_fn(name):
                # Closure factory: binds `name` per effect to avoid the
                # late-binding loop-variable pitfall.
                def effect_fn(frame, *args):
                    params = {}
                    if name == 'blur' and len(args) >= 1:
                        params['radius'] = args[0]
                    elif name == 'rotate' and len(args) >= 1:
                        params['angle'] = args[0]
                    elif name == 'brightness' and len(args) >= 1:
                        params['amount'] = args[0]
                    elif name == 'contrast' and len(args) >= 1:
                        params['amount'] = args[0]
                    elif name == 'saturation' and len(args) >= 1:
                        params['amount'] = args[0]
                    elif name == 'hue_shift' and len(args) >= 1:
                        params['degrees'] = args[0]
                    elif name == 'rgb_split' and len(args) >= 2:
                        params['offset_x'] = args[0]
                        params['offset_y'] = args[1]
                    elif name == 'pixelate' and len(args) >= 1:
                        params['size'] = args[0]
                    elif name == 'invert':
                        pass
                    result, _ = interp.run_effect(name, frame, params, {})
                    return result
                return effect_fn
            cell_env.set(effect_name, make_effect_fn(effect_name))

    # Bind cell image and zone dict
    cell_env.set('cell', cell_img)
    cell_env.set('zone', zone)

    # Evaluate the cell_effect lambda
    # Lambda has params and body - we need to bind the params then evaluate
    if hasattr(cell_effect, 'params') and hasattr(cell_effect, 'body'):
        # Bind lambda parameters: (lambda [cell zone] body)
        if len(cell_effect.params) >= 1:
            cell_env.set(cell_effect.params[0], cell_img)
        if len(cell_effect.params) >= 2:
            cell_env.set(cell_effect.params[1], zone)

        result = interp.eval(cell_effect.body, cell_env)
    elif isinstance(cell_effect, list):
        # Raw S-expression lambda like (lambda [cell zone] body) or (fn [cell zone] body)
        # Check if it's a lambda expression
        head = cell_effect[0] if cell_effect else None
        head_name = head.name if head and hasattr(head, 'name') else str(head) if head else None
        is_lambda = head_name in ('lambda', 'fn')

        if is_lambda:
            # (lambda [params...] body)
            params = cell_effect[1] if len(cell_effect) > 1 else []
            body = cell_effect[2] if len(cell_effect) > 2 else None

            # Bind lambda parameters
            if isinstance(params, list) and len(params) >= 1:
                param_name = params[0].name if hasattr(params[0], 'name') else str(params[0])
                cell_env.set(param_name, cell_img)
            if isinstance(params, list) and len(params) >= 2:
                param_name = params[1].name if hasattr(params[1], 'name') else str(params[1])
                cell_env.set(param_name, zone)

            result = interp.eval(body, cell_env) if body else cell_img
        else:
            # Some other expression - just evaluate it
            result = interp.eval(cell_effect, cell_env)
    elif callable(cell_effect):
        # It's a callable
        result = cell_effect(cell_img, zone)
    else:
        raise ValueError(f"cell_effect must be a Lambda, list, or callable, got {type(cell_effect)}")

    if isinstance(result, np.ndarray) and result.shape == cell_img.shape:
        return result
    elif isinstance(result, np.ndarray):
        # Shape mismatch - resize to fit
        result = cv2.resize(result, (cell_img.shape[1], cell_img.shape[0]))
        return result

    raise ValueError(f"cell_effect must return an image array, got {type(result)}")


def _get_legacy_ascii_primitives():
    """Import ASCII primitives from legacy primitives module.
    These are loaded lazily to avoid import issues during module loading.
    By the time a primitive library is loaded, sexp_effects.primitives
    is already in sys.modules (imported by sexp_effects.__init__).
    """
    from sexp_effects.primitives import (
        prim_cell_sample,
        prim_luminance_to_chars,
        prim_render_char_grid,
        prim_render_char_grid_fx,
        prim_alphabet_char,
        prim_alphabet_length,
        prim_map_char_grid,
        prim_map_colors,
        prim_make_char_grid,
        prim_set_char,
        prim_get_char,
        prim_char_grid_dimensions,
        cell_sample_extended,
    )
    return {
        'cell-sample': prim_cell_sample,
        'cell-sample-extended': cell_sample_extended,
        'luminance-to-chars': prim_luminance_to_chars,
        'render-char-grid': prim_render_char_grid,
        'render-char-grid-fx': prim_render_char_grid_fx,
        'alphabet-char': prim_alphabet_char,
        'alphabet-length': prim_alphabet_length,
        'map-char-grid': prim_map_char_grid,
        'map-colors': prim_map_colors,
        'make-char-grid': prim_make_char_grid,
        'set-char': prim_set_char,
        'get-char': prim_get_char,
        'char-grid-dimensions': prim_char_grid_dimensions,
    }


# Export table: the new zone renderer plus the legacy ASCII primitives
# (resolved eagerly at module load -- see _get_legacy_ascii_primitives).
PRIMITIVES = {
    'ascii-fx-zone': prim_ascii_fx_zone,
    **_get_legacy_ascii_primitives(),
}
diff --git a/sexp_effects/primitive_libs/blending.py b/sexp_effects/primitive_libs/blending.py
new file mode 100644
index 0000000..0bf345d
--- /dev/null
+++ b/sexp_effects/primitive_libs/blending.py
@@ -0,0 +1,116 @@
"""
Blending Primitives Library

Image blending and compositing operations.
"""
import numpy as np


def prim_blend_images(a, b, alpha):
    """Blend two images: a * (1-alpha) + b * alpha.

    alpha is clamped to [0, 1]; output is uint8.
    """
    alpha = max(0.0, min(1.0, alpha))
    return (a.astype(float) * (1 - alpha) + b.astype(float) * alpha).astype(np.uint8)


def prim_blend_mode(a, b, mode):
    """Blend using Photoshop-style blend modes.

    a is the base layer, b the blend layer; both are converted to 0-1
    floats for the math and back to uint8 at the end. Unknown modes fall
    through to "normal" (return b unchanged).
    """
    a = a.astype(float) / 255
    b = b.astype(float) / 255

    if mode == "multiply":
        result = a * b
    elif mode == "screen":
        result = 1 - (1 - a) * (1 - b)
    elif mode == "overlay":
        mask = a < 0.5
        result = np.where(mask, 2 * a * b, 1 - 2 * (1 - a) * (1 - b))
    elif mode == "soft-light":
        mask = b < 0.5
        result = np.where(mask,
                          a - (1 - 2 * b) * a * (1 - a),
                          a + (2 * b - 1) * (np.sqrt(a) - a))
    elif mode == "hard-light":
        # Same formula as overlay but keyed on the blend layer.
        mask = b < 0.5
        result = np.where(mask, 2 * a * b, 1 - 2 * (1 - a) * (1 - b))
    elif mode == "color-dodge":
        # +0.001 avoids division by zero at pure white blend pixels.
        result = np.clip(a / (1 - b + 0.001), 0, 1)
    elif mode == "color-burn":
        result = 1 - np.clip((1 - a) / (b + 0.001), 0, 1)
    elif mode == "difference":
        result = np.abs(a - b)
    elif mode == "exclusion":
        result = a + b - 2 * a * b
    elif mode == "add":
        result = np.clip(a + b, 0, 1)
    elif mode == "subtract":
        result = np.clip(a - b, 0, 1)
    elif mode == "darken":
        result = np.minimum(a, b)
    elif mode == "lighten":
        result = np.maximum(a, b)
    else:
        # Default to normal (just return b)
        result = b

    return (result * 255).astype(np.uint8)


def prim_mask(img, mask_img):
    """Apply grayscale mask to image (white=opaque, black=transparent).

    A 3-channel mask is reduced to its first channel.
    """
    if len(mask_img.shape) == 3:
        mask = mask_img[:, :, 0].astype(float) / 255
    else:
        mask = mask_img.astype(float) / 255

    mask = mask[:, :, np.newaxis]
    return (img.astype(float) * mask).astype(np.uint8)


def prim_alpha_composite(base, overlay, alpha_channel):
    """Composite overlay onto base using alpha channel.

    A 3-channel alpha image is reduced to its first channel.
    """
    if len(alpha_channel.shape) == 3:
        alpha = alpha_channel[:, :, 0].astype(float) / 255
    else:
        alpha = alpha_channel.astype(float) / 255

    alpha = alpha[:, :, np.newaxis]
    result = base.astype(float) * (1 - alpha) + overlay.astype(float) * alpha
    return result.astype(np.uint8)


def prim_overlay(base, overlay, x, y, alpha=1.0):
    """Overlay image at position (x, y) with optional alpha.

    The overlay is clipped to the base image's bounds; negative x/y place
    it partially off the top/left edge.
    """
    result = base.copy()
    x, y = int(x), int(y)
    oh, ow = overlay.shape[:2]
    bh, bw = base.shape[:2]

    # Clip to bounds: s* index into the overlay (source), d* into the base
    # (destination).
    sx1 = max(0, -x)
    sy1 = max(0, -y)
    dx1 = max(0, x)
    dy1 = max(0, y)
    sx2 = min(ow, bw - x)
    sy2 = min(oh, bh - y)

    if sx2 > sx1 and sy2 > sy1:
        src = overlay[sy1:sy2, sx1:sx2]
        dst = result[dy1:dy1+(sy2-sy1), dx1:dx1+(sx2-sx1)]
        blended = (dst.astype(float) * (1 - alpha) + src.astype(float) * alpha)
        result[dy1:dy1+(sy2-sy1), dx1:dx1+(sx2-sx1)] = blended.astype(np.uint8)

    return result


# Export table for the primitive library loader.
PRIMITIVES = {
    # Basic blending
    'blend-images': prim_blend_images,
    'blend-mode': prim_blend_mode,

    # Masking
    'mask': prim_mask,
    'alpha-composite': prim_alpha_composite,

    # Overlay
    'overlay': prim_overlay,
}
diff --git a/sexp_effects/primitive_libs/color.py b/sexp_effects/primitive_libs/color.py
new file mode 100644
index 0000000..0b6854b
--- /dev/null
+++ b/sexp_effects/primitive_libs/color.py
@@ -0,0 +1,137 @@
"""
Color Primitives Library

Color manipulation: RGB, HSV, blending, luminance.
"""
import numpy as np
import colorsys


def prim_rgb(r, g, b):
    """Create RGB color as [r, g, b] (0-255), clamping each component."""
    return [int(max(0, min(255, r))),
            int(max(0, min(255, g))),
            int(max(0, min(255, b)))]


def prim_red(c):
    # Component accessor: red channel of an [r, g, b] color.
    return c[0]


def prim_green(c):
    # Component accessor: green channel.
    return c[1]


def prim_blue(c):
    # Component accessor: blue channel.
    return c[2]


def prim_luminance(c):
    """Perceived luminance (0-1) using standard weights."""
    return (0.299 * c[0] + 0.587 * c[1] + 0.114 * c[2]) / 255


def prim_rgb_to_hsv(c):
    """Convert RGB [0-255] to HSV [h:0-360, s:0-1, v:0-1]."""
    r, g, b = c[0] / 255, c[1] / 255, c[2] / 255
    h, s, v = colorsys.rgb_to_hsv(r, g, b)
    return [h * 360, s, v]


def prim_hsv_to_rgb(hsv):
    """Convert HSV [h:0-360, s:0-1, v:0-1] to RGB [0-255]."""
    h, s, v = hsv[0] / 360, hsv[1], hsv[2]
    r, g, b = colorsys.hsv_to_rgb(h, s, v)
    return [int(r * 255), int(g * 255), int(b * 255)]


def prim_rgb_to_hsl(c):
    """Convert RGB [0-255] to HSL [h:0-360, s:0-1, l:0-1]."""
    # colorsys uses HLS ordering; we re-order to the HSL convention.
    r, g, b = c[0] / 255, c[1] / 255, c[2] / 255
    h, l, s = colorsys.rgb_to_hls(r, g, b)
    return [h * 360, s, l]


def prim_hsl_to_rgb(hsl):
    """Convert HSL [h:0-360, s:0-1, l:0-1] to RGB [0-255]."""
    h, s, l = hsl[0] / 360, hsl[1], hsl[2]
    r, g, b = colorsys.hls_to_rgb(h, l, s)
    return [int(r * 255), int(g * 255), int(b * 255)]


def prim_blend_color(c1, c2, alpha):
    """Blend two colors: c1 * (1-alpha) + c2 * alpha."""
    return [int(c1[i] * (1 - alpha) + c2[i] * alpha) for i in range(3)]


def prim_average_color(img):
    """Get average color of an image."""
    mean = np.mean(img, axis=(0, 1))
    return [int(mean[0]), int(mean[1]), int(mean[2])]


def prim_dominant_color(img, k=1):
    """Get dominant color using k-means (simplified: just average for now)."""
    # NOTE: k is currently ignored -- this is a placeholder delegating to
    # the plain average until a real k-means implementation lands.
    return prim_average_color(img)


def prim_invert_color(c):
    """Invert a color."""
    return [255 - c[0], 255 - c[1], 255 - c[2]]


def prim_grayscale_color(c):
    """Convert color to grayscale."""
    gray = int(0.299 * c[0] + 0.587 * c[1] + 0.114 * c[2])
    return [gray, gray, gray]


def prim_saturate(c, amount):
    """Adjust saturation of color. amount=0 is grayscale, 1 is unchanged, >1 is more saturated."""
    hsv = prim_rgb_to_hsv(c)
    hsv[1] = max(0, min(1, hsv[1] * amount))
    return prim_hsv_to_rgb(hsv)


def prim_brighten(c, amount):
    """Adjust brightness. amount=0 is black, 1 is unchanged, >1 is brighter."""
    return [int(max(0, min(255, c[i] * amount))) for i in range(3)]


def prim_shift_hue(c, degrees):
    """Shift hue by degrees."""
    hsv = prim_rgb_to_hsv(c)
    hsv[0] = (hsv[0] + degrees) % 360
    return prim_hsv_to_rgb(hsv)


# Export table for the primitive library loader.
PRIMITIVES = {
    # Construction
    'rgb': prim_rgb,

    # Component access
    'red': prim_red,
    'green': prim_green,
    'blue': prim_blue,
    'luminance': prim_luminance,

    # Color space conversion
    'rgb->hsv': prim_rgb_to_hsv,
    'hsv->rgb': prim_hsv_to_rgb,
    'rgb->hsl': prim_rgb_to_hsl,
    'hsl->rgb': prim_hsl_to_rgb,

    # Blending
    'blend-color': prim_blend_color,

    # Analysis
    'average-color': prim_average_color,
    'dominant-color': prim_dominant_color,

    # Manipulation
    'invert-color': prim_invert_color,
    'grayscale-color': prim_grayscale_color,
    'saturate': prim_saturate,
    'brighten': prim_brighten,
    'shift-hue': prim_shift_hue,
}
diff --git a/sexp_effects/primitive_libs/color_ops.py b/sexp_effects/primitive_libs/color_ops.py
new file mode 100644
index 0000000..dd9076c
--- /dev/null
+++ b/sexp_effects/primitive_libs/color_ops.py
@@ -0,0 +1,90 @@
"""
Color Operations Primitives Library

Vectorized color adjustments: brightness, contrast, saturation, invert, HSV.
These operate on entire images for fast processing.
"""
import numpy as np
import cv2


def prim_adjust(img, brightness=0, contrast=1):
    """Adjust brightness and contrast.
Brightness: -255 to 255, Contrast: 0 to 3+.""" + result = (img.astype(np.float32) - 128) * contrast + 128 + brightness + return np.clip(result, 0, 255).astype(np.uint8) + + +def prim_mix_gray(img, amount): + """Mix image with its grayscale version. 0=original, 1=grayscale.""" + gray = 0.299 * img[:, :, 0] + 0.587 * img[:, :, 1] + 0.114 * img[:, :, 2] + gray_rgb = np.stack([gray, gray, gray], axis=-1) + result = img.astype(np.float32) * (1 - amount) + gray_rgb * amount + return np.clip(result, 0, 255).astype(np.uint8) + + +def prim_invert_img(img): + """Invert all pixel values.""" + return (255 - img).astype(np.uint8) + + +def prim_shift_hsv(img, h=0, s=1, v=1): + """Shift HSV: h=degrees offset, s/v=multipliers.""" + hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.float32) + hsv[:, :, 0] = (hsv[:, :, 0] + h / 2) % 180 + hsv[:, :, 1] = np.clip(hsv[:, :, 1] * s, 0, 255) + hsv[:, :, 2] = np.clip(hsv[:, :, 2] * v, 0, 255) + return cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2RGB) + + +def prim_add_noise(img, amount): + """Add gaussian noise to image.""" + noise = np.random.normal(0, amount, img.shape) + result = img.astype(np.float32) + noise + return np.clip(result, 0, 255).astype(np.uint8) + + +def prim_quantize(img, levels): + """Reduce to N color levels per channel.""" + levels = max(2, int(levels)) + factor = 256 / levels + result = (img // factor) * factor + factor // 2 + return np.clip(result, 0, 255).astype(np.uint8) + + +def prim_sepia(img, intensity=1.0): + """Apply sepia tone effect.""" + sepia_matrix = np.array([ + [0.393, 0.769, 0.189], + [0.349, 0.686, 0.168], + [0.272, 0.534, 0.131] + ]) + sepia = np.dot(img, sepia_matrix.T) + result = img.astype(np.float32) * (1 - intensity) + sepia * intensity + return np.clip(result, 0, 255).astype(np.uint8) + + +def prim_grayscale(img): + """Convert to grayscale (still RGB output).""" + gray = 0.299 * img[:, :, 0] + 0.587 * img[:, :, 1] + 0.114 * img[:, :, 2] + return np.stack([gray, gray, gray], 
import math


# Arithmetic
def prim_add(*args):
    """Variadic addition; returns 0 for no args.

    Works on any type supporting +, so it also concatenates strings/lists.
    """
    if not args:
        return 0
    result = args[0]
    for arg in args[1:]:
        result = result + arg
    return result


def prim_sub(a, b=None):
    """(- a) negates a; (- a b) subtracts b from a."""
    if b is None:
        return -a
    return a - b


def prim_mul(*args):
    """Variadic multiplication; returns 1 for no args."""
    if not args:
        return 1
    result = args[0]
    for arg in args[1:]:
        result = result * arg
    return result


def prim_div(a, b):
    """True division; raises ZeroDivisionError when b == 0."""
    return a / b


def prim_mod(a, b):
    """Modulo with Python semantics (result has the sign of b)."""
    return a % b


def prim_abs(x):
    """Absolute value."""
    return abs(x)


def prim_min(*args):
    """Minimum of the arguments (raises ValueError when empty)."""
    return min(args)


def prim_max(*args):
    """Maximum of the arguments (raises ValueError when empty)."""
    return max(args)


def prim_round(x):
    """Round to nearest integer (banker's rounding, like Python)."""
    return round(x)


def prim_floor(x):
    """Largest integer <= x."""
    # math imported once at module scope instead of on every call.
    return math.floor(x)


def prim_ceil(x):
    """Smallest integer >= x."""
    return math.ceil(x)


# Comparison
def prim_lt(a, b):
    return a < b


def prim_gt(a, b):
    return a > b


def prim_le(a, b):
    return a <= b


def prim_ge(a, b):
    return a >= b


def prim_eq(a, b):
    """Equality; floats compare with a small absolute tolerance."""
    if isinstance(a, float) or isinstance(b, float):
        return abs(a - b) < 1e-9
    return a == b


def prim_ne(a, b):
    """Negation of prim_eq (shares its float tolerance)."""
    return not prim_eq(a, b)


# Logic
def prim_not(x):
    """Logical negation using Python truthiness."""
    return not x


def prim_and(*args):
    """Logical AND over all args; True when called with no args."""
    return all(bool(a) for a in args)


def prim_or(*args):
    """Logical OR over all args; False when called with no args."""
    return any(bool(a) for a in args)
# Basic data access
def prim_get(obj, key, default=None):
    """Look up key in a dict, or an integer index in a list/tuple."""
    if isinstance(obj, dict):
        return obj.get(key, default)
    if isinstance(obj, (list, tuple)):
        try:
            return obj[int(key)]
        except (IndexError, ValueError):
            return default
    return default


def prim_nth(seq, i):
    """Element at index i, or None when i is out of range."""
    i = int(i)
    return seq[i] if 0 <= i < len(seq) else None


def prim_first(seq):
    """First element, or None for an empty sequence."""
    if seq:
        return seq[0]
    return None


def prim_length(seq):
    """Number of elements in a sequence."""
    return len(seq)


def prim_list(*args):
    """Build a list from the arguments."""
    return list(args)


# Type checking
def prim_is_number(x):
    return isinstance(x, (int, float))


def prim_is_string(x):
    return isinstance(x, str)


def prim_is_list(x):
    return isinstance(x, (list, tuple))


def prim_is_dict(x):
    return isinstance(x, dict)


def prim_is_nil(x):
    return x is None


# Higher-order / iteration
def prim_reduce(seq, init, fn):
    """(reduce seq init fn) — left fold: fn(fn(fn(init, s0), s1), s2)..."""
    acc = init
    for element in seq:
        acc = fn(acc, element)
    return acc


def prim_map(seq, fn):
    """(map seq fn) — list of fn applied to each element."""
    return [fn(element) for element in seq]


def prim_range(*args):
    """(range end) / (range start end) / (range start end step)."""
    bounds = [int(a) for a in args[:3]]
    if not bounds:
        return []
    return list(range(*bounds))


# Random
import random
_rng = random.Random()


def prim_rand():
    """Random float in [0, 1)."""
    return _rng.random()


def prim_rand_int(lo, hi):
    """Random integer in [lo, hi] inclusive."""
    return _rng.randint(int(lo), int(hi))


def prim_rand_range(lo, hi):
    """Random float in [lo, hi)."""
    return lo + _rng.random() * (hi - lo)


def prim_map_range(val, from_lo, from_hi, to_lo, to_hi):
    """Map value from one range to another (no clamping)."""
    if from_hi == from_lo:
        # Degenerate source range: pin to the target's low end.
        return to_lo
    t = (val - from_lo) / (from_hi - from_lo)
    return to_lo + t * (to_hi - to_lo)
# Core primitives dict — maps s-expression symbols to the implementations
# defined above. Several symbols are deliberate aliases ('len'/'length',
# 'fold'/'reduce', 'nil?'/'is-nil') so both Lisp- and Python-flavored
# names resolve to the same function.
PRIMITIVES = {
    # Arithmetic
    '+': prim_add,
    '-': prim_sub,
    '*': prim_mul,
    '/': prim_div,
    'mod': prim_mod,
    'abs': prim_abs,
    'min': prim_min,
    'max': prim_max,
    'round': prim_round,
    'floor': prim_floor,
    'ceil': prim_ceil,

    # Comparison
    '<': prim_lt,
    '>': prim_gt,
    '<=': prim_le,
    '>=': prim_ge,
    '=': prim_eq,
    '!=': prim_ne,

    # Logic
    'not': prim_not,
    'and': prim_and,
    'or': prim_or,

    # Data access
    'get': prim_get,
    'nth': prim_nth,
    'first': prim_first,
    'length': prim_length,
    'len': prim_length,
    'list': prim_list,

    # Type predicates
    'number?': prim_is_number,
    'string?': prim_is_string,
    'list?': prim_is_list,
    'dict?': prim_is_dict,
    'nil?': prim_is_nil,
    'is-nil': prim_is_nil,

    # Higher-order / iteration
    'reduce': prim_reduce,
    'fold': prim_reduce,
    'map': prim_map,
    'range': prim_range,

    # Random
    'rand': prim_rand,
    'rand-int': prim_rand_int,
    'rand-range': prim_rand_range,
    'map-range': prim_map_range,
}
+""" +import numpy as np +import cv2 +from PIL import Image, ImageDraw, ImageFont + + +# Default font (will be loaded lazily) +_default_font = None + + +def _get_default_font(size=16): + """Get default font, creating if needed.""" + global _default_font + if _default_font is None or _default_font.size != size: + try: + _default_font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", size) + except: + _default_font = ImageFont.load_default() + return _default_font + + +def prim_draw_char(img, char, x, y, font_size=16, color=None): + """Draw a single character at (x, y).""" + if color is None: + color = [255, 255, 255] + + pil_img = Image.fromarray(img) + draw = ImageDraw.Draw(pil_img) + font = _get_default_font(font_size) + draw.text((x, y), char, fill=tuple(color), font=font) + return np.array(pil_img) + + +def prim_draw_text(img, text, x, y, font_size=16, color=None): + """Draw text string at (x, y).""" + if color is None: + color = [255, 255, 255] + + pil_img = Image.fromarray(img) + draw = ImageDraw.Draw(pil_img) + font = _get_default_font(font_size) + draw.text((x, y), text, fill=tuple(color), font=font) + return np.array(pil_img) + + +def prim_fill_rect(img, x, y, w, h, color=None): + """Fill a rectangle with color.""" + if color is None: + color = [255, 255, 255] + + result = img.copy() + x, y, w, h = int(x), int(y), int(w), int(h) + result[y:y+h, x:x+w] = color + return result + + +def prim_draw_rect(img, x, y, w, h, color=None, thickness=1): + """Draw rectangle outline.""" + if color is None: + color = [255, 255, 255] + + result = img.copy() + cv2.rectangle(result, (int(x), int(y)), (int(x+w), int(y+h)), + tuple(color), thickness) + return result + + +def prim_draw_line(img, x1, y1, x2, y2, color=None, thickness=1): + """Draw a line from (x1, y1) to (x2, y2).""" + if color is None: + color = [255, 255, 255] + + result = img.copy() + cv2.line(result, (int(x1), int(y1)), (int(x2), int(y2)), + tuple(color), thickness) + return result + 
def prim_draw_circle(img, cx, cy, radius, color=None, thickness=1, fill=False):
    """Draw a circle outline, or a filled disc when fill is true."""
    color = [255, 255, 255] if color is None else color
    canvas = img.copy()
    # cv2 uses thickness == -1 to mean "filled".
    cv2.circle(canvas, (int(cx), int(cy)), int(radius), tuple(color),
               -1 if fill else thickness)
    return canvas


def prim_draw_ellipse(img, cx, cy, rx, ry, angle=0, color=None, thickness=1, fill=False):
    """Draw an ellipse with radii (rx, ry), rotated by angle degrees."""
    color = [255, 255, 255] if color is None else color
    canvas = img.copy()
    cv2.ellipse(canvas, (int(cx), int(cy)), (int(rx), int(ry)), angle,
                0, 360, tuple(color), -1 if fill else thickness)
    return canvas


def prim_draw_polygon(img, points, color=None, thickness=1, fill=False):
    """Draw a closed polygon from a list of [x, y] vertices."""
    color = [255, 255, 255] if color is None else color
    canvas = img.copy()
    vertices = np.array(points, dtype=np.int32).reshape((-1, 1, 2))
    if fill:
        cv2.fillPoly(canvas, [vertices], tuple(color))
    else:
        cv2.polylines(canvas, [vertices], True, tuple(color), thickness)
    return canvas


PRIMITIVES = {
    # Text
    'draw-char': prim_draw_char,
    'draw-text': prim_draw_text,

    # Rectangles
    'fill-rect': prim_fill_rect,
    'draw-rect': prim_draw_rect,

    # Lines and shapes
    'draw-line': prim_draw_line,
    'draw-circle': prim_draw_circle,
    'draw-ellipse': prim_draw_ellipse,
    'draw-polygon': prim_draw_polygon,
}
+""" +import numpy as np +import cv2 + + +def prim_blur(img, radius): + """Gaussian blur with given radius.""" + radius = max(1, int(radius)) + ksize = radius * 2 + 1 + return cv2.GaussianBlur(img, (ksize, ksize), 0) + + +def prim_box_blur(img, radius): + """Box blur with given radius.""" + radius = max(1, int(radius)) + ksize = radius * 2 + 1 + return cv2.blur(img, (ksize, ksize)) + + +def prim_median_blur(img, radius): + """Median blur (good for noise removal).""" + radius = max(1, int(radius)) + ksize = radius * 2 + 1 + return cv2.medianBlur(img, ksize) + + +def prim_bilateral(img, d=9, sigma_color=75, sigma_space=75): + """Bilateral filter (edge-preserving blur).""" + return cv2.bilateralFilter(img, d, sigma_color, sigma_space) + + +def prim_sharpen(img, amount=1.0): + """Sharpen image using unsharp mask.""" + blurred = cv2.GaussianBlur(img, (0, 0), 3) + return cv2.addWeighted(img, 1.0 + amount, blurred, -amount, 0) + + +def prim_edges(img, low=50, high=150): + """Canny edge detection.""" + gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) + edges = cv2.Canny(gray, low, high) + return cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB) + + +def prim_sobel(img, ksize=3): + """Sobel edge detection.""" + gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) + sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=ksize) + sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=ksize) + mag = np.sqrt(sobelx**2 + sobely**2) + mag = np.clip(mag, 0, 255).astype(np.uint8) + return cv2.cvtColor(mag, cv2.COLOR_GRAY2RGB) + + +def prim_laplacian(img, ksize=3): + """Laplacian edge detection.""" + gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) + lap = cv2.Laplacian(gray, cv2.CV_64F, ksize=ksize) + lap = np.abs(lap) + lap = np.clip(lap, 0, 255).astype(np.uint8) + return cv2.cvtColor(lap, cv2.COLOR_GRAY2RGB) + + +def prim_emboss(img): + """Emboss effect.""" + kernel = np.array([[-2, -1, 0], + [-1, 1, 1], + [ 0, 1, 2]]) + result = cv2.filter2D(img, -1, kernel) + return np.clip(result + 128, 0, 255).astype(np.uint8) + 
def prim_dilate(img, size=1):
    """Morphological dilation (grows bright regions) with a square kernel."""
    side = size * 2 + 1
    return cv2.dilate(img, np.ones((side, side), np.uint8))


def prim_erode(img, size=1):
    """Morphological erosion (shrinks bright regions) with a square kernel."""
    side = size * 2 + 1
    return cv2.erode(img, np.ones((side, side), np.uint8))


def prim_convolve(img, kernel):
    """Apply an arbitrary convolution kernel (nested list or array)."""
    return cv2.filter2D(img, -1, np.array(kernel, dtype=np.float32))


PRIMITIVES = {
    # Blur
    'blur': prim_blur,
    'box-blur': prim_box_blur,
    'median-blur': prim_median_blur,
    'bilateral': prim_bilateral,

    # Sharpen
    'sharpen': prim_sharpen,

    # Edges
    'edges': prim_edges,
    'sobel': prim_sobel,
    'laplacian': prim_laplacian,

    # Effects
    'emboss': prim_emboss,

    # Morphology
    'dilate': prim_dilate,
    'erode': prim_erode,

    # Custom
    'convolve': prim_convolve,
}
+""" +import numpy as np +import cv2 + + +def prim_translate(img, dx, dy): + """Translate image by (dx, dy) pixels.""" + h, w = img.shape[:2] + M = np.float32([[1, 0, dx], [0, 1, dy]]) + return cv2.warpAffine(img, M, (w, h)) + + +def prim_rotate(img, angle, cx=None, cy=None): + """Rotate image by angle degrees around center (cx, cy).""" + h, w = img.shape[:2] + if cx is None: + cx = w / 2 + if cy is None: + cy = h / 2 + M = cv2.getRotationMatrix2D((cx, cy), angle, 1.0) + return cv2.warpAffine(img, M, (w, h)) + + +def prim_scale(img, sx, sy, cx=None, cy=None): + """Scale image by (sx, sy) around center (cx, cy).""" + h, w = img.shape[:2] + if cx is None: + cx = w / 2 + if cy is None: + cy = h / 2 + + # Build transform matrix + M = np.float32([ + [sx, 0, cx * (1 - sx)], + [0, sy, cy * (1 - sy)] + ]) + return cv2.warpAffine(img, M, (w, h)) + + +def prim_flip_h(img): + """Flip image horizontally.""" + return cv2.flip(img, 1) + + +def prim_flip_v(img): + """Flip image vertically.""" + return cv2.flip(img, 0) + + +def prim_flip(img, direction="horizontal"): + """Flip image in given direction.""" + if direction in ("horizontal", "h"): + return prim_flip_h(img) + elif direction in ("vertical", "v"): + return prim_flip_v(img) + elif direction in ("both", "hv", "vh"): + return cv2.flip(img, -1) + return img + + +def prim_transpose(img): + """Transpose image (swap x and y).""" + return np.transpose(img, (1, 0, 2)) + + +def prim_remap(img, map_x, map_y): + """Remap image using coordinate maps.""" + return cv2.remap(img, map_x.astype(np.float32), + map_y.astype(np.float32), + cv2.INTER_LINEAR) + + +def prim_make_coords(w, h): + """Create coordinate grids for remapping.""" + x = np.arange(w, dtype=np.float32) + y = np.arange(h, dtype=np.float32) + map_x, map_y = np.meshgrid(x, y) + return (map_x, map_y) + + +def prim_perspective(img, src_pts, dst_pts): + """Apply perspective transform.""" + src = np.float32(src_pts) + dst = np.float32(dst_pts) + M = 
def prim_affine(img, src_pts, dst_pts):
    """Apply affine transform using 3 point pairs.

    cv2.getAffineTransform requires exactly three src/dst point pairs.
    """
    src = np.float32(src_pts)
    dst = np.float32(dst_pts)
    M = cv2.getAffineTransform(src, dst)
    h, w = img.shape[:2]
    return cv2.warpAffine(img, M, (w, h))


def _get_legacy_geometry_primitives():
    """Import geometry primitives from legacy primitives module.

    The import lives inside the function body; note that PRIMITIVES below
    still calls this at module load time, so the legacy module is imported
    whenever this library is loaded.
    """
    from sexp_effects.primitives import (
        prim_coords_x,
        prim_coords_y,
        prim_ripple_displace,
        prim_fisheye_displace,
        prim_kaleidoscope_displace,
    )
    return {
        'coords-x': prim_coords_x,
        'coords-y': prim_coords_y,
        'ripple-displace': prim_ripple_displace,
        'fisheye-displace': prim_fisheye_displace,
        'kaleidoscope-displace': prim_kaleidoscope_displace,
    }


# Symbol table exported to the interpreter for this library.
PRIMITIVES = {
    # Basic transforms
    'translate': prim_translate,
    'rotate-img': prim_rotate,
    'scale-img': prim_scale,

    # Flips
    'flip-h': prim_flip_h,
    'flip-v': prim_flip_v,
    'flip': prim_flip,
    'transpose': prim_transpose,

    # Remapping
    'remap': prim_remap,
    'make-coords': prim_make_coords,

    # Advanced transforms
    'perspective': prim_perspective,
    'affine': prim_affine,

    # Displace / coordinate ops (from legacy primitives)
    **_get_legacy_geometry_primitives(),
}
+""" +import numpy as np +import cv2 + + +def prim_width(img): + return img.shape[1] + + +def prim_height(img): + return img.shape[0] + + +def prim_make_image(w, h, color=None): + """Create a new image filled with color (default black).""" + if color is None: + color = [0, 0, 0] + img = np.zeros((h, w, 3), dtype=np.uint8) + img[:] = color + return img + + +def prim_copy(img): + return img.copy() + + +def prim_pixel(img, x, y): + """Get pixel color at (x, y) as [r, g, b].""" + h, w = img.shape[:2] + if 0 <= x < w and 0 <= y < h: + return list(img[int(y), int(x)]) + return [0, 0, 0] + + +def prim_set_pixel(img, x, y, color): + """Set pixel at (x, y) to color, returns modified image.""" + result = img.copy() + h, w = result.shape[:2] + if 0 <= x < w and 0 <= y < h: + result[int(y), int(x)] = color + return result + + +def prim_sample(img, x, y): + """Bilinear sample at float coordinates, returns [r, g, b] as floats.""" + h, w = img.shape[:2] + x = max(0, min(w - 1.001, x)) + y = max(0, min(h - 1.001, y)) + + x0, y0 = int(x), int(y) + x1, y1 = min(x0 + 1, w - 1), min(y0 + 1, h - 1) + fx, fy = x - x0, y - y0 + + c00 = img[y0, x0].astype(float) + c10 = img[y0, x1].astype(float) + c01 = img[y1, x0].astype(float) + c11 = img[y1, x1].astype(float) + + top = c00 * (1 - fx) + c10 * fx + bottom = c01 * (1 - fx) + c11 * fx + return list(top * (1 - fy) + bottom * fy) + + +def prim_channel(img, c): + """Extract single channel (0=R, 1=G, 2=B).""" + return img[:, :, c] + + +def prim_merge_channels(r, g, b): + """Merge three single-channel arrays into RGB image.""" + return np.stack([r, g, b], axis=2).astype(np.uint8) + + +def prim_resize(img, w, h, mode="linear"): + """Resize image to w x h.""" + interp = cv2.INTER_LINEAR + if mode == "nearest": + interp = cv2.INTER_NEAREST + elif mode == "cubic": + interp = cv2.INTER_CUBIC + elif mode == "area": + interp = cv2.INTER_AREA + return cv2.resize(img, (int(w), int(h)), interpolation=interp) + + +def prim_crop(img, x, y, w, h): + """Crop 
def prim_paste(dst, src, x, y):
    """Paste src onto dst at position (x, y).

    Returns a copy of dst. The pasted region is clipped on every side, so
    negative x/y paste a partial src starting from its interior, and a src
    extending past the right/bottom edge is truncated.
    """
    result = dst.copy()
    x, y = int(x), int(y)
    sh, sw = src.shape[:2]
    dh, dw = dst.shape[:2]

    # Clip to bounds: (sx1, sy1) is where reading starts in src,
    # (dx1, dy1) where writing starts in dst; (sx2, sy2) end the src read.
    sx1 = max(0, -x)
    sy1 = max(0, -y)
    dx1 = max(0, x)
    dy1 = max(0, y)
    sx2 = min(sw, dw - x)
    sy2 = min(sh, dh - y)

    # Only write when the clipped region is non-empty.
    if sx2 > sx1 and sy2 > sy1:
        result[dy1:dy1+(sy2-sy1), dx1:dx1+(sx2-sx1)] = src[sy1:sy2, sx1:sx2]

    return result


# Symbol table exported to the interpreter for this library.
PRIMITIVES = {
    # Dimensions
    'width': prim_width,
    'height': prim_height,

    # Creation
    'make-image': prim_make_image,
    'copy': prim_copy,

    # Pixel access
    'pixel': prim_pixel,
    'set-pixel': prim_set_pixel,
    'sample': prim_sample,

    # Channels
    'channel': prim_channel,
    'merge-channels': prim_merge_channels,

    # Geometry
    'resize': prim_resize,
    'crop': prim_crop,
    'paste': prim_paste,
}
+""" +import math +import random as rand_module + + +def prim_sin(x): + return math.sin(x) + + +def prim_cos(x): + return math.cos(x) + + +def prim_tan(x): + return math.tan(x) + + +def prim_asin(x): + return math.asin(x) + + +def prim_acos(x): + return math.acos(x) + + +def prim_atan(x): + return math.atan(x) + + +def prim_atan2(y, x): + return math.atan2(y, x) + + +def prim_sqrt(x): + return math.sqrt(x) + + +def prim_pow(x, y): + return math.pow(x, y) + + +def prim_exp(x): + return math.exp(x) + + +def prim_log(x, base=None): + if base is None: + return math.log(x) + return math.log(x, base) + + +def prim_abs(x): + return abs(x) + + +def prim_floor(x): + return math.floor(x) + + +def prim_ceil(x): + return math.ceil(x) + + +def prim_round(x): + return round(x) + + +def prim_min(*args): + if len(args) == 1 and hasattr(args[0], '__iter__'): + return min(args[0]) + return min(args) + + +def prim_max(*args): + if len(args) == 1 and hasattr(args[0], '__iter__'): + return max(args[0]) + return max(args) + + +def prim_clamp(x, lo, hi): + return max(lo, min(hi, x)) + + +def prim_lerp(a, b, t): + """Linear interpolation: a + (b - a) * t""" + return a + (b - a) * t + + +def prim_smoothstep(edge0, edge1, x): + """Smooth interpolation between 0 and 1.""" + t = prim_clamp((x - edge0) / (edge1 - edge0), 0.0, 1.0) + return t * t * (3 - 2 * t) + + +def prim_random(lo=0.0, hi=1.0): + return rand_module.uniform(lo, hi) + + +def prim_randint(lo, hi): + return rand_module.randint(lo, hi) + + +def prim_gaussian(mean=0.0, std=1.0): + return rand_module.gauss(mean, std) + + +def prim_sign(x): + if x > 0: + return 1 + elif x < 0: + return -1 + return 0 + + +def prim_fract(x): + """Fractional part of x.""" + return x - math.floor(x) + + +PRIMITIVES = { + # Trigonometry + 'sin': prim_sin, + 'cos': prim_cos, + 'tan': prim_tan, + 'asin': prim_asin, + 'acos': prim_acos, + 'atan': prim_atan, + 'atan2': prim_atan2, + + # Powers and roots + 'sqrt': prim_sqrt, + 'pow': prim_pow, + 'exp': 
prim_exp, + 'log': prim_log, + + # Rounding + 'abs': prim_abs, + 'floor': prim_floor, + 'ceil': prim_ceil, + 'round': prim_round, + 'sign': prim_sign, + 'fract': prim_fract, + + # Min/max/clamp + 'min': prim_min, + 'max': prim_max, + 'clamp': prim_clamp, + 'lerp': prim_lerp, + 'smoothstep': prim_smoothstep, + + # Random + 'random': prim_random, + 'randint': prim_randint, + 'gaussian': prim_gaussian, + + # Constants + 'pi': math.pi, + 'tau': math.tau, + 'e': math.e, +} diff --git a/sexp_effects/primitive_libs/streaming.py b/sexp_effects/primitive_libs/streaming.py new file mode 100644 index 0000000..9092087 --- /dev/null +++ b/sexp_effects/primitive_libs/streaming.py @@ -0,0 +1,462 @@ +""" +Streaming primitives for video/audio processing. + +These primitives handle video source reading and audio analysis, +keeping the interpreter completely generic. + +GPU Acceleration: +- Set STREAMING_GPU_PERSIST=1 to output CuPy arrays (frames stay on GPU) +- Hardware video decoding (NVDEC) is used when available +- Dramatically improves performance on GPU nodes +""" + +import os +import numpy as np +import subprocess +import json +from pathlib import Path + +# Try to import CuPy for GPU acceleration +try: + import cupy as cp + CUPY_AVAILABLE = True +except ImportError: + cp = None + CUPY_AVAILABLE = False + +# GPU persistence mode - output CuPy arrays instead of numpy +# Disabled by default until all primitives support GPU frames +GPU_PERSIST = os.environ.get("STREAMING_GPU_PERSIST", "0") == "1" and CUPY_AVAILABLE + +# Check for hardware decode support (cached) +_HWDEC_AVAILABLE = None + + +def _check_hwdec(): + """Check if NVIDIA hardware decode is available.""" + global _HWDEC_AVAILABLE + if _HWDEC_AVAILABLE is not None: + return _HWDEC_AVAILABLE + + try: + result = subprocess.run(["nvidia-smi"], capture_output=True, timeout=2) + if result.returncode != 0: + _HWDEC_AVAILABLE = False + return False + result = subprocess.run(["ffmpeg", "-hwaccels"], capture_output=True, 
text=True, timeout=5) + _HWDEC_AVAILABLE = "cuda" in result.stdout + except Exception: + _HWDEC_AVAILABLE = False + + return _HWDEC_AVAILABLE + + +class VideoSource: + """Video source with persistent streaming pipe for fast sequential reads.""" + + def __init__(self, path: str, fps: float = 30): + self.path = Path(path) + self.fps = fps # Output fps for the stream + self._frame_size = None + self._duration = None + self._proc = None # Persistent ffmpeg process + self._stream_time = 0.0 # Current position in stream + self._frame_time = 1.0 / fps # Time per frame at output fps + self._last_read_time = -1 + self._cached_frame = None + + # Check if file exists + if not self.path.exists(): + raise FileNotFoundError(f"Video file not found: {self.path}") + + # Get video info + cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", + "-show_streams", str(self.path)] + result = subprocess.run(cmd, capture_output=True, text=True) + if result.returncode != 0: + raise RuntimeError(f"Failed to probe video '{self.path}': {result.stderr}") + try: + info = json.loads(result.stdout) + except json.JSONDecodeError: + raise RuntimeError(f"Invalid video file or ffprobe failed: {self.path}") + + for stream in info.get("streams", []): + if stream.get("codec_type") == "video": + self._frame_size = (stream.get("width", 720), stream.get("height", 720)) + # Try direct duration field first + if "duration" in stream: + self._duration = float(stream["duration"]) + # Fall back to tags.DURATION (webm format: "00:01:00.124000000") + elif "tags" in stream and "DURATION" in stream["tags"]: + dur_str = stream["tags"]["DURATION"] + parts = dur_str.split(":") + if len(parts) == 3: + h, m, s = parts + self._duration = int(h) * 3600 + int(m) * 60 + float(s) + break + + # Fallback: check format duration if stream duration not found + if self._duration is None and "format" in info and "duration" in info["format"]: + self._duration = float(info["format"]["duration"]) + + if not self._frame_size: + 
self._frame_size = (720, 720) + + import sys + print(f"VideoSource: {self.path.name} duration={self._duration} size={self._frame_size}", file=sys.stderr) + + def _start_stream(self, seek_time: float = 0): + """Start or restart the ffmpeg streaming process. + + Uses NVIDIA hardware decoding (NVDEC) when available for better performance. + """ + if self._proc: + self._proc.kill() + self._proc = None + + # Check file exists before trying to open + if not self.path.exists(): + raise FileNotFoundError(f"Video file not found: {self.path}") + + w, h = self._frame_size + + # Build ffmpeg command with optional hardware decode + cmd = ["ffmpeg", "-v", "error"] + + # Use hardware decode if available (significantly faster) + if _check_hwdec(): + cmd.extend(["-hwaccel", "cuda"]) + + cmd.extend([ + "-ss", f"{seek_time:.3f}", + "-i", str(self.path), + "-f", "rawvideo", "-pix_fmt", "rgb24", + "-s", f"{w}x{h}", + "-r", str(self.fps), # Output at specified fps + "-" + ]) + + self._proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + self._stream_time = seek_time + + # Check if process started successfully by reading first bit of stderr + import select + import sys + readable, _, _ = select.select([self._proc.stderr], [], [], 0.5) + if readable: + err = self._proc.stderr.read(4096).decode('utf-8', errors='ignore') + if err: + print(f"ffmpeg error for {self.path.name}: {err}", file=sys.stderr) + + def _read_frame_from_stream(self): + """Read one frame from the stream. + + Returns CuPy array if GPU_PERSIST is enabled, numpy array otherwise. 
+ """ + w, h = self._frame_size + frame_size = w * h * 3 + + if not self._proc or self._proc.poll() is not None: + return None + + data = self._proc.stdout.read(frame_size) + if len(data) < frame_size: + return None + + frame = np.frombuffer(data, dtype=np.uint8).reshape((h, w, 3)).copy() + + # Transfer to GPU if persistence mode enabled + if GPU_PERSIST: + return cp.asarray(frame) + return frame + + def read(self) -> np.ndarray: + """Read frame (uses last cached or t=0).""" + if self._cached_frame is not None: + return self._cached_frame + return self.read_at(0) + + def read_at(self, t: float) -> np.ndarray: + """Read frame at specific time using streaming with smart seeking.""" + # Cache check - return same frame for same time + if t == self._last_read_time and self._cached_frame is not None: + return self._cached_frame + + w, h = self._frame_size + + # Loop time if video is shorter + seek_time = t + if self._duration and self._duration > 0: + seek_time = t % self._duration + # If we're within 0.1s of the end, wrap to beginning to avoid EOF issues + if seek_time > self._duration - 0.1: + seek_time = 0.0 + + # Decide whether to seek or continue streaming + # Seek if: no stream, going backwards (more than 1 frame), or jumping more than 2 seconds ahead + # Allow small backward tolerance to handle floating point and timing jitter + need_seek = ( + self._proc is None or + self._proc.poll() is not None or + seek_time < self._stream_time - self._frame_time or # More than 1 frame backward + seek_time > self._stream_time + 2.0 + ) + + if need_seek: + import sys + reason = "no proc" if self._proc is None else "proc dead" if self._proc.poll() is not None else "backward" if seek_time < self._stream_time else "jump" + print(f"SEEK {self.path.name}: t={t:.4f} seek={seek_time:.4f} stream={self._stream_time:.4f} ({reason})", file=sys.stderr) + self._start_stream(seek_time) + + # Skip frames to reach target time + skip_retries = 0 + while self._stream_time + self._frame_time <= 
seek_time: + frame = self._read_frame_from_stream() + if frame is None: + # Stream ended or failed - restart from seek point + import time + skip_retries += 1 + if skip_retries > 3: + # Give up skipping, just start fresh at seek_time + self._start_stream(seek_time) + time.sleep(0.1) + break + self._start_stream(seek_time) + time.sleep(0.05) + continue + self._stream_time += self._frame_time + skip_retries = 0 # Reset on successful read + + # Read the target frame with retry logic + frame = None + max_retries = 3 + for attempt in range(max_retries): + frame = self._read_frame_from_stream() + if frame is not None: + break + + # Stream failed - try restarting + import sys + import time + print(f"RETRY {self.path.name}: attempt {attempt+1}/{max_retries} at t={t:.2f}", file=sys.stderr) + + # Check for ffmpeg errors + if self._proc and self._proc.stderr: + try: + import select + readable, _, _ = select.select([self._proc.stderr], [], [], 0.1) + if readable: + err = self._proc.stderr.read(4096).decode('utf-8', errors='ignore') + if err: + print(f"ffmpeg error: {err}", file=sys.stderr) + except: + pass + + # Wait a bit and restart + time.sleep(0.1) + self._start_stream(seek_time) + + # Give ffmpeg time to start + time.sleep(0.1) + + if frame is None: + import sys + raise RuntimeError(f"Failed to read video frame from {self.path.name} at t={t:.2f} after {max_retries} retries") + else: + self._stream_time += self._frame_time + + self._last_read_time = t + self._cached_frame = frame + return frame + + def skip(self): + """No-op for seek-based reading.""" + pass + + @property + def size(self): + return self._frame_size + + def close(self): + if self._proc: + self._proc.kill() + self._proc = None + + +class AudioAnalyzer: + """Audio analyzer for energy and beat detection.""" + + def __init__(self, path: str, sample_rate: int = 22050): + self.path = Path(path) + self.sample_rate = sample_rate + + # Check if file exists + if not self.path.exists(): + raise 
            # NOTE(review): continuation of AudioAnalyzer.__init__ — the `raise`
            # for this FileNotFoundError sits on the preceding (out-of-view)
            # line of this hunk.
            FileNotFoundError(f"Audio file not found: {self.path}")

        # Decode the whole file to mono float32 PCM at `sample_rate` via ffmpeg.
        cmd = ["ffmpeg", "-v", "error", "-i", str(self.path),
               "-f", "f32le", "-ac", "1", "-ar", str(sample_rate), "-"]
        result = subprocess.run(cmd, capture_output=True)
        if result.returncode != 0:
            raise RuntimeError(f"Failed to load audio '{self.path}': {result.stderr.decode()}")
        self._audio = np.frombuffer(result.stdout, dtype=np.float32)
        if len(self._audio) == 0:
            raise RuntimeError(f"Audio file is empty or invalid: {self.path}")

        # Duration comes from ffprobe container metadata (60 s fallback when absent).
        cmd = ["ffprobe", "-v", "quiet", "-print_format", "json",
               "-show_format", str(self.path)]
        result = subprocess.run(cmd, capture_output=True, text=True)
        if result.returncode != 0:
            raise RuntimeError(f"Failed to probe audio '{self.path}': {result.stderr}")
        info = json.loads(result.stdout)
        self.duration = float(info.get("format", {}).get("duration", 60))

        # Beat detection state (spectral-flux onset detector, see get_beat)
        self._flux_history = []
        self._last_beat_time = -1
        self._beat_count = 0
        self._last_beat_check_time = -1
        # Cache beat result for current time (so multiple scans see same result)
        self._beat_cache_time = -1
        self._beat_cache_result = False

    def get_energy(self, t: float) -> float:
        """Get energy level at time t (0-1).

        RMS over a ~1024-sample window centered on t, scaled by 3 and
        clamped to 1.0. Returns 0.0 when the window falls outside the audio.
        """
        idx = int(t * self.sample_rate)
        start = max(0, idx - 512)
        end = min(len(self._audio), idx + 512)
        if start >= end:
            return 0.0
        return min(1.0, np.sqrt(np.mean(self._audio[start:end] ** 2)) * 3.0)

    def get_beat(self, t: float) -> bool:
        """Check if there's a beat at time t.

        Spectral-flux onset detection: compares the magnitude spectrum of a
        2048-sample window at t with a window 512 samples earlier, and flags
        a beat when the positive flux exceeds mean + 1.5*std of recent flux
        values, with a 0.1 s refractory period between beats. The result is
        cached per exact time value so multiple scans of the same frame agree.
        """
        # Return cached result if same time (multiple scans query same frame)
        if t == self._beat_cache_time:
            return self._beat_cache_result

        idx = int(t * self.sample_rate)
        size = 2048

        start, end = max(0, idx - size//2), min(len(self._audio), idx + size//2)
        if end - start < size/2:
            # Not enough audio around t for a meaningful spectrum.
            self._beat_cache_time = t
            self._beat_cache_result = False
            return False
        curr = self._audio[start:end]

        # Previous window: same span shifted 512 samples earlier.
        pstart, pend = max(0, start - 512), max(0, end - 512)
        if pend <= pstart:
            self._beat_cache_time = t
            self._beat_cache_result = False
            return False
        prev = self._audio[pstart:pend]

        # Hann-windowed magnitude spectra of both windows.
        curr_spec = np.abs(np.fft.rfft(curr * np.hanning(len(curr))))
        prev_spec = np.abs(np.fft.rfft(prev * np.hanning(len(prev))))

        # Positive spectral flux, normalized by bin count.
        n = min(len(curr_spec), len(prev_spec))
        flux = np.sum(np.maximum(0, curr_spec[:n] - prev_spec[:n])) / (n + 1)

        # Keep a rolling history (last 50 samples) for adaptive thresholding.
        self._flux_history.append((t, flux))
        if len(self._flux_history) > 50:
            self._flux_history = self._flux_history[-50:]

        if len(self._flux_history) < 5:
            # Too little history for a stable threshold yet.
            self._beat_cache_time = t
            self._beat_cache_result = False
            return False

        recent = [f for _, f in self._flux_history[-20:]]
        threshold = np.mean(recent) + 1.5 * np.std(recent)

        # 0.1 s refractory period suppresses double-triggering on one onset.
        is_beat = flux > threshold and (t - self._last_beat_time) > 0.1
        if is_beat:
            self._last_beat_time = t
            # Only count each beat once even if times are re-queried out of order.
            if t > self._last_beat_check_time:
                self._beat_count += 1
                self._last_beat_check_time = t

        # Cache result for this time
        self._beat_cache_time = t
        self._beat_cache_result = is_beat
        return is_beat

    def get_beat_count(self, t: float) -> int:
        """Get cumulative beat count up to time t."""
        # Ensure beat detection has run up to this time
        self.get_beat(t)
        return self._beat_count


# === Primitives ===

def prim_make_video_source(path: str, fps: float = 30):
    """Create a video source from a file path."""
    return VideoSource(path, fps)


def prim_source_read(source: VideoSource, t: float = None):
    """Read a frame from a video source (timestamped when t is given)."""
    import sys
    if t is not None:
        frame = source.read_at(t)
        # NOTE(review): leftover debug logging to stderr. The guard matches
        # every frame whose tenths-of-a-second digit is 0 (a burst of frames
        # roughly once per second), not exactly one print per second.
        if int(t * 10) % 10 == 0:
            print(f"READ {source.path.name}: t={t:.2f} stream={source._stream_time:.2f}", file=sys.stderr)
        return frame
    return source.read()


def prim_source_skip(source: VideoSource):
    """Skip a frame (keep pipe in sync)."""
    source.skip()


def prim_source_size(source: VideoSource):
    """Get (width, height) of source."""
    return source.size
def prim_make_audio_analyzer(path: str):
    """Create an audio analyzer from a file path.

    Loading decodes the whole file up front; see AudioAnalyzer for details.
    """
    return AudioAnalyzer(path)


def prim_audio_energy(analyzer: AudioAnalyzer, t: float) -> float:
    """Get energy level (0-1) at time t."""
    return analyzer.get_energy(t)


def prim_audio_beat(analyzer: AudioAnalyzer, t: float) -> bool:
    """Check if there's a beat at time t."""
    return analyzer.get_beat(t)


def prim_audio_beat_count(analyzer: AudioAnalyzer, t: float) -> int:
    """Get cumulative beat count up to time t."""
    return analyzer.get_beat_count(t)


def prim_audio_duration(analyzer: AudioAnalyzer) -> float:
    """Get audio duration in seconds."""
    return analyzer.duration


# Export primitives: s-expression operator name -> Python implementation.
PRIMITIVES = {
    # Video source
    'make-video-source': prim_make_video_source,
    'source-read': prim_source_read,
    'source-skip': prim_source_skip,
    'source-size': prim_source_size,

    # Audio analyzer
    'make-audio-analyzer': prim_make_audio_analyzer,
    'audio-energy': prim_audio_energy,
    'audio-beat': prim_audio_beat,
    'audio-beat-count': prim_audio_beat_count,
    'audio-duration': prim_audio_duration,
}
diff --git a/sexp_effects/primitives.py b/sexp_effects/primitives.py
new file mode 100644
index 0000000..8bdca5c
--- /dev/null
+++ b/sexp_effects/primitives.py
@@ -0,0 +1,3043 @@
+"""
+Safe Primitives for S-Expression Effects
+
+These are the building blocks that user-defined effects can use.
+All primitives operate only on image data - no filesystem, network, etc.
@dataclass
class ZoneContext:
    """Per-cell context handed to zone callbacks in the ASCII-art grid."""
    row: int
    col: int
    row_norm: float  # row position normalized to 0-1
    col_norm: float  # column position normalized to 0-1
    luminance: float  # cell luminance, 0-1
    saturation: float  # cell saturation, 0-1
    hue: float  # cell hue in degrees, 0-360
    r: float  # red component, 0-1
    g: float  # green component, 0-1
    b: float  # blue component, 0-1


class DeterministicRNG:
    """Reproducible random source backed by a seeded numpy RandomState."""

    def __init__(self, seed: int = 42):
        self._rng = np.random.RandomState(seed)

    def random(self, low: float = 0, high: float = 1) -> float:
        """Uniform float drawn from [low, high)."""
        return self._rng.uniform(low, high)

    def randint(self, low: int, high: int) -> int:
        """Uniform integer drawn from [low, high] — both ends inclusive."""
        return self._rng.randint(low, high + 1)

    def gaussian(self, mean: float = 0, std: float = 1) -> float:
        """Normally-distributed float with the given mean and std."""
        return self._rng.normal(mean, std)


# Global RNG instance (reset per frame with seed param)
_rng = DeterministicRNG()


def reset_rng(seed: int):
    """Replace the global RNG with a freshly-seeded instance."""
    global _rng
    _rng = DeterministicRNG(seed)
"firebrick": (178, 34, 34), + "crimson": (220, 20, 60), + "indianred": (205, 92, 92), + "lightcoral": (240, 128, 128), + "salmon": (250, 128, 114), + "darksalmon": (233, 150, 122), + "lightsalmon": (255, 160, 122), + "tomato": (255, 99, 71), + "orangered": (255, 69, 0), + "coral": (255, 127, 80), + + # Oranges + "orange": (255, 165, 0), + "darkorange": (255, 140, 0), + + # Yellows + "gold": (255, 215, 0), + "lightyellow": (255, 255, 224), + "lemonchiffon": (255, 250, 205), + "papayawhip": (255, 239, 213), + "moccasin": (255, 228, 181), + "peachpuff": (255, 218, 185), + "palegoldenrod": (238, 232, 170), + "khaki": (240, 230, 140), + "darkkhaki": (189, 183, 107), + + # Greens + "lime": (0, 255, 0), + "limegreen": (50, 205, 50), + "forestgreen": (34, 139, 34), + "darkgreen": (0, 100, 0), + "seagreen": (46, 139, 87), + "mediumseagreen": (60, 179, 113), + "springgreen": (0, 255, 127), + "mediumspringgreen": (0, 250, 154), + "lightgreen": (144, 238, 144), + "palegreen": (152, 251, 152), + "darkseagreen": (143, 188, 143), + "greenyellow": (173, 255, 47), + "chartreuse": (127, 255, 0), + "lawngreen": (124, 252, 0), + "olivedrab": (107, 142, 35), + "olive": (128, 128, 0), + "darkolivegreen": (85, 107, 47), + "yellowgreen": (154, 205, 50), + + # Cyans/Teals + "aqua": (0, 255, 255), + "teal": (0, 128, 128), + "darkcyan": (0, 139, 139), + "lightcyan": (224, 255, 255), + "aquamarine": (127, 255, 212), + "mediumaquamarine": (102, 205, 170), + "paleturquoise": (175, 238, 238), + "turquoise": (64, 224, 208), + "mediumturquoise": (72, 209, 204), + "darkturquoise": (0, 206, 209), + "cadetblue": (95, 158, 160), + + # Blues + "navy": (0, 0, 128), + "darkblue": (0, 0, 139), + "mediumblue": (0, 0, 205), + "royalblue": (65, 105, 225), + "cornflowerblue": (100, 149, 237), + "steelblue": (70, 130, 180), + "dodgerblue": (30, 144, 255), + "deepskyblue": (0, 191, 255), + "lightskyblue": (135, 206, 250), + "skyblue": (135, 206, 235), + "lightsteelblue": (176, 196, 222), + "lightblue": (173, 
216, 230), + "powderblue": (176, 224, 230), + "slateblue": (106, 90, 205), + "mediumslateblue": (123, 104, 238), + "darkslateblue": (72, 61, 139), + "midnightblue": (25, 25, 112), + + # Purples/Violets + "purple": (128, 0, 128), + "darkmagenta": (139, 0, 139), + "darkviolet": (148, 0, 211), + "blueviolet": (138, 43, 226), + "darkorchid": (153, 50, 204), + "mediumorchid": (186, 85, 211), + "orchid": (218, 112, 214), + "violet": (238, 130, 238), + "plum": (221, 160, 221), + "thistle": (216, 191, 216), + "lavender": (230, 230, 250), + "indigo": (75, 0, 130), + "mediumpurple": (147, 112, 219), + "fuchsia": (255, 0, 255), + "hotpink": (255, 105, 180), + "deeppink": (255, 20, 147), + "mediumvioletred": (199, 21, 133), + "palevioletred": (219, 112, 147), + + # Pinks + "pink": (255, 192, 203), + "lightpink": (255, 182, 193), + "mistyrose": (255, 228, 225), + + # Browns + "brown": (165, 42, 42), + "maroon": (128, 0, 0), + "saddlebrown": (139, 69, 19), + "sienna": (160, 82, 45), + "chocolate": (210, 105, 30), + "peru": (205, 133, 63), + "sandybrown": (244, 164, 96), + "burlywood": (222, 184, 135), + "tan": (210, 180, 140), + "rosybrown": (188, 143, 143), + "goldenrod": (218, 165, 32), + "darkgoldenrod": (184, 134, 11), + + # Whites + "snow": (255, 250, 250), + "honeydew": (240, 255, 240), + "mintcream": (245, 255, 250), + "azure": (240, 255, 255), + "aliceblue": (240, 248, 255), + "ghostwhite": (248, 248, 255), + "whitesmoke": (245, 245, 245), + "seashell": (255, 245, 238), + "beige": (245, 245, 220), + "oldlace": (253, 245, 230), + "floralwhite": (255, 250, 240), + "ivory": (255, 255, 240), + "antiquewhite": (250, 235, 215), + "linen": (250, 240, 230), + "lavenderblush": (255, 240, 245), + "wheat": (245, 222, 179), + "cornsilk": (255, 248, 220), + "blanchedalmond": (255, 235, 205), + "bisque": (255, 228, 196), + "navajowhite": (255, 222, 173), + + # Special + "transparent": (0, 0, 0), # Note: no alpha support, just black +} + + +def parse_color(color_spec: str) -> 
def parse_color(color_spec: str) -> Optional[Tuple[int, int, int]]:
    """Resolve a color description to an (r, g, b) tuple.

    Accepts X11-style names ("red", "navy", ...), hex forms ("#f00",
    "#FF0000", "0xFF0000", or bare "ff0000"/"f00"), and the special modes
    "color" / "mono" / "invert", which deliberately map to None so callers
    can handle them separately. Anything unrecognized also yields None
    (callers fall back to the original colors).
    """
    if color_spec is None:
        return None

    spec = str(color_spec).strip().lower()

    # Special rendering modes are resolved by the caller, not here.
    if spec in ("color", "mono", "invert"):
        return None

    if spec in NAMED_COLORS:
        return NAMED_COLORS[spec]

    # Normalize the hex spellings down to bare digits.
    digits = None
    if spec.startswith("#"):
        digits = spec[1:]
    elif spec.startswith("0x"):
        digits = spec[2:]
    elif len(spec) in (3, 6) and all(ch in "0123456789abcdef" for ch in spec):
        digits = spec

    if digits:
        try:
            if len(digits) == 3:
                # Short form: #RGB expands to #RRGGBB
                return (int(digits[0] * 2, 16),
                        int(digits[1] * 2, 16),
                        int(digits[2] * 2, 16))
            elif len(digits) == 6:
                return (int(digits[0:2], 16),
                        int(digits[2:4], 16),
                        int(digits[4:6], 16))
        except ValueError:
            pass

    # Unknown color: None tells the caller to keep original colors.
    return None


# =============================================================================
# Image Primitives
# =============================================================================

def prim_width(img: np.ndarray) -> int:
    """Width of the image in pixels."""
    return img.shape[1]


def prim_height(img: np.ndarray) -> int:
    """Height of the image in pixels."""
    return img.shape[0]


def prim_make_image(w: int, h: int, color: List[int]) -> np.ndarray:
    """Allocate an h x w RGB canvas, pre-filled with color when given."""
    canvas = np.zeros((int(h), int(w), 3), dtype=np.uint8)
    if color:
        canvas[:, :] = color[:3]
    return canvas


def prim_copy(img: np.ndarray) -> np.ndarray:
    """Deep copy of an image."""
    return img.copy()
def prim_pixel(img: np.ndarray, x: int, y: int) -> List[int]:
    """Read pixel (x, y) as [r, g, b]; out-of-bounds reads give black."""
    h, w = img.shape[:2]
    xi, yi = int(x), int(y)
    if not (0 <= xi < w and 0 <= yi < h):
        return [0, 0, 0]
    return list(img[yi, xi])


def prim_set_pixel(img: np.ndarray, x: int, y: int, color: List[int]) -> np.ndarray:
    """Write pixel (x, y) in place; out-of-bounds writes are ignored. Returns img."""
    h, w = img.shape[:2]
    xi, yi = int(x), int(y)
    if 0 <= xi < w and 0 <= yi < h:
        img[yi, xi] = color[:3]
    return img


def prim_sample(img: np.ndarray, x: float, y: float) -> List[float]:
    """Bilinearly interpolated color at float coordinates (x, y)."""
    h, w = img.shape[:2]
    x = np.clip(x, 0, w - 1)
    y = np.clip(y, 0, h - 1)

    x0, y0 = int(x), int(y)
    x1, y1 = min(x0 + 1, w - 1), min(y0 + 1, h - 1)
    fx, fy = x - x0, y - y0

    # Weighted sum of the four surrounding texels.
    c00 = img[y0, x0].astype(float)
    c10 = img[y0, x1].astype(float)
    c01 = img[y1, x0].astype(float)
    c11 = img[y1, x1].astype(float)

    mixed = (c00 * (1 - fx) * (1 - fy)
             + c10 * fx * (1 - fy)
             + c01 * (1 - fx) * fy
             + c11 * fx * fy)
    return list(mixed)


def prim_channel(img: np.ndarray, c: int) -> np.ndarray:
    """Copy of one color plane as a 2D array."""
    return img[:, :, int(c)].copy()


def prim_merge_channels(r: np.ndarray, g: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Stack three 2D planes back into an RGB image."""
    return np.stack([r, g, b], axis=-1).astype(np.uint8)


def prim_resize(img: np.ndarray, w: int, h: int, mode: str = "linear") -> np.ndarray:
    """Resize image. Mode: linear, nearest, area."""
    w, h = int(w), int(h)
    if w < 1 or h < 1:
        # Degenerate target: hand back the input untouched.
        return img
    interp = {
        "linear": cv2.INTER_LINEAR,
        "nearest": cv2.INTER_NEAREST,
        "area": cv2.INTER_AREA,
    }.get(mode, cv2.INTER_LINEAR)
    return cv2.resize(img, (w, h), interpolation=interp)
def prim_crop(img: np.ndarray, x: int, y: int, w: int, h: int) -> np.ndarray:
    """Copy a rectangular region; the rect is clamped to the image bounds."""
    ih, iw = img.shape[:2]
    x, y, w, h = int(x), int(y), int(w), int(h)
    x = max(0, min(x, iw))
    y = max(0, min(y, ih))
    w = max(0, min(w, iw - x))
    h = max(0, min(h, ih - y))
    return img[y:y + h, x:x + w].copy()


def prim_paste(dst: np.ndarray, src: np.ndarray, x: int, y: int) -> np.ndarray:
    """Blit src into dst at (x, y), clipping at the edges. Mutates dst."""
    dh, dw = dst.shape[:2]
    sh, sw = src.shape[:2]
    x, y = int(x), int(y)

    # Source sub-rect that actually lands inside dst.
    sx1, sy1 = max(0, -x), max(0, -y)
    sx2, sy2 = min(sw, dw - x), min(sh, dh - y)

    # Matching destination sub-rect.
    dx1, dy1 = max(0, x), max(0, y)
    dx2 = dx1 + (sx2 - sx1)
    dy2 = dy1 + (sy2 - sy1)

    if dx2 > dx1 and dy2 > dy1:
        dst[dy1:dy2, dx1:dx2] = src[sy1:sy2, sx1:sx2]

    return dst


# =============================================================================
# Color Primitives
# =============================================================================

def prim_rgb(r: float, g: float, b: float) -> List[int]:
    """Build an [r, g, b] color, each component clipped to 0-255."""
    return [int(np.clip(r, 0, 255)),
            int(np.clip(g, 0, 255)),
            int(np.clip(b, 0, 255))]


def prim_red(c: List[int]) -> int:
    """Red component, or 0 for an empty color."""
    if not c:
        return 0
    return c[0]


def prim_green(c: List[int]) -> int:
    """Green component, or 0 when absent."""
    if len(c) > 1:
        return c[1]
    return 0


def prim_blue(c: List[int]) -> int:
    """Blue component, or 0 when absent."""
    if len(c) > 2:
        return c[2]
    return 0


def prim_luminance(c: List[int]) -> float:
    """Rec.601 luma of a color (grayscale value, same 0-255 scale)."""
    if not c:
        return 0
    return 0.299 * c[0] + 0.587 * c[1] + 0.114 * c[2]


def prim_rgb_to_hsv(c: List[int]) -> List[float]:
    """RGB (0-255) to [hue 0-360, saturation 0-100, value 0-100]."""
    r, g, b = c[0] / 255, c[1] / 255, c[2] / 255
    mx, mn = max(r, g, b), min(r, g, b)
    diff = mx - mn

    # Hue depends on which channel dominates.
    if diff == 0:
        h = 0
    elif mx == r:
        h = (60 * ((g - b) / diff) + 360) % 360
    elif mx == g:
        h = (60 * ((b - r) / diff) + 120) % 360
    else:
        h = (60 * ((r - g) / diff) + 240) % 360

    s = 0 if mx == 0 else diff / mx
    v = mx

    return [h, s * 100, v * 100]
def prim_hsv_to_rgb(hsv: List[float]) -> List[int]:
    """[hue 0-360, sat 0-100, val 0-100] back to RGB in 0-255."""
    h, s, v = hsv[0], hsv[1] / 100, hsv[2] / 100
    c = v * s
    x = c * (1 - abs((h / 60) % 2 - 1))
    m = v - c

    # Pick the sector of the hue wheel.
    if h < 60:
        r, g, b = c, x, 0
    elif h < 120:
        r, g, b = x, c, 0
    elif h < 180:
        r, g, b = 0, c, x
    elif h < 240:
        r, g, b = 0, x, c
    elif h < 300:
        r, g, b = x, 0, c
    else:
        r, g, b = c, 0, x

    return [int((r + m) * 255), int((g + m) * 255), int((b + m) * 255)]


def prim_blend_color(c1: List[int], c2: List[int], alpha: float) -> List[int]:
    """Mix two colors; alpha=0 gives c1, alpha=1 gives c2 (clamped)."""
    alpha = np.clip(alpha, 0, 1)
    return [int(c1[i] * (1 - alpha) + c2[i] * alpha) for i in range(3)]


def prim_average_color(img: np.ndarray) -> List[int]:
    """Mean color of an image or region as [r, g, b]."""
    return [int(x) for x in img.mean(axis=(0, 1))]


# =============================================================================
# Image Operations (Bulk)
# =============================================================================

def prim_map_pixels(img: np.ndarray, fn: Callable) -> np.ndarray:
    """Per-pixel map: fn(x, y, [r,g,b]) -> [r,g,b] or None to keep the pixel."""
    result = img.copy()
    h, w = img.shape[:2]
    for py in range(h):
        for px in range(w):
            replacement = fn(px, py, list(img[py, px]))
            if replacement is not None:
                result[py, px] = replacement[:3]
    return result


def prim_map_rows(img: np.ndarray, fn: Callable) -> np.ndarray:
    """Per-row map: fn(y, row) -> row or None to keep the row."""
    result = img.copy()
    for py in range(img.shape[0]):
        replacement = fn(py, img[py].copy())
        if replacement is not None:
            result[py] = replacement
    return result
def prim_for_grid(img: np.ndarray, cell_size: int, fn: Callable) -> np.ndarray:
    """Visit each full grid cell: fn(gx, gy, cell_view) for side effects.

    Cells are views into img, so fn may mutate them in place. Partial cells
    at the right/bottom edges are not visited. Returns img.
    """
    cell_size = max(1, int(cell_size))
    h, w = img.shape[:2]
    rows = h // cell_size
    cols = w // cell_size

    for gy in range(rows):
        for gx in range(cols):
            top, left = gy * cell_size, gx * cell_size
            fn(gx, gy, img[top:top + cell_size, left:left + cell_size])

    return img


def prim_fold_pixels(img: np.ndarray, init: Any, fn: Callable) -> Any:
    """Reduce over all pixels in row-major order: fn(acc, x, y, color) -> acc."""
    acc = init
    h, w = img.shape[:2]
    for py in range(h):
        for px in range(w):
            acc = fn(acc, px, py, list(img[py, px]))
    return acc


# =============================================================================
# Convolution / Filters
# =============================================================================

def prim_convolve(img: np.ndarray, kernel: List[List[float]]) -> np.ndarray:
    """Run an arbitrary 2D convolution kernel over the image."""
    return cv2.filter2D(img, -1, np.array(kernel, dtype=np.float32))


def prim_blur(img: np.ndarray, radius: int) -> np.ndarray:
    """Gaussian blur; radius is clamped to at least 1."""
    radius = max(1, int(radius))
    ksize = radius * 2 + 1
    return cv2.GaussianBlur(img, (ksize, ksize), 0)


def prim_box_blur(img: np.ndarray, radius: int) -> np.ndarray:
    """Box blur (cheaper than Gaussian); radius clamped to at least 1."""
    radius = max(1, int(radius))
    ksize = radius * 2 + 1
    return cv2.blur(img, (ksize, ksize))


def prim_edges(img: np.ndarray, low: int = 50, high: int = 150) -> np.ndarray:
    """Canny edges rendered back as a 3-channel image."""
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    edges = cv2.Canny(gray, int(low), int(high))
    return cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)


def prim_sobel(img: np.ndarray) -> np.ndarray:
    """Sobel gradient magnitude rendered back as a 3-channel image."""
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY).astype(np.float32)
    sx = cv2.Sobel(gray, cv2.CV_32F, 1, 0)
    sy = cv2.Sobel(gray, cv2.CV_32F, 0, 1)
    magnitude = np.sqrt(sx ** 2 + sy ** 2)
    magnitude = np.clip(magnitude, 0, 255).astype(np.uint8)
    return cv2.cvtColor(magnitude, cv2.COLOR_GRAY2RGB)
def prim_dilate(img: np.ndarray, size: int = 1) -> np.ndarray:
    """Morphological dilation with an all-ones square kernel.

    The kernel side is clamped to >= 1 (and truncated to int) so a zero,
    negative, or fractional `size` cannot build an empty/invalid kernel —
    the same guard prim_blur and prim_box_blur already apply to radius.
    """
    k = max(1, int(size))
    kernel = np.ones((k, k), np.uint8)
    return cv2.dilate(img, kernel, iterations=1)


def prim_erode(img: np.ndarray, size: int = 1) -> np.ndarray:
    """Morphological erosion with an all-ones square kernel (size clamped >= 1)."""
    k = max(1, int(size))
    kernel = np.ones((k, k), np.uint8)
    return cv2.erode(img, kernel, iterations=1)


# =============================================================================
# Geometric Transforms
# =============================================================================

def prim_translate(img: np.ndarray, dx: float, dy: float) -> np.ndarray:
    """Shift the image by (dx, dy) pixels; edges are mirror-reflected."""
    h, w = img.shape[:2]
    M = np.float32([[1, 0, dx], [0, 1, dy]])
    return cv2.warpAffine(img, M, (w, h), borderMode=cv2.BORDER_REFLECT)


def prim_rotate(img: np.ndarray, angle: float, cx: float = None, cy: float = None) -> np.ndarray:
    """Rotate by `angle` degrees around (cx, cy), defaulting to the center."""
    h, w = img.shape[:2]
    if cx is None:
        cx = w / 2
    if cy is None:
        cy = h / 2
    M = cv2.getRotationMatrix2D((cx, cy), angle, 1.0)
    return cv2.warpAffine(img, M, (w, h), borderMode=cv2.BORDER_REFLECT)


def prim_scale(img: np.ndarray, sx: float, sy: float, cx: float = None, cy: float = None) -> np.ndarray:
    """Scale by (sx, sy) about (cx, cy), defaulting to the center.

    The translation terms keep the chosen center fixed under the scale.
    """
    h, w = img.shape[:2]
    if cx is None:
        cx = w / 2
    if cy is None:
        cy = h / 2

    M = np.float32([
        [sx, 0, cx * (1 - sx)],
        [0, sy, cy * (1 - sy)]
    ])
    return cv2.warpAffine(img, M, (w, h), borderMode=cv2.BORDER_REFLECT)


def prim_flip_h(img: np.ndarray) -> np.ndarray:
    """Mirror left-right."""
    return cv2.flip(img, 1)


def prim_flip_v(img: np.ndarray) -> np.ndarray:
    """Mirror top-bottom."""
    return cv2.flip(img, 0)


def prim_remap(img: np.ndarray, map_x: np.ndarray, map_y: np.ndarray) -> np.ndarray:
    """Warp the image through per-pixel coordinate maps (see prim_make_coords)."""
    return cv2.remap(img, map_x.astype(np.float32), map_y.astype(np.float32),
                     cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)
def prim_make_coords(w: int, h: int) -> Tuple[np.ndarray, np.ndarray]:
    """Identity coordinate maps (map_x, map_y) for use with prim_remap."""
    map_x, map_y = np.meshgrid(np.arange(w, dtype=np.float32),
                               np.arange(h, dtype=np.float32))
    return map_x, map_y


# =============================================================================
# Blending
# =============================================================================

def prim_blend_images(a: np.ndarray, b: np.ndarray, alpha: float) -> np.ndarray:
    """Linear crossfade a->b; alpha clamped to [0, 1].

    b is resized to a's dimensions first when their sizes differ.
    """
    alpha = np.clip(alpha, 0, 1)
    if a.shape[:2] != b.shape[:2]:
        b = cv2.resize(b, (a.shape[1], a.shape[0]), interpolation=cv2.INTER_LINEAR)
    return (a.astype(float) * (1 - alpha) + b.astype(float) * alpha).astype(np.uint8)


def prim_blend_mode(a: np.ndarray, b: np.ndarray, mode: str) -> np.ndarray:
    """Photoshop-style blend of b onto a.

    Modes: add, multiply, screen, overlay, difference, lighten, darken;
    any other mode returns a unchanged. b is resized to a's dimensions
    first when their sizes differ.
    """
    if a.shape[:2] != b.shape[:2]:
        b = cv2.resize(b, (a.shape[1], a.shape[0]), interpolation=cv2.INTER_LINEAR)
    af = a.astype(float) / 255
    bf = b.astype(float) / 255

    ops = {
        "add": lambda: af + bf,
        "multiply": lambda: af * bf,
        "screen": lambda: 1 - (1 - af) * (1 - bf),
        "overlay": lambda: np.where(af < 0.5,
                                    2 * af * bf,
                                    1 - 2 * (1 - af) * (1 - bf)),
        "difference": lambda: np.abs(af - bf),
        "lighten": lambda: np.maximum(af, bf),
        "darken": lambda: np.minimum(af, bf),
    }
    result = ops[mode]() if mode in ops else af

    return (np.clip(result, 0, 1) * 255).astype(np.uint8)
def prim_mask(img: np.ndarray, mask_img: np.ndarray) -> np.ndarray:
    """Scale the image by a grayscale mask (255 keeps, 0 blacks out).

    A 3-channel mask is converted to grayscale first.
    """
    mask = cv2.cvtColor(mask_img, cv2.COLOR_RGB2GRAY) if len(mask_img.shape) == 3 else mask_img
    weights = mask.astype(float) / 255
    return (img.astype(float) * weights[:, :, np.newaxis]).astype(np.uint8)


# =============================================================================
# Drawing
# =============================================================================

# Simple font: each glyph is 7 rows of 5-bit row bitmaps.
FONT_5X7 = {
    ' ': [0, 0, 0, 0, 0, 0, 0],
    '.': [0, 0, 0, 0, 0, 0, 4],
    ':': [0, 0, 4, 0, 4, 0, 0],
    '-': [0, 0, 0, 14, 0, 0, 0],
    '=': [0, 0, 14, 0, 14, 0, 0],
    '+': [0, 4, 4, 31, 4, 4, 0],
    '*': [0, 4, 21, 14, 21, 4, 0],
    '#': [10, 31, 10, 10, 31, 10, 0],
    '%': [19, 19, 4, 8, 25, 25, 0],
    '@': [14, 17, 23, 21, 23, 16, 14],
    '0': [14, 17, 19, 21, 25, 17, 14],
    '1': [4, 12, 4, 4, 4, 4, 14],
    '2': [14, 17, 1, 2, 4, 8, 31],
    '3': [31, 2, 4, 2, 1, 17, 14],
    '4': [2, 6, 10, 18, 31, 2, 2],
    '5': [31, 16, 30, 1, 1, 17, 14],
    '6': [6, 8, 16, 30, 17, 17, 14],
    '7': [31, 1, 2, 4, 8, 8, 8],
    '8': [14, 17, 17, 14, 17, 17, 14],
    '9': [14, 17, 17, 15, 1, 2, 12],
}

# Uppercase letters are registered as blank placeholder glyphs.
for _letter in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
    FONT_5X7[_letter] = [0] * 7


def prim_draw_char(img: np.ndarray, char: str, x: int, y: int,
                   size: int, color: List[int]) -> np.ndarray:
    """Render one character with OpenCV's Hershey font. Mutates img."""
    scale = size / 20.0
    thickness = max(1, int(size / 10))
    cv2.putText(img, char, (int(x), int(y + size)), cv2.FONT_HERSHEY_SIMPLEX,
                scale, tuple(color[:3]), thickness)
    return img


def prim_draw_text(img: np.ndarray, text: str, x: int, y: int,
                   size: int, color: List[int]) -> np.ndarray:
    """Render a text string with OpenCV's Hershey font. Mutates img."""
    scale = size / 20.0
    thickness = max(1, int(size / 10))
    cv2.putText(img, text, (int(x), int(y + size)), cv2.FONT_HERSHEY_SIMPLEX,
                scale, tuple(color[:3]), thickness)
    return img


def prim_fill_rect(img: np.ndarray, x: int, y: int, w: int, h: int,
                   color: List[int]) -> np.ndarray:
    """Fill an axis-aligned rectangle in place."""
    x, y, w, h = int(x), int(y), int(w), int(h)
    img[y:y + h, x:x + w] = color[:3]
    return img


def prim_draw_line(img: np.ndarray, x1: int, y1: int, x2: int, y2: int,
                   color: List[int], thickness: int = 1) -> np.ndarray:
    """Draw a line segment in place."""
    cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)), tuple(color[:3]), int(thickness))
    return img


# =============================================================================
# Math Primitives
# =============================================================================

def prim_sin(x: float) -> float:
    """Sine of x (radians)."""
    return math.sin(x)


def prim_cos(x: float) -> float:
    """Cosine of x (radians)."""
    return math.cos(x)


def prim_tan(x: float) -> float:
    """Tangent of x (radians)."""
    return math.tan(x)


def prim_atan2(y: float, x: float) -> float:
    """Angle of the vector (x, y) in radians."""
    return math.atan2(y, x)


def prim_sqrt(x: float) -> float:
    """Square root; negative inputs are treated as 0."""
    return math.sqrt(max(0, x))


def prim_pow(x: float, y: float) -> float:
    """x raised to the power y."""
    return math.pow(x, y)


def prim_abs(x: float) -> float:
    """Absolute value."""
    return abs(x)


def prim_floor(x: float) -> int:
    """Largest integer <= x."""
    return int(math.floor(x))
def prim_ceil(x: float) -> int:
    """Smallest integer >= x."""
    return int(math.ceil(x))


def prim_round(x: float) -> int:
    """Round to nearest integer (banker's rounding, like Python's round)."""
    return int(round(x))


def prim_min(*args) -> float:
    """Smallest of the arguments."""
    return min(args)


def prim_max(*args) -> float:
    """Largest of the arguments."""
    return max(args)


def prim_clamp(x: float, lo: float, hi: float) -> float:
    """Constrain x to the closed range [lo, hi]."""
    return max(lo, min(hi, x))


def prim_lerp(a: float, b: float, t: float) -> float:
    """Linear interpolation: a at t=0, b at t=1."""
    return a + (b - a) * t


def prim_mod(a: float, b: float) -> float:
    """Python modulo (result takes the sign of b)."""
    return a % b


def prim_random(lo: float = 0, hi: float = 1) -> float:
    """Uniform random float from the global seeded RNG."""
    return _rng.random(lo, hi)


def prim_randint(lo: int, hi: int) -> int:
    """Uniform random integer (inclusive) from the global seeded RNG."""
    return _rng.randint(lo, hi)


def prim_gaussian(mean: float = 0, std: float = 1) -> float:
    """Gaussian random float from the global seeded RNG."""
    return _rng.gaussian(mean, std)


def prim_assert(condition, message: str = "Assertion failed"):
    """Raise RuntimeError with `message` unless condition is truthy."""
    if not condition:
        raise RuntimeError(f"Assertion error: {message}")
    return True


# =============================================================================
# Array/List Primitives
# =============================================================================

def prim_length(seq) -> int:
    """Number of elements in a sequence."""
    return len(seq)


def prim_nth(seq, i: int):
    """Element at index i, or None when i is out of range."""
    idx = int(i)
    return seq[idx] if 0 <= idx < len(seq) else None


def prim_first(seq):
    """First element, or None for an empty sequence."""
    return seq[0] if seq else None


def prim_rest(seq):
    """All elements after the first ([] for an empty sequence)."""
    return seq[1:] if seq else []


def prim_take(seq, n: int):
    """The first n elements."""
    return seq[:int(n)]


def prim_drop(seq, n: int):
    """Everything after the first n elements."""
    return seq[int(n):]


def prim_cons(x, seq):
    """New list with x prepended to seq."""
    return [x] + list(seq)


def prim_append(*seqs):
    """Concatenate any number of sequences into one fresh list."""
    merged = []
    for s in seqs:
        merged.extend(s)
    return merged


def prim_reverse(seq):
    """Reversed copy as a list."""
    return list(reversed(seq))


def prim_range(start: int, end: int, step: int = 1) -> List[int]:
    """Integers from start (inclusive) to end (exclusive) by step."""
    return list(range(int(start), int(end), int(step)))
def prim_roll(arr: np.ndarray, shift: int, axis: int = 0) -> np.ndarray:
    """Circularly shift an array along the given axis."""
    return np.roll(arr, int(shift), axis=int(axis))


def prim_list(*args) -> list:
    """Collect the arguments into a list."""
    return list(args)


# =============================================================================
# Primitive Registry
# =============================================================================

def prim_add(*args):
    """Sum of all arguments."""
    return sum(args)


def prim_sub(a, b=None):
    """a - b, or unary negation when b is omitted."""
    return -a if b is None else a - b


def prim_mul(*args):
    """Product of all arguments (1 for no arguments)."""
    product = 1
    for factor in args:
        product *= factor
    return product


def prim_div(a, b):
    """a / b, yielding 0 instead of raising on division by zero."""
    return a / b if b != 0 else 0


def prim_lt(a, b):
    """a < b."""
    return a < b


def prim_gt(a, b):
    """a > b."""
    return a > b


def prim_le(a, b):
    """a <= b."""
    return a <= b


def prim_ge(a, b):
    """a >= b."""
    return a >= b


def prim_eq(a, b):
    """Equality that tolerates None/nil and numpy arrays.

    None only equals None; two arrays compare element-wise; an array never
    equals a non-array; everything else uses ordinary ==.
    """
    if a is None or b is None:
        return a is None and b is None
    a_is_arr = isinstance(a, np.ndarray)
    b_is_arr = isinstance(b, np.ndarray)
    if a_is_arr and b_is_arr:
        return np.array_equal(a, b)
    if a_is_arr or b_is_arr:
        return False
    return a == b


def prim_ne(a, b):
    """Negation of prim_eq."""
    return not prim_eq(a, b)


# =============================================================================
# Vectorized Bulk Operations (true primitives for composing effects)
# =============================================================================

def prim_color_matrix(img: np.ndarray, matrix: List[List[float]]) -> np.ndarray:
    """Apply a 3x3 color transform to every pixel (rows map output channels)."""
    m = np.array(matrix, dtype=np.float32)
    transformed = img.astype(np.float32) @ m.T
    return np.clip(transformed, 0, 255).astype(np.uint8)


def prim_adjust(img: np.ndarray, brightness: float = 0, contrast: float = 1) -> np.ndarray:
    """Brightness/contrast: brightness in -255..255, contrast 0..3+ (pivot 128)."""
    adjusted = (img.astype(np.float32) - 128) * contrast + 128 + brightness
    return np.clip(adjusted, 0, 255).astype(np.uint8)
def prim_mix_gray(img: np.ndarray, amount: float) -> np.ndarray:
    """Crossfade toward grayscale: 0 keeps the original, 1 is fully gray."""
    luma = 0.299 * img[:, :, 0] + 0.587 * img[:, :, 1] + 0.114 * img[:, :, 2]
    gray_rgb = np.stack([luma, luma, luma], axis=-1)
    mixed = img.astype(np.float32) * (1 - amount) + gray_rgb * amount
    return np.clip(mixed, 0, 255).astype(np.uint8)


def prim_invert_img(img: np.ndarray) -> np.ndarray:
    """Photographic negative of the image."""
    return (255 - img).astype(np.uint8)


def prim_add_noise(img: np.ndarray, amount: float) -> np.ndarray:
    """Add zero-mean gaussian noise (std = amount) from the global seeded RNG."""
    noise = _rng._rng.normal(0, amount, img.shape)
    noisy = img.astype(np.float32) + noise
    return np.clip(noisy, 0, 255).astype(np.uint8)


def prim_quantize(img: np.ndarray, levels: int) -> np.ndarray:
    """Posterize to `levels` values per channel (at least 2), band-centered."""
    levels = max(2, int(levels))
    factor = 256 / levels
    banded = (img // factor) * factor + factor // 2
    return np.clip(banded, 0, 255).astype(np.uint8)


def prim_shift_hsv(img: np.ndarray, h: float = 0, s: float = 1, v: float = 1) -> np.ndarray:
    """Rotate hue by h degrees and scale saturation/value by s and v."""
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.float32)
    # OpenCV hue range is 0-179, so degrees are halved.
    hsv[:, :, 0] = (hsv[:, :, 0] + h / 2) % 180
    hsv[:, :, 1] = np.clip(hsv[:, :, 1] * s, 0, 255)
    hsv[:, :, 2] = np.clip(hsv[:, :, 2] * v, 0, 255)
    return cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2RGB)


# =============================================================================
# Array Math Primitives (vectorized operations on coordinate arrays)
# =============================================================================

def prim_arr_add(a: np.ndarray, b) -> np.ndarray:
    """Element-wise a + b; b may be an array or a scalar."""
    return (np.asarray(a) + np.asarray(b)).astype(np.float32)
def prim_arr_sub(a: np.ndarray, b) -> np.ndarray:
    """Element-wise subtraction. b can be array or scalar."""
    return np.subtract(np.asarray(a), np.asarray(b)).astype(np.float32)


def prim_arr_mul(a: np.ndarray, b) -> np.ndarray:
    """Element-wise multiplication. b can be array or scalar."""
    return np.multiply(np.asarray(a), np.asarray(b)).astype(np.float32)


def prim_arr_div(a: np.ndarray, b) -> np.ndarray:
    """Element-wise division. b can be array or scalar; zeros in the
    divisor are replaced by a tiny epsilon so the result stays finite."""
    denom = np.asarray(b)
    safe_denom = np.where(denom == 0, 1e-10, denom)
    with np.errstate(divide='ignore', invalid='ignore'):
        quotient = np.asarray(a) / safe_denom
    return quotient.astype(np.float32)


def prim_arr_mod(a: np.ndarray, b) -> np.ndarray:
    """Element-wise modulo."""
    return np.mod(np.asarray(a), np.asarray(b)).astype(np.float32)


def prim_arr_sin(a: np.ndarray) -> np.ndarray:
    """Element-wise sine."""
    return np.sin(np.asarray(a)).astype(np.float32)


def prim_arr_cos(a: np.ndarray) -> np.ndarray:
    """Element-wise cosine."""
    return np.cos(np.asarray(a)).astype(np.float32)


def prim_arr_tan(a: np.ndarray) -> np.ndarray:
    """Element-wise tangent."""
    return np.tan(np.asarray(a)).astype(np.float32)


def prim_arr_sqrt(a: np.ndarray) -> np.ndarray:
    """Element-wise square root; negatives are clamped to 0 first."""
    clamped = np.maximum(0, np.asarray(a))
    return np.sqrt(clamped).astype(np.float32)


def prim_arr_pow(a: np.ndarray, b) -> np.ndarray:
    """Element-wise power."""
    return np.power(np.asarray(a), np.asarray(b)).astype(np.float32)


def prim_arr_abs(a: np.ndarray) -> np.ndarray:
    """Element-wise absolute value."""
    return np.abs(np.asarray(a)).astype(np.float32)


def prim_arr_neg(a: np.ndarray) -> np.ndarray:
    """Element-wise negation."""
    return np.negative(np.asarray(a)).astype(np.float32)


def prim_arr_exp(a: np.ndarray) -> np.ndarray:
    """Element-wise exponential."""
    return np.exp(np.asarray(a)).astype(np.float32)


def prim_arr_atan2(y: np.ndarray, x: np.ndarray) -> np.ndarray:
    """Element-wise atan2(y, x)."""
    return np.arctan2(np.asarray(y), np.asarray(x)).astype(np.float32)


def prim_arr_min(a: np.ndarray, b) -> np.ndarray:
    """Element-wise minimum."""
    return np.minimum(np.asarray(a), np.asarray(b)).astype(np.float32)


def prim_arr_max(a: np.ndarray, b) -> np.ndarray:
    """Element-wise maximum."""
    return np.maximum(np.asarray(a), np.asarray(b)).astype(np.float32)


def prim_arr_clip(a: np.ndarray, lo, hi) -> np.ndarray:
    """Element-wise clip into [lo, hi]."""
    return np.clip(np.asarray(a), lo, hi).astype(np.float32)


def prim_arr_where(cond: np.ndarray, a, b) -> np.ndarray:
    """Element-wise select: a where cond is truthy, else b."""
    chosen = np.where(np.asarray(cond), np.asarray(a), np.asarray(b))
    return chosen.astype(np.float32)


def prim_arr_floor(a: np.ndarray) -> np.ndarray:
    """Element-wise floor."""
    return np.floor(np.asarray(a)).astype(np.float32)


def prim_arr_lerp(a: np.ndarray, b: np.ndarray, t) -> np.ndarray:
    """Element-wise linear interpolation: a + (b - a) * t."""
    start, stop = np.asarray(a), np.asarray(b)
    return (start + (stop - start) * t).astype(np.float32)


# =============================================================================
# Coordinate Transformation Primitives
# =============================================================================
def _resolve_center_geometry(img_or_w, h_or_cx, cx, cy):
    """Decode the flexible (img[,cx,cy]) / (w,h[,cx,cy]) calling convention.

    Returns (w, h, cx, cy); the centre defaults to the image midpoint.
    When the first argument is an image, the positional arguments shift:
    h_or_cx carries cx and cx carries cy.
    """
    if isinstance(img_or_w, np.ndarray):
        h, w = img_or_w.shape[:2]
        if h_or_cx is None:
            return w, h, w / 2, h / 2
        return w, h, h_or_cx, (cx if cx is not None else h / 2)
    w, h = int(img_or_w), int(h_or_cx)
    return (w, h,
            cx if cx is not None else w / 2,
            cy if cy is not None else h / 2)


def prim_polar_from_center(img_or_w, h_or_cx=None, cx=None, cy=None) -> Tuple[np.ndarray, np.ndarray]:
    """
    Create polar coordinates (r, theta) from image center.

    Usage:
        (polar-from-center img)          ; center of image
        (polar-from-center img cx cy)    ; custom center
        (polar-from-center w h cx cy)    ; explicit dimensions

    Returns: (r, theta) tuple of arrays
    """
    w, h, cx, cy = _resolve_center_geometry(img_or_w, h_or_cx, cx, cy)
    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    dx = xs - cx
    dy = ys - cy
    return (np.sqrt(dx * dx + dy * dy), np.arctan2(dy, dx))


def prim_cart_from_polar(r: np.ndarray, theta: np.ndarray, cx: float, cy: float) -> Tuple[np.ndarray, np.ndarray]:
    """
    Convert polar coordinates back to Cartesian around centre (cx, cy).

    Args:
        r: radius array
        theta: angle array
        cx, cy: center point

    Returns: (x, y) tuple of float32 coordinate arrays
    """
    xs = (cx + r * np.cos(theta)).astype(np.float32)
    ys = (cy + r * np.sin(theta)).astype(np.float32)
    return (xs, ys)


def prim_normalize_coords(img_or_w, h_or_cx=None, cx=None, cy=None) -> Tuple[np.ndarray, np.ndarray]:
    """
    Create normalized coordinates (-1 to 1) from center.

    Returns: (x_norm, y_norm) tuple of arrays where center is (0,0)
    """
    w, h, cx, cy = _resolve_center_geometry(img_or_w, h_or_cx, cx, cy)
    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    return ((xs - cx) / (w / 2), (ys - cy) / (h / 2))


def prim_coords_x(coords: Tuple[np.ndarray, np.ndarray]) -> np.ndarray:
    """First (x) component of a coordinate tuple."""
    return coords[0]


def prim_coords_y(coords: Tuple[np.ndarray, np.ndarray]) -> np.ndarray:
    """Second (y) component of a coordinate tuple."""
    return coords[1]


def prim_make_coords_centered(w: int, h: int, cx: float = None, cy: float = None) -> Tuple[np.ndarray, np.ndarray]:
    """
    Coordinate grids relative to centre (cx, cy).
    Like make-coords but the origin sits at the (default: image) centre.
    """
    w, h = int(w), int(h)
    center_x = w / 2 if cx is None else cx
    center_y = h / 2 if cy is None else cy
    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    return (xs - center_x, ys - center_y)


# =============================================================================
# Specialized Distortion Primitives
# =============================================================================
def prim_wave_displace(w: int, h: int, axis: str, freq: float, amp: float, phase: float = 0) -> Tuple[np.ndarray, np.ndarray]:
    """
    Create wave displacement maps.

    Args:
        w, h: dimensions
        axis: "x"/"horizontal", "y"/"vertical", or "both"
        freq: wave frequency (waves per image width/height)
        amp: wave amplitude in pixels
        phase: phase offset in radians

    Returns: (map_x, map_y) for use with remap
    """
    w, h = int(w), int(h)
    map_x, map_y = np.meshgrid(np.arange(w, dtype=np.float32),
                               np.arange(h, dtype=np.float32))
    if axis in ("x", "horizontal"):
        # Horizontal waves: x offset varies with row.
        map_x = map_x + np.sin(2 * np.pi * freq * map_y / h + phase) * amp
    elif axis in ("y", "vertical"):
        # Vertical waves: y offset varies with column.
        map_y = map_y + np.sin(2 * np.pi * freq * map_x / w + phase) * amp
    elif axis == "both":
        # Both offsets are computed from the *undisplaced* grids.
        offset_x = np.sin(2 * np.pi * freq * map_y / h + phase) * amp
        offset_y = np.sin(2 * np.pi * freq * map_x / w + phase) * amp
        map_x = map_x + offset_x
        map_y = map_y + offset_y
    return (map_x, map_y)


def prim_swirl_displace(w: int, h: int, strength: float, radius: float = 0.5,
                        cx: float = None, cy: float = None, falloff: str = "quadratic") -> Tuple[np.ndarray, np.ndarray]:
    """
    Create swirl displacement maps.

    Args:
        w, h: dimensions
        strength: swirl strength in radians
        radius: effect radius as fraction of max dimension
        cx, cy: center (defaults to image center)
        falloff: "linear", "quadratic", or "gaussian"

    Returns: (map_x, map_y) for use with remap
    """
    w, h = int(w), int(h)
    center_x = w / 2 if cx is None else cx
    center_y = h / 2 if cy is None else cy
    radius_px = max(w, h) * radius

    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    dx = xs - center_x
    dy = ys - center_y
    dist = np.sqrt(dx ** 2 + dy ** 2)
    angle = np.arctan2(dy, dx)

    # Falloff weight in [0, 1] from normalised distance.
    norm_dist = dist / radius_px
    if falloff == "linear":
        weight = np.maximum(0, 1 - norm_dist)
    elif falloff == "gaussian":
        weight = np.exp(-norm_dist ** 2 * 2)
    else:  # quadratic (default)
        weight = np.maximum(0, 1 - norm_dist ** 2)

    # Rotate each pixel's angle by a distance-weighted amount.
    twisted = angle + strength * weight
    map_x = (center_x + dist * np.cos(twisted)).astype(np.float32)
    map_y = (center_y + dist * np.sin(twisted)).astype(np.float32)
    return (map_x, map_y)


def prim_fisheye_displace(w: int, h: int, strength: float, cx: float = None, cy: float = None,
                          zoom_correct: bool = True) -> Tuple[np.ndarray, np.ndarray]:
    """
    Create fisheye/barrel distortion displacement maps.

    Args:
        w, h: dimensions
        strength: distortion strength (-1 to 1, positive=bulge, negative=pinch)
        cx, cy: center (defaults to image center)
        zoom_correct: auto-zoom to hide black edges (bulge only)

    Returns: (map_x, map_y) for use with remap
    """
    w, h = int(w), int(h)
    center_x = w / 2 if cx is None else cx
    center_y = h / 2 if cy is None else cy

    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    x_norm = (xs - center_x) / (w / 2)
    y_norm = (ys - center_y) / (h / 2)
    r = np.sqrt(x_norm ** 2 + y_norm ** 2)

    # Barrel (strength > 0) vs pincushion; the small epsilon keeps the
    # pincushion denominator away from zero.
    if strength > 0:
        r_distorted = r * (1 + strength * r ** 2)
    else:
        r_distorted = r / (1 - strength * r ** 2 + 0.001)

    with np.errstate(divide='ignore', invalid='ignore'):
        scale = np.where(r > 0, r_distorted / r, 1)

    if zoom_correct and strength > 0:
        scale = scale / (1 + strength * 0.5)

    map_x = (x_norm * scale * (w / 2) + center_x).astype(np.float32)
    map_y = (y_norm * scale * (h / 2) + center_y).astype(np.float32)
    return (map_x, map_y)


def prim_kaleidoscope_displace(w: int, h: int, segments: int, rotation: float = 0,
                               cx: float = None, cy: float = None, zoom: float = 1.0) -> Tuple[np.ndarray, np.ndarray]:
    """
    Create kaleidoscope displacement maps.

    Args:
        w, h: dimensions
        segments: number of symmetry segments (clamped to 3-16)
        rotation: rotation angle in degrees
        cx, cy: center (defaults to image center)
        zoom: zoom factor

    Returns: (map_x, map_y) for use with remap
    """
    w, h = int(w), int(h)
    segments = max(3, min(int(segments), 16))
    center_x = w / 2 if cx is None else cx
    center_y = h / 2 if cy is None else cy
    segment_angle = 2 * np.pi / segments

    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    dx = xs - center_x
    dy = ys - center_y

    # Polar form, rotated by the requested amount.
    r = np.sqrt(dx ** 2 + dy ** 2)
    theta = np.arctan2(dy, dx) - np.deg2rad(rotation)

    # Fold every angle into the first segment, mirroring odd segments
    # so adjacent wedges join seamlessly.
    theta_wrapped = theta % (2 * np.pi)
    segment_idx = (theta_wrapped / segment_angle).astype(int)
    theta_local = theta_wrapped - segment_idx * segment_angle
    mirrored = (segment_idx % 2) == 1
    theta_local = np.where(mirrored, segment_angle - theta_local, theta_local)

    r = r / zoom

    map_x = (r * np.cos(theta_local) + center_x).astype(np.float32)
    map_y = (r * np.sin(theta_local) + center_y).astype(np.float32)
    return (map_x, map_y)


# =============================================================================
# Character/ASCII Art Primitives
# =============================================================================

# Character sets ordered by visual density (light to dark)
CHAR_ALPHABETS = {
    "standard": " .`'^\",:;Il!i><~+_-?][}{1)(|/tfjrxnuvczXYUJCLQ0OZmwqpdbkhao*#MW&8%B@$",
    "blocks": " ░▒▓█",
    "simple": " .-:=+*#%@",
    "digits": " 0123456789",
}

# Global atlas cache: keyed on (tuple(chars), cell_size) ->
# (atlas_array, char_to_idx) where atlas_array is (N, cell_size, cell_size) uint8.
# Rendered-glyph atlas cache. Values are (atlas_array, char_to_idx).
# Eviction is FIFO over insertion order, not true LRU: cache hits do not
# refresh an entry's position.
_char_atlas_cache = {}
_CHAR_ATLAS_CACHE_MAX = 32


def _get_char_atlas(alphabet: str, cell_size: int) -> dict:
    """Get or create character atlas for alphabet (legacy dict version).

    Thin wrapper over _get_render_atlas that re-exposes the old
    {char: (cell_size, cell_size) uint8 mask} mapping.
    """
    atlas_arr, char_to_idx = _get_render_atlas(alphabet, cell_size)
    # Build legacy dict from array
    idx_to_char = {v: k for k, v in char_to_idx.items()}
    return {idx_to_char[i]: atlas_arr[i] for i in range(len(atlas_arr))}


def _get_render_atlas(unique_chars_or_alphabet, cell_size: int):
    """Get or build a stacked numpy atlas for vectorised rendering.

    Args:
        unique_chars_or_alphabet: Either an alphabet name (str looked up in
            CHAR_ALPHABETS), a literal character string, or a set/frozenset
            of characters.
        cell_size: Pixel size of each cell.

    Returns:
        (atlas_array, char_to_idx) where
            atlas_array: (num_chars, cell_size, cell_size) uint8 masks
            char_to_idx: dict mapping character -> index in atlas_array
    """
    # Sets are unordered, so sort to get a deterministic cache key.
    if isinstance(unique_chars_or_alphabet, (set, frozenset)):
        chars_tuple = tuple(sorted(unique_chars_or_alphabet))
    else:
        resolved = CHAR_ALPHABETS.get(unique_chars_or_alphabet, unique_chars_or_alphabet)
        chars_tuple = tuple(resolved)

    cache_key = (chars_tuple, cell_size)
    cached = _char_atlas_cache.get(cache_key)
    if cached is not None:
        return cached

    font = cv2.FONT_HERSHEY_SIMPLEX
    # Empirical scaling so a Hershey glyph roughly fills one cell.
    font_scale = cell_size / 20.0
    thickness = max(1, int(cell_size / 10))

    n = len(chars_tuple)
    atlas = np.zeros((n, cell_size, cell_size), dtype=np.uint8)
    char_to_idx = {}

    for i, char in enumerate(chars_tuple):
        char_to_idx[char] = i
        # Spaces (and empty strings) keep their all-zero mask slot.
        if char and char != ' ':
            try:
                (text_w, text_h), _ = cv2.getTextSize(char, font, font_scale, thickness)
                text_x = max(0, (cell_size - text_w) // 2)
                text_y = (cell_size + text_h) // 2
                cv2.putText(atlas[i], char, (text_x, text_y),
                            font, font_scale, 255, thickness, cv2.LINE_AA)
            except Exception:
                # Best-effort: an unrenderable glyph stays blank.
                pass

    # Evict oldest entry if cache is full
    if len(_char_atlas_cache) >= _CHAR_ATLAS_CACHE_MAX:
        _char_atlas_cache.pop(next(iter(_char_atlas_cache)))

    _char_atlas_cache[cache_key] = (atlas, char_to_idx)
    return atlas, char_to_idx


def prim_cell_sample(img: np.ndarray, cell_size: int) -> Tuple[np.ndarray, np.ndarray]:
    """
    Sample image into cell grid, returning average colors and luminances.

    Uses cv2.resize with INTER_AREA (pixel-area averaging) which is
    ~25x faster than numpy reshape+mean for block downsampling.

    Args:
        img: source image
        cell_size: size of each cell in pixels

    Returns: (colors, luminances) tuple
        - colors: (rows, cols, 3) array of average RGB per cell
        - luminances: (rows, cols) array of average brightness 0-255
    """
    cell_size = max(1, int(cell_size))
    h, w = img.shape[:2]
    rows = h // cell_size
    cols = w // cell_size

    # Degenerate case: image smaller than one cell -> single empty cell.
    if rows < 1 or cols < 1:
        return (np.zeros((1, 1, 3), dtype=np.uint8),
                np.zeros((1, 1), dtype=np.float32))

    # Crop to exact grid then block-average via cv2 area interpolation.
    grid_h, grid_w = rows * cell_size, cols * cell_size
    cropped = img[:grid_h, :grid_w]
    colors = cv2.resize(cropped, (cols, rows), interpolation=cv2.INTER_AREA)

    # Compute luminance (Rec.601 weights)
    luminances = (0.299 * colors[:, :, 0] +
                  0.587 * colors[:, :, 1] +
                  0.114 * colors[:, :, 2]).astype(np.float32)

    return (colors, luminances)


def cell_sample_extended(img: np.ndarray, cell_size: int) -> Tuple[np.ndarray, np.ndarray, List[List[ZoneContext]]]:
    """
    Sample image into cell grid, returning colors, luminances, and full zone contexts.

    Args:
        img: source image (RGB)
        cell_size: size of each cell in pixels

    Returns: (colors, luminances, zone_contexts) tuple
        - colors: (rows, cols, 3) array of average RGB per cell
        - luminances: (rows, cols) array of average brightness 0-255
        - zone_contexts: 2D list of ZoneContext objects with full cell data
    """
    cell_size = max(1, int(cell_size))
    h, w = img.shape[:2]
    rows = h // cell_size
    cols = w // cell_size

    # Degenerate case: image smaller than one cell -> single zeroed zone.
    if rows < 1 or cols < 1:
        return (np.zeros((1, 1, 3), dtype=np.uint8),
                np.zeros((1, 1), dtype=np.float32),
                [[ZoneContext(0, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)]])

    # Crop to grid
    grid_h, grid_w = rows * cell_size, cols * cell_size
    cropped = img[:grid_h, :grid_w]

    # Reshape and average (pure-numpy block mean; unlike prim_cell_sample
    # this path does not depend on cv2)
    reshaped = cropped.reshape(rows, cell_size, cols, cell_size, 3)
    colors = reshaped.mean(axis=(1, 3)).astype(np.uint8)

    # Compute luminance (0-255, Rec.601 weights)
    luminances = (0.299 * colors[:, :, 0] +
                  0.587 * colors[:, :, 1] +
                  0.114 * colors[:, :, 2]).astype(np.float32)

    # Normalize colors to 0-1 for HSV/saturation calculations
    colors_float = colors.astype(np.float32) / 255.0

    # Compute HSV values for each cell
    max_c = colors_float.max(axis=2)
    min_c = colors_float.min(axis=2)
    diff = max_c - min_c

    # Saturation
    saturation = np.where(max_c > 0, diff / max_c, 0)

    # Hue (0-360)
    hue = np.zeros((rows, cols), dtype=np.float32)
    # Avoid division by zero
    mask = diff > 0
    r, g, b = colors_float[:, :, 0], colors_float[:, :, 1], colors_float[:, :, 2]

    # Standard RGB->hue sector formulas. Exact float equality against
    # max_c is intentional here: max_c was computed from these very
    # channel arrays, so the dominant channel compares equal exactly.
    # Red is max
    red_max = mask & (max_c == r)
    hue[red_max] = 60 * (((g[red_max] - b[red_max]) / diff[red_max]) % 6)

    # Green is max
    green_max = mask & (max_c == g)
    hue[green_max] = 60 * ((b[green_max] - r[green_max]) / diff[green_max] + 2)

    # Blue is max
    blue_max = mask & (max_c == b)
    hue[blue_max] = 60 * ((r[blue_max] - g[blue_max]) / diff[blue_max] + 4)

    # Ensure hue is in 0-360 range
    hue = hue % 360

    # Build zone contexts (one ZoneContext per cell; values normalised
    # to 0-1 except hue, which stays in degrees)
    zone_contexts = []
    for row in range(rows):
        row_contexts = []
        for col in range(cols):
            ctx = ZoneContext(
                row=row,
                col=col,
                row_norm=row / max(1, rows - 1) if rows > 1 else 0.5,
                col_norm=col / max(1, cols - 1) if cols > 1 else 0.5,
                luminance=luminances[row, col] / 255.0,  # Normalize to 0-1
                saturation=float(saturation[row, col]),
                hue=float(hue[row, col]),
                r=float(colors_float[row, col, 0]),
                g=float(colors_float[row, col, 1]),
                b=float(colors_float[row, col, 2]),
            )
            row_contexts.append(ctx)
        zone_contexts.append(row_contexts)

    return (colors, luminances, zone_contexts)


def prim_luminance_to_chars(luminances: np.ndarray, alphabet: str, contrast: float = 1.0) -> List[List[str]]:
    """
    Map luminance values to characters from alphabet.

    Args:
        luminances: (rows, cols) array of brightness values 0-255
        alphabet: character set name or literal string (light to dark)
        contrast: contrast boost factor

    Returns: 2D list of single-character strings
    """
    # Unknown alphabet names fall through and are used as a literal charset.
    chars = CHAR_ALPHABETS.get(alphabet, alphabet)
    num_chars = len(chars)

    # Apply contrast (pivot around mid grey, then re-clamp)
    lum = luminances.astype(np.float32)
    if contrast != 1.0:
        lum = (lum - 128) * contrast + 128
        lum = np.clip(lum, 0, 255)

    # Map to indices
    indices = ((lum / 255) * (num_chars - 1)).astype(np.int32)
    indices = np.clip(indices, 0, num_chars - 1)

    # Vectorised conversion via numpy char array lookup
    chars_arr = np.array(list(chars))
    char_grid = chars_arr[indices.ravel()].reshape(indices.shape)

    return char_grid.tolist()


def prim_render_char_grid(img: np.ndarray, chars: List[List[str]], colors: np.ndarray,
                          cell_size: int, color_mode: str = "color",
                          background_color: str = "black",
                          invert_colors: bool = False) -> np.ndarray:
    """
    Render a grid of characters onto an image.

    Uses vectorised numpy operations instead of per-cell Python loops:
    the character atlas is looked up via fancy indexing and the full
    mask + colour image are assembled in bulk.

    Args:
        img: source image (for dimensions)
        chars: 2D list of single characters
        colors: (rows, cols, 3) array of colors per cell
        cell_size: size of each cell
        color_mode: "color" (original colors), "mono" (white), "invert",
            or any color name/hex value ("green", "lime", "#00ff00")
        background_color: background color name/hex ("black", "navy", "#001100")
        invert_colors: if True, swap foreground and background colors

    Returns: rendered image
    """
    # Parse color_mode - may be a named color or hex value
    # NOTE(review): presumably parse_color returns None for the mode
    # keywords ("color"/"mono"/"invert") so they fall through to the
    # branches below — confirm against parse_color's implementation.
    fg_color = parse_color(color_mode)

    # Parse background_color
    if isinstance(background_color, (list, tuple)):
        bg_color = tuple(int(c) for c in background_color[:3])
    else:
        bg_color = parse_color(background_color)
        if bg_color is None:
            bg_color = (0, 0, 0)

    # Handle invert_colors - swap fg and bg
    if invert_colors and fg_color is not None:
        fg_color, bg_color = bg_color, fg_color

    cell_size = max(1, int(cell_size))

    if not chars or not chars[0]:
        return img.copy()

    rows = len(chars)
    cols = len(chars[0])
    h, w = rows * cell_size, cols * cell_size

    bg = list(bg_color)

    # --- Build atlas & index grid ---
    unique_chars = set()
    for row in chars:
        for ch in row:
            unique_chars.add(ch)

    atlas, char_to_idx = _get_render_atlas(unique_chars, cell_size)

    # Convert 2D char list to index array using ordinal lookup table
    # (avoids per-cell Python dict lookup).
    space_idx = char_to_idx.get(' ', 0)
    max_ord = max(ord(ch) for ch in char_to_idx) + 1
    ord_lookup = np.full(max_ord, space_idx, dtype=np.int32)
    for ch, idx in char_to_idx.items():
        if ch:
            ord_lookup[ord(ch)] = idx

    # dtype='U1' stores one UCS4 codepoint per element, so reading the
    # buffer as uint32 yields each character's ordinal directly.
    flat = [ch for row in chars for ch in row]
    ords = np.frombuffer(np.array(flat, dtype='U1'), dtype=np.uint32)
    char_indices = ord_lookup[ords].reshape(rows, cols)

    # --- Vectorised mask assembly ---
    # atlas[char_indices] -> (rows, cols, cell_size, cell_size)
    # Transpose to (rows, cell_size, cols, cell_size) then reshape to full image.
    all_masks = atlas[char_indices]
    full_mask = all_masks.transpose(0, 2, 1, 3).reshape(h, w)

    # Expand per-cell colours to per-pixel (only when needed).
    need_color_full = (color_mode in ("color", "invert")
                       or (fg_color is None and color_mode != "mono"))

    if need_color_full:
        color_full = np.repeat(
            np.repeat(colors[:rows, :cols], cell_size, axis=0),
            cell_size, axis=1)

    # --- Vectorised colour composite ---
    # Use element-wise multiply/np.where instead of boolean-indexed scatter
    # for much better memory access patterns.
    mask_u8 = (full_mask > 0).astype(np.uint8)[:, :, np.newaxis]

    if color_mode == "invert":
        # Background is source colour; characters are black.
        # result = color_full * (1 - mask)
        result = color_full * (1 - mask_u8)
    elif fg_color is not None:
        # Fixed foreground colour on background.
        fg = np.array(fg_color, dtype=np.uint8)
        bg_arr = np.array(bg, dtype=np.uint8)
        result = np.where(mask_u8, fg, bg_arr).astype(np.uint8)
    elif color_mode == "mono":
        # White glyphs on background.
        bg_arr = np.array(bg, dtype=np.uint8)
        result = np.where(mask_u8, np.uint8(255), bg_arr).astype(np.uint8)
    else:
        # "color" mode – each cell uses its source colour on bg.
        if bg == [0, 0, 0]:
            # Black background: a plain multiply is cheaper than np.where.
            result = color_full * mask_u8
        else:
            bg_arr = np.array(bg, dtype=np.uint8)
            result = np.where(mask_u8, color_full, bg_arr).astype(np.uint8)

    # Resize to match original if needed (grid may not divide img evenly:
    # pad the remainder with the background colour)
    orig_h, orig_w = img.shape[:2]
    if result.shape[0] != orig_h or result.shape[1] != orig_w:
        padded = np.full((orig_h, orig_w, 3), bg, dtype=np.uint8)
        copy_h = min(h, orig_h)
        copy_w = min(w, orig_w)
        padded[:copy_h, :copy_w] = result[:copy_h, :copy_w]
        result = padded

    return result


def prim_render_char_grid_fx(img: np.ndarray, chars: List[List[str]], colors: np.ndarray,
                             luminances: np.ndarray, cell_size: int,
                             color_mode: str = "color",
                             background_color: str = "black",
                             invert_colors: bool = False,
                             char_jitter: float = 0.0,
                             char_scale: float = 1.0,
                             char_rotation: float = 0.0,
                             char_hue_shift: float = 0.0,
                             jitter_source: str = "none",
                             scale_source: str = "none",
                             rotation_source: str = "none",
                             hue_source: str = "none") -> np.ndarray:
    """
    Render a grid of characters with per-character effects.
+ + Args: + img: source image (for dimensions) + chars: 2D list of single characters + colors: (rows, cols, 3) array of colors per cell + luminances: (rows, cols) array of luminance values (0-255) + cell_size: size of each cell + color_mode: "color", "mono", "invert", or any color name/hex + background_color: background color name/hex + invert_colors: if True, swap foreground and background colors + char_jitter: base jitter amount in pixels + char_scale: base scale factor (1.0 = normal) + char_rotation: base rotation in degrees + char_hue_shift: base hue shift in degrees (0-360) + jitter_source: source for jitter modulation ("none", "luminance", "position", "random") + scale_source: source for scale modulation + rotation_source: source for rotation modulation + hue_source: source for hue modulation + + Per-character effect sources: + "none" - use base value only + "luminance" - modulate by cell luminance (0-1) + "inv_luminance" - modulate by inverse luminance (dark = high) + "saturation" - modulate by cell color saturation + "position_x" - modulate by horizontal position (0-1) + "position_y" - modulate by vertical position (0-1) + "position_diag" - modulate by diagonal position + "random" - random per-cell value (deterministic from position) + "center_dist" - distance from center (0=center, 1=corner) + + Returns: rendered image + """ + # Parse colors + fg_color = parse_color(color_mode) + + if isinstance(background_color, (list, tuple)): + bg_color = tuple(int(c) for c in background_color[:3]) + else: + bg_color = parse_color(background_color) + if bg_color is None: + bg_color = (0, 0, 0) + + if invert_colors and fg_color is not None: + fg_color, bg_color = bg_color, fg_color + + cell_size = max(1, int(cell_size)) + + if not chars or not chars[0]: + return img.copy() + + rows = len(chars) + cols = len(chars[0]) + h, w = rows * cell_size, cols * cell_size + + bg = list(bg_color) + result = np.full((h, w, 3), bg, dtype=np.uint8) + + # Normalize luminances to 0-1 + 
lum_normalized = luminances.astype(np.float32) / 255.0 + + # Compute saturation from colors + colors_float = colors.astype(np.float32) / 255.0 + max_c = colors_float.max(axis=2) + min_c = colors_float.min(axis=2) + saturation = np.where(max_c > 0, (max_c - min_c) / max_c, 0) + + # Helper to get modulation value for a cell + def get_mod_value(source: str, r: int, c: int) -> float: + if source == "none": + return 1.0 + elif source == "luminance": + return lum_normalized[r, c] + elif source == "inv_luminance": + return 1.0 - lum_normalized[r, c] + elif source == "saturation": + return saturation[r, c] + elif source == "position_x": + return c / max(1, cols - 1) if cols > 1 else 0.5 + elif source == "position_y": + return r / max(1, rows - 1) if rows > 1 else 0.5 + elif source == "position_diag": + px = c / max(1, cols - 1) if cols > 1 else 0.5 + py = r / max(1, rows - 1) if rows > 1 else 0.5 + return (px + py) / 2.0 + elif source == "random": + # Deterministic random based on position + seed = (r * 1000 + c) % 10000 + return ((seed * 9301 + 49297) % 233280) / 233280.0 + elif source == "center_dist": + cx, cy = (cols - 1) / 2.0, (rows - 1) / 2.0 + dx = (c - cx) / max(1, cx) if cx > 0 else 0 + dy = (r - cy) / max(1, cy) if cy > 0 else 0 + return min(1.0, math.sqrt(dx*dx + dy*dy)) + else: + return 1.0 + + # Build character atlas at base size + font = cv2.FONT_HERSHEY_SIMPLEX + base_font_scale = cell_size / 20.0 + thickness = max(1, int(cell_size / 10)) + + unique_chars = set() + for row in chars: + for ch in row: + unique_chars.add(ch) + + # For rotation/scale, we need to render characters larger then transform + max_scale = max(1.0, char_scale * 1.5) # Allow headroom for scaling + atlas_size = int(cell_size * max_scale * 1.5) + + atlas = {} + for char in unique_chars: + if char and char != ' ': + try: + char_img = np.zeros((atlas_size, atlas_size), dtype=np.uint8) + scaled_font = base_font_scale * max_scale + (text_w, text_h), _ = cv2.getTextSize(char, font, 
scaled_font, thickness) + text_x = max(0, (atlas_size - text_w) // 2) + text_y = (atlas_size + text_h) // 2 + cv2.putText(char_img, char, (text_x, text_y), font, scaled_font, 255, thickness, cv2.LINE_AA) + atlas[char] = char_img + except: + atlas[char] = None + else: + atlas[char] = None + + # Render characters with effects + for r in range(rows): + for c in range(cols): + char = chars[r][c] + if not char or char == ' ': + continue + + char_img = atlas.get(char) + if char_img is None: + continue + + # Get per-cell modulation values + jitter_mod = get_mod_value(jitter_source, r, c) + scale_mod = get_mod_value(scale_source, r, c) + rot_mod = get_mod_value(rotation_source, r, c) + hue_mod = get_mod_value(hue_source, r, c) + + # Compute effective values + eff_jitter = char_jitter * jitter_mod + eff_scale = char_scale * (0.5 + 0.5 * scale_mod) if scale_source != "none" else char_scale + eff_rotation = char_rotation * (rot_mod * 2 - 1) # -1 to 1 range + eff_hue_shift = char_hue_shift * hue_mod + + # Apply transformations + transformed = char_img.copy() + + # Rotation + if abs(eff_rotation) > 0.5: + center = (atlas_size // 2, atlas_size // 2) + rot_matrix = cv2.getRotationMatrix2D(center, eff_rotation, 1.0) + transformed = cv2.warpAffine(transformed, rot_matrix, (atlas_size, atlas_size)) + + # Scale - resize to target size + target_size = max(1, int(cell_size * eff_scale)) + if target_size != atlas_size: + transformed = cv2.resize(transformed, (target_size, target_size), interpolation=cv2.INTER_LINEAR) + + # Compute position with jitter + base_y = r * cell_size + base_x = c * cell_size + + if eff_jitter > 0: + # Deterministic jitter based on position + jx = ((r * 7 + c * 13) % 100) / 100.0 - 0.5 + jy = ((r * 11 + c * 17) % 100) / 100.0 - 0.5 + base_x += int(jx * eff_jitter * 2) + base_y += int(jy * eff_jitter * 2) + + # Center the character in the cell + offset = (target_size - cell_size) // 2 + y1 = base_y - offset + x1 = base_x - offset + + # Determine color + if 
fg_color is not None: + color = np.array(fg_color, dtype=np.uint8) + elif color_mode == "mono": + color = np.array([255, 255, 255], dtype=np.uint8) + elif color_mode == "invert": + # Fill cell with source color first + cy1 = max(0, r * cell_size) + cy2 = min(h, (r + 1) * cell_size) + cx1 = max(0, c * cell_size) + cx2 = min(w, (c + 1) * cell_size) + result[cy1:cy2, cx1:cx2] = colors[r, c] + color = np.array([0, 0, 0], dtype=np.uint8) + else: # color mode + color = colors[r, c].copy() + + # Apply hue shift + if abs(eff_hue_shift) > 0.5 and color_mode not in ("mono", "invert") and fg_color is None: + # Convert to HSV, shift hue, convert back + color_hsv = cv2.cvtColor(color.reshape(1, 1, 3), cv2.COLOR_RGB2HSV) + # Cast to int to avoid uint8 overflow, then back to uint8 + new_hue = (int(color_hsv[0, 0, 0]) + int(eff_hue_shift * 180 / 360)) % 180 + color_hsv[0, 0, 0] = np.uint8(new_hue) + color = cv2.cvtColor(color_hsv, cv2.COLOR_HSV2RGB).flatten() + + # Blit character to result + mask = transformed > 0 + th, tw = transformed.shape[:2] + + for dy in range(th): + for dx in range(tw): + py = y1 + dy + px = x1 + dx + if 0 <= py < h and 0 <= px < w and mask[dy, dx]: + result[py, px] = color + + # Resize to match original if needed + orig_h, orig_w = img.shape[:2] + if result.shape[0] != orig_h or result.shape[1] != orig_w: + padded = np.full((orig_h, orig_w, 3), bg, dtype=np.uint8) + copy_h = min(h, orig_h) + copy_w = min(w, orig_w) + padded[:copy_h, :copy_w] = result[:copy_h, :copy_w] + result = padded + + return result + + +def _render_with_cell_effect( + frame: np.ndarray, + chars: List[List[str]], + colors: np.ndarray, + luminances: np.ndarray, + zone_contexts: List[List['ZoneContext']], + cell_size: int, + bg_color: tuple, + fg_color: tuple, + color_mode: str, + cell_effect, # Lambda or callable: (cell_image, zone_dict) -> cell_image + extra_params: dict, + interp, + env, + result: np.ndarray, +) -> np.ndarray: + """ + Render ASCII art using a cell_effect lambda for 
arbitrary per-cell transforms. + + Each character is rendered to a cell image, the cell_effect is called with + (cell_image, zone_dict), and the returned cell is composited into result. + + This allows arbitrary effects (rotate, blur, etc.) to be applied per-character. + """ + grid_rows = len(chars) + grid_cols = len(chars[0]) if chars else 0 + out_h, out_w = result.shape[:2] + + # Build character atlas (cell-sized colored characters on transparent bg) + font = cv2.FONT_HERSHEY_SIMPLEX + font_scale = cell_size / 20.0 + thickness = max(1, int(cell_size / 10)) + + # Helper to render a single character cell + def render_char_cell(char: str, color: np.ndarray) -> np.ndarray: + """Render a character onto a cell-sized RGB image.""" + cell = np.full((cell_size, cell_size, 3), bg_color, dtype=np.uint8) + if not char or char == ' ': + return cell + + try: + (text_w, text_h), _ = cv2.getTextSize(char, font, font_scale, thickness) + text_x = max(0, (cell_size - text_w) // 2) + text_y = (cell_size + text_h) // 2 + + # Render character in white on mask, then apply color + mask = np.zeros((cell_size, cell_size), dtype=np.uint8) + cv2.putText(mask, char, (text_x, text_y), font, font_scale, 255, thickness, cv2.LINE_AA) + + # Apply color where mask is set + for ch in range(3): + cell[:, :, ch] = np.where(mask > 0, color[ch], bg_color[ch]) + except: + pass + + return cell + + # Helper to evaluate cell_effect (handles artdag Lambda objects) + def eval_cell_effect(cell_img: np.ndarray, zone_dict: dict) -> np.ndarray: + """Call cell_effect with (cell_image, zone_dict), handle Lambda objects.""" + if callable(cell_effect): + return cell_effect(cell_img, zone_dict) + + # Check if it's an artdag Lambda object + try: + from artdag.sexp.parser import Lambda as ArtdagLambda + from artdag.sexp.evaluator import evaluate as artdag_evaluate + if isinstance(cell_effect, ArtdagLambda): + # Build env with closure values + eval_env = dict(cell_effect.closure) if cell_effect.closure else {} + # Bind 
lambda parameters + if len(cell_effect.params) >= 2: + eval_env[cell_effect.params[0]] = cell_img + eval_env[cell_effect.params[1]] = zone_dict + elif len(cell_effect.params) == 1: + # Single param gets zone_dict with cell as 'cell' key + zone_dict['cell'] = cell_img + eval_env[cell_effect.params[0]] = zone_dict + + # Add primitives to eval env + eval_env.update(PRIMITIVES) + + # Add effect runner - allows calling any loaded sexp effect on a cell + # Usage: (apply-effect "effect_name" cell {"param" value ...}) + # Or: (apply-effect "effect_name" cell) for defaults + def apply_effect_fn(effect_name, frame, params=None): + """Run a loaded sexp effect on a frame (cell).""" + if interp and hasattr(interp, 'run_effect'): + if params is None: + params = {} + result, _ = interp.run_effect(effect_name, frame, params, {}) + return result + return frame + eval_env['apply-effect'] = apply_effect_fn + + # Also inject loaded effects directly as callable functions + # These wrappers take positional args in common order for each effect + # Usage: (blur cell 5) or (rotate cell 45) etc. 
+ if interp and hasattr(interp, 'effects'): + for effect_name in interp.effects: + # Create a wrapper that calls run_effect with positional-to-named mapping + def make_effect_fn(name): + def effect_fn(frame, *args): + # Map common positional args to named params + params = {} + if name == 'blur' and len(args) >= 1: + params['radius'] = args[0] + elif name == 'rotate' and len(args) >= 1: + params['angle'] = args[0] + elif name == 'brightness' and len(args) >= 1: + params['factor'] = args[0] + elif name == 'contrast' and len(args) >= 1: + params['factor'] = args[0] + elif name == 'saturation' and len(args) >= 1: + params['factor'] = args[0] + elif name == 'hue_shift' and len(args) >= 1: + params['degrees'] = args[0] + elif name == 'rgb_split' and len(args) >= 1: + params['offset_x'] = args[0] + if len(args) >= 2: + params['offset_y'] = args[1] + elif name == 'pixelate' and len(args) >= 1: + params['block_size'] = args[0] + elif name == 'wave' and len(args) >= 1: + params['amplitude'] = args[0] + if len(args) >= 2: + params['frequency'] = args[1] + elif name == 'noise' and len(args) >= 1: + params['amount'] = args[0] + elif name == 'posterize' and len(args) >= 1: + params['levels'] = args[0] + elif name == 'threshold' and len(args) >= 1: + params['level'] = args[0] + elif name == 'sharpen' and len(args) >= 1: + params['amount'] = args[0] + elif len(args) == 1 and isinstance(args[0], dict): + # Accept dict as single arg + params = args[0] + result, _ = interp.run_effect(name, frame, params, {}) + return result + return effect_fn + eval_env[effect_name] = make_effect_fn(effect_name) + + result = artdag_evaluate(cell_effect.body, eval_env) + if isinstance(result, np.ndarray): + return result + return cell_img + except ImportError: + pass + + # Fallback: return cell unchanged + return cell_img + + # Render each cell + for r in range(grid_rows): + for c in range(grid_cols): + char = chars[r][c] + zone = zone_contexts[r][c] + + # Determine character color + if fg_color is 
not None: + color = np.array(fg_color, dtype=np.uint8) + elif color_mode == "mono": + color = np.array([255, 255, 255], dtype=np.uint8) + elif color_mode == "invert": + color = np.array([0, 0, 0], dtype=np.uint8) + else: + color = colors[r, c].copy() + + # Render character to cell image + cell_img = render_char_cell(char, color) + + # Build zone dict + zone_dict = { + 'row': zone.row, + 'col': zone.col, + 'row-norm': zone.row_norm, + 'col-norm': zone.col_norm, + 'lum': zone.luminance, + 'sat': zone.saturation, + 'hue': zone.hue, + 'r': zone.r, + 'g': zone.g, + 'b': zone.b, + 'char': char, + 'color': color.tolist(), + 'cell_size': cell_size, + } + # Add extra params (energy, rotation_scale, etc.) + if extra_params: + zone_dict.update(extra_params) + + # Call cell_effect + modified_cell = eval_cell_effect(cell_img, zone_dict) + + # Ensure result is valid + if modified_cell is None or not isinstance(modified_cell, np.ndarray): + modified_cell = cell_img + if modified_cell.shape[:2] != (cell_size, cell_size): + # Resize if cell size changed + modified_cell = cv2.resize(modified_cell, (cell_size, cell_size)) + if len(modified_cell.shape) == 2: + # Convert grayscale to RGB + modified_cell = cv2.cvtColor(modified_cell, cv2.COLOR_GRAY2RGB) + + # Composite into result + y1 = r * cell_size + x1 = c * cell_size + y2 = min(y1 + cell_size, out_h) + x2 = min(x1 + cell_size, out_w) + ch = y2 - y1 + cw = x2 - x1 + result[y1:y2, x1:x2] = modified_cell[:ch, :cw] + + # Resize to match original frame if needed + orig_h, orig_w = frame.shape[:2] + if result.shape[0] != orig_h or result.shape[1] != orig_w: + bg = list(bg_color) + padded = np.full((orig_h, orig_w, 3), bg, dtype=np.uint8) + copy_h = min(out_h, orig_h) + copy_w = min(out_w, orig_w) + padded[:copy_h, :copy_w] = result[:copy_h, :copy_w] + result = padded + + return result + + +def prim_ascii_fx_zone( + frame: np.ndarray, + cols: int, + char_size_override: int, # If set, overrides cols-based calculation + alphabet: str, + 
color_mode: str, + background: str, + contrast: float, + char_hue_expr, # Expression, literal, or None + char_sat_expr, # Expression, literal, or None + char_bright_expr, # Expression, literal, or None + char_scale_expr, # Expression, literal, or None + char_rotation_expr, # Expression, literal, or None + char_jitter_expr, # Expression, literal, or None + interp, # Interpreter for expression evaluation + env, # Environment with bound values + extra_params=None, # Extra params to include in zone dict for lambdas + cell_effect=None, # Lambda (cell_image, zone_dict) -> cell_image for arbitrary cell effects +) -> np.ndarray: + """ + Render ASCII art with per-zone expression-driven transforms. + + Args: + frame: Source image (H, W, 3) RGB uint8 + cols: Number of character columns + char_size_override: If set, use this cell size instead of cols-based + alphabet: Character set name or literal string + color_mode: "color", "mono", "invert", or color name/hex + background: Background color name or hex + contrast: Contrast boost for character selection + char_hue_expr: Expression for hue shift (evaluated per zone) + char_sat_expr: Expression for saturation adjustment (evaluated per zone) + char_bright_expr: Expression for brightness adjustment (evaluated per zone) + char_scale_expr: Expression for scale factor (evaluated per zone) + char_rotation_expr: Expression for rotation degrees (evaluated per zone) + char_jitter_expr: Expression for position jitter (evaluated per zone) + interp: Interpreter instance for expression evaluation + env: Environment with bound variables + cell_effect: Optional lambda that receives (cell_image, zone_dict) and returns + a modified cell_image. When provided, each character is rendered + to a cell image, passed to this lambda, and the result composited. + This allows arbitrary effects to be applied per-character. 
+ + Zone variables available in expressions: + zone-row, zone-col: Grid position (integers) + zone-row-norm, zone-col-norm: Normalized position (0-1) + zone-lum: Cell luminance (0-1) + zone-sat: Cell saturation (0-1) + zone-hue: Cell hue (0-360) + zone-r, zone-g, zone-b: RGB components (0-1) + + Returns: Rendered image + """ + h, w = frame.shape[:2] + # Use char_size if provided, otherwise calculate from cols + if char_size_override is not None: + cell_size = max(4, int(char_size_override)) + else: + cell_size = max(4, w // cols) + + # Get zone data using extended sampling + colors, luminances, zone_contexts = cell_sample_extended(frame, cell_size) + + # Convert luminances to characters + chars = prim_luminance_to_chars(luminances, alphabet, contrast) + + grid_rows = len(chars) + grid_cols = len(chars[0]) if chars else 0 + + # Parse colors + fg_color = parse_color(color_mode) + if isinstance(background, (list, tuple)): + bg_color = tuple(int(c) for c in background[:3]) + else: + bg_color = parse_color(background) + if bg_color is None: + bg_color = (0, 0, 0) + + # Arrays for per-zone transform values + hue_shifts = np.zeros((grid_rows, grid_cols), dtype=np.float32) + saturations = np.ones((grid_rows, grid_cols), dtype=np.float32) + brightness = np.ones((grid_rows, grid_cols), dtype=np.float32) + scales = np.ones((grid_rows, grid_cols), dtype=np.float32) + rotations = np.zeros((grid_rows, grid_cols), dtype=np.float32) + jitters = np.zeros((grid_rows, grid_cols), dtype=np.float32) + + # Helper to evaluate expression or return literal value + def eval_expr(expr, zone, char): + if expr is None: + return None + if isinstance(expr, (int, float)): + return expr + + # Build zone dict for lambda calls + zone_dict = { + 'row': zone.row, + 'col': zone.col, + 'row-norm': zone.row_norm, + 'col-norm': zone.col_norm, + 'lum': zone.luminance, + 'sat': zone.saturation, + 'hue': zone.hue, + 'r': zone.r, + 'g': zone.g, + 'b': zone.b, + 'char': char, + } + # Add extra params (energy, 
rotation_scale, etc.) for lambdas to access + if extra_params: + zone_dict.update(extra_params) + + # Check if it's a Python callable + if callable(expr): + return expr(zone_dict) + + # Check if it's an artdag Lambda object + try: + from artdag.sexp.parser import Lambda as ArtdagLambda + from artdag.sexp.evaluator import evaluate as artdag_evaluate + if isinstance(expr, ArtdagLambda): + # Build env with zone dict and any closure values + eval_env = dict(expr.closure) if expr.closure else {} + # Bind the lambda parameter to zone_dict + if expr.params: + eval_env[expr.params[0]] = zone_dict + return artdag_evaluate(expr.body, eval_env) + except ImportError: + pass + + # It's an expression - evaluate with zone context (sexp_effects style) + return interp.eval_with_zone(expr, env, zone) + + # Evaluate expressions for each zone + for r in range(grid_rows): + for c in range(grid_cols): + zone = zone_contexts[r][c] + char = chars[r][c] + + val = eval_expr(char_hue_expr, zone, char) + if val is not None: + hue_shifts[r, c] = float(val) + + val = eval_expr(char_sat_expr, zone, char) + if val is not None: + saturations[r, c] = float(val) + + val = eval_expr(char_bright_expr, zone, char) + if val is not None: + brightness[r, c] = float(val) + + val = eval_expr(char_scale_expr, zone, char) + if val is not None: + scales[r, c] = float(val) + + val = eval_expr(char_rotation_expr, zone, char) + if val is not None: + rotations[r, c] = float(val) + + val = eval_expr(char_jitter_expr, zone, char) + if val is not None: + jitters[r, c] = float(val) + + # Now render with computed transform arrays + out_h, out_w = grid_rows * cell_size, grid_cols * cell_size + bg = list(bg_color) + result = np.full((out_h, out_w, 3), bg, dtype=np.uint8) + + # If cell_effect is provided, use the cell-mapper rendering path + if cell_effect is not None: + return _render_with_cell_effect( + frame, chars, colors, luminances, zone_contexts, + cell_size, bg_color, fg_color, color_mode, + cell_effect, 
extra_params, interp, env, result + ) + + # Build character atlas + font = cv2.FONT_HERSHEY_SIMPLEX + base_font_scale = cell_size / 20.0 + thickness = max(1, int(cell_size / 10)) + + unique_chars = set() + for row in chars: + for ch in row: + unique_chars.add(ch) + + # For rotation/scale, render characters larger then transform + max_scale = max(1.0, np.max(scales) * 1.5) + atlas_size = int(cell_size * max_scale * 1.5) + + atlas = {} + for char in unique_chars: + if char and char != ' ': + try: + char_img = np.zeros((atlas_size, atlas_size), dtype=np.uint8) + scaled_font = base_font_scale * max_scale + (text_w, text_h), _ = cv2.getTextSize(char, font, scaled_font, thickness) + text_x = max(0, (atlas_size - text_w) // 2) + text_y = (atlas_size + text_h) // 2 + cv2.putText(char_img, char, (text_x, text_y), font, scaled_font, 255, thickness, cv2.LINE_AA) + atlas[char] = char_img + except: + atlas[char] = None + else: + atlas[char] = None + + # Render characters with per-zone effects + for r in range(grid_rows): + for c in range(grid_cols): + char = chars[r][c] + if not char or char == ' ': + continue + + char_img = atlas.get(char) + if char_img is None: + continue + + # Get per-cell values + eff_scale = scales[r, c] + eff_rotation = rotations[r, c] + eff_jitter = jitters[r, c] + eff_hue_shift = hue_shifts[r, c] + eff_brightness = brightness[r, c] + eff_saturation = saturations[r, c] + + # Apply transformations to character + transformed = char_img.copy() + + # Rotation + if abs(eff_rotation) > 0.5: + center = (atlas_size // 2, atlas_size // 2) + rot_matrix = cv2.getRotationMatrix2D(center, eff_rotation, 1.0) + transformed = cv2.warpAffine(transformed, rot_matrix, (atlas_size, atlas_size)) + + # Scale - resize to target size + target_size = max(1, int(cell_size * eff_scale)) + if target_size != atlas_size: + transformed = cv2.resize(transformed, (target_size, target_size), interpolation=cv2.INTER_LINEAR) + + # Compute position with jitter + base_y = r * cell_size + 
base_x = c * cell_size + + if eff_jitter > 0: + # Deterministic jitter based on position + jx = ((r * 7 + c * 13) % 100) / 100.0 - 0.5 + jy = ((r * 11 + c * 17) % 100) / 100.0 - 0.5 + base_x += int(jx * eff_jitter * 2) + base_y += int(jy * eff_jitter * 2) + + # Center the character in the cell + offset = (target_size - cell_size) // 2 + y1 = base_y - offset + x1 = base_x - offset + + # Determine color + if fg_color is not None: + color = np.array(fg_color, dtype=np.uint8) + elif color_mode == "mono": + color = np.array([255, 255, 255], dtype=np.uint8) + elif color_mode == "invert": + cy1 = max(0, r * cell_size) + cy2 = min(out_h, (r + 1) * cell_size) + cx1 = max(0, c * cell_size) + cx2 = min(out_w, (c + 1) * cell_size) + result[cy1:cy2, cx1:cx2] = colors[r, c] + color = np.array([0, 0, 0], dtype=np.uint8) + else: # color mode - use source colors + color = colors[r, c].copy() + + # Apply hue shift + if abs(eff_hue_shift) > 0.5 and color_mode not in ("mono", "invert") and fg_color is None: + color_hsv = cv2.cvtColor(color.reshape(1, 1, 3), cv2.COLOR_RGB2HSV) + new_hue = (int(color_hsv[0, 0, 0]) + int(eff_hue_shift * 180 / 360)) % 180 + color_hsv[0, 0, 0] = np.uint8(new_hue) + color = cv2.cvtColor(color_hsv, cv2.COLOR_HSV2RGB).flatten() + + # Apply saturation adjustment + if abs(eff_saturation - 1.0) > 0.01 and color_mode not in ("mono", "invert") and fg_color is None: + color_hsv = cv2.cvtColor(color.reshape(1, 1, 3), cv2.COLOR_RGB2HSV) + new_sat = np.clip(int(color_hsv[0, 0, 1] * eff_saturation), 0, 255) + color_hsv[0, 0, 1] = np.uint8(new_sat) + color = cv2.cvtColor(color_hsv, cv2.COLOR_HSV2RGB).flatten() + + # Apply brightness adjustment + if abs(eff_brightness - 1.0) > 0.01: + color = np.clip(color.astype(np.float32) * eff_brightness, 0, 255).astype(np.uint8) + + # Blit character to result + mask = transformed > 0 + th, tw = transformed.shape[:2] + + for dy in range(th): + for dx in range(tw): + py = y1 + dy + px = x1 + dx + if 0 <= py < out_h and 0 <= px < out_w 
def prim_make_char_grid(rows: int, cols: int, fill_char: str = " ") -> List[List[str]]:
    """Build a rows x cols character grid with every cell set to fill_char."""
    return [[fill_char] * cols for _ in range(rows)]


def prim_set_char(chars: List[List[str]], row: int, col: int, char: str) -> List[List[str]]:
    """Return a copy of the grid with (row, col) replaced by char.

    Out-of-range positions are ignored: the copy comes back unchanged.
    """
    updated = [list(r) for r in chars]  # copy each row so the input is untouched
    if 0 <= row < len(updated) and 0 <= col < len(updated[0]):
        updated[row][col] = char
    return updated


def prim_get_char(chars: List[List[str]], row: int, col: int) -> str:
    """Return the character at (row, col), or a space when out of range."""
    if not (0 <= row < len(chars)):
        return " "
    if not (0 <= col < len(chars[0])):
        return " "
    return chars[row][col]


def prim_char_grid_dimensions(chars: List[List[str]]) -> Tuple[int, int]:
    """Return (rows, cols) of a character grid; (0, 0) for an empty grid."""
    if not chars:
        return (0, 0)
    first_row = chars[0]
    return (len(chars), len(first_row) if first_row else 0)


def prim_alphabet_char(alphabet: str, index: int) -> str:
    """Return the character at index in the named (or literal) alphabet.

    The index wraps around the alphabet length; an empty alphabet yields a space.
    """
    pool = CHAR_ALPHABETS.get(alphabet, alphabet)
    if not pool:
        return " "
    return pool[int(index) % len(pool)]


def prim_alphabet_length(alphabet: str) -> int:
    """Return the length of the named (or literal) alphabet."""
    return len(CHAR_ALPHABETS.get(alphabet, alphabet))


def prim_map_char_grid(chars: List[List[str]], luminances: np.ndarray, fn: Callable) -> List[List[str]]:
    """Map fn over a character grid, producing a new grid.

    fn receives (row, col, char, luminance) and returns the replacement
    character; a falsy return value becomes a space. Cells outside the
    luminance array's bounds see a luminance of 0.

    Example:
        (map-char-grid chars luminances
          (lambda (r c ch lum)
            (if (> lum 128)
                (alphabet-char "blocks" (floor (/ lum 50)))
                ch)))
    """
    if not chars or not chars[0]:
        return chars

    n_rows, n_cols = len(chars), len(chars[0])
    lum_rows, lum_cols = luminances.shape[0], luminances.shape[1]

    mapped = []
    for r in range(n_rows):
        new_row = []
        for c in range(n_cols):
            current = chars[r][c]
            brightness = float(luminances[r, c]) if r < lum_rows and c < lum_cols else 0
            replacement = fn(r, c, current, brightness)
            new_row.append(str(replacement) if replacement else " ")
        mapped.append(new_row)
    return mapped


def prim_map_colors(colors: np.ndarray, fn: Callable) -> np.ndarray:
    """Map fn over a (rows, cols, 3) color grid, producing a new array.

    fn receives (row, col, [r, g, b]) and returns a new [r, g, b] triple;
    returning None keeps the original color for that cell.
    """
    if colors.size == 0:
        return colors

    n_rows, n_cols = colors.shape[:2]
    out = colors.copy()
    for r in range(n_rows):
        for c in range(n_cols):
            replacement = fn(r, c, list(colors[r, c]))
            if replacement is not None:
                out[r, c] = replacement[:3]
    return out
def prim_pixelsort(img: np.ndarray, sort_by: str = "lightness",
                   threshold_low: float = 50, threshold_high: float = 200,
                   angle: float = 0, reverse: bool = False) -> np.ndarray:
    """Pixel-sorting glitch effect.

    Pixels whose sort key falls inside [threshold_low, threshold_high] are
    grouped into contiguous runs along each row (or column, for near-vertical
    angles) and reordered by the chosen key.

    Args:
        img: source RGB image (H, W, 3)
        sort_by: "lightness", "hue", "saturation", "red", "green", "blue"
        threshold_low: pixels with keys below this are left untouched
        threshold_high: pixels with keys above this are left untouched
        angle: 0 = horizontal runs; 45..135 (mod 180) sorts vertically
        reverse: sort runs in descending order
    """
    # Near-vertical angles: transpose so the row loop effectively sorts columns.
    vertical = 45 <= (angle % 180) <= 135
    frame = np.transpose(img, (1, 0, 2)) if vertical else img
    h, w = frame.shape[:2]

    key = _pixelsort_key(frame, sort_by)
    selected = (key >= threshold_low) & (key <= threshold_high)
    result = frame.copy()

    for y in range(h):
        row = result[y].copy()
        for start, end in _mask_runs(selected[y]):
            if end - start > 1:
                order = np.argsort(key[y, start:end])
                if reverse:
                    order = order[::-1]
                row[start:end] = row[start:end][order]
        result[y] = row

    if vertical:
        result = np.transpose(result, (1, 0, 2))
    return np.ascontiguousarray(result)


def _pixelsort_key(frame: np.ndarray, sort_by: str) -> np.ndarray:
    """Per-pixel float32 sort key for prim_pixelsort."""
    if sort_by == "hue":
        return cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)[:, :, 0].astype(np.float32)
    if sort_by == "saturation":
        return cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)[:, :, 1].astype(np.float32)
    channel = {"red": 0, "green": 1, "blue": 2}.get(sort_by)
    if channel is not None:
        return frame[:, :, channel].astype(np.float32)
    # "lightness" and any unrecognized key fall back to grayscale.
    return cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY).astype(np.float32)


def _mask_runs(row_mask):
    """Yield (start, end) index pairs for each contiguous True run in row_mask."""
    start = None
    for i, flag in enumerate(row_mask):
        if flag and start is None:
            start = i
        elif not flag and start is not None:
            yield (start, i)
            start = None
    if start is not None:
        yield (start, len(row_mask))


def prim_datamosh(img: np.ndarray, prev_frame: np.ndarray,
                  block_size: int = 32, corruption: float = 0.3,
                  max_offset: int = 50, color_corrupt: bool = True) -> np.ndarray:
    """Datamosh/glitch block corruption effect.

    Each block is independently corrupted with probability `corruption` by
    one of four modes: shift (copy from a nearby offset), ghost (reuse the
    previous frame's block), channel smear (roll one color channel), or
    swap with a random other block.

    Args:
        img: current frame
        prev_frame: previous frame (or None)
        block_size: size of corruption blocks (clamped to 8..128)
        corruption: probability 0-1 of corrupting each block
        max_offset: maximum pixel shift for the shift mode
        color_corrupt: also allow color channel shifts
    """
    if corruption <= 0:
        return img.copy()

    block_size = max(8, min(int(block_size), 128))
    h, w = img.shape[:2]
    result = img.copy()

    for by in range(0, h, block_size):
        for bx in range(0, w, block_size):
            bh = min(block_size, h - by)
            bw = min(block_size, w - bx)

            if _rng.random() >= corruption:
                continue
            mode = _rng.randint(0, 3)

            if mode == 0 and max_offset > 0:
                # Shift: copy the block from a randomly offset source location.
                ox = _rng.randint(-max_offset, max_offset)
                oy = _rng.randint(-max_offset, max_offset)
                sx = max(0, min(bx + ox, w - bw))
                sy = max(0, min(by + oy, h - bh))
                result[by:by + bh, bx:bx + bw] = img[sy:sy + bh, sx:sx + bw]

            elif mode == 1 and prev_frame is not None:
                # Ghost: reuse the same block from the previous frame.
                if prev_frame.shape == img.shape:
                    result[by:by + bh, bx:bx + bw] = prev_frame[by:by + bh, bx:bx + bw]

            elif mode == 2 and color_corrupt:
                # Channel smear: roll one color channel vertically within the block.
                block = result[by:by + bh, bx:bx + bw].copy()
                shift = _rng.randint(1, 3)
                channel = _rng.randint(0, 2)
                block[:, :, channel] = np.roll(block[:, :, channel], shift, axis=0)
                result[by:by + bh, bx:bx + bw] = block

            else:
                # Swap with a random other block (also the fallback when a
                # chosen mode is disabled by its guard condition).
                other_bx = _rng.randint(0, max(0, w - bw))
                other_by = _rng.randint(0, max(0, h - bh))
                held = result[by:by + bh, bx:bx + bw].copy()
                result[by:by + bh, bx:bx + bw] = img[other_by:other_by + bh, other_bx:other_bx + bw]
                result[other_by:other_by + bh, other_bx:other_bx + bw] = held

    return result
def prim_ripple_displace(w: int, h: int, freq: float, amp: float, cx: float = None, cy: float = None,
                         decay: float = 0, phase: float = 0) -> Tuple[np.ndarray, np.ndarray]:
    """Build radial ripple displacement maps for use with remap.

    A sine wave radiating from (cx, cy) displaces each pixel along its
    radial direction by up to `amp` pixels.

    Args:
        w, h: map dimensions in pixels
        freq: ripple frequency (cycles across max(w, h))
        amp: ripple amplitude in pixels
        cx, cy: ripple center; defaults to the image center
        decay: exponential falloff with distance (0 = no decay)
        phase: phase offset in radians

    Returns: (map_x, map_y) float32 arrays of shape (h, w)
    """
    w, h = int(w), int(h)
    center_x = w / 2 if cx is None else cx
    center_y = h / 2 if cy is None else cy

    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    off_x = xs - center_x
    off_y = ys - center_y
    radius = np.sqrt(off_x ** 2 + off_y ** 2)

    span = max(w, h)
    wave = np.sin(2 * np.pi * freq * radius / span + phase) * amp
    if decay > 0:
        wave = wave * np.exp(-radius * decay / span)

    # Unit radial direction; forced to zero at the exact center to avoid 0/0.
    with np.errstate(divide='ignore', invalid='ignore'):
        unit_x = np.where(radius > 0, off_x / radius, 0)
        unit_y = np.where(radius > 0, off_y / radius, 0)

    map_x = (xs + wave * unit_x).astype(np.float32)
    map_y = (ys + wave * unit_y).astype(np.float32)
    return (map_x, map_y)
prim_rgb_to_hsv, + 'hsv->rgb': prim_hsv_to_rgb, + 'blend-color': prim_blend_color, + 'average-color': prim_average_color, + + # Vectorized bulk operations + 'color-matrix': prim_color_matrix, + 'adjust': prim_adjust, + 'mix-gray': prim_mix_gray, + 'invert-img': prim_invert_img, + 'add-noise': prim_add_noise, + 'quantize': prim_quantize, + 'shift-hsv': prim_shift_hsv, + + # Bulk operations + 'map-pixels': prim_map_pixels, + 'map-rows': prim_map_rows, + 'for-grid': prim_for_grid, + 'fold-pixels': prim_fold_pixels, + + # Filters + 'convolve': prim_convolve, + 'blur': prim_blur, + 'box-blur': prim_box_blur, + 'edges': prim_edges, + 'sobel': prim_sobel, + 'dilate': prim_dilate, + 'erode': prim_erode, + + # Geometry + 'translate': prim_translate, + 'rotate-img': prim_rotate, + 'scale-img': prim_scale, + 'flip-h': prim_flip_h, + 'flip-v': prim_flip_v, + 'remap': prim_remap, + 'make-coords': prim_make_coords, + + # Blending + 'blend-images': prim_blend_images, + 'blend-mode': prim_blend_mode, + 'mask': prim_mask, + + # Drawing + 'draw-char': prim_draw_char, + 'draw-text': prim_draw_text, + 'fill-rect': prim_fill_rect, + 'draw-line': prim_draw_line, + + # Math + 'sin': prim_sin, + 'cos': prim_cos, + 'tan': prim_tan, + 'atan2': prim_atan2, + 'sqrt': prim_sqrt, + 'pow': prim_pow, + 'abs': prim_abs, + 'floor': prim_floor, + 'ceil': prim_ceil, + 'round': prim_round, + 'min': prim_min, + 'max': prim_max, + 'clamp': prim_clamp, + 'lerp': prim_lerp, + 'mod': prim_mod, + 'random': prim_random, + 'randint': prim_randint, + 'gaussian': prim_gaussian, + 'assert': prim_assert, + 'pi': math.pi, + 'tau': math.tau, + + # Array + 'length': prim_length, + 'len': prim_length, # alias + 'nth': prim_nth, + 'first': prim_first, + 'rest': prim_rest, + 'take': prim_take, + 'drop': prim_drop, + 'cons': prim_cons, + 'append': prim_append, + 'reverse': prim_reverse, + 'range': prim_range, + 'roll': prim_roll, + 'list': prim_list, + + # Array math (vectorized operations on coordinate arrays) + 
'arr+': prim_arr_add, + 'arr-': prim_arr_sub, + 'arr*': prim_arr_mul, + 'arr/': prim_arr_div, + 'arr-mod': prim_arr_mod, + 'arr-sin': prim_arr_sin, + 'arr-cos': prim_arr_cos, + 'arr-tan': prim_arr_tan, + 'arr-sqrt': prim_arr_sqrt, + 'arr-pow': prim_arr_pow, + 'arr-abs': prim_arr_abs, + 'arr-neg': prim_arr_neg, + 'arr-exp': prim_arr_exp, + 'arr-atan2': prim_arr_atan2, + 'arr-min': prim_arr_min, + 'arr-max': prim_arr_max, + 'arr-clip': prim_arr_clip, + 'arr-where': prim_arr_where, + 'arr-floor': prim_arr_floor, + 'arr-lerp': prim_arr_lerp, + + # Coordinate transformations + 'polar-from-center': prim_polar_from_center, + 'cart-from-polar': prim_cart_from_polar, + 'normalize-coords': prim_normalize_coords, + 'coords-x': prim_coords_x, + 'coords-y': prim_coords_y, + 'make-coords-centered': prim_make_coords_centered, + + # Specialized distortion maps + 'wave-displace': prim_wave_displace, + 'swirl-displace': prim_swirl_displace, + 'fisheye-displace': prim_fisheye_displace, + 'kaleidoscope-displace': prim_kaleidoscope_displace, + 'ripple-displace': prim_ripple_displace, + + # Character/ASCII art + 'cell-sample': prim_cell_sample, + 'cell-sample-extended': cell_sample_extended, + 'luminance-to-chars': prim_luminance_to_chars, + 'render-char-grid': prim_render_char_grid, + 'render-char-grid-fx': prim_render_char_grid_fx, + 'ascii-fx-zone': prim_ascii_fx_zone, + 'make-char-grid': prim_make_char_grid, + 'set-char': prim_set_char, + 'get-char': prim_get_char, + 'char-grid-dimensions': prim_char_grid_dimensions, + 'alphabet-char': prim_alphabet_char, + 'alphabet-length': prim_alphabet_length, + 'map-char-grid': prim_map_char_grid, + 'map-colors': prim_map_colors, + + # Glitch art + 'pixelsort': prim_pixelsort, + 'datamosh': prim_datamosh, + +} diff --git a/sexp_effects/test_interpreter.py b/sexp_effects/test_interpreter.py new file mode 100644 index 0000000..550b21a --- /dev/null +++ b/sexp_effects/test_interpreter.py @@ -0,0 +1,236 @@ +#!/usr/bin/env python3 +""" +Test the 
def test_parser():
    """Exercise the S-expression parser on atoms, lists, and nesting."""
    print("Testing parser...")

    # Atoms parse to native Python values.
    assert parse("42") == 42
    assert parse("3.14") == 3.14
    assert parse('"hello"') == "hello"
    assert parse("true") == True

    # A list form parses to a symbol head followed by argument values.
    form = parse("(+ 1 2)")
    assert form[0].name == "+"
    assert form[1] == 1

    # Nested forms keep their head symbol.
    assert parse("(define x (+ 1 2))")[0].name == "define"

    print(" Parser OK")


def test_interpreter_basics():
    """Exercise arithmetic, comparison, let, lambda, and if evaluation."""
    print("Testing interpreter basics...")

    interp = get_interpreter()

    def run(src):
        return interp.eval(parse(src))

    # Arithmetic
    assert run("(+ 1 2)") == 3
    assert run("(* 3 4)") == 12
    assert run("(- 10 3)") == 7

    # Comparison
    assert run("(< 1 2)") == True
    assert run("(> 1 2)") == False

    # Let bindings
    assert run("(let ((x 5)) x)") == 5
    assert run("(let ((x 5) (y 3)) (+ x y))") == 8

    # Lambda application
    assert run("((lambda (x) (* x 2)) 5)") == 10

    # Conditionals
    assert run("(if true 1 2)") == 1
    assert run("(if false 1 2)") == 2

    print(" Interpreter basics OK")


def test_primitives():
    """Exercise image primitives: size queries, pixel access, color helpers."""
    print("Testing primitives...")

    interp = get_interpreter()

    def run(src):
        return interp.eval(parse(src))

    # Mostly-black test image with one known pixel.
    img = np.zeros((100, 100, 3), dtype=np.uint8)
    img[50, 50] = [255, 128, 64]
    interp.global_env.set('test_img', img)

    # Dimensions
    assert run("(width test_img)") == 100
    assert run("(height test_img)") == 100

    # Pixel access
    assert run("(pixel test_img 50 50)") == [255, 128, 64]

    # Color construction
    assert run("(rgb 100 150 200)") == [100, 150, 200]

    # Luminance of a neutral gray is approximately its component value.
    assert abs(run("(luminance (rgb 100 100 100))") - 100) < 1

    print(" Primitives OK")


def test_effect_loading():
    """Load every .sexp effect file and confirm at least one registered."""
    print("Testing effect loading...")

    load_effects_dir(str(Path(__file__).parent / "effects"))

    effects = list_effects()
    print(f" Loaded {len(effects)} effects: {', '.join(sorted(effects))}")

    assert len(effects) > 0
    print(" Effect loading OK")


def test_effect_execution():
    """Run every loaded effect on a random frame; report pass/fail lists."""
    print("Testing effect execution...")

    img = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
    load_effects_dir(str(Path(__file__).parent / "effects"))

    effects = list_effects()
    passed = 0
    failed = []

    for name in sorted(effects):
        try:
            result, state = run_effect(name, img.copy(), {'_time': 0.5}, {})
            assert isinstance(result, np.ndarray)
            assert result.shape == img.shape
        except Exception as e:
            failed.append((name, str(e)))
            print(f" {name}: FAILED - {e}")
        else:
            passed += 1
            print(f" {name}: OK")

    print(f" Passed: {passed}/{len(effects)}")
    if failed:
        print(f" Failed: {[f[0] for f in failed]}")

    return passed, failed
{}) + assert result.shape == frame.shape + print(" Basic run: OK") + + # Test 2: With zone-lum expression + expr = parse('(* zone-lum 180)') + result, _ = run_effect('ascii_fx_zone', frame, { + 'cols': 20, + 'char_hue': expr + }, {}) + assert result.shape == frame.shape + print(" Zone-lum expression: OK") + + # Test 3: With multiple expressions + scale_expr = parse('(+ 0.5 (* zone-lum 0.5))') + rot_expr = parse('(* zone-row-norm 30)') + result, _ = run_effect('ascii_fx_zone', frame, { + 'cols': 20, + 'char_scale': scale_expr, + 'char_rotation': rot_expr + }, {}) + assert result.shape == frame.shape + print(" Multiple expressions: OK") + + # Test 4: With numeric literals + result, _ = run_effect('ascii_fx_zone', frame, { + 'cols': 20, + 'char_hue': 90, + 'char_scale': 1.2 + }, {}) + assert result.shape == frame.shape + print(" Numeric literals: OK") + + # Test 5: Zone position expressions + col_expr = parse('(* zone-col-norm 360)') + result, _ = run_effect('ascii_fx_zone', frame, { + 'cols': 20, + 'char_hue': col_expr + }, {}) + assert result.shape == frame.shape + print(" Zone position expression: OK") + + print(" ascii_fx_zone OK") + + +def main(): + print("=" * 60) + print("S-Expression Effect Interpreter Tests") + print("=" * 60) + + test_parser() + test_interpreter_basics() + test_primitives() + test_effect_loading() + test_ascii_fx_zone() + passed, failed = test_effect_execution() + + print("=" * 60) + if not failed: + print("All tests passed!") + else: + print(f"Tests completed with {len(failed)} failures") + print("=" * 60) + + +if __name__ == "__main__": + main() diff --git a/streaming/__init__.py b/streaming/__init__.py new file mode 100644 index 0000000..2c007cc --- /dev/null +++ b/streaming/__init__.py @@ -0,0 +1,44 @@ +""" +Streaming video compositor for real-time effect processing. 
+ +This module provides a frame-by-frame streaming architecture that: +- Reads from multiple video sources with automatic looping +- Applies effects inline (no intermediate files) +- Composites layers with time-varying weights +- Outputs to display, file, or stream + +Usage: + from streaming import StreamingCompositor, VideoSource, AudioAnalyzer + + compositor = StreamingCompositor( + sources=["video1.mp4", "video2.mp4"], + effects_per_source=[...], + compositor_config={...}, + ) + + # With live audio + audio = AudioAnalyzer(device=0) + compositor.run(output="output.mp4", duration=60, audio=audio) + + # With preview window + compositor.run(output="preview", duration=60) + +Backends: + - numpy: Works everywhere, ~3-5 fps (default) + - glsl: Requires GPU, 30+ fps real-time (future) +""" + +from .sources import VideoSource, ImageSource +from .compositor import StreamingCompositor +from .backends import NumpyBackend, get_backend +from .output import DisplayOutput, FileOutput + +__all__ = [ + "StreamingCompositor", + "VideoSource", + "ImageSource", + "NumpyBackend", + "get_backend", + "DisplayOutput", + "FileOutput", +] diff --git a/streaming/audio.py b/streaming/audio.py new file mode 100644 index 0000000..9d20937 --- /dev/null +++ b/streaming/audio.py @@ -0,0 +1,486 @@ +""" +Live audio analysis for reactive effects. + +Provides real-time audio features: +- Energy (RMS amplitude) +- Beat detection +- Frequency bands (bass, mid, high) +""" + +import numpy as np +from typing import Optional +import threading +import time + + +class AudioAnalyzer: + """ + Real-time audio analyzer using sounddevice. + + Captures audio from microphone/line-in and computes + features in real-time for effect parameter bindings. 
+ + Example: + analyzer = AudioAnalyzer(device=0) + analyzer.start() + + # In compositor loop: + energy = analyzer.get_energy() + beat = analyzer.get_beat() + + analyzer.stop() + """ + + def __init__( + self, + device: int = None, + sample_rate: int = 44100, + block_size: int = 1024, + buffer_seconds: float = 0.5, + ): + """ + Initialize audio analyzer. + + Args: + device: Audio input device index (None = default) + sample_rate: Audio sample rate + block_size: Samples per block + buffer_seconds: Ring buffer duration + """ + self.sample_rate = sample_rate + self.block_size = block_size + self.device = device + + # Ring buffer for recent audio + buffer_size = int(sample_rate * buffer_seconds) + self._buffer = np.zeros(buffer_size, dtype=np.float32) + self._buffer_pos = 0 + self._lock = threading.Lock() + + # Beat detection state + self._last_energy = 0 + self._energy_history = [] + self._last_beat_time = 0 + self._beat_threshold = 1.5 # Energy ratio for beat detection + self._min_beat_interval = 0.1 # Min seconds between beats + + # Stream state + self._stream = None + self._running = False + + def _audio_callback(self, indata, frames, time_info, status): + """Called by sounddevice for each audio block.""" + with self._lock: + # Add to ring buffer + data = indata[:, 0] if len(indata.shape) > 1 else indata + n = len(data) + if self._buffer_pos + n <= len(self._buffer): + self._buffer[self._buffer_pos:self._buffer_pos + n] = data + else: + # Wrap around + first = len(self._buffer) - self._buffer_pos + self._buffer[self._buffer_pos:] = data[:first] + self._buffer[:n - first] = data[first:] + self._buffer_pos = (self._buffer_pos + n) % len(self._buffer) + + def start(self): + """Start audio capture.""" + try: + import sounddevice as sd + except ImportError: + print("Warning: sounddevice not installed. 
Audio analysis disabled.") + print("Install with: pip install sounddevice") + return + + self._stream = sd.InputStream( + device=self.device, + channels=1, + samplerate=self.sample_rate, + blocksize=self.block_size, + callback=self._audio_callback, + ) + self._stream.start() + self._running = True + + def stop(self): + """Stop audio capture.""" + if self._stream: + self._stream.stop() + self._stream.close() + self._stream = None + self._running = False + + def get_energy(self) -> float: + """ + Get current audio energy (RMS amplitude). + + Returns: + Energy value normalized to 0-1 range (approximately) + """ + with self._lock: + # Use recent samples + recent = 2048 + if self._buffer_pos >= recent: + data = self._buffer[self._buffer_pos - recent:self._buffer_pos] + else: + data = np.concatenate([ + self._buffer[-(recent - self._buffer_pos):], + self._buffer[:self._buffer_pos] + ]) + + # RMS energy + rms = np.sqrt(np.mean(data ** 2)) + + # Normalize (typical mic input is quite low) + normalized = min(1.0, rms * 10) + + return normalized + + def get_beat(self) -> bool: + """ + Detect if current moment is a beat. + + Simple onset detection based on energy spikes. 
+ + Returns: + True if beat detected, False otherwise + """ + current_energy = self.get_energy() + now = time.time() + + # Update energy history + self._energy_history.append(current_energy) + if len(self._energy_history) > 20: + self._energy_history.pop(0) + + # Need enough history + if len(self._energy_history) < 5: + self._last_energy = current_energy + return False + + # Average recent energy + avg_energy = np.mean(self._energy_history[:-1]) + + # Beat if current energy is significantly above average + is_beat = ( + current_energy > avg_energy * self._beat_threshold and + now - self._last_beat_time > self._min_beat_interval and + current_energy > self._last_energy # Rising edge + ) + + if is_beat: + self._last_beat_time = now + + self._last_energy = current_energy + return is_beat + + def get_spectrum(self, bands: int = 3) -> np.ndarray: + """ + Get frequency spectrum divided into bands. + + Args: + bands: Number of frequency bands (default 3: bass, mid, high) + + Returns: + Array of band energies, normalized to 0-1 + """ + with self._lock: + # Use recent samples for FFT + n = 2048 + if self._buffer_pos >= n: + data = self._buffer[self._buffer_pos - n:self._buffer_pos] + else: + data = np.concatenate([ + self._buffer[-(n - self._buffer_pos):], + self._buffer[:self._buffer_pos] + ]) + + # FFT + fft = np.abs(np.fft.rfft(data * np.hanning(len(data)))) + + # Divide into bands + band_size = len(fft) // bands + result = np.zeros(bands) + for i in range(bands): + start = i * band_size + end = start + band_size + result[i] = np.mean(fft[start:end]) + + # Normalize + max_val = np.max(result) + if max_val > 0: + result = result / max_val + + return result + + @property + def is_running(self) -> bool: + return self._running + + def __enter__(self): + self.start() + return self + + def __exit__(self, *args): + self.stop() + + +class FileAudioAnalyzer: + """ + Audio analyzer that reads from a file (for testing/development). 
+ + Pre-computes analysis and plays back in sync with video. + """ + + def __init__(self, path: str, analysis_data: dict = None): + """ + Initialize from audio file. + + Args: + path: Path to audio file + analysis_data: Pre-computed analysis (times, values, etc.) + """ + self.path = path + self.analysis_data = analysis_data or {} + self._current_time = 0 + + def set_time(self, t: float): + """Set current playback time.""" + self._current_time = t + + def get_energy(self) -> float: + """Get energy at current time from pre-computed data.""" + track = self.analysis_data.get("energy", {}) + return self._interpolate(track, self._current_time) + + def get_beat(self) -> bool: + """Check if current time is near a beat.""" + track = self.analysis_data.get("beats", {}) + times = track.get("times", []) + + # Check if we're within 50ms of a beat + for beat_time in times: + if abs(beat_time - self._current_time) < 0.05: + return True + return False + + def _interpolate(self, track: dict, t: float) -> float: + """Interpolate value at time t.""" + times = track.get("times", []) + values = track.get("values", []) + + if not times or not values: + return 0.0 + + if t <= times[0]: + return values[0] + if t >= times[-1]: + return values[-1] + + # Find bracket and interpolate + for i in range(len(times) - 1): + if times[i] <= t <= times[i + 1]: + alpha = (t - times[i]) / (times[i + 1] - times[i]) + return values[i] * (1 - alpha) + values[i + 1] * alpha + + return values[-1] + + @property + def is_running(self) -> bool: + return True + + +class StreamingAudioAnalyzer: + """ + Real-time audio analyzer that streams from a file. + + Reads audio in sync with video time and computes features on-the-fly. + No pre-computation needed - analysis happens as frames are processed. + """ + + def __init__(self, path: str, sample_rate: int = 22050, hop_length: int = 512): + """ + Initialize streaming audio analyzer. 
+ + Args: + path: Path to audio file + sample_rate: Sample rate for analysis + hop_length: Hop length for feature extraction + """ + import subprocess + import json + + self.path = path + self.sample_rate = sample_rate + self.hop_length = hop_length + self._current_time = 0.0 + + # Get audio duration + cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", + "-show_format", str(path)] + result = subprocess.run(cmd, capture_output=True, text=True) + info = json.loads(result.stdout) + self.duration = float(info["format"]["duration"]) + + # Audio buffer and state + self._audio_data = None + self._energy_history = [] + self._last_energy = 0 + self._last_beat_time = -1 + self._beat_threshold = 1.5 + self._min_beat_interval = 0.15 + + # Load audio lazily + self._loaded = False + + def _load_audio(self): + """Load audio data on first use.""" + if self._loaded: + return + + import subprocess + + # Use ffmpeg to decode audio to raw PCM + cmd = [ + "ffmpeg", "-v", "quiet", + "-i", str(self.path), + "-f", "f32le", # 32-bit float, little-endian + "-ac", "1", # mono + "-ar", str(self.sample_rate), + "-" + ] + result = subprocess.run(cmd, capture_output=True) + self._audio_data = np.frombuffer(result.stdout, dtype=np.float32) + self._loaded = True + + def set_time(self, t: float): + """Set current playback time.""" + self._current_time = t + + def get_energy(self) -> float: + """Compute energy at current time.""" + self._load_audio() + + if self._audio_data is None or len(self._audio_data) == 0: + return 0.0 + + # Get sample index for current time + sample_idx = int(self._current_time * self.sample_rate) + window_size = self.hop_length * 2 + + start = max(0, sample_idx - window_size // 2) + end = min(len(self._audio_data), sample_idx + window_size // 2) + + if start >= end: + return 0.0 + + # RMS energy + chunk = self._audio_data[start:end] + rms = np.sqrt(np.mean(chunk ** 2)) + + # Normalize to 0-1 range (approximate) + energy = min(1.0, rms * 3.0) + + self._last_energy = 
energy + return energy + + def get_beat(self) -> bool: + """Detect beat using spectral flux (change in frequency content).""" + self._load_audio() + + if self._audio_data is None or len(self._audio_data) == 0: + return False + + # Get audio chunks for current and previous frame + sample_idx = int(self._current_time * self.sample_rate) + chunk_size = self.hop_length * 2 + + # Current chunk + start = max(0, sample_idx - chunk_size // 2) + end = min(len(self._audio_data), sample_idx + chunk_size // 2) + if end - start < chunk_size // 2: + return False + current_chunk = self._audio_data[start:end] + + # Previous chunk (one hop back) + prev_start = max(0, start - self.hop_length) + prev_end = max(0, end - self.hop_length) + if prev_end <= prev_start: + return False + prev_chunk = self._audio_data[prev_start:prev_end] + + # Compute spectra + current_spec = np.abs(np.fft.rfft(current_chunk * np.hanning(len(current_chunk)))) + prev_spec = np.abs(np.fft.rfft(prev_chunk * np.hanning(len(prev_chunk)))) + + # Spectral flux: sum of positive differences (onset = new frequencies appearing) + min_len = min(len(current_spec), len(prev_spec)) + diff = current_spec[:min_len] - prev_spec[:min_len] + flux = np.sum(np.maximum(0, diff)) # Only count increases + + # Normalize by spectrum size + flux = flux / (min_len + 1) + + # Update flux history + self._energy_history.append((self._current_time, flux)) + while self._energy_history and self._energy_history[0][0] < self._current_time - 1.5: + self._energy_history.pop(0) + + if len(self._energy_history) < 3: + return False + + # Adaptive threshold based on recent flux values + flux_values = [f for t, f in self._energy_history] + mean_flux = np.mean(flux_values) + std_flux = np.std(flux_values) + 0.001 # Avoid division by zero + + # Beat if flux is above mean (more sensitive threshold) + threshold = mean_flux + std_flux * 0.3 # Lower = more sensitive + min_interval = 0.1 # Allow up to 600 BPM + time_ok = self._current_time - 
self._last_beat_time > min_interval + + is_beat = flux > threshold and time_ok + + if is_beat: + self._last_beat_time = self._current_time + + return is_beat + + def get_spectrum(self, bands: int = 3) -> np.ndarray: + """Get frequency spectrum at current time.""" + self._load_audio() + + if self._audio_data is None or len(self._audio_data) == 0: + return np.zeros(bands) + + sample_idx = int(self._current_time * self.sample_rate) + n = 2048 + + start = max(0, sample_idx - n // 2) + end = min(len(self._audio_data), sample_idx + n // 2) + + if end - start < n // 2: + return np.zeros(bands) + + chunk = self._audio_data[start:end] + + # FFT + fft = np.abs(np.fft.rfft(chunk * np.hanning(len(chunk)))) + + # Divide into bands + band_size = len(fft) // bands + result = np.zeros(bands) + for i in range(bands): + s, e = i * band_size, (i + 1) * band_size + result[i] = np.mean(fft[s:e]) + + # Normalize + max_val = np.max(result) + if max_val > 0: + result = result / max_val + + return result + + @property + def is_running(self) -> bool: + return True diff --git a/streaming/backends.py b/streaming/backends.py new file mode 100644 index 0000000..bc695d6 --- /dev/null +++ b/streaming/backends.py @@ -0,0 +1,308 @@ +""" +Effect processing backends. + +Provides abstraction over different rendering backends: +- numpy: CPU-based, works everywhere, ~3-5 fps +- glsl: GPU-based, requires OpenGL, 30+ fps (future) +""" + +import numpy as np +from abc import ABC, abstractmethod +from typing import List, Dict, Any, Optional +from pathlib import Path + + +class Backend(ABC): + """Abstract base class for effect processing backends.""" + + @abstractmethod + def process_frame( + self, + frames: List[np.ndarray], + effects_per_frame: List[List[Dict]], + compositor_config: Dict, + t: float, + analysis_data: Dict, + ) -> np.ndarray: + """ + Process multiple input frames through effects and composite. 
+ + Args: + frames: List of input frames (one per source) + effects_per_frame: List of effect chains (one per source) + compositor_config: How to blend the layers + t: Current time in seconds + analysis_data: Analysis data for binding resolution + + Returns: + Composited output frame + """ + pass + + @abstractmethod + def load_effect(self, effect_path: Path) -> Any: + """Load an effect definition.""" + pass + + +class NumpyBackend(Backend): + """ + CPU-based effect processing using NumPy. + + Uses existing sexp_effects interpreter for effect execution. + Works on any system, but limited to ~3-5 fps for complex effects. + """ + + def __init__(self, recipe_dir: Path = None, minimal_primitives: bool = True): + self.recipe_dir = recipe_dir or Path(".") + self.minimal_primitives = minimal_primitives + self._interpreter = None + self._loaded_effects = {} + + def _get_interpreter(self): + """Lazy-load the sexp interpreter.""" + if self._interpreter is None: + from sexp_effects import get_interpreter + self._interpreter = get_interpreter(minimal_primitives=self.minimal_primitives) + return self._interpreter + + def load_effect(self, effect_path: Path) -> Any: + """Load an effect from sexp file.""" + effect_key = str(effect_path) + if effect_key not in self._loaded_effects: + interp = self._get_interpreter() + interp.load_effect(str(effect_path)) + self._loaded_effects[effect_key] = effect_path.stem + return self._loaded_effects[effect_key] + + def _resolve_binding(self, value: Any, t: float, analysis_data: Dict) -> Any: + """Resolve a parameter binding to its value at time t.""" + if not isinstance(value, dict): + return value + + if "_binding" in value or "_bind" in value: + source = value.get("source") or value.get("_bind") + feature = value.get("feature", "values") + range_map = value.get("range") + + track = analysis_data.get(source, {}) + times = track.get("times", []) + values = track.get("values", []) + + if not times or not values: + return 0.0 + + # Find value at 
time t (linear interpolation) + if t <= times[0]: + val = values[0] + elif t >= times[-1]: + val = values[-1] + else: + # Binary search for bracket + for i in range(len(times) - 1): + if times[i] <= t <= times[i + 1]: + alpha = (t - times[i]) / (times[i + 1] - times[i]) + val = values[i] * (1 - alpha) + values[i + 1] * alpha + break + else: + val = values[-1] + + # Apply range mapping + if range_map and len(range_map) == 2: + val = range_map[0] + val * (range_map[1] - range_map[0]) + + return val + + return value + + def _apply_effect( + self, + frame: np.ndarray, + effect_name: str, + params: Dict, + t: float, + analysis_data: Dict, + ) -> np.ndarray: + """Apply a single effect to a frame.""" + # Resolve bindings in params + resolved_params = {"_time": t} + for key, value in params.items(): + if key in ("effect", "effect_path", "cid", "analysis_refs"): + continue + resolved_params[key] = self._resolve_binding(value, t, analysis_data) + + # Try fast native effects first + result = self._apply_native_effect(frame, effect_name, resolved_params) + if result is not None: + return result + + # Fall back to sexp interpreter for complex effects + interp = self._get_interpreter() + if effect_name in interp.effects: + result, _ = interp.run_effect(effect_name, frame, resolved_params, {}) + return result + + # Unknown effect - pass through + return frame + + def _apply_native_effect( + self, + frame: np.ndarray, + effect_name: str, + params: Dict, + ) -> Optional[np.ndarray]: + """Fast native numpy effects for real-time streaming.""" + import cv2 + + if effect_name == "zoom": + amount = float(params.get("amount", 1.0)) + if abs(amount - 1.0) < 0.01: + return frame + h, w = frame.shape[:2] + # Crop center and resize + new_w, new_h = int(w / amount), int(h / amount) + x1, y1 = (w - new_w) // 2, (h - new_h) // 2 + cropped = frame[y1:y1+new_h, x1:x1+new_w] + return cv2.resize(cropped, (w, h)) + + elif effect_name == "rotate": + angle = float(params.get("angle", 0)) + if 
abs(angle) < 0.5: + return frame + h, w = frame.shape[:2] + center = (w // 2, h // 2) + matrix = cv2.getRotationMatrix2D(center, angle, 1.0) + return cv2.warpAffine(frame, matrix, (w, h)) + + elif effect_name == "brightness": + amount = float(params.get("amount", 1.0)) + return np.clip(frame * amount, 0, 255).astype(np.uint8) + + elif effect_name == "invert": + amount = float(params.get("amount", 1.0)) + if amount < 0.5: + return frame + return 255 - frame + + # Not a native effect + return None + + def process_frame( + self, + frames: List[np.ndarray], + effects_per_frame: List[List[Dict]], + compositor_config: Dict, + t: float, + analysis_data: Dict, + ) -> np.ndarray: + """ + Process frames through effects and composite. + """ + if not frames: + return np.zeros((720, 1280, 3), dtype=np.uint8) + + processed = [] + + # Apply effects to each input frame + for i, (frame, effects) in enumerate(zip(frames, effects_per_frame)): + result = frame.copy() + for effect_config in effects: + effect_name = effect_config.get("effect", "") + if effect_name: + result = self._apply_effect( + result, effect_name, effect_config, t, analysis_data + ) + processed.append(result) + + # Composite layers + if len(processed) == 1: + return processed[0] + + return self._composite(processed, compositor_config, t, analysis_data) + + def _composite( + self, + frames: List[np.ndarray], + config: Dict, + t: float, + analysis_data: Dict, + ) -> np.ndarray: + """Composite multiple frames into one.""" + mode = config.get("mode", "alpha") + weights = config.get("weights", [1.0 / len(frames)] * len(frames)) + + # Resolve weight bindings + resolved_weights = [] + for w in weights: + resolved_weights.append(self._resolve_binding(w, t, analysis_data)) + + # Normalize weights + total = sum(resolved_weights) + if total > 0: + resolved_weights = [w / total for w in resolved_weights] + else: + resolved_weights = [1.0 / len(frames)] * len(frames) + + # Resize frames to match first frame + target_h, target_w 
= frames[0].shape[:2] + resized = [] + for frame in frames: + if frame.shape[:2] != (target_h, target_w): + import cv2 + frame = cv2.resize(frame, (target_w, target_h)) + resized.append(frame.astype(np.float32)) + + # Weighted blend + result = np.zeros_like(resized[0]) + for frame, weight in zip(resized, resolved_weights): + result += frame * weight + + return np.clip(result, 0, 255).astype(np.uint8) + + +class GLSLBackend(Backend): + """ + GPU-based effect processing using OpenGL/GLSL. + + Requires GPU with OpenGL 3.3+ support (or Mesa software renderer). + Achieves 30+ fps real-time processing. + + TODO: Implement when ready for GPU acceleration. + """ + + def __init__(self): + raise NotImplementedError( + "GLSL backend not yet implemented. Use NumpyBackend for now." + ) + + def load_effect(self, effect_path: Path) -> Any: + pass + + def process_frame( + self, + frames: List[np.ndarray], + effects_per_frame: List[List[Dict]], + compositor_config: Dict, + t: float, + analysis_data: Dict, + ) -> np.ndarray: + pass + + +def get_backend(name: str = "numpy", **kwargs) -> Backend: + """ + Get a backend by name. + + Args: + name: "numpy" or "glsl" + **kwargs: Backend-specific options + + Returns: + Backend instance + """ + if name == "numpy": + return NumpyBackend(**kwargs) + elif name == "glsl": + return GLSLBackend(**kwargs) + else: + raise ValueError(f"Unknown backend: {name}") diff --git a/streaming/compositor.py b/streaming/compositor.py new file mode 100644 index 0000000..477128f --- /dev/null +++ b/streaming/compositor.py @@ -0,0 +1,595 @@ +""" +Streaming video compositor. + +Main entry point for the streaming pipeline. 
Combines: +- Multiple video sources (with looping) +- Per-source effect chains +- Layer compositing +- Optional live audio analysis +- Output to display/file/stream +""" + +import time +import sys +import numpy as np +from typing import List, Dict, Any, Optional, Union +from pathlib import Path + +from .sources import Source, VideoSource +from .backends import Backend, NumpyBackend, get_backend +from .output import Output, DisplayOutput, FileOutput, MultiOutput + + +class StreamingCompositor: + """ + Real-time streaming video compositor. + + Reads frames from multiple sources, applies effects, composites layers, + and outputs the result - all frame-by-frame without intermediate files. + + Example: + compositor = StreamingCompositor( + sources=["video1.mp4", "video2.mp4"], + effects_per_source=[ + [{"effect": "rotate", "angle": 45}], + [{"effect": "zoom", "amount": 1.5}], + ], + compositor_config={"mode": "alpha", "weights": [0.5, 0.5]}, + ) + compositor.run(output="preview", duration=60) + """ + + def __init__( + self, + sources: List[Union[str, Source]], + effects_per_source: List[List[Dict]] = None, + compositor_config: Dict = None, + analysis_data: Dict = None, + backend: str = "numpy", + recipe_dir: Path = None, + fps: float = 30, + audio_source: str = None, + ): + """ + Initialize the streaming compositor. 
+ + Args: + sources: List of video paths or Source objects + effects_per_source: List of effect chains, one per source + compositor_config: How to blend layers (mode, weights) + analysis_data: Pre-computed analysis data for bindings + backend: "numpy" or "glsl" + recipe_dir: Directory for resolving relative effect paths + fps: Output frame rate + audio_source: Path to audio file for streaming analysis + """ + self.fps = fps + self.recipe_dir = recipe_dir or Path(".") + self.analysis_data = analysis_data or {} + + # Initialize streaming audio analyzer if audio source provided + self._audio_analyzer = None + self._audio_source = audio_source + if audio_source: + from .audio import StreamingAudioAnalyzer + self._audio_analyzer = StreamingAudioAnalyzer(audio_source) + print(f"Streaming audio: {audio_source}", file=sys.stderr) + + # Initialize sources + self.sources: List[Source] = [] + for src in sources: + if isinstance(src, Source): + self.sources.append(src) + elif isinstance(src, (str, Path)): + self.sources.append(VideoSource(str(src), target_fps=fps)) + else: + raise ValueError(f"Unknown source type: {type(src)}") + + # Effect chains (default: no effects) + self.effects_per_source = effects_per_source or [[] for _ in self.sources] + if len(self.effects_per_source) != len(self.sources): + raise ValueError( + f"effects_per_source length ({len(self.effects_per_source)}) " + f"must match sources length ({len(self.sources)})" + ) + + # Compositor config (default: equal blend) + self.compositor_config = compositor_config or { + "mode": "alpha", + "weights": [1.0 / len(self.sources)] * len(self.sources), + } + + # Initialize backend + self.backend: Backend = get_backend( + backend, + recipe_dir=self.recipe_dir, + ) + + # Load effects + self._load_effects() + + def _load_effects(self): + """Pre-load all effect definitions.""" + for effects in self.effects_per_source: + for effect_config in effects: + effect_path = effect_config.get("effect_path") + if effect_path: + 
full_path = self.recipe_dir / effect_path + if full_path.exists(): + self.backend.load_effect(full_path) + + def _create_output( + self, + output: Union[str, Output], + size: tuple, + ) -> Output: + """Create output target from string or Output object.""" + if isinstance(output, Output): + return output + + if output == "preview": + return DisplayOutput("Streaming Preview", size, + audio_source=self._audio_source, fps=self.fps) + elif output == "null": + from .output import NullOutput + return NullOutput() + elif isinstance(output, str): + return FileOutput(output, size, fps=self.fps, audio_source=self._audio_source) + else: + raise ValueError(f"Unknown output type: {output}") + + def run( + self, + output: Union[str, Output] = "preview", + duration: float = None, + audio_analyzer=None, + show_fps: bool = True, + recipe_executor=None, + ): + """ + Run the streaming compositor. + + Args: + output: Output target - "preview", filename, or Output object + duration: Duration in seconds (None = run until quit) + audio_analyzer: Optional AudioAnalyzer for live audio reactivity + show_fps: Show FPS counter in console + recipe_executor: Optional StreamingRecipeExecutor for full recipe logic + """ + # Determine output size from first source + output_size = self.sources[0].size + + # Create output + out = self._create_output(output, output_size) + + # Determine duration + if duration is None: + # Run until stopped (or min source duration if not looping) + duration = min(s.duration for s in self.sources) + if duration == float('inf'): + duration = 3600 # 1 hour max for live sources + + total_frames = int(duration * self.fps) + frame_time = 1.0 / self.fps + + print(f"Streaming: {len(self.sources)} sources -> {output}", file=sys.stderr) + print(f"Duration: {duration:.1f}s, {total_frames} frames @ {self.fps}fps", file=sys.stderr) + print(f"Output size: {output_size[0]}x{output_size[1]}", file=sys.stderr) + print(f"Press 'q' to quit (if preview)", file=sys.stderr) + + # Frame loop 
+ start_time = time.time() + frame_count = 0 + fps_update_interval = 30 # Update FPS display every N frames + last_fps_time = start_time + last_fps_count = 0 + + try: + for frame_num in range(total_frames): + if not out.is_open: + print(f"\nOutput closed at frame {frame_num}", file=sys.stderr) + break + + t = frame_num * frame_time + + try: + # Update analysis data from streaming audio (file-based) + energy = 0.0 + is_beat = False + if self._audio_analyzer: + self._update_from_audio(self._audio_analyzer, t) + energy = self.analysis_data.get("live_energy", {}).get("values", [0])[0] + is_beat = self.analysis_data.get("live_beat", {}).get("values", [0])[0] > 0.5 + elif audio_analyzer: + self._update_from_audio(audio_analyzer, t) + energy = self.analysis_data.get("live_energy", {}).get("values", [0])[0] + is_beat = self.analysis_data.get("live_beat", {}).get("values", [0])[0] > 0.5 + + # Read frames from all sources + frames = [src.read_frame(t) for src in self.sources] + + # Process through recipe executor if provided + if recipe_executor: + result = self._process_with_executor( + frames, recipe_executor, energy, is_beat, t + ) + else: + # Simple backend processing + result = self.backend.process_frame( + frames, + self.effects_per_source, + self.compositor_config, + t, + self.analysis_data, + ) + + # Output + out.write(result, t) + frame_count += 1 + + # FPS display + if show_fps and frame_count % fps_update_interval == 0: + now = time.time() + elapsed = now - last_fps_time + if elapsed > 0: + current_fps = (frame_count - last_fps_count) / elapsed + progress = frame_num / total_frames * 100 + print( + f"\r {progress:5.1f}% | {current_fps:5.1f} fps | " + f"frame {frame_num}/{total_frames}", + end="", file=sys.stderr + ) + last_fps_time = now + last_fps_count = frame_count + + except Exception as e: + print(f"\nError at frame {frame_num}, t={t:.1f}s: {e}", file=sys.stderr) + import traceback + traceback.print_exc() + break + + except KeyboardInterrupt: + 
print("\nInterrupted", file=sys.stderr)
        # NOTE(review): fragment — tail of a run() method that begins in an earlier chunk.
        finally:
            out.close()
            for src in self.sources:
                if hasattr(src, 'close'):
                    src.close()

            # Final stats
            elapsed = time.time() - start_time
            avg_fps = frame_count / elapsed if elapsed > 0 else 0
            print(f"\nCompleted: {frame_count} frames in {elapsed:.1f}s ({avg_fps:.1f} fps avg)", file=sys.stderr)

    def _process_with_executor(
        self,
        frames: List[np.ndarray],
        executor,
        energy: float,
        is_beat: bool,
        t: float,
    ) -> np.ndarray:
        """
        Process frames using the recipe executor for full pipeline.

        Implements:
        1. process-pair: two clips per source with effects, blended
        2. cycle-crossfade: dynamic composition with zoom and weights
        3. Final effects: whole-spin, ripple

        frames: one frame per source; the first frame's shape defines the
        output size. energy/is_beat/t are forwarded to the executor, which
        owns all parameter animation state.
        """
        import cv2

        # Target size from first source
        target_h, target_w = frames[0].shape[:2]

        # Resize all frames to target size (letterbox to preserve aspect ratio)
        resized_frames = []
        for frame in frames:
            fh, fw = frame.shape[:2]
            if (fh, fw) != (target_h, target_w):
                # Calculate scale to fit while preserving aspect ratio
                scale = min(target_w / fw, target_h / fh)
                new_w, new_h = int(fw * scale), int(fh * scale)
                resized = cv2.resize(frame, (new_w, new_h))
                # Center on black canvas
                canvas = np.zeros((target_h, target_w, 3), dtype=np.uint8)
                x_off = (target_w - new_w) // 2
                y_off = (target_h - new_h) // 2
                canvas[y_off:y_off+new_h, x_off:x_off+new_w] = resized
                resized_frames.append(canvas)
            else:
                resized_frames.append(frame)
        frames = resized_frames

        # Update executor state
        executor.on_frame(energy, is_beat, t)

        # Get weights to know which sources are active
        weights = executor.get_cycle_weights()

        # Process each source as a "pair" (clip A and B with different effects)
        processed_pairs = []

        for i, frame in enumerate(frames):
            # Skip sources with zero weight (but still need placeholder)
            # so that indices stay aligned with weights/zooms below.
            if i < len(weights) and weights[i] < 0.001:
                processed_pairs.append(None)
                continue
            # Get effect params for clip A and B
            params_a = executor.get_effect_params(i, "a", energy)
            params_b = executor.get_effect_params(i, "b", energy)
            pair_params = executor.get_pair_params(i)

            # Process clip A
            clip_a = self._apply_clip_effects(frame.copy(), params_a, t)

            # Process clip B
            clip_b = self._apply_clip_effects(frame.copy(), params_b, t)

            # Blend A and B using pair_mix opacity
            opacity = pair_params["blend_opacity"]
            blended = cv2.addWeighted(
                clip_a, 1 - opacity,
                clip_b, opacity,
                0
            )

            # Apply pair rotation (skipped below 0.5 degrees to avoid
            # needless warps)
            h, w = blended.shape[:2]
            center = (w // 2, h // 2)
            angle = pair_params["pair_rotation"]
            if abs(angle) > 0.5:
                matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
                blended = cv2.warpAffine(blended, matrix, (w, h))

            processed_pairs.append(blended)

        # Cycle-crossfade composition
        # NOTE(review): get_cycle_weights() was already called above; this
        # second call is redundant unless on_frame mutates weights mid-frame.
        weights = executor.get_cycle_weights()
        zooms = executor.get_cycle_zooms()

        # Apply zoom per pair and composite
        h, w = target_h, target_w
        result = np.zeros((h, w, 3), dtype=np.float32)

        for idx, (pair, weight, zoom) in enumerate(zip(processed_pairs, weights, zooms)):
            # Skip zero-weight sources
            if pair is None or weight < 0.001:
                continue

            orig_shape = pair.shape  # NOTE(review): unused local

            # Apply zoom
            if zoom > 1.01:
                # Zoom in: crop center and resize up
                new_w, new_h = int(w / zoom), int(h / zoom)
                if new_w > 0 and new_h > 0:
                    x1, y1 = (w - new_w) // 2, (h - new_h) // 2
                    cropped = pair[y1:y1+new_h, x1:x1+new_w]
                    pair = cv2.resize(cropped, (w, h))
            elif zoom < 0.99:
                # Zoom out: shrink video and center on black
                scaled_w, scaled_h = int(w * zoom), int(h * zoom)
                if scaled_w > 0 and scaled_h > 0:
                    shrunk = cv2.resize(pair, (scaled_w, scaled_h))
                    canvas = np.zeros((h, w, 3), dtype=np.uint8)
                    x_off, y_off = (w - scaled_w) // 2, (h - scaled_h) // 2
                    canvas[y_off:y_off+scaled_h, x_off:x_off+scaled_w] = shrunk
                    pair = canvas.copy()

            # Draw colored border - size indicates zoom level
            # (colors presumably RGB given the cv2.COLOR_RGB2HSV usage
            # elsewhere in this class — TODO confirm channel order)
            border_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0)]
            color = border_colors[idx % 4]
            thickness = max(3, int(10 * weight))  # Thicker border = higher weight
            pair = np.ascontiguousarray(pair)
            pair[:thickness, :] = color
            pair[-thickness:, :] = color
            pair[:, :thickness] = color
            pair[:, -thickness:] = color

            result += pair.astype(np.float32) * weight

        result = np.clip(result, 0, 255).astype(np.uint8)

        # Apply final effects (whole-spin, ripple)
        final_params = executor.get_final_effects(energy)

        # Whole spin
        spin_angle = final_params["whole_spin_angle"]
        if abs(spin_angle) > 0.5:
            center = (w // 2, h // 2)
            matrix = cv2.getRotationMatrix2D(center, spin_angle, 1.0)
            result = cv2.warpAffine(result, matrix, (w, h))

        # Ripple effect
        amp = final_params["ripple_amplitude"]
        if amp > 1:
            result = self._apply_ripple(result, amp,
                                        final_params["ripple_cx"],
                                        final_params["ripple_cy"],
                                        t)

        return result

    def _apply_clip_effects(self, frame: np.ndarray, params: dict, t: float) -> np.ndarray:
        """Apply per-clip effects: rotate, zoom, invert, hue_shift, ascii."""
        import cv2

        h, w = frame.shape[:2]

        # Rotate
        angle = params["rotate_angle"]
        if abs(angle) > 0.5:
            center = (w // 2, h // 2)
            matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
            frame = cv2.warpAffine(frame, matrix, (w, h))

        # Zoom (crop the center region and scale back up; bounds are clamped
        # so zoom < 1 degenerates to a no-op crop of the full frame)
        zoom = params["zoom_amount"]
        if abs(zoom - 1.0) > 0.01:
            new_w, new_h = int(w / zoom), int(h / zoom)
            if new_w > 0 and new_h > 0:
                x1, y1 = (w - new_w) // 2, (h - new_h) // 2
                x1, y1 = max(0, x1), max(0, y1)
                x2, y2 = min(w, x1 + new_w), min(h, y1 + new_h)
                if x2 > x1 and y2 > y1:
                    cropped = frame[y1:y2, x1:x2]
                    frame = cv2.resize(cropped, (w, h))

        # Invert (binary toggle: only applied when amount > 0.5)
        if params["invert_amount"] > 0.5:
            frame = 255 - frame

        # Hue shift (OpenCV hue range is 0..179, hence degrees / 2)
        hue_deg = params["hue_degrees"]
        if abs(hue_deg) > 1:
            hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
            hsv[:, :, 0] = (hsv[:, :, 0].astype(np.int32) + int(hue_deg / 2)) % 180
            frame = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
        # ASCII art (fragment — continuation of _apply_clip_effects)
        if params["ascii_mix"] > 0.5:
            char_size = max(4, int(params["ascii_char_size"]))
            frame = self._apply_ascii(frame, char_size)

        return frame

    def _apply_ascii(self, frame: np.ndarray, char_size: int) -> np.ndarray:
        """Apply ASCII art effect.

        Downsamples the frame to a character grid, maps cell luminance to a
        density ramp, and redraws each cell as a colored glyph via PIL.
        """
        import cv2
        from PIL import Image, ImageDraw, ImageFont

        h, w = frame.shape[:2]
        chars = " .:-=+*#%@"  # luminance ramp, dark -> bright

        # Get font
        try:
            font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", char_size)
        except:  # NOTE(review): bare except — should be `except OSError`
            font = ImageFont.load_default()

        # Sample cells using area interpolation (fast block average)
        rows = h // char_size
        cols = w // char_size
        if rows < 1 or cols < 1:
            return frame

        # Crop to exact grid and downsample
        cropped = frame[:rows * char_size, :cols * char_size]
        cell_colors = cv2.resize(cropped, (cols, rows), interpolation=cv2.INTER_AREA)

        # Compute luminance (Rec.601 weights, normalized to 0..1;
        # channel order presumed RGB — TODO confirm)
        luminances = (0.299 * cell_colors[:, :, 0] +
                      0.587 * cell_colors[:, :, 1] +
                      0.114 * cell_colors[:, :, 2]) / 255.0

        # Create output image
        out_h = rows * char_size
        out_w = cols * char_size
        output = Image.new('RGB', (out_w, out_h), (0, 0, 0))
        draw = ImageDraw.Draw(output)

        # Draw characters
        for r in range(rows):
            for c in range(cols):
                lum = luminances[r, c]
                color = tuple(cell_colors[r, c])

                # Map luminance to character
                idx = int(lum * (len(chars) - 1))
                char = chars[idx]

                # Draw character
                x = c * char_size
                y = r * char_size
                draw.text((x, y), char, fill=color, font=font)

        # Convert back to numpy and resize to original
        result = np.array(output)
        if result.shape[:2] != (h, w):
            result = cv2.resize(result, (w, h), interpolation=cv2.INTER_LINEAR)

        return result

    def _apply_ripple(self, frame: np.ndarray, amplitude: float,
                      cx: float, cy: float, t: float = 0) -> np.ndarray:
        """Apply ripple distortion effect.

        cx/cy are the ripple center as fractions of width/height.
        Uses fixed frequency/decay/speed constants matching the recipe.
        """
        import cv2

        h, w = frame.shape[:2]
        center_x, center_y = cx * w, cy * h
        max_dim = max(w, h)

        # Create coordinate grids
        y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32)

        # Distance from center
        dx = x_coords - center_x
        dy = y_coords - center_y
        dist = np.sqrt(dx*dx + dy*dy)

        # Ripple parameters (matching recipe: frequency=8, decay=2, speed=5)
        freq = 8
        decay = 2
        speed = 5
        phase = t * speed * 2 * np.pi

        # Ripple displacement (matching original formula)
        ripple = np.sin(2 * np.pi * freq * dist / max_dim + phase) * amplitude

        # Apply decay (attenuate displacement with distance from center)
        if decay > 0:
            ripple = ripple * np.exp(-dist * decay / max_dim)

        # Displace along radial direction; dist == 0 at the exact center,
        # so guard the division and displace that pixel by zero
        with np.errstate(divide='ignore', invalid='ignore'):
            norm_dx = np.where(dist > 0, dx / dist, 0)
            norm_dy = np.where(dist > 0, dy / dist, 0)

        map_x = (x_coords + ripple * norm_dx).astype(np.float32)
        map_y = (y_coords + ripple * norm_dy).astype(np.float32)

        return cv2.remap(frame, map_x, map_y, cv2.INTER_LINEAR,
                         borderMode=cv2.BORDER_REFLECT)

    def _update_from_audio(self, analyzer, t: float):
        """Update analysis data from audio analyzer (streaming or live).

        Duck-typed: set_time/get_energy/get_beat are each optional on the
        analyzer, so both file-based and live analyzers are accepted.
        """
        # Set time for file-based streaming analyzers
        if hasattr(analyzer, 'set_time'):
            analyzer.set_time(t)

        # Get current audio features
        energy = analyzer.get_energy() if hasattr(analyzer, 'get_energy') else 0
        beat = analyzer.get_beat() if hasattr(analyzer, 'get_beat') else False

        # Update analysis tracks - these can be referenced by effect bindings
        self.analysis_data["live_energy"] = {
            "times": [t],
            "values": [energy],
            "duration": float('inf'),
        }
        self.analysis_data["live_beat"] = {
            "times": [t],
            "values": [1.0 if beat else 0.0],
            "duration": float('inf'),
        }


def quick_preview(
    sources: List[str],
    effects: List[List[Dict]] = None,
    duration: float = 10,
    fps: float = 30,
):
    """
    Quick preview helper - show sources with optional effects.
    Example:
        quick_preview(["video1.mp4", "video2.mp4"], duration=30)
    """
    compositor = StreamingCompositor(
        sources=sources,
        effects_per_source=effects,
        fps=fps,
    )
    compositor.run(output="preview", duration=duration)
diff --git a/streaming/demo.py b/streaming/demo.py
new file mode 100644
index 0000000..0b1899f
--- /dev/null
+++ b/streaming/demo.py
@@ -0,0 +1,125 @@
#!/usr/bin/env python3
"""
Demo script for streaming compositor.

Usage:
    # Preview two videos blended
    python -m streaming.demo preview video1.mp4 video2.mp4

    # Record output to file
    python -m streaming.demo record video1.mp4 video2.mp4 -o output.mp4

    # Benchmark (no output)
    python -m streaming.demo benchmark video1.mp4 --duration 10
"""

import argparse
import sys
from pathlib import Path

# Add parent to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent))

# NOTE(review): VideoSource and NullOutput are not referenced in this module's
# visible code — possibly kept for interactive use; verify before removing.
from streaming import StreamingCompositor, VideoSource
from streaming.output import NullOutput


def demo_preview(sources: list, duration: float, effects: bool = False):
    """Preview sources with optional simple effects."""
    effects_config = None
    if effects:
        # One hard-coded effect per source, truncated to the source count
        effects_config = [
            [{"effect": "rotate", "angle": 15}],
            [{"effect": "zoom", "amount": 1.2}],
        ][:len(sources)]

    compositor = StreamingCompositor(
        sources=sources,
        effects_per_source=effects_config,
        recipe_dir=Path(__file__).parent.parent,
    )
    compositor.run(output="preview", duration=duration)


def demo_record(sources: list, output_path: str, duration: float):
    """Record blended output to file."""
    compositor = StreamingCompositor(
        sources=sources,
        recipe_dir=Path(__file__).parent.parent,
    )
    compositor.run(output=output_path, duration=duration)


def demo_benchmark(sources: list, duration: float):
    """Benchmark processing speed (no output)."""
    compositor = StreamingCompositor(
        sources=sources,
        recipe_dir=Path(__file__).parent.parent,
    )
    compositor.run(output="null", duration=duration)
def demo_audio_reactive(sources: list, duration: float):
    """Preview with live audio reactivity.

    Binds each source's zoom amount to the live_energy analysis track;
    falls back to a plain preview if the audio device is unavailable.
    """
    from streaming.audio import AudioAnalyzer

    # Create compositor with energy-reactive effects
    effects_config = [
        [{
            "effect": "zoom",
            "amount": {"_binding": True, "source": "live_energy", "feature": "values", "range": [1.0, 1.5]},
        }]
        for _ in sources
    ]

    compositor = StreamingCompositor(
        sources=sources,
        effects_per_source=effects_config,
        recipe_dir=Path(__file__).parent.parent,
    )

    # Start audio analyzer
    try:
        with AudioAnalyzer() as audio:
            print("Audio analyzer started. Make some noise!", file=sys.stderr)
            compositor.run(output="preview", duration=duration, audio_analyzer=audio)
    except Exception as e:
        print(f"Audio not available: {e}", file=sys.stderr)
        print("Running without audio...", file=sys.stderr)
        compositor.run(output="preview", duration=duration)


def main():
    """CLI entry point: parse args, validate sources, dispatch to a demo mode."""
    parser = argparse.ArgumentParser(description="Streaming compositor demo")
    parser.add_argument("mode", choices=["preview", "record", "benchmark", "audio"],
                        help="Demo mode")
    parser.add_argument("sources", nargs="+", help="Video source files")
    parser.add_argument("-o", "--output", help="Output file (for record mode)")
    parser.add_argument("-d", "--duration", type=float, default=30,
                        help="Duration in seconds")
    parser.add_argument("--effects", action="store_true",
                        help="Apply simple effects (for preview)")

    args = parser.parse_args()

    # Verify sources exist
    for src in args.sources:
        if not Path(src).exists():
            print(f"Error: Source not found: {src}", file=sys.stderr)
            sys.exit(1)

    if args.mode == "preview":
        demo_preview(args.sources, args.duration, args.effects)
    elif args.mode == "record":
        if not args.output:
            print("Error: --output required for record mode", file=sys.stderr)
            sys.exit(1)
        demo_record(args.sources, args.output, args.duration)
    elif args.mode == "benchmark":
        demo_benchmark(args.sources, args.duration)
    elif args.mode == "audio":
        demo_audio_reactive(args.sources, args.duration)


if __name__ == "__main__":
    main()
diff --git a/streaming/output.py b/streaming/output.py
new file mode 100644
index 0000000..c273bd1
--- /dev/null
+++ b/streaming/output.py
@@ -0,0 +1,369 @@
"""
Output targets for streaming compositor.

Supports:
- Display window (preview)
- File output (recording)
- Stream output (RTMP, etc.) - future
"""

import numpy as np
import subprocess
from abc import ABC, abstractmethod
from typing import Tuple, Optional
from pathlib import Path


class Output(ABC):
    """Abstract base class for output targets."""

    @abstractmethod
    def write(self, frame: np.ndarray, t: float):
        """Write a frame to the output."""
        pass

    @abstractmethod
    def close(self):
        """Close the output and clean up resources."""
        pass

    @property
    @abstractmethod
    def is_open(self) -> bool:
        """Check if output is still open/valid."""
        pass


class DisplayOutput(Output):
    """
    Display frames using mpv (handles Wayland properly).

    Useful for live preview. Press 'q' to quit.
    """

    def __init__(self, title: str = "Streaming Preview", size: Tuple[int, int] = None,
                 audio_source: str = None, fps: float = 30):
        # mpv is started lazily on the first write(), once the frame size
        # is known; audio_source (optional) is played via ffplay alongside.
        self.title = title
        self.size = size
        self.audio_source = audio_source
        self.fps = fps
        self._is_open = True
        self._process = None
        self._audio_process = None

    def _start_mpv(self, frame_size: Tuple[int, int]):
        """Start mpv process for display."""
        import sys
        w, h = frame_size
        cmd = [
            "mpv",
            "--no-cache",
            "--demuxer=rawvideo",
            f"--demuxer-rawvideo-w={w}",
            f"--demuxer-rawvideo-h={h}",
            "--demuxer-rawvideo-mp-format=rgb24",
            f"--demuxer-rawvideo-fps={self.fps}",
            f"--title={self.title}",
            "-",
        ]
        print(f"Starting mpv: {' '.join(cmd)}", file=sys.stderr)
        # NOTE(review): stderr=PIPE is never drained; a chatty mpv could
        # fill the pipe buffer and block — consider DEVNULL as in PipeOutput.
        self._process = subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

        # Start audio playback if we have an audio source
        if self.audio_source:
            audio_cmd = [
                "ffplay", "-nodisp", "-autoexit", "-loglevel", "quiet",
                str(self.audio_source)
            ]
            print(f"Starting audio: {self.audio_source}", file=sys.stderr)
            self._audio_process = subprocess.Popen(
                audio_cmd,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )

    def write(self, frame: np.ndarray, t: float):
        """Display frame."""
        if not self._is_open:
            return

        # Ensure frame is correct format (uint8, contiguous) for raw piping
        if frame.dtype != np.uint8:
            frame = np.clip(frame, 0, 255).astype(np.uint8)
        if not frame.flags['C_CONTIGUOUS']:
            frame = np.ascontiguousarray(frame)

        # Start mpv on first frame
        if self._process is None:
            self._start_mpv((frame.shape[1], frame.shape[0]))

        # Check if mpv is still running
        if self._process.poll() is not None:
            self._is_open = False
            return

        try:
            self._process.stdin.write(frame.tobytes())
            self._process.stdin.flush()  # Prevent buffering
        except BrokenPipeError:
            # Viewer closed the window / quit mpv
            self._is_open = False

    def close(self):
        """Close the display and audio."""
        if self._process:
            try:
                self._process.stdin.close()
            except:  # NOTE(review): bare except — narrow to OSError
                pass
            self._process.terminate()
            self._process.wait()
        if self._audio_process:
            self._audio_process.terminate()
            self._audio_process.wait()
        self._is_open = False

    @property
    def is_open(self) -> bool:
        # Poll the subprocess so a crashed/closed mpv flips the flag
        if self._process and self._process.poll() is not None:
            self._is_open = False
        return self._is_open


class FileOutput(Output):
    """
    Write frames to a video file using ffmpeg.
    """

    def __init__(
        self,
        path: str,
        size: Tuple[int, int],
        fps: float = 30,
        codec: str = "libx264",
        crf: int = 18,
        preset: str = "fast",
        audio_source: str = None,
    ):
        self.path = Path(path)
        self.size = size
        self.fps = fps
        self._is_open = True

        # Build ffmpeg command: raw rgb24 frames on stdin, encoded file out
        cmd = [
            "ffmpeg", "-y",
            "-f", "rawvideo",
            "-vcodec", "rawvideo",
            "-pix_fmt", "rgb24",
            "-s", f"{size[0]}x{size[1]}",
            "-r", str(fps),
            "-i", "-",
        ]

        # Add audio input if provided
        if audio_source:
            cmd.extend(["-i", str(audio_source)])
            # Explicitly map: video from input 0 (rawvideo), audio from input 1
            cmd.extend(["-map", "0:v", "-map", "1:a"])

        cmd.extend([
            "-c:v", codec,
            "-preset", preset,
            "-crf", str(crf),
            "-pix_fmt", "yuv420p",
        ])

        # Add audio codec if we have audio
        if audio_source:
            cmd.extend(["-c:a", "aac", "-b:a", "192k", "-shortest"])

        cmd.append(str(self.path))

        import sys
        print(f"FileOutput cmd: {' '.join(cmd)}", file=sys.stderr)
        self._process = subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stderr=None,  # Show errors for debugging
        )

    def write(self, frame: np.ndarray, t: float):
        """Write frame to video file."""
        if not self._is_open or self._process.poll() is not None:
            self._is_open = False
            return

        # Resize if needed
        if frame.shape[1] != self.size[0] or frame.shape[0] != self.size[1]:
            import cv2
            frame = cv2.resize(frame, self.size)

        try:
            self._process.stdin.write(frame.tobytes())
        except BrokenPipeError:
            self._is_open = False

    def close(self):
        """Close the video file."""
        # (statement continues in the next chunk of this patch)
        if
self._process:
            # NOTE(review): unlike DisplayOutput.close, stdin.close() here is
            # unguarded and can raise if ffmpeg already exited — verify.
            self._process.stdin.close()
            self._process.wait()
        self._is_open = False

    @property
    def is_open(self) -> bool:
        return self._is_open and self._process.poll() is None


class MultiOutput(Output):
    """
    Write to multiple outputs simultaneously.

    Useful for recording while showing preview.
    """

    def __init__(self, outputs: list):
        self.outputs = outputs

    def write(self, frame: np.ndarray, t: float):
        # Fan the frame out to every still-open target
        for output in self.outputs:
            if output.is_open:
                output.write(frame, t)

    def close(self):
        for output in self.outputs:
            output.close()

    @property
    def is_open(self) -> bool:
        # Open as long as ANY child output remains open
        return any(o.is_open for o in self.outputs)


class NullOutput(Output):
    """
    Discard frames (for benchmarking).
    """

    def __init__(self):
        self._is_open = True
        self.frame_count = 0  # number of frames "written"

    def write(self, frame: np.ndarray, t: float):
        self.frame_count += 1

    def close(self):
        self._is_open = False

    @property
    def is_open(self) -> bool:
        return self._is_open


class PipeOutput(Output):
    """
    Pipe frames directly to mpv.

    Launches mpv with rawvideo demuxer and writes frames to stdin.
    """

    def __init__(self, size: Tuple[int, int], fps: float = 30, audio_source: str = None):
        self.size = size
        self.fps = fps
        self.audio_source = audio_source
        self._is_open = True
        self._process = None
        self._audio_process = None
        self._started = False  # lazy start guard

    def _start(self):
        """Start mpv and audio on first frame."""
        if self._started:
            return
        self._started = True

        import sys
        w, h = self.size

        # Start mpv
        cmd = [
            "mpv", "--no-cache",
            "--demuxer=rawvideo",
            f"--demuxer-rawvideo-w={w}",
            f"--demuxer-rawvideo-h={h}",
            "--demuxer-rawvideo-mp-format=rgb24",
            f"--demuxer-rawvideo-fps={self.fps}",
            "--title=Streaming",
            "-"
        ]
        print(f"Starting mpv: {w}x{h} @ {self.fps}fps", file=sys.stderr)
        self._process = subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
        )

        # Start audio
        if self.audio_source:
            audio_cmd = [
                "ffplay", "-nodisp", "-autoexit", "-loglevel", "quiet",
                str(self.audio_source)
            ]
            print(f"Starting audio: {self.audio_source}", file=sys.stderr)
            self._audio_process = subprocess.Popen(
                audio_cmd,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )

    def write(self, frame: np.ndarray, t: float):
        """Write frame to mpv."""
        if not self._is_open:
            return

        self._start()

        # Check mpv still running
        if self._process.poll() is not None:
            self._is_open = False
            return

        # Resize if needed
        if frame.shape[1] != self.size[0] or frame.shape[0] != self.size[1]:
            import cv2
            frame = cv2.resize(frame, self.size)

        # Ensure correct format (uint8, contiguous) for raw piping
        if frame.dtype != np.uint8:
            frame = np.clip(frame, 0, 255).astype(np.uint8)
        if not frame.flags['C_CONTIGUOUS']:
            frame = np.ascontiguousarray(frame)

        try:
            self._process.stdin.write(frame.tobytes())
            self._process.stdin.flush()
        except BrokenPipeError:
            self._is_open = False

    def close(self):
        """Close mpv and audio."""
        if self._process:
            try:
                self._process.stdin.close()
            except:  # NOTE(review): bare except — narrow to OSError
                pass
            self._process.terminate()
            self._process.wait()
        if self._audio_process:
            self._audio_process.terminate()
            self._audio_process.wait()
        self._is_open = False

    @property
    def is_open(self) -> bool:
        if self._process and self._process.poll() is not None:
            self._is_open = False
        return self._is_open
diff --git a/streaming/pipeline.py b/streaming/pipeline.py
new file mode 100644
index 0000000..29dd7e1
--- /dev/null
+++ b/streaming/pipeline.py
@@ -0,0 +1,846 @@
"""
Streaming pipeline executor.

Directly executes compiled sexp recipes frame-by-frame.
No adapter layer - frames and analysis flow through the DAG.
"""

import sys
import time
import numpy as np
from pathlib import Path
from typing import Dict, List, Any, Optional
from dataclasses import dataclass, field

from .sources import VideoSource
from .audio import StreamingAudioAnalyzer
from .output import DisplayOutput, FileOutput
from .sexp_interp import SexpInterpreter


@dataclass
class FrameContext:
    """Context passed through the pipeline for each frame."""
    t: float  # Current time
    energy: float = 0.0
    is_beat: bool = False
    beat_count: int = 0
    analysis: Dict[str, Any] = field(default_factory=dict)


class StreamingPipeline:
    """
    Executes a compiled sexp recipe as a streaming pipeline.

    Frames flow through the DAG directly - no adapter needed.
    Each node is evaluated lazily when its output is requested.
    """

    def __init__(self, compiled_recipe, recipe_dir: Path = None, fps: float = 30, seed: int = 42,
                 output_size: tuple = None):
        # compiled_recipe must expose .nodes, a list of dicts each carrying
        # 'id', 'type' and optional 'config'/'inputs'.
        self.recipe = compiled_recipe
        self.recipe_dir = recipe_dir or Path(".")
        self.fps = fps
        self.seed = seed

        # Build node lookup
        self.nodes = {n['id']: n for n in compiled_recipe.nodes}

        # Runtime state
        self.sources: Dict[str, VideoSource] = {}
        self.audio_analyzer: Optional[StreamingAudioAnalyzer] = None
        self.audio_source_path: Optional[str] = None

        # Sexp interpreter for expressions
        self.interp = SexpInterpreter()

        # Scan state (node_id -> current value)
        self.scan_state: Dict[str, Any] = {}
        self.scan_emit: Dict[str, Any] = {}

        # SLICE_ON state
        self.slice_on_acc: Dict[str, Any] = {}
        self.slice_on_result: Dict[str, Any] = {}

        # Frame cache for current timestep (cleared each frame)
        self._frame_cache: Dict[str, np.ndarray] = {}

        # Context for current frame
        self.ctx = FrameContext(t=0.0)

        # Output size (w, h) - set after sources are initialized
        self._output_size = output_size

        # Initialize
        self._init_sources()
        self._init_scans()
        self._init_slice_on()

        # Set output size from first source if not specified
        if self._output_size is None and self.sources:
            first_source = next(iter(self.sources.values()))
            # NOTE(review): reaches into VideoSource's private _size — confirm
            # a public accessor exists or is intended.
            self._output_size = first_source._size

    def _init_sources(self):
        """Initialize video and audio sources.

        Paths are resolved relative to recipe_dir; media type is chosen by
        file extension. Missing files produce a warning, not an error.
        """
        for node in self.recipe.nodes:
            if node.get('type') == 'SOURCE':
                config = node.get('config', {})
                path = config.get('path')
                if path:
                    full_path = (self.recipe_dir / path).resolve()
                    suffix = full_path.suffix.lower()

                    if suffix in ('.mp4', '.webm', '.mov', '.avi', '.mkv'):
                        if not full_path.exists():
                            print(f"Warning: video not found: {full_path}", file=sys.stderr)
                            continue
                        self.sources[node['id']] = VideoSource(
                            str(full_path),
                            target_fps=self.fps
                        )
                    elif suffix in ('.mp3', '.wav', '.flac', '.ogg', '.m4a', '.aac'):
                        if not full_path.exists():
                            print(f"Warning: audio not found: {full_path}", file=sys.stderr)
                            continue
                        # Only one audio source is kept; a later one replaces it
                        self.audio_source_path = str(full_path)
                        self.audio_analyzer = StreamingAudioAnalyzer(str(full_path))

    def _init_scans(self):
        """Initialize scan nodes with their initial state.

        Each SCAN gets its own seeded RNG (config seed, or self.seed plus a
        running offset) so runs are reproducible.
        """
        import random
        seed_offset = 0

        for node in self.recipe.nodes:
            if node.get('type') == 'SCAN':
                config = node.get('config', {})

                # Create RNG for this scan
                scan_seed = config.get('seed', self.seed + seed_offset)
                rng = random.Random(scan_seed)
                seed_offset += 1

                # Evaluate initial value
                init_expr = config.get('init', 0)
                init_value = self.interp.eval(init_expr, {})

                self.scan_state[node['id']] = {
                    'value': init_value,
                    'rng': rng,
                    'config': config,
                }

                # Compute initial emit
                self._update_scan_emit(node['id'])

    def _update_scan_emit(self, node_id: str):
        """Update the emit value for a scan."""
        state = self.scan_state[node_id]
        config = state['config']
        emit_expr = config.get('emit_expr', config.get('emit', None))

        if emit_expr is None:
            # No emit expression - emit the value directly
            self.scan_emit[node_id] = state['value']
            return

        # Build environment from state: dict-valued state is flattened,
        # scalar state is exposed as 'acc'
        env = {}
        if isinstance(state['value'], dict):
            env.update(state['value'])
        else:
            env['acc'] = state['value']

        env['beat_count'] = self.ctx.beat_count
        env['time'] = self.ctx.t

        # Set RNG for interpreter
        self.interp.rng = state['rng']

        self.scan_emit[node_id] = self.interp.eval(emit_expr, env)

    def _step_scan(self, node_id: str):
        """Step a scan forward on beat."""
        state = self.scan_state[node_id]
        config = state['config']
        step_expr = config.get('step_expr', config.get('step', None))

        if step_expr is None:
            return

        # Build environment (same convention as _update_scan_emit)
        env = {}
        if isinstance(state['value'], dict):
            env.update(state['value'])
        else:
            env['acc'] = state['value']

        env['beat_count'] = self.ctx.beat_count
        env['time'] = self.ctx.t

        # Set RNG
        self.interp.rng = state['rng']

        # Evaluate step
        new_value = self.interp.eval(step_expr, env)
        state['value'] = new_value

        # Update emit
        self._update_scan_emit(node_id)

    def _init_slice_on(self):
        """Initialize SLICE_ON nodes."""
        for node in self.recipe.nodes:
            if node.get('type') == 'SLICE_ON':
                config = node.get('config', {})
                init = config.get('init', {})
                self.slice_on_acc[node['id']] = dict(init)

                # Evaluate initial state
                self._eval_slice_on(node['id'])

    def _eval_slice_on(self, node_id: str):
        """Evaluate a SLICE_ON node's Lambda."""
        node = self.nodes[node_id]
        config = node.get('config', {})
        fn = config.get('fn')
        videos = config.get('videos', [])

        if not fn:
            return

        acc = self.slice_on_acc[node_id]
        n_videos = len(videos)

        # Set up environment
        self.interp.globals['videos'] = list(range(n_videos))

        try:
            from .sexp_interp import eval_slice_on_lambda
            result = eval_slice_on_lambda(
                fn, acc, self.ctx.beat_count, 0, 1,
                list(range(n_videos)), self.interp
            )
            self.slice_on_result[node_id] = result

            # Update accumulator
            if 'acc' in result:
                self.slice_on_acc[node_id] = result['acc']
        except Exception as e:
            # Best-effort: a failing lambda keeps the previous result
            print(f"SLICE_ON eval error: {e}", file=sys.stderr)

    def _on_beat(self):
        """Called when a beat is detected."""
        self.ctx.beat_count += 1

        # Step all scans
        for node_id in self.scan_state:
            self._step_scan(node_id)

        # Step all SLICE_ON nodes
        for node_id in self.slice_on_acc:
            self._eval_slice_on(node_id)

    def _get_frame(self, node_id: str) -> Optional[np.ndarray]:
        """
        Get the output frame for a node at current time.

        Recursively evaluates inputs as needed.
        Results are cached for the current timestep.
        """
        if node_id in self._frame_cache:
            return self._frame_cache[node_id]

        node = self.nodes.get(node_id)
        if not node:
            return None

        node_type = node.get('type')

        # Dispatch by node type
        if node_type == 'SOURCE':
            frame = self._eval_source(node)
        elif node_type == 'SEGMENT':
            frame = self._eval_segment(node)
        elif node_type == 'EFFECT':
            frame = self._eval_effect(node)
        elif node_type == 'SLICE_ON':
            frame = self._eval_slice_on_frame(node)
        else:
            # Unknown node type - try to pass through input
            inputs = node.get('inputs', [])
            frame = self._get_frame(inputs[0]) if inputs else None

        self._frame_cache[node_id] = frame
        return frame

    def _eval_source(self, node: dict) -> Optional[np.ndarray]:
        """Evaluate a SOURCE node."""
        source = self.sources.get(node['id'])
        if source:
            return source.read_frame(self.ctx.t)
        return None

    def _eval_segment(self, node: dict) -> Optional[np.ndarray]:
        """Evaluate a SEGMENT node (time segment of source)."""
        inputs = node.get('inputs', [])
        if not inputs:
            return None

        config = node.get('config', {})
        start = config.get('start', 0)
        duration = config.get('duration')

        # Resolve any bindings
        if isinstance(start, dict):
            start = self._resolve_binding(start) if start.get('_binding') else 0
        if isinstance(duration, dict):
            duration = self._resolve_binding(duration) if duration.get('_binding') else None

        # Adjust time for segment
        t_local = self.ctx.t + (start if isinstance(start, (int, float)) else 0)
        if duration and isinstance(duration, (int, float)):
            t_local = t_local % duration  # Loop within segment

        # Get source frame at adjusted time; if the input is not a direct
        # source, fall back to normal DAG evaluation (at global time)
        source_id = inputs[0]
        source = self.sources.get(source_id)
        if source:
            return source.read_frame(t_local)

        return self._get_frame(source_id)

    def _eval_effect(self, node: dict) -> Optional[np.ndarray]:
        """Evaluate an EFFECT node.

        Unrecognized effect names pass the first input through unchanged.
        """
        import cv2

        inputs = node.get('inputs', [])
        config = node.get('config', {})
        effect_name = config.get('effect')

        # Get input frame(s), dropping any that failed to produce a frame
        input_frames = [self._get_frame(inp) for inp in inputs]
        input_frames = [f for f in input_frames if f is not None]

        if not input_frames:
            return None

        frame = input_frames[0]

        # Resolve bindings in config
        params = self._resolve_config(config)

        # Apply effect based on name
        if effect_name == 'rotate':
            angle = params.get('angle', 0)
            if abs(angle) > 0.5:
                h, w = frame.shape[:2]
                center = (w // 2, h // 2)
                matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
                frame = cv2.warpAffine(frame, matrix, (w, h))

        elif effect_name == 'zoom':
            amount = params.get('amount', 1.0)
            if abs(amount - 1.0) > 0.01:
                frame = self._apply_zoom(frame, amount)

        elif effect_name == 'invert':
            amount = params.get('amount', 0)
            if amount > 0.01:
                # Partial inversion via crossfade with the negative
                inverted = 255 - frame
                frame = cv2.addWeighted(frame, 1 - amount, inverted, amount, 0)

        elif effect_name == 'hue_shift':
            degrees = params.get('degrees', 0)
            if abs(degrees) > 1:
                # OpenCV hue range is 0..179, hence degrees / 2
                hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
                hsv[:, :, 0] = (hsv[:, :, 0].astype(int) + int(degrees / 2)) % 180
                frame = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)

        elif effect_name == 'blend':
            if len(input_frames) >= 2:
                opacity = params.get('opacity', 0.5)
                frame = cv2.addWeighted(input_frames[0], 1 - opacity,
                                        input_frames[1], opacity, 0)

        elif effect_name == 'blend_multi':
            weights = params.get('weights', [])
            if len(input_frames) > 1 and weights:
                h, w = input_frames[0].shape[:2]
                result = np.zeros((h, w, 3), dtype=np.float32)
                for f, wt in zip(input_frames, weights):
                    if f is not None and wt > 0.001:
                        if f.shape[:2] != (h, w):
                            f = cv2.resize(f, (w, h))
                        result += f.astype(np.float32) * wt
                frame = np.clip(result, 0, 255).astype(np.uint8)

        elif effect_name == 'ripple':
            amp = params.get('amplitude', 0)
            if amp > 1:
                frame = self._apply_ripple(frame, amp,
                                           params.get('center_x', 0.5),
                                           params.get('center_y', 0.5),
                                           params.get('frequency', 8),
                                           params.get('decay', 2),
                                           params.get('speed', 5))

        return frame

    def _eval_slice_on_frame(self, node: dict) -> Optional[np.ndarray]:
        """Evaluate a SLICE_ON node - returns composited frame.

        Uses the layers/compose result produced by _eval_slice_on; falls
        back to the first video when no result is available yet.
        """
        import cv2

        config = node.get('config', {})
        video_ids = config.get('videos', [])
        result = self.slice_on_result.get(node['id'], {})

        if not result:
            # No result yet - return first video
            if video_ids:
                return self._get_frame(video_ids[0])
            return None

        # Get layers and compose info
        layers = result.get('layers', [])
        compose = result.get('compose', {})
        weights = compose.get('weights', [])

        if not layers or not weights:
            if video_ids:
                return self._get_frame(video_ids[0])
            return None

        # Get frames for each layer
        frames = []
        for i, layer in enumerate(layers):
            video_idx = layer.get('video', i)
            if video_idx < len(video_ids):
                frame = self._get_frame(video_ids[video_idx])

                # Apply layer effects (only zoom is handled here)
                effects = layer.get('effects', [])
                for eff in effects:
                    eff_name = eff.get('effect')
                    # Effect name may be an enum-like object with .name
                    if hasattr(eff_name, 'name'):
                        eff_name = eff_name.name
                    if eff_name == 'zoom':
                        zoom_amt = eff.get('amount', 1.0)
                        if frame is not None:
                            frame = self._apply_zoom(frame, zoom_amt)

                frames.append(frame)
            else:
                frames.append(None)

        # Composite with weights - use consistent output size
        if self._output_size:
            w, h = self._output_size
        else:
            # Fallback to first non-None frame size
            for f in frames:
                if f is not None:
                    h, w = f.shape[:2]
                    break
            else:
                return None

        output = np.zeros((h, w, 3), dtype=np.float32)

        for frame, weight in zip(frames, weights):
            if frame is None or weight < 0.001:
                continue

            # Resize to output size
            if frame.shape[1] != w or frame.shape[0] != h:
                frame = cv2.resize(frame, (w, h))

            output += frame.astype(np.float32) * weight

        # Normalize weights (statement continues in the next chunk)
        total_weight = sum(wt for wt in weights if wt > 0.001)
        if total_weight > 0 and abs(total_weight - 1.0) > 0.01:
            output /=
total_weight + + return np.clip(output, 0, 255).astype(np.uint8) + + def _resolve_config(self, config: dict) -> dict: + """Resolve bindings in effect config to actual values.""" + resolved = {} + + for key, value in config.items(): + if key in ('effect', 'effect_path', 'effect_cid', 'effects_registry', + 'analysis_refs', 'inputs', 'cid'): + continue + + if isinstance(value, dict) and value.get('_binding'): + resolved[key] = self._resolve_binding(value) + elif isinstance(value, dict) and value.get('_expr'): + resolved[key] = self._resolve_expr(value) + else: + resolved[key] = value + + return resolved + + def _resolve_binding(self, binding: dict) -> Any: + """Resolve a binding to its current value.""" + source_id = binding.get('source') + feature = binding.get('feature', 'values') + range_map = binding.get('range') + + # Get raw value from scan or analysis + if source_id in self.scan_emit: + value = self.scan_emit[source_id] + elif source_id in self.ctx.analysis: + data = self.ctx.analysis[source_id] + value = data.get(feature, data.get('values', [0]))[0] if isinstance(data, dict) else data + else: + # Fallback to energy + value = self.ctx.energy + + # Extract feature from dict + if isinstance(value, dict) and feature in value: + value = value[feature] + + # Apply range mapping + if range_map and isinstance(value, (int, float)): + lo, hi = range_map + value = lo + value * (hi - lo) + + return value + + def _resolve_expr(self, expr: dict) -> Any: + """Resolve a compiled expression.""" + env = { + 'energy': self.ctx.energy, + 'beat_count': self.ctx.beat_count, + 't': self.ctx.t, + } + + # Add scan values + for scan_id, value in self.scan_emit.items(): + # Use short form if available + env[scan_id] = value + + # Extract the actual expression from _expr wrapper + actual_expr = expr.get('_expr', expr) + return self.interp.eval(actual_expr, env) + + def _apply_zoom(self, frame: np.ndarray, amount: float) -> np.ndarray: + """Apply zoom to frame.""" + import cv2 + h, w = 
    def _apply_zoom(self, frame: np.ndarray, amount: float) -> np.ndarray:
        """Apply zoom to frame.

        amount > 1 crops the center and scales it back to full size
        (zoom in); amount < 1 shrinks the frame onto a black canvas
        (zoom out); values within ~1% of 1.0 return the frame untouched.
        """
        import cv2
        h, w = frame.shape[:2]

        if amount > 1.01:
            # Zoom in: crop center
            new_w, new_h = int(w / amount), int(h / amount)
            if new_w > 0 and new_h > 0:
                x1, y1 = (w - new_w) // 2, (h - new_h) // 2
                cropped = frame[y1:y1+new_h, x1:x1+new_w]
                return cv2.resize(cropped, (w, h))
        elif amount < 0.99:
            # Zoom out: shrink and center
            scaled_w, scaled_h = int(w * amount), int(h * amount)
            if scaled_w > 0 and scaled_h > 0:
                shrunk = cv2.resize(frame, (scaled_w, scaled_h))
                canvas = np.zeros((h, w, 3), dtype=np.uint8)
                x_off, y_off = (w - scaled_w) // 2, (h - scaled_h) // 2
                canvas[y_off:y_off+scaled_h, x_off:x_off+scaled_w] = shrunk
                return canvas

        return frame

    def _apply_ripple(self, frame: np.ndarray, amplitude: float,
                      cx: float, cy: float, frequency: float,
                      decay: float, speed: float) -> np.ndarray:
        """Apply ripple effect.

        Displaces pixels radially around (cx, cy) - given as fractions of
        width/height - by a sine of the distance.  The wave phase advances
        with self.ctx.t * speed and the amplitude decays exponentially
        with distance (scaled by the larger frame dimension).
        """
        import cv2
        h, w = frame.shape[:2]

        # Create coordinate grids
        y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32)

        # Normalize to center
        center_x, center_y = w * cx, h * cy
        dx = x_coords - center_x
        dy = y_coords - center_y
        dist = np.sqrt(dx**2 + dy**2)

        # Ripple displacement
        phase = self.ctx.t * speed
        ripple = amplitude * np.sin(dist / frequency - phase) * np.exp(-dist * decay / max(w, h))

        # Displace coordinates radially (along the angle from the center)
        angle = np.arctan2(dy, dx)
        map_x = (x_coords + ripple * np.cos(angle)).astype(np.float32)
        map_y = (y_coords + ripple * np.sin(angle)).astype(np.float32)

        return cv2.remap(frame, map_x, map_y, cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)

    def _find_output_node(self) -> Optional[str]:
        """Find the final output node (MUX or last EFFECT).

        Returns the first MUX node id if one exists; otherwise the id of
        the last EFFECT node that appears after a SLICE_ON node, or None.
        """
        # Look for MUX node
        for node in self.recipe.nodes:
            if node.get('type') == 'MUX':
                return node['id']

        # Otherwise find last EFFECT after SLICE_ON
        last_effect = None
        found_slice_on = False
        for node in self.recipe.nodes:
            if node.get('type') == 'SLICE_ON':
                found_slice_on = True
            elif node.get('type') == 'EFFECT' and found_slice_on:
                last_effect = node['id']

        return last_effect

    def render_frame(self, t: float) -> Optional[np.ndarray]:
        """Render a single frame at time t.

        Updates the live audio analysis (energy/beat), fires _on_beat on a
        rising beat edge, then evaluates the output node.  The result is
        resized to self._output_size when set.
        """
        # Clear frame cache
        self._frame_cache.clear()

        # Update context
        self.ctx.t = t

        # Update audio analysis
        if self.audio_analyzer:
            self.audio_analyzer.set_time(t)
            energy = self.audio_analyzer.get_energy()
            is_beat = self.audio_analyzer.get_beat()

            # Beat edge detection (only trigger on the False -> True edge)
            was_beat = self.ctx.is_beat
            self.ctx.energy = energy
            self.ctx.is_beat = is_beat

            if is_beat and not was_beat:
                self._on_beat()

            # Store in analysis dict
            self.ctx.analysis['live_energy'] = {'values': [energy]}
            self.ctx.analysis['live_beat'] = {'values': [1.0 if is_beat else 0.0]}

        # Find output node and render
        output_node = self._find_output_node()
        if output_node:
            frame = self._get_frame(output_node)
            # Normalize to output size
            if frame is not None and self._output_size:
                w, h = self._output_size
                if frame.shape[1] != w or frame.shape[0] != h:
                    import cv2
                    frame = cv2.resize(frame, (w, h))
            return frame

        return None
    def run(self, output: str = "preview", duration: float = None):
        """
        Run the pipeline.

        Args:
            output: "preview", filename, or Output object
            duration: Duration in seconds (default: audio duration or 60s)
        """
        # Determine duration
        if duration is None:
            if self.audio_analyzer:
                duration = self.audio_analyzer.duration
            else:
                duration = 60.0

        # Create output
        if output == "preview":
            # Get frame size from first source (falls back to 720x720)
            first_source = next(iter(self.sources.values()), None)
            if first_source:
                w, h = first_source._size
            else:
                w, h = 720, 720
            out = DisplayOutput(size=(w, h), fps=self.fps, audio_source=self.audio_source_path)
        elif isinstance(output, str):
            first_source = next(iter(self.sources.values()), None)
            if first_source:
                w, h = first_source._size
            else:
                w, h = 720, 720
            out = FileOutput(output, size=(w, h), fps=self.fps, audio_source=self.audio_source_path)
        else:
            # Caller supplied an Output object directly
            out = output

        frame_time = 1.0 / self.fps
        n_frames = int(duration * self.fps)

        print(f"Streaming: {len(self.sources)} sources -> {output}", file=sys.stderr)
        print(f"Duration: {duration:.1f}s, {n_frames} frames @ {self.fps}fps", file=sys.stderr)

        start_time = time.time()
        frame_count = 0

        try:
            for frame_num in range(n_frames):
                t = frame_num * frame_time

                frame = self.render_frame(t)

                if frame is not None:
                    out.write(frame, t)
                    frame_count += 1

                # Progress (printed every 50 frames)
                if frame_num % 50 == 0:
                    elapsed = time.time() - start_time
                    fps = frame_count / elapsed if elapsed > 0 else 0
                    pct = 100 * frame_num / n_frames
                    print(f"\r{pct:5.1f}% | {fps:5.1f} fps | frame {frame_num}/{n_frames}",
                          end="", file=sys.stderr)

        except KeyboardInterrupt:
            print("\nInterrupted", file=sys.stderr)
        finally:
            # Always release the output and all video sources
            out.close()
            for src in self.sources.values():
                src.close()

        elapsed = time.time() - start_time
        avg_fps = frame_count / elapsed if elapsed > 0 else 0
        print(f"\nCompleted: {frame_count} frames in {elapsed:.1f}s ({avg_fps:.1f} fps avg)",
              file=sys.stderr)
def run_pipeline(recipe_path: str, output: str = "preview",
                 duration: float = None, fps: float = None):
    """
    Run a recipe through the streaming pipeline.

    No adapter layer - directly executes the compiled recipe.

    Args:
        recipe_path: Path to the .sexp recipe file
        output: "preview", filename, or Output object (passed to run())
        duration: Duration in seconds (default: audio duration)
        fps: Frame rate override (default: the recipe's encoding fps or 30)
    """
    from pathlib import Path

    # Add artdag to path so the sexp compiler can be imported
    import sys
    sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag"))

    from artdag.sexp.compiler import compile_string

    recipe_path = Path(recipe_path)
    recipe_text = recipe_path.read_text()
    compiled = compile_string(recipe_text, {}, recipe_dir=recipe_path.parent)

    pipeline = StreamingPipeline(
        compiled,
        recipe_dir=recipe_path.parent,
        fps=fps or compiled.encoding.get('fps', 30),
    )

    pipeline.run(output=output, duration=duration)


def run_pipeline_piped(recipe_path: str, duration: float = None, fps: float = None):
    """
    Run pipeline and pipe directly to mpv with audio.

    Spawns mpv reading raw rgb24 frames on stdin and, when the recipe has
    an audio source, an ffplay process for sound.  Frames are rendered and
    written until mpv exits, the frame budget runs out, or Ctrl-C.
    """
    import subprocess
    from pathlib import Path
    import sys
    sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag"))
    from artdag.sexp.compiler import compile_string

    recipe_path = Path(recipe_path)
    recipe_text = recipe_path.read_text()
    compiled = compile_string(recipe_text, {}, recipe_dir=recipe_path.parent)

    pipeline = StreamingPipeline(
        compiled,
        recipe_dir=recipe_path.parent,
        fps=fps or compiled.encoding.get('fps', 30),
    )

    # Get frame info (falls back to 720x720 when there are no sources)
    first_source = next(iter(pipeline.sources.values()), None)
    if first_source:
        w, h = first_source._size
    else:
        w, h = 720, 720

    # Determine duration
    if duration is None:
        if pipeline.audio_analyzer:
            duration = pipeline.audio_analyzer.duration
        else:
            duration = 60.0

    actual_fps = fps or compiled.encoding.get('fps', 30)
    n_frames = int(duration * actual_fps)
    frame_time = 1.0 / actual_fps

    print(f"Streaming {n_frames} frames @ {actual_fps}fps to mpv", file=sys.stderr)

    # Start mpv reading raw rgb24 frames from stdin ("-")
    mpv_cmd = [
        "mpv", "--no-cache",
        "--demuxer=rawvideo",
        f"--demuxer-rawvideo-w={w}",
        f"--demuxer-rawvideo-h={h}",
        "--demuxer-rawvideo-mp-format=rgb24",
        f"--demuxer-rawvideo-fps={actual_fps}",
        "--title=Streaming Pipeline",
        "-"
    ]
    mpv = subprocess.Popen(mpv_cmd, stdin=subprocess.PIPE, stderr=subprocess.DEVNULL)

    # Start audio if available (played separately; no sync mechanism here)
    audio_proc = None
    if pipeline.audio_source_path:
        audio_cmd = ["ffplay", "-nodisp", "-autoexit", "-loglevel", "quiet",
                     pipeline.audio_source_path]
        audio_proc = subprocess.Popen(audio_cmd, stdout=subprocess.DEVNULL,
                                      stderr=subprocess.DEVNULL)

    try:
        import cv2
        for frame_num in range(n_frames):
            if mpv.poll() is not None:
                break  # mpv closed

            t = frame_num * frame_time
            frame = pipeline.render_frame(t)
            if frame is not None:
                # Ensure consistent frame size
                if frame.shape[1] != w or frame.shape[0] != h:
                    frame = cv2.resize(frame, (w, h))
                # tobytes() on a non-contiguous array would copy wrong data
                if not frame.flags['C_CONTIGUOUS']:
                    frame = np.ascontiguousarray(frame)
                try:
                    mpv.stdin.write(frame.tobytes())
                    mpv.stdin.flush()
                except BrokenPipeError:
                    break
    except KeyboardInterrupt:
        pass
    finally:
        # Best-effort teardown of both players and all sources
        if mpv.stdin:
            mpv.stdin.close()
        mpv.terminate()
        if audio_proc:
            audio_proc.terminate()
        for src in pipeline.sources.values():
            src.close()


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Run sexp recipe through streaming pipeline")
    parser.add_argument("recipe", help="Path to .sexp recipe file")
    parser.add_argument("-o", "--output", default="pipe",
                        help="Output: 'pipe' (mpv), 'preview', or filename (default: pipe)")
    parser.add_argument("-d", "--duration", type=float, default=None,
                        help="Duration in seconds (default: audio duration)")
    parser.add_argument("--fps", type=float, default=None,
                        help="Frame rate (default: from recipe)")
    args = parser.parse_args()

    if args.output == "pipe":
        run_pipeline_piped(args.recipe, duration=args.duration, fps=args.fps)
    else:
        run_pipeline(args.recipe, output=args.output, duration=args.duration, fps=args.fps)
"""
Adapter to run sexp recipes through the streaming compositor.

Bridges the gap between:
- Existing recipe format (sexp files with stages, effects, analysis)
- Streaming compositor (sources, effect chains, compositor config)
"""

import sys
from pathlib import Path
from typing import Dict, List, Any, Optional

# Make the sibling artdag checkout importable (artdag.sexp.compiler etc.)
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag"))

from .compositor import StreamingCompositor
from .sources import VideoSource
from .audio import FileAudioAnalyzer


class RecipeAdapter:
    """
    Adapts a compiled sexp recipe to run through the streaming compositor.

    Example:
        adapter = RecipeAdapter("effects/quick_test.sexp")
        adapter.run(output="preview", duration=60)
    """

    def __init__(
        self,
        recipe_path: str,
        params: Dict[str, Any] = None,
        backend: str = "numpy",
    ):
        """
        Load and prepare a recipe for streaming.

        Args:
            recipe_path: Path to .sexp recipe file
            params: Parameter overrides
            backend: "numpy" or "glsl"
        """
        self.recipe_path = Path(recipe_path)
        self.recipe_dir = self.recipe_path.parent
        self.params = params or {}
        self.backend = backend

        # Compile recipe
        self._compile()

    def _compile(self):
        """Compile the recipe and extract structure.

        Populates self.compiled plus three lookup dicts:
        sources (SOURCE node id -> resolved path), analyzers
        (ANALYZE node id -> analyzer info) and effects_registry
        (effect name -> path).
        """
        from artdag.sexp.compiler import compile_string

        recipe_text = self.recipe_path.read_text()
        self.compiled = compile_string(recipe_text, self.params, recipe_dir=self.recipe_dir)

        # Extract key info
        self.sources = {}  # name -> path
        self.effects_registry = {}  # effect_name -> path
        self.analyzers = {}  # name -> analyzer info

        # Walk nodes to find sources and structure
        # nodes is a list in CompiledRecipe
        for node in self.compiled.nodes:
            node_type = node.get("type", "")

            if node_type == "SOURCE":
                config = node.get("config", {})
                path = config.get("path")
                if path:
                    # Paths in the recipe are relative to the recipe dir
                    self.sources[node["id"]] = self.recipe_dir / path

            elif node_type == "ANALYZE":
                config = node.get("config", {})
                self.analyzers[node["id"]] = {
                    "analyzer": config.get("analyzer"),
                    "path": config.get("analyzer_path"),
                }

        # Get effects registry from compiled recipe
        # registry has 'effects' sub-dict
        effects_dict = self.compiled.registry.get("effects", {})
        for name, info in effects_dict.items():
            if info.get("path"):
                self.effects_registry[name] = Path(info["path"])

    def run_analysis(self) -> Dict[str, Any]:
        """
        Run analysis phase (energy, beats, etc.).

        Returns:
            Dict of analysis track name -> {times, values, duration}
        """
        print(f"Running analysis...", file=sys.stderr)

        # Use existing planner's analysis execution
        from artdag.sexp.planner import create_plan

        analysis_data = {}

        def on_analysis(node_id: str, results: dict):
            # Callback invoked by the planner per analysis node
            analysis_data[node_id] = results
            print(f"  {node_id[:16]}...: {len(results.get('times', []))} samples", file=sys.stderr)

        # Create plan (runs analysis as side effect)
        plan = create_plan(
            self.compiled,
            inputs={},
            recipe_dir=self.recipe_dir,
            on_analysis=on_analysis,
        )

        # Also store named analysis tracks
        for name, data in plan.analysis.items():
            analysis_data[name] = data

        return analysis_data

    def build_compositor(
        self,
        analysis_data: Dict[str, Any] = None,
        fps: float = None,
    ) -> StreamingCompositor:
        """
        Build a streaming compositor from the recipe.

        This is a simplified version that handles common patterns.
        Complex recipes may need manual configuration.

        Args:
            analysis_data: Pre-computed analysis data

        Returns:
            Configured StreamingCompositor
        """
        # Extract video and audio sources in SLICE_ON input order
        video_sources = []
        audio_source = None

        # Find audio source first (first source with an audio extension)
        for node_id, path in self.sources.items():
            suffix = path.suffix.lower()
            if suffix in ('.mp3', '.wav', '.flac', '.ogg', '.m4a', '.aac'):
                audio_source = str(path)
                break

        # Find SLICE_ON node to get correct video order
        slice_on_inputs = None
        for node in self.compiled.nodes:
            if node.get('type') == 'SLICE_ON':
                # Use 'videos' config key which has the correct order
                config = node.get('config', {})
                slice_on_inputs = config.get('videos', [])
                break

        if slice_on_inputs:
            # Trace each SLICE_ON input back to its SOURCE
            node_lookup = {n['id']: n for n in self.compiled.nodes}

            def trace_to_source(node_id, visited=None):
                """Trace a node back to its SOURCE, return source path."""
                if visited is None:
                    visited = set()
                # Guard against cycles in the node graph
                if node_id in visited:
                    return None
                visited.add(node_id)

                node = node_lookup.get(node_id)
                if not node:
                    return None
                if node.get('type') == 'SOURCE':
                    return self.sources.get(node_id)
                # Recurse through inputs (first SOURCE found wins)
                for inp in node.get('inputs', []):
                    result = trace_to_source(inp, visited)
                    if result:
                        return result
                return None

            # Build video_sources in SLICE_ON input order
            for inp_id in slice_on_inputs:
                source_path = trace_to_source(inp_id)
                if source_path:
                    suffix = source_path.suffix.lower()
                    if suffix in ('.mp4', '.webm', '.mov', '.avi', '.mkv'):
                        video_sources.append(str(source_path))

        # Fallback to definition order if no SLICE_ON
        if not video_sources:
            for node_id, path in self.sources.items():
                suffix = path.suffix.lower()
                if suffix in ('.mp4', '.webm', '.mov', '.avi', '.mkv'):
                    video_sources.append(str(path))

        if not video_sources:
            raise ValueError("No video sources found in recipe")

        # Build effect chains - use live audio bindings (matching video_sources count)
        effects_per_source = self._build_streaming_effects(n_sources=len(video_sources))

        # Build compositor config from recipe
        compositor_config = self._extract_compositor_config(analysis_data)

        return StreamingCompositor(
            sources=video_sources,
            effects_per_source=effects_per_source,
            compositor_config=compositor_config,
            analysis_data=analysis_data or {},
            backend=self.backend,
            recipe_dir=self.recipe_dir,
            fps=fps or self.compiled.encoding.get("fps", 30),
            audio_source=audio_source,
        )

    def _build_streaming_effects(self, n_sources: int = None) -> List[List[Dict]]:
        """
        Build effect chains for streaming with live audio bindings.

        Replicates the recipe's effect pipeline:
        - Per source: rotate, zoom, invert, hue_shift, ascii_art
        - All driven by live_energy and live_beat

        Args:
            n_sources: number of chains to build; defaults to the number
                of video-typed sources discovered at compile time.
        """
        if n_sources is None:
            n_sources = len([p for p in self.sources.values()
                             if p.suffix.lower() in ('.mp4', '.webm', '.mov', '.avi', '.mkv')])

        effects_per_source = []

        for i in range(n_sources):
            # Alternate rotation direction per source
            rot_dir = 1 if i % 2 == 0 else -1

            effects = [
                # Rotate - energy drives angle
                {
                    "effect": "rotate",
                    "effect_path": str(self.effects_registry.get("rotate", "")),
                    "angle": {
                        "_binding": True,
                        "source": "live_energy",
                        "feature": "values",
                        "range": [0, 45 * rot_dir],
                    },
                },
                # Zoom - energy drives amount (alternating in/out per source)
                {
                    "effect": "zoom",
                    "effect_path": str(self.effects_registry.get("zoom", "")),
                    "amount": {
                        "_binding": True,
                        "source": "live_energy",
                        "feature": "values",
                        "range": [1.0, 1.5] if i % 2 == 0 else [1.0, 0.7],
                    },
                },
                # Invert - beat triggers
                {
                    "effect": "invert",
                    "effect_path": str(self.effects_registry.get("invert", "")),
                    "amount": {
                        "_binding": True,
                        "source": "live_beat",
                        "feature": "values",
                        "range": [0, 1],
                    },
                },
                # Hue shift - energy drives hue
                {
                    "effect": "hue_shift",
                    "effect_path": str(self.effects_registry.get("hue_shift", "")),
                    "degrees": {
                        "_binding": True,
                        "source": "live_energy",
                        "feature": "values",
                        "range": [0, 180],
                    },
                },
                # ASCII art - energy drives char size, beat triggers mix
                {
                    "effect": "ascii_art",
                    "effect_path": str(self.effects_registry.get("ascii_art", "")),
                    "char_size": {
                        "_binding": True,
                        "source": "live_energy",
                        "feature": "values",
                        "range": [4, 32],
                    },
                    "mix": {
                        "_binding": True,
                        "source": "live_beat",
                        "feature": "values",
                        "range": [0, 1],
                    },
                },
            ]
            effects_per_source.append(effects)

        return effects_per_source

    def _extract_effects(self) -> List[List[Dict]]:
        """Extract effect chains for each source (legacy, pre-computed analysis)."""
        # Simplified: find EFFECT nodes and their configs
        effects_per_source = []

        for node_id, path in self.sources.items():
            if path.suffix.lower() not in ('.mp4', '.webm', '.mov', '.avi', '.mkv'):
                continue

            # Find effects that depend on this source
            # This is simplified - real implementation would trace the DAG
            effects = []

            for node in self.compiled.nodes:
                if node.get("type") == "EFFECT":
                    config = node.get("config", {})
                    effect_name = config.get("effect")
                    if effect_name and effect_name in self.effects_registry:
                        effect_config = {
                            "effect": effect_name,
                            "effect_path": str(self.effects_registry[effect_name]),
                        }
                        # Copy only effect params (filter out internal fields)
                        internal_fields = (
                            "effect", "effect_path", "cid", "effect_cid",
                            "effects_registry", "analysis_refs", "inputs",
                        )
                        for k, v in config.items():
                            if k not in internal_fields:
                                effect_config[k] = v
                        effects.append(effect_config)
                        break  # One effect per source for now

            effects_per_source.append(effects)

        return effects_per_source
node.get("config", {}) + if config.get("effect") == "blend_multi": + return { + "mode": config.get("mode", "alpha"), + "weights": config.get("weights", []), + } + + # Default: equal blend + n_sources = len([p for p in self.sources.values() + if p.suffix.lower() in ('.mp4', '.webm', '.mov', '.avi', '.mkv')]) + return { + "mode": "alpha", + "weights": [1.0 / n_sources] * n_sources if n_sources > 0 else [1.0], + } + + def run( + self, + output: str = "preview", + duration: float = None, + fps: float = None, + ): + """ + Run the recipe through streaming compositor. + + Everything streams: video frames read on-demand, audio analyzed in real-time. + No pre-computation. + + Args: + output: "preview", filename, or Output object + duration: Duration in seconds (default: audio duration) + fps: Frame rate (default from recipe, or 30) + """ + # Build compositor with recipe executor for full pipeline + from .recipe_executor import StreamingRecipeExecutor + + compositor = self.build_compositor(analysis_data={}, fps=fps) + + # Use audio duration if not specified + if duration is None: + if compositor._audio_analyzer: + duration = compositor._audio_analyzer.duration + print(f"Using audio duration: {duration:.1f}s", file=sys.stderr) + else: + # Live mode - run until quit + print("Live mode - press 'q' to quit", file=sys.stderr) + + # Create sexp executor that interprets the recipe + from .sexp_executor import SexpStreamingExecutor + executor = SexpStreamingExecutor(self.compiled, seed=42) + + compositor.run(output=output, duration=duration, recipe_executor=executor) + + +def run_recipe( + recipe_path: str, + output: str = "preview", + duration: float = None, + params: Dict = None, + fps: float = None, +): + """ + Run a recipe through streaming compositor. + + Everything streams in real-time: video frames, audio analysis. + No pre-computation - starts immediately. 
+ + Example: + run_recipe("effects/quick_test.sexp", output="preview", duration=30) + run_recipe("effects/quick_test.sexp", output="preview", fps=5) # Lower fps for slow systems + """ + adapter = RecipeAdapter(recipe_path, params=params) + adapter.run(output=output, duration=duration, fps=fps) + + +def run_recipe_piped( + recipe_path: str, + duration: float = None, + params: Dict = None, + fps: float = None, +): + """ + Run recipe and pipe directly to mpv. + """ + from .output import PipeOutput + + adapter = RecipeAdapter(recipe_path, params=params) + compositor = adapter.build_compositor(analysis_data={}, fps=fps) + + # Get frame size + if compositor.sources: + first_source = compositor.sources[0] + w, h = first_source._size + else: + w, h = 720, 720 + + actual_fps = fps or adapter.compiled.encoding.get('fps', 30) + + # Create pipe output + pipe_out = PipeOutput( + size=(w, h), + fps=actual_fps, + audio_source=compositor._audio_source + ) + + # Create executor + from .sexp_executor import SexpStreamingExecutor + executor = SexpStreamingExecutor(adapter.compiled, seed=42) + + # Run with pipe output + compositor.run(output=pipe_out, duration=duration, recipe_executor=executor) + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser(description="Run sexp recipe with streaming compositor") + parser.add_argument("recipe", help="Path to .sexp recipe file") + parser.add_argument("-o", "--output", default="pipe", + help="Output: 'pipe' (mpv), 'preview', or filename (default: pipe)") + parser.add_argument("-d", "--duration", type=float, default=None, + help="Duration in seconds (default: audio duration)") + parser.add_argument("--fps", type=float, default=None, + help="Frame rate (default: from recipe)") + args = parser.parse_args() + + if args.output == "pipe": + run_recipe_piped(args.recipe, duration=args.duration, fps=args.fps) + else: + run_recipe(args.recipe, output=args.output, duration=args.duration, fps=args.fps) diff --git 
a/streaming/recipe_executor.py b/streaming/recipe_executor.py new file mode 100644 index 0000000..678d9f6 --- /dev/null +++ b/streaming/recipe_executor.py @@ -0,0 +1,415 @@ +""" +Streaming recipe executor. + +Implements the full recipe logic for real-time streaming: +- Scans (state machines that evolve on beats) +- Process-pair template (two clips with sporadic effects, blended) +- Cycle-crossfade (dynamic composition cycling through video pairs) +""" + +import random +import numpy as np +from typing import Dict, List, Any, Optional +from dataclasses import dataclass, field + + +@dataclass +class ScanState: + """State for a scan (beat-driven state machine).""" + value: Any = 0 + rng: random.Random = field(default_factory=random.Random) + + +class StreamingScans: + """ + Real-time scan executor. + + Scans are state machines that evolve on each beat. + They drive effect parameters like invert triggers, hue shifts, etc. + """ + + def __init__(self, seed: int = 42, n_sources: int = 4): + self.master_seed = seed + self.n_sources = n_sources + self.scans: Dict[str, ScanState] = {} + self.beat_count = 0 + self.current_time = 0.0 + self.last_beat_time = 0.0 + self._init_scans() + + def _init_scans(self): + """Initialize all scans with their own RNG seeds.""" + scan_names = [] + + # Per-pair scans (dynamic based on n_sources) + for i in range(self.n_sources): + scan_names.extend([ + f"inv_a_{i}", f"inv_b_{i}", f"hue_a_{i}", f"hue_b_{i}", + f"ascii_a_{i}", f"ascii_b_{i}", f"pair_mix_{i}", f"pair_rot_{i}", + ]) + + # Global scans + scan_names.extend(["whole_spin", "ripple_gate", "cycle"]) + + for i, name in enumerate(scan_names): + rng = random.Random(self.master_seed + i) + self.scans[name] = ScanState(value=self._init_value(name), rng=rng) + + def _init_value(self, name: str) -> Any: + """Get initial value for a scan.""" + if name.startswith("inv_") or name.startswith("ascii_"): + return 0 # Counter for remaining beats + elif name.startswith("hue_"): + return {"rem": 0, 
"hue": 0} + elif name.startswith("pair_mix"): + return {"rem": 0, "opacity": 0.5} + elif name.startswith("pair_rot"): + pair_idx = int(name.split("_")[-1]) + rot_dir = 1 if pair_idx % 2 == 0 else -1 + return {"beat": 0, "clen": 25, "dir": rot_dir, "angle": 0} + elif name == "whole_spin": + return { + "phase": 0, # 0 = waiting, 1 = spinning + "beat": 0, # beats into current phase + "plen": 20, # beats in this phase + "dir": 1, # spin direction + "total_angle": 0.0, # cumulative angle after all spins + "spin_start_angle": 0.0, # angle when current spin started + "spin_start_time": 0.0, # time when current spin started + "spin_end_time": 0.0, # estimated time when spin ends + } + elif name == "ripple_gate": + return {"rem": 0, "cx": 0.5, "cy": 0.5} + elif name == "cycle": + return {"cycle": 0, "beat": 0, "clen": 60} + return 0 + + def on_beat(self): + """Update all scans on a beat.""" + self.beat_count += 1 + # Estimate beat interval from last two beats + beat_interval = self.current_time - self.last_beat_time if self.last_beat_time > 0 else 0.5 + self.last_beat_time = self.current_time + + for name, state in self.scans.items(): + state.value = self._step_scan(name, state.value, state.rng, beat_interval) + + def _step_scan(self, name: str, value: Any, rng: random.Random, beat_interval: float = 0.5) -> Any: + """Step a scan forward by one beat.""" + + # Invert scan: 10% chance, lasts 1-5 beats + if name.startswith("inv_"): + if value > 0: + return value - 1 + elif rng.random() < 0.1: + return rng.randint(1, 5) + return 0 + + # Hue scan: 10% chance, random hue 30-330, lasts 1-5 beats + elif name.startswith("hue_"): + if value["rem"] > 0: + return {"rem": value["rem"] - 1, "hue": value["hue"]} + elif rng.random() < 0.1: + return {"rem": rng.randint(1, 5), "hue": rng.uniform(30, 330)} + return {"rem": 0, "hue": 0} + + # ASCII scan: 5% chance, lasts 1-3 beats + elif name.startswith("ascii_"): + if value > 0: + return value - 1 + elif rng.random() < 0.05: + return 
rng.randint(1, 3) + return 0 + + # Pair mix: changes every 1-11 beats + elif name.startswith("pair_mix"): + if value["rem"] > 0: + return {"rem": value["rem"] - 1, "opacity": value["opacity"]} + return {"rem": rng.randint(1, 11), "opacity": rng.choice([0, 0.5, 1.0])} + + # Pair rotation: full rotation every 20-30 beats + elif name.startswith("pair_rot"): + beat = value["beat"] + clen = value["clen"] + dir_ = value["dir"] + angle = value["angle"] + + if beat + 1 < clen: + new_angle = angle + dir_ * (360 / clen) + return {"beat": beat + 1, "clen": clen, "dir": dir_, "angle": new_angle} + else: + return {"beat": 0, "clen": rng.randint(20, 30), "dir": -dir_, "angle": angle} + + # Whole spin: sporadic 720 degree spins (cumulative - stays rotated) + elif name == "whole_spin": + phase = value["phase"] + beat = value["beat"] + plen = value["plen"] + dir_ = value["dir"] + total_angle = value.get("total_angle", 0.0) + spin_start_angle = value.get("spin_start_angle", 0.0) + spin_start_time = value.get("spin_start_time", 0.0) + spin_end_time = value.get("spin_end_time", 0.0) + + if phase == 1: + # Currently spinning + if beat + 1 < plen: + return { + "phase": 1, "beat": beat + 1, "plen": plen, "dir": dir_, + "total_angle": total_angle, + "spin_start_angle": spin_start_angle, + "spin_start_time": spin_start_time, + "spin_end_time": spin_end_time, + } + else: + # Spin complete - update total_angle with final spin + new_total = spin_start_angle + dir_ * 720.0 + return { + "phase": 0, "beat": 0, "plen": rng.randint(20, 40), "dir": dir_, + "total_angle": new_total, + "spin_start_angle": new_total, + "spin_start_time": self.current_time, + "spin_end_time": self.current_time, + } + else: + # Waiting phase + if beat + 1 < plen: + return { + "phase": 0, "beat": beat + 1, "plen": plen, "dir": dir_, + "total_angle": total_angle, + "spin_start_angle": spin_start_angle, + "spin_start_time": spin_start_time, + "spin_end_time": spin_end_time, + } + else: + # Start new spin + new_dir = 1 if 
rng.random() < 0.5 else -1 + new_plen = rng.randint(10, 25) + spin_duration = new_plen * beat_interval + return { + "phase": 1, "beat": 0, "plen": new_plen, "dir": new_dir, + "total_angle": total_angle, + "spin_start_angle": total_angle, + "spin_start_time": self.current_time, + "spin_end_time": self.current_time + spin_duration, + } + + # Ripple gate: 5% chance, lasts 1-20 beats + elif name == "ripple_gate": + if value["rem"] > 0: + return {"rem": value["rem"] - 1, "cx": value["cx"], "cy": value["cy"]} + elif rng.random() < 0.05: + return {"rem": rng.randint(1, 20), + "cx": rng.uniform(0.1, 0.9), + "cy": rng.uniform(0.1, 0.9)} + return {"rem": 0, "cx": 0.5, "cy": 0.5} + + # Cycle: track which video pair is active + elif name == "cycle": + beat = value["beat"] + clen = value["clen"] + cycle = value["cycle"] + + if beat + 1 < clen: + return {"cycle": cycle, "beat": beat + 1, "clen": clen} + else: + # Move to next pair, vary cycle length + return {"cycle": (cycle + 1) % 4, "beat": 0, + "clen": 40 + (self.beat_count * 7) % 41} + + return value + + def get_emit(self, name: str) -> float: + """Get emitted value for a scan.""" + value = self.scans[name].value + + if name.startswith("inv_") or name.startswith("ascii_"): + return 1.0 if value > 0 else 0.0 + + elif name.startswith("hue_"): + return value["hue"] if value["rem"] > 0 else 0.0 + + elif name.startswith("pair_mix"): + return value["opacity"] + + elif name.startswith("pair_rot"): + return value["angle"] + + elif name == "whole_spin": + # Smooth time-based interpolation during spin + phase = value.get("phase", 0) + if phase == 1: + # Currently spinning - interpolate based on time + spin_start_time = value.get("spin_start_time", 0.0) + spin_end_time = value.get("spin_end_time", spin_start_time + 1.0) + spin_start_angle = value.get("spin_start_angle", 0.0) + dir_ = value.get("dir", 1) + + duration = spin_end_time - spin_start_time + if duration > 0: + progress = (self.current_time - spin_start_time) / duration + 
class StreamingRecipeExecutor:
    """
    Executes a recipe in streaming mode.

    Implements:
    - process-pair: two video clips with opposite effects, blended
    - cycle-crossfade: dynamic cycling through video pairs
    - Final effects: whole-spin rotation, ripple

    All randomness lives in the owned :class:`StreamingScans`; this class
    only translates scan emissions plus the per-frame audio energy into
    effect-parameter dicts for the compositor.
    """

    def __init__(self, n_sources: int = 4, seed: int = 42):
        # Number of video sources in the cycle (weights/zooms lists have
        # this length).
        self.n_sources = n_sources
        # Beat-driven random state machines.
        self.scans = StreamingScans(seed, n_sources=n_sources)
        # Edge detector so a sustained is_beat only fires on_beat() once.
        self.last_beat_detected = False
        self.current_time = 0.0

    def on_frame(self, energy: float, is_beat: bool, t: float = 0.0):
        """Called each frame with current audio analysis.

        Args:
            energy: Current audio energy (unused here; consumed by the
                get_* methods).
            is_beat: Beat flag from the analyzer (level, not edge).
            t: Stream time in seconds.
        """
        self.current_time = t
        self.scans.current_time = t
        # Update scans on beat (rising-edge detection on is_beat).
        if is_beat and not self.last_beat_detected:
            self.scans.on_beat()
        self.last_beat_detected = is_beat

    def get_effect_params(self, source_idx: int, clip: str, energy: float) -> Dict:
        """
        Get effect parameters for a source clip.

        Args:
            source_idx: Which video source (0-3)
            clip: "a" or "b" (each source has two clips)
            energy: Current audio energy (0-1)

        Returns:
            Dict of per-clip effect parameters (rotate/zoom/invert/hue/ascii).
        """
        suffix = f"_{source_idx}"

        # Rotation ranges alternate: clip "a" and clip "b" always spin /
        # zoom in opposite directions, and odd sources mirror even ones.
        if source_idx % 2 == 0:
            rot_range = [0, 45] if clip == "a" else [0, -45]
            zoom_range = [1, 1.5] if clip == "a" else [1, 0.5]
        else:
            rot_range = [0, -45] if clip == "a" else [0, 45]
            zoom_range = [1, 0.5] if clip == "a" else [1, 1.5]

        return {
            # Energy maps linearly into the rotation / zoom ranges.
            "rotate_angle": rot_range[0] + energy * (rot_range[1] - rot_range[0]),
            "zoom_amount": zoom_range[0] + energy * (zoom_range[1] - zoom_range[0]),
            "invert_amount": self.scans.get_emit(f"inv_{clip}{suffix}"),
            "hue_degrees": self.scans.get_emit(f"hue_{clip}{suffix}"),
            "ascii_mix": 0,  # Disabled - too slow without GPU
            "ascii_char_size": 4 + energy * 28,  # 4-32
        }

    def get_pair_params(self, source_idx: int) -> Dict:
        """Get blend and rotation params for a video pair."""
        suffix = f"_{source_idx}"
        return {
            "blend_opacity": self.scans.get_emit(f"pair_mix{suffix}"),
            "pair_rotation": self.scans.get_emit(f"pair_rot{suffix}"),
        }

    def get_cycle_weights(self) -> List[float]:
        """Get blend weights for cycle-crossfade composition.

        The "cycle" scan emits a dict with the active pair index, the
        current beat within the cycle, and the cycle length.  The cycle is
        split in thirds (phase3 = beat * 3): hold, crossfade, hold.
        """
        cycle_state = self.scans.get_emit("cycle")
        active = cycle_state["cycle"]
        beat = cycle_state["beat"]
        clen = cycle_state["clen"]
        n = self.n_sources

        phase3 = beat * 3
        weights = []

        for p in range(n):
            # The source whose successor is active fades IN while the
            # active one fades OUT.
            prev = (p + n - 1) % n

            if active == p:
                if phase3 < clen:
                    w = 0.9
                elif phase3 < clen * 2:
                    w = 0.9 - ((phase3 - clen) / clen) * 0.85
                else:
                    w = 0.05
            elif active == prev:
                if phase3 < clen:
                    w = 0.05
                elif phase3 < clen * 2:
                    w = 0.05 + ((phase3 - clen) / clen) * 0.85
                else:
                    w = 0.9
            else:
                # Background sources sit at a low constant weight.
                w = 0.05

            weights.append(w)

        # Normalize so weights sum to 1 (guarded against an all-zero sum).
        total = sum(weights)
        if total > 0:
            weights = [w / total for w in weights]

        return weights

    def get_cycle_zooms(self) -> List[float]:
        """Get zoom amounts for cycle-crossfade.

        Mirrors get_cycle_weights' phase structure: the outgoing source
        zooms in while the incoming one starts big and shrinks to 1.0.
        """
        cycle_state = self.scans.get_emit("cycle")
        active = cycle_state["cycle"]
        beat = cycle_state["beat"]
        clen = cycle_state["clen"]
        n = self.n_sources

        phase3 = beat * 3
        zooms = []

        for p in range(n):
            prev = (p + n - 1) % n

            if active == p:
                if phase3 < clen:
                    z = 1.0
                elif phase3 < clen * 2:
                    z = 1.0 + ((phase3 - clen) / clen) * 1.0
                else:
                    z = 0.1
            elif active == prev:
                if phase3 < clen:
                    z = 3.0  # Start big
                elif phase3 < clen * 2:
                    z = 3.0 - ((phase3 - clen) / clen) * 2.0  # Shrink to 1.0
                else:
                    z = 1.0
            else:
                z = 0.1

            zooms.append(z)

        return zooms

    def get_final_effects(self, energy: float) -> Dict:
        """Get final composition effects (whole-spin, ripple)."""
        ripple_gate = self.scans.get_emit("ripple_gate")
        # Reach into the raw scan state for the ripple center, which the
        # gate's emit value (0/1) does not carry.
        ripple_state = self.scans.scans["ripple_gate"].value

        return {
            "whole_spin_angle": self.scans.get_emit("whole_spin"),
            "ripple_amplitude": ripple_gate * (5 + energy * 45),  # 5-50
            "ripple_cx": ripple_state["cx"],
            "ripple_cy": ripple_state["cy"],
        }
@dataclass
class ScanState:
    """Runtime state for a scan.

    One instance per SCAN node in the compiled recipe; mutated in place on
    every beat by the executor.
    """
    node_id: str            # ID of the SCAN node in the recipe graph
    name: Optional[str]     # human-readable scan name, if the node has one
    value: Any              # current accumulator value (scalar or dict)
    rng: random.Random      # per-scan RNG so scans are independently seeded
    init_expr: dict         # compiled expression producing the initial value
    step_expr: dict         # compiled expression advancing value each beat
    emit_expr: dict         # compiled expression producing the emitted value
class ExprEvaluator:
    """
    Evaluator for compiled expression ASTs.

    An expression is a dict carrying:
    - _expr: True (marks the dict as an expression)
    - op: operation name
    - args: list of arguments
    - name: for 'var' ops
    - keys: for 'dict' ops

    Anything that is not an expression dict evaluates to itself.
    """

    # Two-operand operators: both operands are evaluated first (left to
    # right), then combined.  'and'/'or'/'if' are handled separately
    # because they must short-circuit.
    _BINOPS = {
        '<': lambda a, b: a < b,
        '>': lambda a, b: a > b,
        '<=': lambda a, b: a <= b,
        '>=': lambda a, b: a >= b,
        '=': lambda a, b: a == b,
        '!=': lambda a, b: a != b,
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
        '/': lambda a, b: a / b,
        'mod': lambda a, b: a % b,
    }

    def __init__(self, rng: random.Random = None):
        self.rng = rng or random.Random()

    def eval(self, expr: Any, env: Dict[str, Any]) -> Any:
        """Evaluate *expr* in *env* and return its value."""
        # Literals and plain (non-expression) dicts pass through untouched.
        if not isinstance(expr, dict) or not expr.get('_expr'):
            return expr

        op = expr.get('op')
        args = expr.get('args', [])

        # Strict binary operators via the shared table.
        if op in self._BINOPS:
            left = self.eval(args[0], env)
            right = self.eval(args[1], env)
            return self._BINOPS[op](left, right)

        if op == 'var':
            name = expr.get('name')
            if name in env:
                return env[name]
            raise KeyError(f"Unknown variable: {name}")

        if op == 'dict':
            keys = expr.get('keys', [])
            return dict(zip(keys, (self.eval(a, env) for a in args)))

        if op == 'get':
            obj = self.eval(args[0], env)
            key = args[1]
            return obj.get(key) if isinstance(obj, dict) else obj[key]

        if op == 'if':
            # Lazy: only the taken branch is evaluated.
            if self.eval(args[0], env):
                return self.eval(args[1], env)
            if len(args) > 2:
                return self.eval(args[2], env)
            return None

        # Random-number operations (draw from this evaluator's RNG).
        if op == 'rand':
            return self.rng.random()
        if op == 'rand-int':
            return self.rng.randint(self.eval(args[0], env),
                                    self.eval(args[1], env))
        if op == 'rand-range':
            return self.rng.uniform(self.eval(args[0], env),
                                    self.eval(args[1], env))

        # Short-circuiting logic via generator arguments.
        if op == 'and':
            return all(self.eval(a, env) for a in args)
        if op == 'or':
            return any(self.eval(a, env) for a in args)
        if op == 'not':
            return not self.eval(args[0], env)

        raise ValueError(f"Unknown operation: {op}")
class SexpStreamingExecutor:
    """
    Executes a compiled sexp recipe in streaming mode.

    Reads scan definitions, effect chains, and bindings from the
    compiled recipe and executes them frame-by-frame.

    The recipe is expected to expose ``.nodes`` — a list of node dicts
    with at least 'id', 'type', 'config' and 'inputs' keys — as produced
    by the recipe compiler.
    """

    def __init__(self, compiled_recipe, seed: int = 42):
        self.recipe = compiled_recipe
        self.master_seed = seed

        # Build node lookup
        self.nodes = {n['id']: n for n in compiled_recipe.nodes}

        # State (must be initialized before _init_scans)
        self.beat_count = 0
        self.current_time = 0.0
        self.last_beat_time = 0.0
        self.last_beat_detected = False
        self.energy = 0.0

        # Initialize scans
        self.scans: Dict[str, ScanState] = {}
        self.scan_outputs: Dict[str, Any] = {}  # Current emit values by node_id
        self._init_scans()

        # Initialize SLICE_ON interpreter
        self.sexp_interp = SexpInterpreter(random.Random(seed))
        self._slice_on_lambda = None
        self._slice_on_acc = None
        self._slice_on_result = None  # Last evaluation result {layers, compose, acc}
        self._init_slice_on()

    def _init_slice_on(self):
        """Initialize SLICE_ON Lambda for cycle crossfade.

        Uses the first SLICE_ON node found; any others are ignored.
        """
        for node in self.recipe.nodes:
            if node.get('type') == 'SLICE_ON':
                config = node.get('config', {})
                self._slice_on_lambda = config.get('fn')
                init = config.get('init', {})
                self._slice_on_acc = {
                    'cycle': init.get('cycle', 0),
                    'beat': init.get('beat', 0),
                    'clen': init.get('clen', 60),
                }
                # Evaluate initial state
                self._eval_slice_on()
                break

    def _eval_slice_on(self):
        """Evaluate the SLICE_ON Lambda with current state.

        On success stores the result dict in ``self._slice_on_result`` and
        threads the returned 'acc' into the next evaluation.  Errors are
        reported to stderr and leave the previous result in place.
        """
        if not self._slice_on_lambda:
            return

        n = len(self._get_video_sources())
        videos = list(range(n))  # Placeholder video indices

        try:
            result = eval_slice_on_lambda(
                self._slice_on_lambda,
                self._slice_on_acc,
                self.beat_count,
                0.0,  # start time (not used for weights)
                1.0,  # end time (not used for weights)
                videos,
                self.sexp_interp,
            )
            self._slice_on_result = result
            # Update accumulator for next beat
            if 'acc' in result:
                self._slice_on_acc = result['acc']
        except Exception as e:
            import sys
            print(f"SLICE_ON eval error: {e}", file=sys.stderr)

    def _init_scans(self):
        """Initialize all scan nodes from the recipe."""
        seed_offset = 0
        for node in self.recipe.nodes:
            if node.get('type') == 'SCAN':
                node_id = node['id']
                config = node.get('config', {})

                # Create RNG with unique seed (explicit per-scan seed wins
                # over master_seed + running offset).
                scan_seed = config.get('seed', self.master_seed + seed_offset)
                rng = random.Random(scan_seed)
                seed_offset += 1

                # Evaluate initial value
                init_expr = config.get('init', 0)
                evaluator = ExprEvaluator(rng)
                init_value = evaluator.eval(init_expr, {})

                self.scans[node_id] = ScanState(
                    node_id=node_id,
                    name=node.get('name'),
                    value=init_value,
                    rng=rng,
                    init_expr=init_expr,
                    step_expr=config.get('step_expr', {}),
                    emit_expr=config.get('emit_expr', {}),
                )

                # Compute initial emit
                self._update_emit(node_id)

    def _update_emit(self, node_id: str):
        """Update the emit value for a scan."""
        scan = self.scans[node_id]
        evaluator = ExprEvaluator(scan.rng)

        # Build environment from current state
        env = self._build_scan_env(scan)

        # Evaluate emit expression
        emit_value = evaluator.eval(scan.emit_expr, env)
        self.scan_outputs[node_id] = emit_value

    def _build_scan_env(self, scan: ScanState) -> Dict[str, Any]:
        """Build environment for scan expression evaluation.

        Dict-valued state is splatted into the env so step/emit
        expressions can reference its keys directly; scalar state is
        exposed as 'acc'.
        """
        env = {}

        # Add state variables
        if isinstance(scan.value, dict):
            env.update(scan.value)
        else:
            env['acc'] = scan.value

        # Add beat count
        env['beat_count'] = self.beat_count
        env['time'] = self.current_time

        return env

    def on_beat(self):
        """Update all scans on a beat."""
        self.beat_count += 1

        # Estimate beat interval
        # NOTE(review): beat_interval is computed but never used in this
        # method — confirm whether a consumer was removed or is missing.
        beat_interval = self.current_time - self.last_beat_time if self.last_beat_time > 0 else 0.5
        self.last_beat_time = self.current_time

        # Step each scan
        for node_id, scan in self.scans.items():
            evaluator = ExprEvaluator(scan.rng)
            env = self._build_scan_env(scan)

            # Evaluate step expression
            new_value = evaluator.eval(scan.step_expr, env)
            scan.value = new_value

            # Update emit
            self._update_emit(node_id)

        # Step the cycle state
        self._step_cycle()

    def on_frame(self, energy: float, is_beat: bool, t: float = 0.0):
        """Called each frame with audio analysis."""
        self.current_time = t
        self.energy = energy

        # Update scans on beat (edge detection)
        if is_beat and not self.last_beat_detected:
            self.on_beat()
        self.last_beat_detected = is_beat

    def resolve_binding(self, binding: dict) -> Any:
        """Resolve a binding to get the current value.

        A binding is a dict with '_binding': True, a 'source' node id, an
        optional 'feature' key, and an optional (lo, hi) 'range' mapping.
        Non-binding values pass through unchanged.
        """
        if not isinstance(binding, dict) or not binding.get('_binding'):
            return binding

        source_id = binding.get('source')
        feature = binding.get('feature', 'values')
        range_map = binding.get('range')

        # Get the raw value
        if source_id in self.scan_outputs:
            value = self.scan_outputs[source_id]
        else:
            # Might be an analyzer reference - use energy as fallback
            value = self.energy

        # Extract feature if value is a dict
        if isinstance(value, dict) and feature in value:
            value = value[feature]

        # Apply range mapping (assumes value is normalized 0-1 here —
        # TODO confirm against the compiler's binding contract)
        if range_map and isinstance(value, (int, float)):
            lo, hi = range_map
            value = lo + value * (hi - lo)

        return value

    # NOTE(review): this method is redefined below with a different
    # signature (source_idx, clip, energy); in Python the later definition
    # wins, so this version is unreachable via normal attribute lookup.
    def get_effect_params(self, effect_node: dict) -> Dict[str, Any]:
        """Get resolved parameters for an effect node."""
        config = effect_node.get('config', {})
        params = {}

        for key, value in config.items():
            # Skip internal fields
            if key in ('effect', 'effect_path', 'effect_cid', 'effects_registry', 'analysis_refs'):
                continue

            # Resolve bindings
            params[key] = self.resolve_binding(value)

        return params

    def get_scan_value(self, name: str) -> Any:
        """Get scan output by name."""
        for node_id, scan in self.scans.items():
            if scan.name == name:
                return self.scan_outputs.get(node_id)
        return None

    def get_all_scan_values(self) -> Dict[str, Any]:
        """Get all named scan outputs."""
        result = {}
        for node_id, scan in self.scans.items():
            if scan.name:
                result[scan.name] = self.scan_outputs.get(node_id)
        return result

    # === Compositor interface methods ===

    def _get_video_sources(self) -> List[str]:
        """Get list of video source node IDs."""
        sources = []
        for node in self.recipe.nodes:
            if node.get('type') == 'SOURCE':
                sources.append(node['id'])
        # Filter to video only (exclude audio - last one is usually audio)
        # Look at file extensions in the paths
        return sources[:-1] if len(sources) > 1 else sources

    def _trace_effect_chain(self, start_id: str, stop_at_blend: bool = True) -> List[dict]:
        """Trace effect chain from a node, returning effects in order."""
        chain = []
        current_id = start_id

        for _ in range(20):  # Max depth
            # Find node that uses current as input
            next_node = None
            for node in self.recipe.nodes:
                if current_id in node.get('inputs', []):
                    if node.get('type') == 'EFFECT':
                        effect_type = node.get('config', {}).get('effect')
                        chain.append(node)
                        if stop_at_blend and effect_type == 'blend':
                            return chain
                        next_node = node
                        break
                    elif node.get('type') == 'SEGMENT':
                        next_node = node
                        break

            if next_node is None:
                break
            current_id = next_node['id']

        return chain

    def _find_clip_chains(self, source_idx: int) -> tuple:
        """Find effect chains for clip A and B from a source.

        Returns:
            (chain_a, chain_b) lists of EFFECT nodes; either may be empty
            when the source/segment/branches cannot be located.
        """
        sources = self._get_video_sources()
        if source_idx >= len(sources):
            return [], []

        source_id = sources[source_idx]

        # Find SEGMENT node
        segment_id = None
        for node in self.recipe.nodes:
            if node.get('type') == 'SEGMENT' and source_id in node.get('inputs', []):
                segment_id = node['id']
                break

        if not segment_id:
            return [], []

        # Find the two effect chains from segment (clip A and clip B)
        chains = []
        for node in self.recipe.nodes:
            if segment_id in node.get('inputs', []) and node.get('type') == 'EFFECT':
                # NOTE(review): 'chain' below is computed but unused — the
                # branch walk that follows supersedes it.
                chain = self._trace_effect_chain(segment_id)
                # Get chain starting from this specific branch
                branch_chain = [node]
                current = node['id']
                for _ in range(10):
                    found = False
                    for n in self.recipe.nodes:
                        if current in n.get('inputs', []) and n.get('type') == 'EFFECT':
                            branch_chain.append(n)
                            if n.get('config', {}).get('effect') == 'blend':
                                break
                            current = n['id']
                            found = True
                            break
                    if not found:
                        break
                chains.append(branch_chain)

        # Return first two chains as A and B
        chain_a = chains[0] if len(chains) > 0 else []
        chain_b = chains[1] if len(chains) > 1 else []
        return chain_a, chain_b

    def get_effect_params(self, source_idx: int, clip: str, energy: float) -> Dict:
        """Get effect parameters for a source clip (compositor interface).

        NOTE(review): shadows the earlier get_effect_params(effect_node)
        defined above in this class.
        """
        # Get the correct chain for this clip
        chain_a, chain_b = self._find_clip_chains(source_idx)
        chain = chain_a if clip == 'a' else chain_b

        # Default params
        params = {
            "rotate_angle": 0,
            "zoom_amount": 1.0,
            "invert_amount": 0,
            "hue_degrees": 0,
            "ascii_mix": 0,
            "ascii_char_size": 8,
        }

        # Resolve from effects in chain
        for eff in chain:
            config = eff.get('config', {})
            effect_type = config.get('effect')

            if effect_type == 'rotate':
                angle_binding = config.get('angle')
                if angle_binding:
                    if isinstance(angle_binding, dict) and angle_binding.get('_binding'):
                        # Bound to analyzer - use energy with range
                        range_map = angle_binding.get('range')
                        if range_map:
                            lo, hi = range_map
                            params["rotate_angle"] = lo + energy * (hi - lo)
                        else:
                            params["rotate_angle"] = self.resolve_binding(angle_binding)
                    else:
                        params["rotate_angle"] = angle_binding if isinstance(angle_binding, (int, float)) else 0

            elif effect_type == 'zoom':
                amount_binding = config.get('amount')
                if amount_binding:
                    if isinstance(amount_binding, dict) and amount_binding.get('_binding'):
                        range_map = amount_binding.get('range')
                        if range_map:
                            lo, hi = range_map
                            params["zoom_amount"] = lo + energy * (hi - lo)
                        else:
                            params["zoom_amount"] = self.resolve_binding(amount_binding)
                    else:
                        params["zoom_amount"] = amount_binding if isinstance(amount_binding, (int, float)) else 1.0

            elif effect_type == 'invert':
                amount_binding = config.get('amount')
                if amount_binding:
                    val = self.resolve_binding(amount_binding)
                    params["invert_amount"] = val if isinstance(val, (int, float)) else 0

            elif effect_type == 'hue_shift':
                deg_binding = config.get('degrees')
                if deg_binding:
                    val = self.resolve_binding(deg_binding)
                    params["hue_degrees"] = val if isinstance(val, (int, float)) else 0

            elif effect_type == 'ascii_art':
                mix_binding = config.get('mix')
                if mix_binding:
                    val = self.resolve_binding(mix_binding)
                    params["ascii_mix"] = val if isinstance(val, (int, float)) else 0
                size_binding = config.get('char_size')
                if size_binding:
                    if isinstance(size_binding, dict) and size_binding.get('_binding'):
                        range_map = size_binding.get('range')
                        if range_map:
                            lo, hi = range_map
                            params["ascii_char_size"] = lo + energy * (hi - lo)

        return params

    def get_pair_params(self, source_idx: int) -> Dict:
        """Get blend and rotation params for a video pair (compositor interface)."""
        params = {
            "blend_opacity": 0.5,
            "pair_rotation": 0,
        }

        # Find the blend node for this source
        chain_a, _ = self._find_clip_chains(source_idx)

        # The last effect in chain_a should be the blend
        blend_node = None
        for eff in reversed(chain_a):
            if eff.get('config', {}).get('effect') == 'blend':
                blend_node = eff
                break

        if blend_node:
            config = blend_node.get('config', {})
            opacity_binding = config.get('opacity')
            if opacity_binding:
                val = self.resolve_binding(opacity_binding)
                if isinstance(val, (int, float)):
                    params["blend_opacity"] = val

            # Find rotate after blend (pair rotation)
            blend_id = blend_node['id']
            for node in self.recipe.nodes:
                if blend_id in node.get('inputs', []) and node.get('type') == 'EFFECT':
                    if node.get('config', {}).get('effect') == 'rotate':
                        angle_binding = node.get('config', {}).get('angle')
                        if angle_binding:
                            val = self.resolve_binding(angle_binding)
                            if isinstance(val, (int, float)):
                                params["pair_rotation"] = val
                        break

        return params

    def _get_cycle_state(self) -> dict:
        """Get current cycle state from SLICE_ON or internal tracking.

        Lazily initialized from the first SLICE_ON node's init config;
        falls back to a default cycle when none exists.
        """
        if not hasattr(self, '_cycle_state'):
            # Initialize from SLICE_ON node
            for node in self.recipe.nodes:
                if node.get('type') == 'SLICE_ON':
                    init = node.get('config', {}).get('init', {})
                    self._cycle_state = {
                        'cycle': init.get('cycle', 0),
                        'beat': init.get('beat', 0),
                        'clen': init.get('clen', 60),
                    }
                    break
            else:
                self._cycle_state = {'cycle': 0, 'beat': 0, 'clen': 60}

        return self._cycle_state

    def _step_cycle(self):
        """Step the cycle state forward on beat by evaluating SLICE_ON Lambda."""
        # Use interpreter to evaluate the Lambda
        self._eval_slice_on()

    def get_cycle_weights(self) -> List[float]:
        """Get blend weights for cycle-crossfade from SLICE_ON result."""
        n = len(self._get_video_sources())
        if n == 0:
            return [1.0]

        # Get weights from interpreted result
        if self._slice_on_result:
            compose = self._slice_on_result.get('compose', {})
            weights = compose.get('weights', [])
            if weights and len(weights) == n:
                # Normalize
                total = sum(weights)
                if total > 0:
                    return [w / total for w in weights]

        # Fallback: equal weights
        return [1.0 / n] * n

    def get_cycle_zooms(self) -> List[float]:
        """Get zoom amounts for cycle-crossfade from SLICE_ON result."""
        n = len(self._get_video_sources())
        if n == 0:
            return [1.0]

        # Get zooms from interpreted result (layers -> effects -> zoom amount)
        if self._slice_on_result:
            layers = self._slice_on_result.get('layers', [])
            if layers and len(layers) == n:
                zooms = []
                for layer in layers:
                    effects = layer.get('effects', [])
                    zoom_amt = 1.0
                    for eff in effects:
                        # 'effect' may be a plain string or a Symbol-like
                        # object with a .name attribute.
                        if eff.get('effect') == 'zoom' or (hasattr(eff.get('effect'), 'name') and eff.get('effect').name == 'zoom'):
                            zoom_amt = eff.get('amount', 1.0)
                            break
                    zooms.append(zoom_amt)
                return zooms

        # Fallback
        return [1.0] * n

    # NOTE(review): annotated -> str but returns None when no bound rotate
    # effect exists after SLICE_ON; should be Optional[str].
    def _get_final_rotate_scan_id(self) -> str:
        """Find the scan ID that drives the final rotation (after SLICE_ON).

        Result is cached on the instance after the first lookup.
        """
        if hasattr(self, '_final_rotate_scan_id'):
            return self._final_rotate_scan_id

        # Find SLICE_ON node index
        slice_on_idx = None
        for i, node in enumerate(self.recipe.nodes):
            if node.get('type') == 'SLICE_ON':
                slice_on_idx = i
                break

        # Find rotate effect after SLICE_ON
        if slice_on_idx is not None:
            for node in self.recipe.nodes[slice_on_idx + 1:]:
                if node.get('type') == 'EFFECT':
                    config = node.get('config', {})
                    if config.get('effect') == 'rotate':
                        angle_binding = config.get('angle', {})
                        if isinstance(angle_binding, dict) and angle_binding.get('_binding'):
                            self._final_rotate_scan_id = angle_binding.get('source')
                            return self._final_rotate_scan_id

        self._final_rotate_scan_id = None
        return None

    def get_final_effects(self, energy: float) -> Dict:
        """Get final composition effects (compositor interface)."""
        # Get named scans
        scan_values = self.get_all_scan_values()

        # Whole spin - get from the specific scan bound to final rotate effect
        whole_spin = 0
        final_rotate_scan_id = self._get_final_rotate_scan_id()
        if final_rotate_scan_id and final_rotate_scan_id in self.scan_outputs:
            val = self.scan_outputs[final_rotate_scan_id]
            if isinstance(val, dict) and 'angle' in val:
                whole_spin = val['angle']
            elif isinstance(val, (int, float)):
                whole_spin = val

        # Ripple
        ripple_gate = scan_values.get('ripple-gate', 0)
        ripple_cx = scan_values.get('ripple-cx', 0.5)
        ripple_cy = scan_values.get('ripple-cy', 0.5)

        if isinstance(ripple_gate, dict):
            ripple_gate = ripple_gate.get('gate', 0) if 'gate' in ripple_gate else 1

        return {
            "whole_spin_angle": whole_spin,
            "ripple_amplitude": ripple_gate * (5 + energy * 45),
            "ripple_cx": ripple_cx if isinstance(ripple_cx, (int, float)) else 0.5,
            "ripple_cy": ripple_cy if isinstance(ripple_cy, (int, float)) else 0.5,
        }
class Symbol:
    """A named symbol reference used by the sexp interpreter."""

    def __init__(self, name: str):
        # Identifier text as parsed from the sexp source.
        self.name = name

    def __repr__(self):
        return "Symbol({})".format(self.name)
class SexpInterpreter:
    """
    Interprets S-expressions in real-time.

    Handles the full sexp language used in recipes: literals, symbols,
    compiled expression dicts, special forms (let/lambda/if/dict/...),
    arithmetic/comparison/logic operators, and user function calls.
    """

    def __init__(self, rng: random.Random = None):
        self.rng = rng or random.Random()
        # Global bindings visible from every evaluation environment.
        self.globals: Dict[str, Any] = {}

    def eval(self, expr: Any, env: Dict[str, Any] = None) -> Any:
        """Evaluate an expression in the given environment."""
        if env is None:
            env = {}

        # Literals
        if isinstance(expr, (int, float, str, bool)) or expr is None:
            return expr

        # Symbol lookup (accepts both our Symbol and any duck-typed
        # Symbol class from the parser, matched by class name)
        if isinstance(expr, Symbol) or (hasattr(expr, 'name') and hasattr(expr, '__class__') and expr.__class__.__name__ == 'Symbol'):
            name = expr.name if hasattr(expr, 'name') else str(expr)
            if name in env:
                return env[name]
            if name in self.globals:
                return self.globals[name]
            raise NameError(f"Undefined symbol: {name}")

        # Compiled expression dict (from compiler)
        if isinstance(expr, dict):
            if expr.get('_expr'):
                return self._eval_compiled_expr(expr, env)
            # Plain dict - evaluate values that might be expressions
            result = {}
            for k, v in expr.items():
                # Some keys should keep Symbol values as strings (effect names, modes)
                if k in ('effect', 'mode') and hasattr(v, 'name'):
                    result[k] = v.name
                else:
                    result[k] = self.eval(v, env)
            return result

        # List expression (sexp)
        if isinstance(expr, (list, tuple)) and len(expr) > 0:
            return self._eval_list(expr, env)

        # Empty list
        if isinstance(expr, (list, tuple)):
            return []

        return expr

    def _eval_compiled_expr(self, expr: dict, env: Dict[str, Any]) -> Any:
        """Evaluate a compiled expression dict (op/args form)."""
        op = expr.get('op')
        args = expr.get('args', [])

        if op == 'var':
            name = expr.get('name')
            if name in env:
                return env[name]
            if name in self.globals:
                return self.globals[name]
            raise NameError(f"Undefined: {name}")

        elif op == 'dict':
            keys = expr.get('keys', [])
            values = [self.eval(a, env) for a in args]
            return dict(zip(keys, values))

        elif op == 'get':
            obj = self.eval(args[0], env)
            # Key is taken literally (not evaluated) in compiled form.
            key = args[1]
            return obj.get(key) if isinstance(obj, dict) else obj[key]

        elif op == 'if':
            cond = self.eval(args[0], env)
            if cond:
                return self.eval(args[1], env)
            elif len(args) > 2:
                return self.eval(args[2], env)
            return None

        # Comparison
        elif op == '<':
            return self.eval(args[0], env) < self.eval(args[1], env)
        elif op == '>':
            return self.eval(args[0], env) > self.eval(args[1], env)
        elif op == '<=':
            return self.eval(args[0], env) <= self.eval(args[1], env)
        elif op == '>=':
            return self.eval(args[0], env) >= self.eval(args[1], env)
        elif op == '=':
            return self.eval(args[0], env) == self.eval(args[1], env)
        elif op == '!=':
            return self.eval(args[0], env) != self.eval(args[1], env)

        # Arithmetic
        elif op == '+':
            return self.eval(args[0], env) + self.eval(args[1], env)
        elif op == '-':
            return self.eval(args[0], env) - self.eval(args[1], env)
        elif op == '*':
            return self.eval(args[0], env) * self.eval(args[1], env)
        elif op == '/':
            return self.eval(args[0], env) / self.eval(args[1], env)
        elif op == 'mod':
            return self.eval(args[0], env) % self.eval(args[1], env)

        # Random
        elif op == 'rand':
            return self.rng.random()
        elif op == 'rand-int':
            return self.rng.randint(self.eval(args[0], env), self.eval(args[1], env))
        elif op == 'rand-range':
            return self.rng.uniform(self.eval(args[0], env), self.eval(args[1], env))

        # Logic
        elif op == 'and':
            return all(self.eval(a, env) for a in args)
        elif op == 'or':
            return any(self.eval(a, env) for a in args)
        elif op == 'not':
            return not self.eval(args[0], env)

        else:
            raise ValueError(f"Unknown op: {op}")

    def _eval_list(self, expr: list, env: Dict[str, Any]) -> Any:
        """Evaluate a list expression (sexp form).

        The head determines dispatch: special form, built-in operator, or
        a function call resolved through env/globals.
        """
        if len(expr) == 0:
            return []

        head = expr[0]

        # Get head name
        if isinstance(head, Symbol) or (hasattr(head, 'name') and hasattr(head, '__class__')):
            head_name = head.name if hasattr(head, 'name') else str(head)
        elif isinstance(head, str):
            head_name = head
        else:
            # Not a symbol - check if it's a data list or function call
            if isinstance(head, dict):
                # List of dicts - evaluate each element as data
                return [self.eval(item, env) for item in expr]
            # Otherwise evaluate as function call
            fn = self.eval(head, env)
            args = [self.eval(a, env) for a in expr[1:]]
            return self._call(fn, args, env)

        # Special forms
        if head_name == 'let':
            return self._eval_let(expr, env)
        elif head_name in ('lambda', 'fn'):
            return self._eval_lambda(expr, env)
        elif head_name == 'if':
            return self._eval_if(expr, env)
        elif head_name == 'dict':
            return self._eval_dict(expr, env)
        elif head_name == 'get':
            obj = self.eval(expr[1], env)
            key = self.eval(expr[2], env) if len(expr) > 2 else expr[2]
            if isinstance(key, str):
                return obj.get(key) if isinstance(obj, dict) else getattr(obj, key, None)
            return obj[key]
        elif head_name == 'len':
            return len(self.eval(expr[1], env))
        elif head_name == 'range':
            start = self.eval(expr[1], env)
            end = self.eval(expr[2], env) if len(expr) > 2 else start
            # One-argument form: (range n) -> [0..n-1]
            if len(expr) == 2:
                return list(range(end))
            return list(range(start, end))
        elif head_name == 'map':
            fn = self.eval(expr[1], env)
            lst = self.eval(expr[2], env)
            return [self._call(fn, [x], env) for x in lst]
        elif head_name == 'mod':
            return self.eval(expr[1], env) % self.eval(expr[2], env)

        # Arithmetic
        elif head_name == '+':
            return self.eval(expr[1], env) + self.eval(expr[2], env)
        elif head_name == '-':
            # Unary minus when only one operand is given.
            if len(expr) == 2:
                return -self.eval(expr[1], env)
            return self.eval(expr[1], env) - self.eval(expr[2], env)
        elif head_name == '*':
            return self.eval(expr[1], env) * self.eval(expr[2], env)
        elif head_name == '/':
            return self.eval(expr[1], env) / self.eval(expr[2], env)

        # Comparison
        elif head_name == '<':
            return self.eval(expr[1], env) < self.eval(expr[2], env)
        elif head_name == '>':
            return self.eval(expr[1], env) > self.eval(expr[2], env)
        elif head_name == '<=':
            return self.eval(expr[1], env) <= self.eval(expr[2], env)
        elif head_name == '>=':
            return self.eval(expr[1], env) >= self.eval(expr[2], env)
        elif head_name == '=':
            return self.eval(expr[1], env) == self.eval(expr[2], env)

        # Logic
        elif head_name == 'and':
            return all(self.eval(a, env) for a in expr[1:])
        elif head_name == 'or':
            return any(self.eval(a, env) for a in expr[1:])
        elif head_name == 'not':
            return not self.eval(expr[1], env)

        # Function call
        else:
            fn = env.get(head_name) or self.globals.get(head_name)
            if fn is None:
                raise NameError(f"Undefined function: {head_name}")
            args = [self.eval(a, env) for a in expr[1:]]
            return self._call(fn, args, env)

    def _eval_let(self, expr: list, env: Dict[str, Any]) -> Any:
        """Evaluate (let [bindings...] body).

        Bindings are sequential (let*-style): each value expression sees
        the names bound before it.
        """
        bindings = expr[1]
        body = expr[2]

        # Create new environment with bindings
        new_env = dict(env)

        # Process bindings in pairs
        i = 0
        while i < len(bindings):
            name = bindings[i]
            if isinstance(name, Symbol) or hasattr(name, 'name'):
                name = name.name if hasattr(name, 'name') else str(name)
            value = self.eval(bindings[i + 1], new_env)
            new_env[name] = value
            i += 2

        return self.eval(body, new_env)

    def _eval_lambda(self, expr: list, env: Dict[str, Any]) -> Lambda:
        """Evaluate (lambda [params] body) into a Lambda closure."""
        params_expr = expr[1]
        body = expr[2]

        # Extract parameter names
        params = []
        for p in params_expr:
            if isinstance(p, Symbol) or hasattr(p, 'name'):
                params.append(p.name if hasattr(p, 'name') else str(p))
            else:
                params.append(str(p))

        # Closure captures a snapshot of the current environment.
        return Lambda(params=params, body=body, closure=dict(env))

    def _eval_if(self, expr: list, env: Dict[str, Any]) -> Any:
        """Evaluate (if cond then else); else-branch is optional."""
        cond = self.eval(expr[1], env)
        if cond:
            return self.eval(expr[2], env)
        elif len(expr) > 3:
            return self.eval(expr[3], env)
        return None

    def _eval_dict(self, expr: list, env: Dict[str, Any]) -> dict:
        """Evaluate (dict :key val ...) into a plain Python dict."""
        result = {}
        i = 1
        while i < len(expr):
            key = expr[i]
            # Handle keyword syntax (:key) and Keyword objects
            if hasattr(key, 'name'):
                key = key.name
            elif hasattr(key, '__class__') and key.__class__.__name__ == 'Keyword':
                key = str(key).lstrip(':')
            elif isinstance(key, str) and key.startswith(':'):
                key = key[1:]
            value = self.eval(expr[i + 1], env)
            result[key] = value
            i += 2
        return result

    def _call(self, fn: Any, args: List[Any], env: Dict[str, Any]) -> Any:
        """Call a function value with positional arguments."""
        if isinstance(fn, Lambda):
            # Our own Lambda type: evaluate body in closure + bound params.
            call_env = dict(fn.closure)
            for param, arg in zip(fn.params, args):
                call_env[param] = arg
            return self.eval(fn.body, call_env)
        elif hasattr(fn, 'params') and hasattr(fn, 'body'):
            # Lambda from parser (artdag.sexp.parser.Lambda)
            # NOTE(review): this path seeds the call env from the CALLER's
            # env (then overlays the closure), unlike the branch above.
            call_env = dict(env)
            if hasattr(fn, 'closure') and fn.closure:
                call_env.update(fn.closure)
            # Get param names
            params = []
            for p in fn.params:
                if hasattr(p, 'name'):
                    params.append(p.name)
                else:
                    params.append(str(p))
            for param, arg in zip(params, args):
                call_env[param] = arg
            return self.eval(fn.body, call_env)
        elif callable(fn):
            return fn(*args)
        else:
            raise TypeError(f"Not callable: {type(fn).__name__}")
expr[i] + # Handle keyword syntax (:key) and Keyword objects + if hasattr(key, 'name'): + key = key.name + elif hasattr(key, '__class__') and key.__class__.__name__ == 'Keyword': + key = str(key).lstrip(':') + elif isinstance(key, str) and key.startswith(':'): + key = key[1:] + value = self.eval(expr[i + 1], env) + result[key] = value + i += 2 + return result + + def _call(self, fn: Any, args: List[Any], env: Dict[str, Any]) -> Any: + """Call a function with arguments.""" + if isinstance(fn, Lambda): + # Our own Lambda type + call_env = dict(fn.closure) + for param, arg in zip(fn.params, args): + call_env[param] = arg + return self.eval(fn.body, call_env) + elif hasattr(fn, 'params') and hasattr(fn, 'body'): + # Lambda from parser (artdag.sexp.parser.Lambda) + call_env = dict(env) + if hasattr(fn, 'closure') and fn.closure: + call_env.update(fn.closure) + # Get param names + params = [] + for p in fn.params: + if hasattr(p, 'name'): + params.append(p.name) + else: + params.append(str(p)) + for param, arg in zip(params, args): + call_env[param] = arg + return self.eval(fn.body, call_env) + elif callable(fn): + return fn(*args) + else: + raise TypeError(f"Not callable: {type(fn).__name__}") + + +def eval_slice_on_lambda(lambda_obj, acc: dict, i: int, start: float, end: float, + videos: list, interp: SexpInterpreter = None) -> dict: + """ + Evaluate a SLICE_ON lambda function. 
    Args:
        lambda_obj: The Lambda object from the compiled recipe
        acc: Current accumulator state
        i: Beat index
        start: Slice start time
        end: Slice end time
        videos: List of video inputs
        interp: Interpreter to use (a fresh SexpInterpreter if None)

    Returns:
        Dict with 'layers', 'compose', 'acc' keys
    """
    if interp is None:
        interp = SexpInterpreter()

    # Set up global 'videos' for (len videos) to work
    interp.globals['videos'] = videos

    # Build initial environment with lambda parameters
    env = dict(lambda_obj.closure) if hasattr(lambda_obj, 'closure') and lambda_obj.closure else {}
    env['videos'] = videos

    # Call the lambda with the standard SLICE_ON argument order
    result = interp._call(lambda_obj, [acc, i, start, end], env)

    return result
diff --git a/streaming/sources.py b/streaming/sources.py
new file mode 100644
index 0000000..71e7e53
--- /dev/null
+++ b/streaming/sources.py
@@ -0,0 +1,281 @@
"""
Video and image sources with looping support.
"""

import numpy as np
import subprocess
import json
from pathlib import Path
from typing import Optional, Tuple
from abc import ABC, abstractmethod


class Source(ABC):
    """Abstract base class for frame sources.

    Concrete sources (video files, still images, live capture) expose a
    uniform time-indexed frame API plus duration/size/fps metadata.
    """

    @abstractmethod
    def read_frame(self, t: float) -> np.ndarray:
        """Read frame at time t (with looping if needed)."""
        pass

    @property
    @abstractmethod
    def duration(self) -> float:
        """Source duration in seconds."""
        pass

    @property
    @abstractmethod
    def size(self) -> Tuple[int, int]:
        """Frame size as (width, height)."""
        pass

    @property
    @abstractmethod
    def fps(self) -> float:
        """Frames per second."""
        pass


class VideoSource(Source):
    """
    Video file source with automatic looping.

    Reads frames on-demand, seeking as needed. When time exceeds
    duration, wraps around (loops).
+ """ + + def __init__(self, path: str, target_fps: float = 30): + self.path = Path(path) + self.target_fps = target_fps + + # Initialize decode state first (before _probe which could fail) + self._process: Optional[subprocess.Popen] = None + self._current_start: Optional[float] = None + self._frame_buffer: Optional[np.ndarray] = None + self._buffer_time: Optional[float] = None + + self._duration = None + self._size = None + self._fps = None + + if not self.path.exists(): + raise FileNotFoundError(f"Video not found: {path}") + + self._probe() + + def _probe(self): + """Get video metadata.""" + cmd = [ + "ffprobe", "-v", "quiet", + "-print_format", "json", + "-show_format", "-show_streams", + str(self.path) + ] + result = subprocess.run(cmd, capture_output=True, text=True) + data = json.loads(result.stdout) + + # Get duration + self._duration = float(data["format"]["duration"]) + + # Get video stream info + for stream in data["streams"]: + if stream["codec_type"] == "video": + self._size = (int(stream["width"]), int(stream["height"])) + # Parse fps from r_frame_rate (e.g., "30/1" or "30000/1001") + fps_parts = stream.get("r_frame_rate", "30/1").split("/") + self._fps = float(fps_parts[0]) / float(fps_parts[1]) + break + + @property + def duration(self) -> float: + return self._duration + + @property + def size(self) -> Tuple[int, int]: + return self._size + + @property + def fps(self) -> float: + return self._fps + + def _start_decode(self, start_time: float): + """Start ffmpeg decode process from given time.""" + if self._process: + try: + self._process.stdout.close() + except: + pass + self._process.terminate() + try: + self._process.wait(timeout=1) + except: + self._process.kill() + self._process.wait() + + w, h = self._size + cmd = [ + "ffmpeg", "-v", "quiet", + "-ss", str(start_time), + "-i", str(self.path), + "-f", "rawvideo", + "-pix_fmt", "rgb24", + "-r", str(self.target_fps), + "-" + ] + self._process = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + 
stderr=subprocess.DEVNULL, + bufsize=w * h * 3 * 4, # Buffer a few frames + ) + self._current_start = start_time + self._buffer_time = start_time + + def read_frame(self, t: float) -> np.ndarray: + """ + Read frame at time t. + + If t exceeds duration, wraps around (loops). + Seeks if needed, otherwise reads sequentially. + """ + # Wrap time for looping + t_wrapped = t % self._duration + + # Check if we need to seek (loop point or large time jump) + need_seek = ( + self._process is None or + self._buffer_time is None or + abs(t_wrapped - self._buffer_time) > 1.0 / self.target_fps * 2 + ) + + if need_seek: + self._start_decode(t_wrapped) + + # Read frame + w, h = self._size + frame_size = w * h * 3 + + # Try to read with retries for seek settling + for attempt in range(3): + raw = self._process.stdout.read(frame_size) + if len(raw) == frame_size: + break + # End of stream or seek not ready - restart from beginning + self._start_decode(0) + + if len(raw) < frame_size: + # Still no data - return last frame or black + if self._frame_buffer is not None: + return self._frame_buffer.copy() + return np.zeros((h, w, 3), dtype=np.uint8) + + frame = np.frombuffer(raw, dtype=np.uint8).reshape((h, w, 3)) + self._frame_buffer = frame # Cache for fallback + self._buffer_time = t_wrapped + 1.0 / self.target_fps + + return frame + + def close(self): + """Clean up resources.""" + if self._process: + self._process.terminate() + self._process.wait() + self._process = None + + def __del__(self): + self.close() + + def __repr__(self): + return f"VideoSource({self.path.name}, {self._size[0]}x{self._size[1]}, {self._duration:.1f}s)" + + +class ImageSource(Source): + """ + Static image source (returns same frame for any time). + + Useful for backgrounds, overlays, etc. 
+ """ + + def __init__(self, path: str): + self.path = Path(path) + if not self.path.exists(): + raise FileNotFoundError(f"Image not found: {path}") + + # Load image + import cv2 + self._frame = cv2.imread(str(self.path)) + self._frame = cv2.cvtColor(self._frame, cv2.COLOR_BGR2RGB) + self._size = (self._frame.shape[1], self._frame.shape[0]) + + @property + def duration(self) -> float: + return float('inf') # Images last forever + + @property + def size(self) -> Tuple[int, int]: + return self._size + + @property + def fps(self) -> float: + return 30.0 # Arbitrary + + def read_frame(self, t: float) -> np.ndarray: + return self._frame.copy() + + def __repr__(self): + return f"ImageSource({self.path.name}, {self._size[0]}x{self._size[1]})" + + +class LiveSource(Source): + """ + Live video capture source (webcam, capture card, etc.). + + Time parameter is ignored - always returns latest frame. + """ + + def __init__(self, device: int = 0, size: Tuple[int, int] = (1280, 720), fps: float = 30): + import cv2 + self._cap = cv2.VideoCapture(device) + self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, size[0]) + self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, size[1]) + self._cap.set(cv2.CAP_PROP_FPS, fps) + + # Get actual settings + self._size = ( + int(self._cap.get(cv2.CAP_PROP_FRAME_WIDTH)), + int(self._cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + ) + self._fps = self._cap.get(cv2.CAP_PROP_FPS) + + if not self._cap.isOpened(): + raise RuntimeError(f"Could not open video device {device}") + + @property + def duration(self) -> float: + return float('inf') # Live - no duration + + @property + def size(self) -> Tuple[int, int]: + return self._size + + @property + def fps(self) -> float: + return self._fps + + def read_frame(self, t: float) -> np.ndarray: + """Read latest frame (t is ignored for live sources).""" + import cv2 + ret, frame = self._cap.read() + if not ret: + return np.zeros((self._size[1], self._size[0], 3), dtype=np.uint8) + return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + + def 
close(self): + self._cap.release() + + def __del__(self): + self.close() + + def __repr__(self): + return f"LiveSource({self._size[0]}x{self._size[1]}, {self._fps}fps)" diff --git a/streaming/stream_sexp.py b/streaming/stream_sexp.py new file mode 100644 index 0000000..b36dabf --- /dev/null +++ b/streaming/stream_sexp.py @@ -0,0 +1,1081 @@ +""" +Generic Streaming S-expression Interpreter. + +Executes streaming sexp recipes frame-by-frame. +The sexp defines the pipeline logic - interpreter just provides primitives. + +Primitives: + (read source-name) - read frame from source + (rotate frame :angle N) - rotate frame + (zoom frame :amount N) - zoom frame + (invert frame :amount N) - invert colors + (hue-shift frame :degrees N) - shift hue + (blend frame1 frame2 :opacity N) - blend two frames + (blend-weighted [frames...] [weights...]) - weighted blend + (ripple frame :amplitude N :cx N :cy N ...) - ripple effect + + (bind scan-name :field) - get scan state field + (map value [lo hi]) - map 0-1 value to range + energy - current energy (0-1) + beat - 1 if beat, 0 otherwise + t - current time + beat-count - total beats so far + +Example sexp: + (stream "test" + :fps 30 + (source vid "video.mp4") + (audio aud "music.mp3") + + (scan spin beat + :init {:angle 0 :dir 1} + :step (dict :angle (+ angle (* dir 10)) :dir dir)) + + (frame + (-> (read vid) + (rotate :angle (bind spin :angle)) + (zoom :amount (map energy [1 1.5]))))) +""" + +import sys +import time +import json +import hashlib +import numpy as np +import subprocess +from pathlib import Path +from dataclasses import dataclass, field +from typing import Dict, List, Any, Optional, Tuple, Union + +sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag")) +from artdag.sexp.parser import parse, parse_all, Symbol, Keyword + + +@dataclass +class StreamContext: + """Runtime context for streaming.""" + t: float = 0.0 + frame_num: int = 0 + fps: float = 30.0 + energy: float = 0.0 + is_beat: bool = False + 
class StreamCache:
    """Disk-backed cache for per-run streaming data.

    Buffers analysis samples and scan-state keyframes in memory, then
    appends them to per-name JSON files under ``cache_dir/<recipe_hash>/``
    when flush() is called.
    """

    def __init__(self, cache_dir: Path, recipe_hash: str):
        self.cache_dir = cache_dir / recipe_hash
        self.cache_dir.mkdir(parents=True, exist_ok=True)
        self.analysis_buffer: Dict[str, List] = {}
        self.scan_states: Dict[str, List] = {}
        # Minimum spacing (seconds) between stored scan keyframes.
        self.keyframe_interval = 5.0

    @staticmethod
    def _plain(v):
        # Numpy scalars expose .item(); coerce them to plain floats so the
        # buffered values are JSON-serializable.
        return float(v) if hasattr(v, 'item') else v

    def record_analysis(self, name: str, t: float, value: float):
        """Buffer one (time, value) analysis sample for stream `name`."""
        bucket = self.analysis_buffer.setdefault(name, [])
        bucket.append((self._plain(t), self._plain(value)))

    def record_scan_state(self, name: str, t: float, state: dict):
        """Buffer a scan-state keyframe, throttled to keyframe_interval."""
        keyframes = self.scan_states.setdefault(name, [])
        if keyframes and t - keyframes[-1][0] < self.keyframe_interval:
            return  # Too soon after the previous keyframe - skip.
        snapshot = {k: self._plain(v) for k, v in state.items()}
        keyframes.append((self._plain(t), snapshot))

    def flush(self):
        """Append all buffered data to the JSON files and clear buffers."""
        for prefix, buffer in (("analysis_", self.analysis_buffer),
                               ("scan_", self.scan_states)):
            for name, rows in buffer.items():
                path = self.cache_dir / f"{prefix}{name}.json"
                merged = json.loads(path.read_text()) if path.exists() else []
                merged.extend(rows)
                path.write_text(json.dumps(merged))
            buffer.clear()
json.loads(subprocess.run(cmd, capture_output=True, text=True).stdout)

        # First video stream wins; fall back to 720x720 if none found.
        for s in info.get("streams", []):
            if s.get("codec_type") == "video":
                self.width = s.get("width", 720)
                self.height = s.get("height", 720)
                break
        else:
            self.width, self.height = 720, 720

        self.duration = float(info.get("format", {}).get("duration", 60))
        self.size = (self.width, self.height)

        # Start decoder
        cmd = ["ffmpeg", "-v", "quiet", "-i", str(self.path),
               "-f", "rawvideo", "-pix_fmt", "rgb24", "-r", str(fps), "-"]
        self._proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        self._frame_size = self.width * self.height * 3
        self._current_frame = None

    def read(self) -> Optional[np.ndarray]:
        """Read next frame."""
        data = self._proc.stdout.read(self._frame_size)
        if len(data) < self._frame_size:
            return self._current_frame  # Return last frame if stream ends
        self._current_frame = np.frombuffer(data, dtype=np.uint8).reshape(
            self.height, self.width, 3).copy()
        return self._current_frame

    def skip(self):
        """Read and discard frame (keep pipe in sync)."""
        self._proc.stdout.read(self._frame_size)

    def close(self):
        # Terminate the ffmpeg decoder and reap it.
        if self._proc:
            self._proc.terminate()
            self._proc.wait()


class AudioAnalyzer:
    """Real-time audio analysis.

    Decodes the whole file to mono float32 PCM up front, then answers
    energy/beat queries for arbitrary timestamps.
    """

    def __init__(self, path: str, sample_rate: int = 22050):
        self.path = Path(path)

        # Load audio (mono f32le at `sample_rate` via ffmpeg)
        cmd = ["ffmpeg", "-v", "quiet", "-i", str(self.path),
               "-f", "f32le", "-ac", "1", "-ar", str(sample_rate), "-"]
        self._audio = np.frombuffer(
            subprocess.run(cmd, capture_output=True).stdout, dtype=np.float32)
        self.sample_rate = sample_rate

        # Get duration
        cmd = ["ffprobe", "-v", "quiet", "-print_format", "json",
               "-show_format", str(self.path)]
        info = json.loads(subprocess.run(cmd, capture_output=True, text=True).stdout)
        self.duration = float(info.get("format", {}).get("duration", 60))

        # Rolling spectral-flux history used for adaptive beat thresholding.
        self._flux_history = []
        self._last_beat_time = -1

    def get_energy(self, t: float) -> float:
        """RMS energy in a ~1024-sample window around `t`, scaled to 0-1."""
        idx = int(t * self.sample_rate)
        start = max(0, idx - 512)
        end = min(len(self._audio), idx + 512)
        if start >= end:
            return 0.0
        # x3 gain, clamped to 1.0
        return min(1.0, np.sqrt(np.mean(self._audio[start:end] ** 2)) * 3.0)

    def get_beat(self, t: float) -> bool:
        """Spectral-flux onset detection at time `t`.

        Compares the windowed spectrum at `t` against a window 512 samples
        earlier; a beat fires when the positive flux exceeds an adaptive
        mean+std threshold, with a 0.1s refractory period. Stateful: expects
        monotonically increasing `t` across calls.
        """
        idx = int(t * self.sample_rate)
        size = 2048

        start, end = max(0, idx - size//2), min(len(self._audio), idx + size//2)
        if end - start < size//2:
            return False
        curr = self._audio[start:end]

        pstart, pend = max(0, start - 512), max(0, end - 512)
        if pend <= pstart:
            return False
        prev = self._audio[pstart:pend]

        curr_spec = np.abs(np.fft.rfft(curr * np.hanning(len(curr))))
        prev_spec = np.abs(np.fft.rfft(prev * np.hanning(len(prev))))

        # Positive spectral flux, normalized by bin count.
        n = min(len(curr_spec), len(prev_spec))
        flux = np.sum(np.maximum(0, curr_spec[:n] - prev_spec[:n])) / (n + 1)

        # Keep ~1.5s of flux history for the adaptive threshold.
        self._flux_history.append((t, flux))
        while self._flux_history and self._flux_history[0][0] < t - 1.5:
            self._flux_history.pop(0)

        if len(self._flux_history) < 3:
            return False

        vals = [f for _, f in self._flux_history]
        threshold = np.mean(vals) + np.std(vals) * 0.3 + 0.001

        # 0.1s refractory period between beats.
        is_beat = flux > threshold and t - self._last_beat_time > 0.1
        if is_beat:
            self._last_beat_time = t
        return is_beat


class StreamInterpreter:
    """
    Generic streaming sexp interpreter.

    Evaluates the frame pipeline expression each frame.
    """

    def __init__(self, sexp_path: str, cache_dir: str = None):
        """Parse the recipe at `sexp_path` and set up empty runtime state.

        Sources/effects/scans are not created here; _init() does that.
        The cache directory is keyed by a hash of the recipe text so edits
        to the recipe get a fresh cache.
        """
        self.sexp_path = Path(sexp_path)
        self.sexp_dir = self.sexp_path.parent

        text = self.sexp_path.read_text()
        self.ast = parse(text)

        self.config = self._parse_config()

        recipe_hash = hashlib.sha256(text.encode()).hexdigest()[:16]
        cache_path = Path(cache_dir) if cache_dir else self.sexp_dir / ".stream_cache"
        self.cache = StreamCache(cache_path, recipe_hash)

        self.ctx = StreamContext(fps=self.config.get('fps', 30))
        self.sources: Dict[str, VideoSource] = {}
        self.frames: Dict[str, np.ndarray] = {}  # Current frame per source
        self._sources_read: set = set()  # Track which sources read this frame
        self.audios: Dict[str, AudioAnalyzer] = {}  # Multiple named audio sources
        self.audio_paths: Dict[str, str] = {}
        self.audio_state: Dict[str, dict] = {}  # Per-audio: {energy, is_beat, beat_count, last_beat}
        self.scans: Dict[str, dict] = {}

        # Registries for external definitions
        self.primitives: Dict[str, Any] = {}  # name -> Python function
        self.effects: Dict[str, dict] = {}  # name -> {params, body}
        self.macros: Dict[str, dict] = {}  # name -> {params, body}
        self.primitive_lib_dir = self.sexp_dir.parent / "sexp_effects" / "primitive_libs"

        self.frame_pipeline = None  # The (frame ...) expression

        # Seeded RNG so (rand ...) forms are reproducible per recipe.
        import random
        self.rng = random.Random(self.config.get('seed', 42))

    def _parse_config(self) -> dict:
        """Parse config from (stream name :key val ...).

        Returns defaults {'fps': 30, 'seed': 42} when the AST does not
        start with a (stream ...) form. Scanning stops at the first
        sub-list, which is taken to begin the body forms.
        """
        config = {'fps': 30, 'seed': 42}
        if not self.ast or not isinstance(self.ast[0], Symbol):
            return config
        if self.ast[0].name != 'stream':
            return config

        # ast[1] is the stream name; keyword/value pairs follow it.
        i = 2
        while i < len(self.ast):
            if isinstance(self.ast[i], Keyword):
                config[self.ast[i].name] = self.ast[i + 1] if i + 1 < len(self.ast) else None
                i += 2
            elif isinstance(self.ast[i], list):
                break
            else:
                i += 1
        return config

    def _load_primitives(self, lib_name: str):
        """Load primitives from a Python library file.

        Looks for `<lib_name>.py` in several candidate directories, then
        registers every `prim_*` function under multiple aliases
        (underscore, dash, and dash + '-img' suffix), plus anything in an
        optional module-level PRIMITIVES dict.
        """
        import importlib.util

        # Try multiple paths
        lib_paths = [
            self.primitive_lib_dir / f"{lib_name}.py",
            self.sexp_dir / "primitive_libs" / f"{lib_name}.py",
            self.sexp_dir.parent / "sexp_effects" / "primitive_libs" / f"{lib_name}.py",
        ]

        lib_path = None
        for p in lib_paths:
            if p.exists():
                lib_path = p
                break

        if not lib_path:
            print(f"Warning: primitive library '{lib_name}' not found", file=sys.stderr)
            return

        spec = importlib.util.spec_from_file_location(lib_name, lib_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)

        # Extract all prim_* functions
        count = 0
        for name in dir(module):
            if name.startswith('prim_'):
                func = getattr(module, name)
                prim_name = name[5:]  # Remove 'prim_' prefix
                self.primitives[prim_name] = func
                # Also register with dashes instead of underscores
                dash_name = prim_name.replace('_', '-')
                self.primitives[dash_name] = func
                # Also register with -img suffix (sexp convention)
                self.primitives[dash_name + '-img'] = func
                count += 1

        # Also check for PRIMITIVES dict (some modules use this for additional exports)
        if hasattr(module, 'PRIMITIVES'):
            prims = getattr(module, 'PRIMITIVES')
            if isinstance(prims, dict):
                for name, func in prims.items():
                    self.primitives[name] = func
                    # Also register underscore version
                    underscore_name = name.replace('-', '_')
                    self.primitives[underscore_name] = func
                    count += 1

        print(f"Loaded primitives: {lib_name} ({count} functions)", file=sys.stderr)

    def _load_effect(self, effect_path: Path):
        """Load and register an effect from a .sexp file.

        Handles three top-level forms: (require-primitives "lib"),
        (define-effect name :params [...] body), and
        (defmacro name (params...) body). Unknown forms are ignored.
        """
        if not effect_path.exists():
            print(f"Warning: effect file not found: {effect_path}", file=sys.stderr)
            return

        text = effect_path.read_text()
        ast = parse_all(text)

        for form in ast:
            if not isinstance(form, list) or not form:
                continue
            if not isinstance(form[0], Symbol):
                continue

            cmd = form[0].name

            if cmd == 'require-primitives':
                lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"')
                self._load_primitives(lib_name)

            elif cmd == 'define-effect':
                name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
                params = {}
                body = None

                i = 2
                while i < len(form):
                    if isinstance(form[i], Keyword):
                        if form[i].name == 'params' and i + 1 < len(form):
                            # Parse params list: each entry is
                            # (pname :default v :min v ...) -> pdef dict.
                            params_list = form[i + 1]
                            for p in params_list:
                                if isinstance(p, list) and p:
                                    pname = p[0].name if isinstance(p[0], Symbol) else str(p[0])
                                    pdef = {'default': 0}
                                    j = 1
                                    while j < len(p):
                                        if isinstance(p[j], Keyword):
                                            pdef[p[j].name] = p[j + 1] if j + 1 < len(p) else None
                                            j += 2
                                        else:
                                            j += 1
                                    params[pname] = pdef
                            i += 2
                        else:
                            i += 2
                    else:
                        # Body expression
                        body = form[i]
                        i += 1

                self.effects[name] = {'params': params, 'body': body, 'path': str(effect_path)}
                print(f"Effect: {name}", file=sys.stderr)

            elif cmd == 'defmacro':
                name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
                params = []
                body = None

                if len(form) > 2 and isinstance(form[2], list):
                    params = [p.name if isinstance(p, Symbol) else str(p) for p in form[2]]
                if len(form) > 3:
                    body = form[3]

                self.macros[name] = {'params': params, 'body': body}
                print(f"Macro: {name}", file=sys.stderr)

    def 
_init(self):
        """Initialize sources, scans, and pipeline from sexp.

        Walks the top-level forms of the recipe AST and instantiates
        everything the frame loop needs: primitive libs, effect files,
        video/audio sources, scan state machines, and the (frame ...)
        pipeline expression. Output size is taken from the first source.
        """
        for form in self.ast:
            if not isinstance(form, list) or not form:
                continue
            if not isinstance(form[0], Symbol):
                continue

            cmd = form[0].name

            # === External loading ===

            if cmd == 'require-primitives':
                lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"')
                self._load_primitives(lib_name)

            elif cmd == 'effect':
                # (effect name :path "...")
                name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
                i = 2
                while i < len(form):
                    if isinstance(form[i], Keyword) and form[i].name == 'path':
                        path = str(form[i + 1]).strip('"')
                        full = (self.sexp_dir / path).resolve()
                        self._load_effect(full)
                        i += 2
                    else:
                        i += 1

            elif cmd == 'include':
                # (include :path "...")
                i = 1
                while i < len(form):
                    if isinstance(form[i], Keyword) and form[i].name == 'path':
                        path = str(form[i + 1]).strip('"')
                        full = (self.sexp_dir / path).resolve()
                        self._load_effect(full)  # Reuse effect loader for includes
                        i += 2
                    else:
                        i += 1

            # === Sources ===

            elif cmd == 'source':
                name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
                path = str(form[2]).strip('"')
                full = (self.sexp_dir / path).resolve()
                if full.exists():
                    self.sources[name] = VideoSource(str(full), self.ctx.fps)
                    print(f"Source: {name} -> {full}", file=sys.stderr)
                else:
                    print(f"Warning: {full} not found", file=sys.stderr)

            elif cmd == 'audio':
                name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
                path = str(form[2]).strip('"')
                full = (self.sexp_dir / path).resolve()
                if full.exists():
                    self.audios[name] = AudioAnalyzer(str(full))
                    self.audio_paths[name] = str(full)
                    self.audio_state[name] = {'energy': 0.0, 'is_beat': False, 'beat_count': 0, 'last_beat': False}
                    print(f"Audio: {name} -> {full}", file=sys.stderr)

            elif cmd == 'scan':
                name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
                # Trigger can be:
                #   (beat audio-name) - trigger on beat from specific audio
                #   beat - legacy: trigger on beat from first audio
                trigger_expr = form[2]
                if isinstance(trigger_expr, list) and len(trigger_expr) >= 2:
                    # (beat audio-name)
                    trigger_type = trigger_expr[0].name if isinstance(trigger_expr[0], Symbol) else str(trigger_expr[0])
                    trigger_audio = trigger_expr[1].name if isinstance(trigger_expr[1], Symbol) else str(trigger_expr[1])
                    trigger = (trigger_type, trigger_audio)
                else:
                    # Legacy bare symbol
                    trigger = trigger_expr.name if isinstance(trigger_expr, Symbol) else str(trigger_expr)

                init_val, step_expr = {}, None
                i = 3
                while i < len(form):
                    if isinstance(form[i], Keyword):
                        if form[i].name == 'init' and i + 1 < len(form):
                            init_val = self._eval(form[i + 1], {})
                        elif form[i].name == 'step' and i + 1 < len(form):
                            # Step expression is kept unevaluated; it runs
                            # against the current state on each trigger.
                            step_expr = form[i + 1]
                        i += 2
                    else:
                        i += 1

                self.scans[name] = {
                    'state': dict(init_val) if isinstance(init_val, dict) else {'acc': init_val},
                    'init': init_val,
                    'step': step_expr,
                    'trigger': trigger,
                }
                trigger_str = f"{trigger[0]} {trigger[1]}" if isinstance(trigger, tuple) else trigger
                print(f"Scan: {name} (on {trigger_str})", file=sys.stderr)

            elif cmd == 'frame':
                # (frame expr) - the pipeline to evaluate each frame
                self.frame_pipeline = form[1] if len(form) > 1 else None

        # Set output size from first source
        if self.sources:
            first = next(iter(self.sources.values()))
            self.ctx.output_size = first.size

    def _eval(self, expr, env: dict) -> Any:
        """Evaluate an expression.

        Dispatch order matters: self-evaluating atoms, Symbols, dict
        literals, env closures, then the built-in special forms, then
        registered effects, primitives and macros. Unknown forms are
        returned unevaluated.
        """
        import cv2

        # Primitives (self-evaluating atoms)
        if isinstance(expr, (int, float)):
            return expr
        if isinstance(expr, str):
            return expr
        if isinstance(expr, Symbol):
            name = expr.name
            # Built-in values
            if name == 't' or name == '_time':
                return self.ctx.t
            if name == 'pi':
                import math
                return math.pi
            if name == 'true':
                return True
            if name == 'false':
                return False
            if name == 'nil':
                return None
            # Environment lookup
            if name in env:
                return env[name]
            # Scan state lookup
            if name in self.scans:
                return self.scans[name]['state']
            # Unknown symbols evaluate to 0 rather than raising.
            return 0

        if isinstance(expr, Keyword):
            return expr.name

        if not isinstance(expr, list) or not expr:
            return expr

        # Dict literal {:key val ...}
        if isinstance(expr[0], Keyword):
            result = {}
            i = 0
            while i < len(expr):
                if isinstance(expr[i], Keyword):
                    result[expr[i].name] = self._eval(expr[i + 1], env) if i + 1 < len(expr) else None
                    i += 2
                else:
                    i += 1
            return result

        head = expr[0]
        if not isinstance(head, Symbol):
            # Non-symbol head: treat the whole list as data and map-eval it.
            return [self._eval(e, env) for e in expr]

        op = head.name
        args = expr[1:]

        # Check if op is a closure in environment
        if op in env:
            val = env[op]
            if isinstance(val, dict) and val.get('_type') == 'closure':
                # Invoke closure: args evaluated in the caller env, body in
                # the captured closure env.
                closure = val
                closure_env = dict(closure['env'])
                for i, pname in enumerate(closure['params']):
                    closure_env[pname] = self._eval(args[i], env) if i < len(args) else None
                return self._eval(closure['body'], closure_env)

        # Threading macro
        if op == '->':
            result = self._eval(args[0], env)
            for form in args[1:]:
                if isinstance(form, list) and form:
                    # Insert result as first arg
                    new_form = [form[0], result] + form[1:]
                    result = self._eval(new_form, env)
                else:
                    result = self._eval([form, result], env)
            return result

        # === Audio analysis (explicit) ===

        if op == 'energy':
            # (energy audio-name) - get current energy from named audio
            audio_name = args[0].name if isinstance(args[0], Symbol) else str(args[0])
            if audio_name in self.audio_state:
                return self.audio_state[audio_name]['energy']
            return 0.0

        if op == 'beat':
            # (beat audio-name) - 1 if beat this frame, 0 otherwise
            audio_name = args[0].name if isinstance(args[0], Symbol) else str(args[0])
            if audio_name in self.audio_state:
                return 1.0 if self.audio_state[audio_name]['is_beat'] else 0.0
            return 0.0

        if op == 'beat-count':
            # (beat-count audio-name) - total beats from named audio
            audio_name = args[0].name if isinstance(args[0], Symbol) else str(args[0])
            if audio_name in self.audio_state:
                return self.audio_state[audio_name]['beat_count']
            return 0

        # === Frame operations ===

        if op == 'read':
            # (read source-name) - get current frame from source (lazy read)
            name = args[0].name if isinstance(args[0], Symbol) else str(args[0])
            if name not in self.frames:
                if name in self.sources:
                    self.frames[name] = self.sources[name].read()
                    self._sources_read.add(name)
            return self.frames.get(name)

        # === Binding and mapping ===

        if op == 'bind':
            # (bind scan-name :field) or (bind scan-name)
            scan_name = args[0].name if isinstance(args[0], Symbol) else str(args[0])
            field = None
            if len(args) > 1 and isinstance(args[1], Keyword):
                field = args[1].name

            if scan_name in self.scans:
                state = self.scans[scan_name]['state']
                if field:
                    return state.get(field, 0)
                return state
            return 0

        if op == 'map':
            # (map value [lo hi]) - linear map of a 0-1 value into [lo, hi]
            val = self._eval(args[0], env)
            range_list = self._eval(args[1], env) if len(args) > 1 else [0, 1]
            if isinstance(range_list, list) and len(range_list) >= 2:
                lo, hi = range_list[0], range_list[1]
                return lo + val * (hi - lo)
            return val

        # === Arithmetic ===
        # Division/modulo by zero return 0 instead of raising.

        if op == '+':
            return sum(self._eval(a, env) for a in args)
        if op == '-':
            vals = [self._eval(a, env) for a in args]
            return vals[0] - sum(vals[1:]) if len(vals) > 1 else -vals[0]
        if op == '*':
            result = 1
            for a in args:
                result *= self._eval(a, env)
            return result
        if op == '/':
            vals = [self._eval(a, env) for a in args]
            return vals[0] / vals[1] if len(vals) > 1 and vals[1] != 0 else 0
        if op == 'mod':
            vals = [self._eval(a, env) for a in args]
            return vals[0] % vals[1] if len(vals) > 1 and vals[1] != 0 else 0

        if op == 'map-range':
            # (map-range val from-lo from-hi to-lo to-hi)
            val = self._eval(args[0], env)
            from_lo = self._eval(args[1], env)
            from_hi = self._eval(args[2], env)
            to_lo = self._eval(args[3], env)
            to_hi = self._eval(args[4], env)
            # Normalize val to 0-1 in source range, then scale to target range
            if from_hi == from_lo:
                return to_lo
            t = (val - from_lo) / (from_hi - from_lo)
            return to_lo + t * (to_hi - to_lo)

        # === Comparison ===

        if op == '<':
            return self._eval(args[0], env) < self._eval(args[1], env)
        if op == '>':
            return self._eval(args[0], env) > self._eval(args[1], env)
        if op == '=':
            return self._eval(args[0], env) == self._eval(args[1], env)
        if op == '<=':
            return self._eval(args[0], env) <= self._eval(args[1], env)
        if op == '>=':
            return self._eval(args[0], env) >= self._eval(args[1], env)

        if op == 'and':
            # Short-circuits on the first falsy argument.
            for arg in args:
                if not self._eval(arg, env):
                    return False
            return True

        if op == 'or':
            # Lisp-style or: returns first truthy value, or last value if none truthy
            result = False
            for arg in args:
                result = self._eval(arg, env)
                if result:
                    return result
            return result

        if op == 'not':
            return not self._eval(args[0], env)

        # === Logic ===

        if op == 'if':
            cond = self._eval(args[0], env)
            if cond:
                return self._eval(args[1], env)
            return self._eval(args[2], env) if len(args) > 2 else None

        if op == 'cond':
            # (cond pred1 expr1 pred2 expr2 ... true else-expr)
            i = 0
            while i < len(args) - 1:
                pred = self._eval(args[i], env)
                if pred:
                    return self._eval(args[i + 1], env)
                i += 2
            return None

        if op == 'lambda':
            # (lambda (params...) body) - create a closure
            params = args[0]
            body = args[1]
            param_names = [p.name if isinstance(p, Symbol) else str(p) for p in params]
            # Return a closure dict that captures the current env
            return {'_type': 'closure', 'params': param_names, 'body': body, 'env': dict(env)}

        if op == 'let' or op == 'let*':
            # Support both formats:
            #   (let [name val name val ...] body) - flat vector
            #   (let ((name val) (name val) ...) body) - nested list
            # Note: our let already evaluates sequentially like let*
            bindings = args[0]
            body = args[1]
            new_env = dict(env)

            if bindings and isinstance(bindings[0], list):
                # Nested format: ((name val) (name val) ...)
                for binding in bindings:
                    if isinstance(binding, list) and len(binding) >= 2:
                        name = binding[0].name if isinstance(binding[0], Symbol) else str(binding[0])
                        val = self._eval(binding[1], new_env)
                        new_env[name] = val
            else:
                # Flat format: [name val name val ...]
                i = 0
                while i < len(bindings):
                    name = bindings[i].name if isinstance(bindings[i], Symbol) else str(bindings[i])
                    val = self._eval(bindings[i + 1], new_env)
                    new_env[name] = val
                    i += 2
            return self._eval(body, new_env)

        # === Random ===
        # All randomness goes through self.rng (seeded) for reproducibility.

        if op == 'rand':
            return self.rng.random()
        if op == 'rand-int':
            lo = int(self._eval(args[0], env))
            hi = int(self._eval(args[1], env))
            return self.rng.randint(lo, hi)
        if op == 'rand-range':
            lo = self._eval(args[0], env)
            hi = self._eval(args[1], env)
            return lo + self.rng.random() * (hi - lo)

        # === Dict ===

        if op == 'dict':
            result = {}
            i = 0
            while i < len(args):
                if isinstance(args[i], Keyword):
                    result[args[i].name] = self._eval(args[i + 1], env) if i + 1 < len(args) else None
                    i += 2
                else:
                    i += 1
            return result

        if op == 'get':
            d = self._eval(args[0], env)
            key = args[1].name if isinstance(args[1], Keyword) else self._eval(args[1], env)
            if isinstance(d, dict):
                return d.get(key, 0)
            return 0

        # === List ===

        if op == 'list':
            return [self._eval(a, env) for a in args]

        if op == 'nth':
            lst = self._eval(args[0], env)
            idx = int(self._eval(args[1], env))
            if isinstance(lst, list) and 0 <= idx < len(lst):
                return lst[idx]
            return None

        if op == 'len':
            lst = self._eval(args[0], env)
            return len(lst) if isinstance(lst, (list, dict, str)) else 0

        # === External effects ===
        if op in self.effects:
            effect = self.effects[op]
            effect_env = dict(env)
            effect_env['t'] = self.ctx.t

            # Set defaults for all params
            param_names = list(effect['params'].keys())
            for pname, pdef in effect['params'].items():
                effect_env[pname] = pdef.get('default', 0)

            # Parse args: first is frame, then positional params, then kwargs
            positional_idx = 0
            i = 0
            while i < len(args):
                if isinstance(args[i], Keyword):
                    # Keyword arg
                    pname = args[i].name
                    if pname in effect['params'] and i + 1 < len(args):
                        effect_env[pname] = self._eval(args[i + 1], env)
                    i += 2
                else:
                    # Positional arg
                    val = self._eval(args[i], env)
                    if positional_idx == 0:
                        effect_env['frame'] = val
                    elif positional_idx - 1 < len(param_names):
                        effect_env[param_names[positional_idx - 1]] = val
                    positional_idx += 1
                    i += 1

            return self._eval(effect['body'], effect_env)

        # === External primitives ===
        if op in self.primitives:
            prim_func = self.primitives[op]
            # Evaluate all args, splitting keyword args into kwargs
            evaluated_args = []
            kwargs = {}
            i = 0
            while i < len(args):
                if isinstance(args[i], Keyword):
                    k = args[i].name
                    v = self._eval(args[i + 1], env) if i + 1 < len(args) else None
                    kwargs[k] = v
                    i += 2
                else:
                    evaluated_args.append(self._eval(args[i], env))
                    i += 1
            # Call primitive; errors are reported but never abort the frame.
            try:
                if kwargs:
                    return prim_func(*evaluated_args, **kwargs)
                return prim_func(*evaluated_args)
            except Exception as e:
                print(f"Primitive {op} error: {e}", file=sys.stderr)
                return None

        # === Macros ===
        if op in self.macros:
            macro = self.macros[op]
            # Bind macro params to args (unevaluated)
            macro_env = dict(env)
            for i, pname in enumerate(macro['params']):
                macro_env[pname] = args[i] if i < len(args) else None
            # Expand and evaluate
            return self._eval(macro['body'], macro_env)

        # === Primitive-style call (name-with-dashes -> prim_name_with_underscores) ===
        prim_name = op.replace('-', '_')
        if prim_name in self.primitives:
            prim_func = self.primitives[prim_name]
            evaluated_args = []
            kwargs = {}
            i = 0
            while i < len(args):
                if isinstance(args[i], Keyword):
                    k = args[i].name.replace('-', '_')
                    v = self._eval(args[i + 1], env) if i + 1 < len(args) else None
                    kwargs[k] = v
                    i += 2
                else:
                    evaluated_args.append(self._eval(args[i], env))
                    i += 1
            try:
                if kwargs:
                    return prim_func(*evaluated_args, **kwargs)
                return prim_func(*evaluated_args)
            except Exception as e:
                print(f"Primitive {op} error: {e}", file=sys.stderr)
                return None

        # Unknown - return as-is
        return expr

    def _step_scans(self):
        """Step scans on beat from specific audio.

        For each scan whose trigger audio reported a beat this frame,
        evaluate the step expression against the current state (plus
        beat_count/t/energy) and record a keyframe in the cache.
        """
        for name, scan in self.scans.items():
            trigger = scan['trigger']

            # Check if this scan should step
            should_step = False
            audio_name = None

            if isinstance(trigger, tuple) and trigger[0] == 'beat':
                # Explicit: (beat audio-name)
                audio_name = trigger[1]
                if audio_name in self.audio_state:
                    should_step = self.audio_state[audio_name]['is_beat']
            elif trigger == 'beat':
                # Legacy: use first audio
                if self.audio_state:
                    audio_name = next(iter(self.audio_state))
                    should_step = self.audio_state[audio_name]['is_beat']

            if should_step and audio_name:
                state = self.audio_state[audio_name]
                # Step expression sees the scan state fields plus context.
                env = dict(scan['state'])
                env['beat_count'] = state['beat_count']
                env['t'] = self.ctx.t
                env['energy'] = state['energy']

                if scan['step']:
                    new_state = self._eval(scan['step'], env)
                    if isinstance(new_state, dict):
                        scan['state'] = new_state
                    elif new_state is not None:
                        # Non-dict results are wrapped as a single 'acc' field.
                        scan['state'] = {'acc': new_state}

                self.cache.record_scan_state(name, self.ctx.t, scan['state'])

    def run(self, duration: float = None, output: str = "pipe"):
        """Run the streaming pipeline."""
        from .output import PipeOutput, DisplayOutput, FileOutput

        self._init()

        if not self.sources:
            print("Error: no sources", file=sys.stderr)
            return

        if not self.frame_pipeline:
            print("Error: no (frame ...)
pipeline defined", file=sys.stderr) + return + + w, h = self.ctx.output_size + + # Duration from first audio or default + if duration is None: + if self.audios: + first_audio = next(iter(self.audios.values())) + duration = first_audio.duration + else: + duration = 60.0 + + n_frames = int(duration * self.ctx.fps) + frame_time = 1.0 / self.ctx.fps + + print(f"Streaming {n_frames} frames @ {self.ctx.fps}fps", file=sys.stderr) + + # Use first audio for playback sync + first_audio_path = next(iter(self.audio_paths.values())) if self.audio_paths else None + + # Output + if output == "pipe": + out = PipeOutput(size=(w, h), fps=self.ctx.fps, + audio_source=first_audio_path) + elif output == "preview": + out = DisplayOutput(size=(w, h), fps=self.ctx.fps, + audio_source=first_audio_path) + else: + out = FileOutput(output, size=(w, h), fps=self.ctx.fps, + audio_source=first_audio_path) + + try: + for frame_num in range(n_frames): + if not out.is_open: + print(f"\nOutput closed at {frame_num}", file=sys.stderr) + break + + self.ctx.t = frame_num * frame_time + self.ctx.frame_num = frame_num + + # Update all audio states + for audio_name, analyzer in self.audios.items(): + state = self.audio_state[audio_name] + energy = analyzer.get_energy(self.ctx.t) + is_beat_raw = analyzer.get_beat(self.ctx.t) + is_beat = is_beat_raw and not state['last_beat'] + state['last_beat'] = is_beat_raw + + state['energy'] = energy + state['is_beat'] = is_beat + if is_beat: + state['beat_count'] += 1 + + self.cache.record_analysis(f'{audio_name}_energy', self.ctx.t, energy) + self.cache.record_analysis(f'{audio_name}_beat', self.ctx.t, 1.0 if is_beat else 0.0) + + # Step scans + self._step_scans() + + # Clear frames - will be read lazily + self.frames.clear() + self._sources_read = set() + + # Evaluate pipeline (reads happen on-demand) + result = self._eval(self.frame_pipeline, {}) + + # Skip unread sources to keep pipes in sync + for name, src in self.sources.items(): + if name not in 
self._sources_read: + src.skip() + + # Ensure output size + if result is not None: + import cv2 + if result.shape[:2] != (h, w): + result = cv2.resize(result, (w, h)) + out.write(result, self.ctx.t) + + # Progress + if frame_num % 30 == 0: + pct = 100 * frame_num / n_frames + # Show beats from first audio + total_beats = 0 + if self.audio_state: + first_state = next(iter(self.audio_state.values())) + total_beats = first_state['beat_count'] + print(f"\r{pct:5.1f}% | beats:{total_beats}", + end="", file=sys.stderr) + sys.stderr.flush() + + if frame_num % 300 == 0: + self.cache.flush() + + except KeyboardInterrupt: + print("\nInterrupted", file=sys.stderr) + except Exception as e: + print(f"\nError: {e}", file=sys.stderr) + import traceback + traceback.print_exc() + finally: + out.close() + for src in self.sources.values(): + src.close() + self.cache.flush() + + print("\nDone", file=sys.stderr) + + +def run_stream(sexp_path: str, duration: float = None, output: str = "pipe", fps: float = None): + """Run a streaming sexp.""" + interp = StreamInterpreter(sexp_path) + if fps: + interp.ctx.fps = fps + interp.run(duration=duration, output=output) + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser(description="Run streaming sexp") + parser.add_argument("sexp", help="Path to .sexp file") + parser.add_argument("-d", "--duration", type=float, default=None) + parser.add_argument("-o", "--output", default="pipe") + parser.add_argument("--fps", type=float, default=None, help="Override fps (default: from sexp)") + args = parser.parse_args() + + run_stream(args.sexp, duration=args.duration, output=args.output, fps=args.fps) diff --git a/streaming/stream_sexp_generic.py b/streaming/stream_sexp_generic.py new file mode 100644 index 0000000..2f8d4b2 --- /dev/null +++ b/streaming/stream_sexp_generic.py @@ -0,0 +1,859 @@ +""" +Fully Generic Streaming S-expression Interpreter. + +The interpreter knows NOTHING about video, audio, or any domain. 
+All domain logic comes from primitives loaded via (require-primitives ...). + +Built-in forms: + - Control: if, cond, let, let*, lambda, -> + - Arithmetic: +, -, *, /, mod, map-range + - Comparison: <, >, =, <=, >=, and, or, not + - Data: dict, get, list, nth, len, quote + - Random: rand, rand-int, rand-range + - Scan: bind (access scan state) + +Everything else comes from primitives or effects. + +Context (ctx) is passed explicitly to frame evaluation: + - ctx.t: current time + - ctx.frame-num: current frame number + - ctx.fps: frames per second +""" + +import sys +import time +import json +import hashlib +import math +import numpy as np +from pathlib import Path +from dataclasses import dataclass +from typing import Dict, List, Any, Optional, Tuple + +sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag")) +from artdag.sexp.parser import parse, parse_all, Symbol, Keyword + + +@dataclass +class Context: + """Runtime context passed to frame evaluation.""" + t: float = 0.0 + frame_num: int = 0 + fps: float = 30.0 + + +class StreamInterpreter: + """ + Fully generic streaming sexp interpreter. + + No domain-specific knowledge - just evaluates expressions + and calls primitives. 
+ """ + + def __init__(self, sexp_path: str): + self.sexp_path = Path(sexp_path) + self.sexp_dir = self.sexp_path.parent + + text = self.sexp_path.read_text() + self.ast = parse(text) + + self.config = self._parse_config() + + # Global environment for def bindings + self.globals: Dict[str, Any] = {} + + # Scans + self.scans: Dict[str, dict] = {} + + # Audio playback path (for syncing output) + self.audio_playback: Optional[str] = None + + # Registries for external definitions + self.primitives: Dict[str, Any] = {} + self.effects: Dict[str, dict] = {} + self.macros: Dict[str, dict] = {} + self.primitive_lib_dir = self.sexp_dir.parent / "sexp_effects" / "primitive_libs" + + self.frame_pipeline = None + + # External config files (set before run()) + self.sources_config: Optional[Path] = None + self.audio_config: Optional[Path] = None + + import random + self.rng = random.Random(self.config.get('seed', 42)) + + def _load_config_file(self, config_path): + """Load a config file and process its definitions.""" + config_path = Path(config_path) # Accept str or Path + if not config_path.exists(): + print(f"Warning: config file not found: {config_path}", file=sys.stderr) + return + + text = config_path.read_text() + ast = parse_all(text) + + for form in ast: + if not isinstance(form, list) or not form: + continue + if not isinstance(form[0], Symbol): + continue + + cmd = form[0].name + + if cmd == 'require-primitives': + lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"') + self._load_primitives(lib_name) + + elif cmd == 'def': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + value = self._eval(form[2], self.globals) + self.globals[name] = value + print(f"Config: {name}", file=sys.stderr) + + elif cmd == 'audio-playback': + # Path relative to working directory (consistent with other paths) + path = str(form[1]).strip('"') + self.audio_playback = str(Path(path).resolve()) + print(f"Audio playback: {self.audio_playback}", 
file=sys.stderr) + + def _parse_config(self) -> dict: + """Parse config from (stream name :key val ...).""" + config = {'fps': 30, 'seed': 42, 'width': 720, 'height': 720} + if not self.ast or not isinstance(self.ast[0], Symbol): + return config + if self.ast[0].name != 'stream': + return config + + i = 2 + while i < len(self.ast): + if isinstance(self.ast[i], Keyword): + config[self.ast[i].name] = self.ast[i + 1] if i + 1 < len(self.ast) else None + i += 2 + elif isinstance(self.ast[i], list): + break + else: + i += 1 + return config + + def _load_primitives(self, lib_name: str): + """Load primitives from a Python library file.""" + import importlib.util + + lib_paths = [ + self.primitive_lib_dir / f"{lib_name}.py", + self.sexp_dir / "primitive_libs" / f"{lib_name}.py", + self.sexp_dir.parent / "sexp_effects" / "primitive_libs" / f"{lib_name}.py", + ] + + lib_path = None + for p in lib_paths: + if p.exists(): + lib_path = p + break + + if not lib_path: + print(f"Warning: primitive library '{lib_name}' not found", file=sys.stderr) + return + + spec = importlib.util.spec_from_file_location(lib_name, lib_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + + count = 0 + for name in dir(module): + if name.startswith('prim_'): + func = getattr(module, name) + prim_name = name[5:] + dash_name = prim_name.replace('_', '-') + # Register ONLY with namespace (geometry:ripple-displace) + self.primitives[f"{lib_name}:{dash_name}"] = func + count += 1 + + if hasattr(module, 'PRIMITIVES'): + prims = getattr(module, 'PRIMITIVES') + if isinstance(prims, dict): + for name, func in prims.items(): + # Register ONLY with namespace + dash_name = name.replace('_', '-') + self.primitives[f"{lib_name}:{dash_name}"] = func + count += 1 + + print(f"Loaded primitives: {lib_name} ({count} functions)", file=sys.stderr) + + def _load_effect(self, effect_path: Path): + """Load and register an effect from a .sexp file.""" + if not effect_path.exists(): + 
print(f"Warning: effect file not found: {effect_path}", file=sys.stderr) + return + + text = effect_path.read_text() + ast = parse_all(text) + + for form in ast: + if not isinstance(form, list) or not form: + continue + if not isinstance(form[0], Symbol): + continue + + cmd = form[0].name + + if cmd == 'require-primitives': + lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"') + self._load_primitives(lib_name) + + elif cmd == 'define-effect': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + params = {} + body = None + i = 2 + while i < len(form): + if isinstance(form[i], Keyword): + if form[i].name == 'params' and i + 1 < len(form): + for pdef in form[i + 1]: + if isinstance(pdef, list) and pdef: + pname = pdef[0].name if isinstance(pdef[0], Symbol) else str(pdef[0]) + pinfo = {'default': 0} + j = 1 + while j < len(pdef): + if isinstance(pdef[j], Keyword) and j + 1 < len(pdef): + pinfo[pdef[j].name] = pdef[j + 1] + j += 2 + else: + j += 1 + params[pname] = pinfo + i += 2 + else: + body = form[i] + i += 1 + + self.effects[name] = {'params': params, 'body': body} + print(f"Effect: {name}", file=sys.stderr) + + elif cmd == 'defmacro': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + params = [p.name if isinstance(p, Symbol) else str(p) for p in form[2]] + body = form[3] + self.macros[name] = {'params': params, 'body': body} + + elif cmd == 'effect': + # Handle (effect name :path "...") in included files - recursive + i = 2 + while i < len(form): + if isinstance(form[i], Keyword) and form[i].name == 'path': + path = str(form[i + 1]).strip('"') + # Resolve relative to the file being loaded + full = (effect_path.parent / path).resolve() + self._load_effect(full) + i += 2 + else: + i += 1 + + elif cmd == 'include': + # Handle (include :path "...") in included files - recursive + i = 1 + while i < len(form): + if isinstance(form[i], Keyword) and form[i].name == 'path': + path = str(form[i + 
1]).strip('"') + full = (effect_path.parent / path).resolve() + self._load_effect(full) + i += 2 + else: + i += 1 + + elif cmd == 'scan': + # Handle scans from included files + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + trigger_expr = form[2] + init_val, step_expr = {}, None + i = 3 + while i < len(form): + if isinstance(form[i], Keyword): + if form[i].name == 'init' and i + 1 < len(form): + init_val = self._eval(form[i + 1], self.globals) + elif form[i].name == 'step' and i + 1 < len(form): + step_expr = form[i + 1] + i += 2 + else: + i += 1 + + self.scans[name] = { + 'state': dict(init_val) if isinstance(init_val, dict) else {'acc': init_val}, + 'init': init_val, + 'step': step_expr, + 'trigger': trigger_expr, + } + print(f"Scan: {name}", file=sys.stderr) + + def _init(self): + """Initialize from sexp - load primitives, effects, defs, scans.""" + # Load external config files first (they can override recipe definitions) + if self.sources_config: + self._load_config_file(self.sources_config) + if self.audio_config: + self._load_config_file(self.audio_config) + + for form in self.ast: + if not isinstance(form, list) or not form: + continue + if not isinstance(form[0], Symbol): + continue + + cmd = form[0].name + + if cmd == 'require-primitives': + lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"') + self._load_primitives(lib_name) + + elif cmd == 'effect': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + i = 2 + while i < len(form): + if isinstance(form[i], Keyword) and form[i].name == 'path': + path = str(form[i + 1]).strip('"') + full = (self.sexp_dir / path).resolve() + self._load_effect(full) + i += 2 + else: + i += 1 + + elif cmd == 'include': + i = 1 + while i < len(form): + if isinstance(form[i], Keyword) and form[i].name == 'path': + path = str(form[i + 1]).strip('"') + full = (self.sexp_dir / path).resolve() + self._load_effect(full) + i += 2 + else: + i += 1 + + elif cmd == 
'audio-playback': + # (audio-playback "path") - set audio file for playback sync + # Skip if already set by config file + if self.audio_playback is None: + path = str(form[1]).strip('"') + self.audio_playback = str((self.sexp_dir / path).resolve()) + print(f"Audio playback: {self.audio_playback}", file=sys.stderr) + + elif cmd == 'def': + # (def name expr) - evaluate and store in globals + # Skip if already defined by config file + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + if name in self.globals: + print(f"Def: {name} (from config, skipped)", file=sys.stderr) + continue + value = self._eval(form[2], self.globals) + self.globals[name] = value + print(f"Def: {name}", file=sys.stderr) + + elif cmd == 'defmacro': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + params = [p.name if isinstance(p, Symbol) else str(p) for p in form[2]] + body = form[3] + self.macros[name] = {'params': params, 'body': body} + + elif cmd == 'scan': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + trigger_expr = form[2] + init_val, step_expr = {}, None + i = 3 + while i < len(form): + if isinstance(form[i], Keyword): + if form[i].name == 'init' and i + 1 < len(form): + init_val = self._eval(form[i + 1], self.globals) + elif form[i].name == 'step' and i + 1 < len(form): + step_expr = form[i + 1] + i += 2 + else: + i += 1 + + self.scans[name] = { + 'state': dict(init_val) if isinstance(init_val, dict) else {'acc': init_val}, + 'init': init_val, + 'step': step_expr, + 'trigger': trigger_expr, + } + print(f"Scan: {name}", file=sys.stderr) + + elif cmd == 'frame': + self.frame_pipeline = form[1] if len(form) > 1 else None + + def _eval(self, expr, env: dict) -> Any: + """Evaluate an expression.""" + + # Primitives + if isinstance(expr, (int, float)): + return expr + if isinstance(expr, str): + return expr + if isinstance(expr, bool): + return expr + + if isinstance(expr, Symbol): + name = expr.name + # Built-in constants 
+ if name == 'pi': + return math.pi + if name == 'true': + return True + if name == 'false': + return False + if name == 'nil': + return None + # Environment lookup + if name in env: + return env[name] + # Global lookup + if name in self.globals: + return self.globals[name] + # Scan state lookup + if name in self.scans: + return self.scans[name]['state'] + raise NameError(f"Undefined variable: {name}") + + if isinstance(expr, Keyword): + return expr.name + + if not isinstance(expr, list) or not expr: + return expr + + # Dict literal {:key val ...} + if isinstance(expr[0], Keyword): + result = {} + i = 0 + while i < len(expr): + if isinstance(expr[i], Keyword): + result[expr[i].name] = self._eval(expr[i + 1], env) if i + 1 < len(expr) else None + i += 2 + else: + i += 1 + return result + + head = expr[0] + if not isinstance(head, Symbol): + return [self._eval(e, env) for e in expr] + + op = head.name + args = expr[1:] + + # Check for closure call + if op in env: + val = env[op] + if isinstance(val, dict) and val.get('_type') == 'closure': + closure = val + closure_env = dict(closure['env']) + for i, pname in enumerate(closure['params']): + closure_env[pname] = self._eval(args[i], env) if i < len(args) else None + return self._eval(closure['body'], closure_env) + + if op in self.globals: + val = self.globals[op] + if isinstance(val, dict) and val.get('_type') == 'closure': + closure = val + closure_env = dict(closure['env']) + for i, pname in enumerate(closure['params']): + closure_env[pname] = self._eval(args[i], env) if i < len(args) else None + return self._eval(closure['body'], closure_env) + + # Threading macro + if op == '->': + result = self._eval(args[0], env) + for form in args[1:]: + if isinstance(form, list) and form: + new_form = [form[0], result] + form[1:] + result = self._eval(new_form, env) + else: + result = self._eval([form, result], env) + return result + + # === Binding === + + if op == 'bind': + scan_name = args[0].name if isinstance(args[0], 
Symbol) else str(args[0]) + if scan_name in self.scans: + state = self.scans[scan_name]['state'] + if len(args) > 1: + key = args[1].name if isinstance(args[1], Keyword) else str(args[1]) + return state.get(key, 0) + return state + return 0 + + # === Arithmetic === + + if op == '+': + return sum(self._eval(a, env) for a in args) + if op == '-': + vals = [self._eval(a, env) for a in args] + return vals[0] - sum(vals[1:]) if len(vals) > 1 else -vals[0] + if op == '*': + result = 1 + for a in args: + result *= self._eval(a, env) + return result + if op == '/': + vals = [self._eval(a, env) for a in args] + return vals[0] / vals[1] if len(vals) > 1 and vals[1] != 0 else 0 + if op == 'mod': + vals = [self._eval(a, env) for a in args] + return vals[0] % vals[1] if len(vals) > 1 and vals[1] != 0 else 0 + + # === Comparison === + + if op == '<': + return self._eval(args[0], env) < self._eval(args[1], env) + if op == '>': + return self._eval(args[0], env) > self._eval(args[1], env) + if op == '=': + return self._eval(args[0], env) == self._eval(args[1], env) + if op == '<=': + return self._eval(args[0], env) <= self._eval(args[1], env) + if op == '>=': + return self._eval(args[0], env) >= self._eval(args[1], env) + + if op == 'and': + for arg in args: + if not self._eval(arg, env): + return False + return True + + if op == 'or': + result = False + for arg in args: + result = self._eval(arg, env) + if result: + return result + return result + + if op == 'not': + return not self._eval(args[0], env) + + # === Logic === + + if op == 'if': + cond = self._eval(args[0], env) + if cond: + return self._eval(args[1], env) + return self._eval(args[2], env) if len(args) > 2 else None + + if op == 'cond': + i = 0 + while i < len(args) - 1: + pred = self._eval(args[i], env) + if pred: + return self._eval(args[i + 1], env) + i += 2 + return None + + if op == 'lambda': + params = args[0] + body = args[1] + param_names = [p.name if isinstance(p, Symbol) else str(p) for p in params] + return 
{'_type': 'closure', 'params': param_names, 'body': body, 'env': dict(env)} + + if op == 'let' or op == 'let*': + bindings = args[0] + body = args[1] + new_env = dict(env) + + if bindings and isinstance(bindings[0], list): + for binding in bindings: + if isinstance(binding, list) and len(binding) >= 2: + name = binding[0].name if isinstance(binding[0], Symbol) else str(binding[0]) + val = self._eval(binding[1], new_env) + new_env[name] = val + else: + i = 0 + while i < len(bindings): + name = bindings[i].name if isinstance(bindings[i], Symbol) else str(bindings[i]) + val = self._eval(bindings[i + 1], new_env) + new_env[name] = val + i += 2 + return self._eval(body, new_env) + + # === Dict === + + if op == 'dict': + result = {} + i = 0 + while i < len(args): + if isinstance(args[i], Keyword): + key = args[i].name + val = self._eval(args[i + 1], env) if i + 1 < len(args) else None + result[key] = val + i += 2 + else: + i += 1 + return result + + if op == 'get': + obj = self._eval(args[0], env) + key = args[1].name if isinstance(args[1], Keyword) else self._eval(args[1], env) + if isinstance(obj, dict): + return obj.get(key, 0) + return 0 + + # === List === + + if op == 'list': + return [self._eval(a, env) for a in args] + + if op == 'quote': + return args[0] if args else None + + if op == 'nth': + lst = self._eval(args[0], env) + idx = int(self._eval(args[1], env)) + if isinstance(lst, (list, tuple)) and 0 <= idx < len(lst): + return lst[idx] + return None + + if op == 'len': + val = self._eval(args[0], env) + return len(val) if hasattr(val, '__len__') else 0 + + if op == 'map': + seq = self._eval(args[0], env) + fn = self._eval(args[1], env) + if not isinstance(seq, (list, tuple)): + return [] + # Handle closure (lambda from sexp) + if isinstance(fn, dict) and fn.get('_type') == 'closure': + results = [] + for item in seq: + closure_env = dict(fn['env']) + if fn['params']: + closure_env[fn['params'][0]] = item + results.append(self._eval(fn['body'], closure_env)) + 
return results + # Handle Python callable + if callable(fn): + return [fn(item) for item in seq] + return [] + + # === Effects === + + if op in self.effects: + effect = self.effects[op] + effect_env = dict(env) + + param_names = list(effect['params'].keys()) + for pname, pdef in effect['params'].items(): + effect_env[pname] = pdef.get('default', 0) + + positional_idx = 0 + i = 0 + while i < len(args): + if isinstance(args[i], Keyword): + pname = args[i].name + if pname in effect['params'] and i + 1 < len(args): + effect_env[pname] = self._eval(args[i + 1], env) + i += 2 + else: + val = self._eval(args[i], env) + if positional_idx == 0: + effect_env['frame'] = val + elif positional_idx - 1 < len(param_names): + effect_env[param_names[positional_idx - 1]] = val + positional_idx += 1 + i += 1 + + return self._eval(effect['body'], effect_env) + + # === Primitives === + + if op in self.primitives: + prim_func = self.primitives[op] + evaluated_args = [] + kwargs = {} + i = 0 + while i < len(args): + if isinstance(args[i], Keyword): + k = args[i].name + v = self._eval(args[i + 1], env) if i + 1 < len(args) else None + kwargs[k] = v + i += 2 + else: + evaluated_args.append(self._eval(args[i], env)) + i += 1 + try: + if kwargs: + return prim_func(*evaluated_args, **kwargs) + return prim_func(*evaluated_args) + except Exception as e: + print(f"Primitive {op} error: {e}", file=sys.stderr) + return None + + # === Macros (function-like: args evaluated before binding) === + + if op in self.macros: + macro = self.macros[op] + macro_env = dict(env) + for i, pname in enumerate(macro['params']): + # Evaluate args in calling environment before binding + macro_env[pname] = self._eval(args[i], env) if i < len(args) else None + return self._eval(macro['body'], macro_env) + + # Underscore variant lookup + prim_name = op.replace('-', '_') + if prim_name in self.primitives: + prim_func = self.primitives[prim_name] + evaluated_args = [] + kwargs = {} + i = 0 + while i < len(args): + if 
isinstance(args[i], Keyword): + k = args[i].name.replace('-', '_') + v = self._eval(args[i + 1], env) if i + 1 < len(args) else None + kwargs[k] = v + i += 2 + else: + evaluated_args.append(self._eval(args[i], env)) + i += 1 + + try: + if kwargs: + return prim_func(*evaluated_args, **kwargs) + return prim_func(*evaluated_args) + except Exception as e: + print(f"Primitive {op} error: {e}", file=sys.stderr) + return None + + # Unknown - return as-is + return expr + + def _step_scans(self, ctx: Context, env: dict): + """Step scans based on trigger evaluation.""" + for name, scan in self.scans.items(): + trigger_expr = scan['trigger'] + + # Evaluate trigger in context + should_step = self._eval(trigger_expr, env) + + if should_step: + state = scan['state'] + step_env = dict(state) + step_env.update(env) + + new_state = self._eval(scan['step'], step_env) + if isinstance(new_state, dict): + scan['state'] = new_state + else: + scan['state'] = {'acc': new_state} + + def run(self, duration: float = None, output: str = "pipe"): + """Run the streaming pipeline.""" + # Import output classes - handle both package and direct execution + try: + from .output import PipeOutput, DisplayOutput, FileOutput + except ImportError: + from output import PipeOutput, DisplayOutput, FileOutput + + self._init() + + if not self.frame_pipeline: + print("Error: no (frame ...) 
pipeline defined", file=sys.stderr) + return + + w = self.config.get('width', 720) + h = self.config.get('height', 720) + fps = self.config.get('fps', 30) + + if duration is None: + # Try to get duration from audio if available + for name, val in self.globals.items(): + if hasattr(val, 'duration'): + duration = val.duration + print(f"Using audio duration: {duration:.1f}s", file=sys.stderr) + break + else: + duration = 60.0 + + n_frames = int(duration * fps) + frame_time = 1.0 / fps + + print(f"Streaming {n_frames} frames @ {fps}fps", file=sys.stderr) + + # Create context + ctx = Context(fps=fps) + + # Output (with optional audio sync) + audio = self.audio_playback + if output == "pipe": + out = PipeOutput(size=(w, h), fps=fps, audio_source=audio) + elif output == "preview": + out = DisplayOutput(size=(w, h), fps=fps, audio_source=audio) + else: + out = FileOutput(output, size=(w, h), fps=fps, audio_source=audio) + + try: + frame_times = [] + for frame_num in range(n_frames): + if not out.is_open: + break + + frame_start = time.time() + ctx.t = frame_num * frame_time + ctx.frame_num = frame_num + + # Build frame environment with context + frame_env = { + 'ctx': { + 't': ctx.t, + 'frame-num': ctx.frame_num, + 'fps': ctx.fps, + }, + 't': ctx.t, # Also expose t directly for convenience + 'frame-num': ctx.frame_num, + } + + # Step scans + self._step_scans(ctx, frame_env) + + # Evaluate pipeline + result = self._eval(self.frame_pipeline, frame_env) + + if result is not None and hasattr(result, 'shape'): + out.write(result, ctx.t) + + frame_elapsed = time.time() - frame_start + frame_times.append(frame_elapsed) + + # Progress with timing + if frame_num % 30 == 0: + pct = 100 * frame_num / n_frames + avg_ms = 1000 * sum(frame_times[-30:]) / max(1, len(frame_times[-30:])) + target_ms = 1000 * frame_time + print(f"\r{pct:5.1f}% [{avg_ms:.0f}ms/frame, target {target_ms:.0f}ms]", end="", file=sys.stderr, flush=True) + + finally: + out.close() + print("\nDone", file=sys.stderr) 
+ + +def run_stream(sexp_path: str, duration: float = None, output: str = "pipe", fps: float = None, + sources_config: str = None, audio_config: str = None): + """Run a streaming sexp.""" + interp = StreamInterpreter(sexp_path) + if fps: + interp.config['fps'] = fps + if sources_config: + interp.sources_config = Path(sources_config) + if audio_config: + interp.audio_config = Path(audio_config) + interp.run(duration=duration, output=output) + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser(description="Run streaming sexp (generic interpreter)") + parser.add_argument("sexp", help="Path to .sexp file") + parser.add_argument("-d", "--duration", type=float, default=None) + parser.add_argument("-o", "--output", default="pipe") + parser.add_argument("--fps", type=float, default=None) + parser.add_argument("--sources", dest="sources_config", help="Path to sources config .sexp file") + parser.add_argument("--audio", dest="audio_config", help="Path to audio config .sexp file") + args = parser.parse_args() + + run_stream(args.sexp, duration=args.duration, output=args.output, fps=args.fps, + sources_config=args.sources_config, audio_config=args.audio_config) diff --git a/templates/crossfade-zoom.sexp b/templates/crossfade-zoom.sexp new file mode 100644 index 0000000..fc6d9ad --- /dev/null +++ b/templates/crossfade-zoom.sexp @@ -0,0 +1,25 @@ +;; Crossfade with Zoom Transition +;; +;; Macro for transitioning between two frames with a zoom effect. +;; Active frame zooms out while next frame zooms in. +;; +;; Required context: +;; - zoom effect must be loaded +;; - blend effect must be loaded +;; +;; Parameters: +;; active-frame: current frame +;; next-frame: frame to transition to +;; fade-amt: transition progress (0 = all active, 1 = all next) +;; +;; Usage: +;; (include :path "../templates/crossfade-zoom.sexp") +;; ... 
;;   (crossfade-zoom active-frame next-frame 0.5)

(defmacro crossfade-zoom (active-frame next-frame fade-amt)
  ;; NOTE(review): zoom :amount for the active frame ramps 1.0 -> 2.0 while
  ;; the incoming frame ramps 0.1 -> 1.0 as fade-amt goes 0 -> 1; whether
  ;; amount > 1 reads as "zoom out" depends on the zoom effect's convention
  ;; -- TODO confirm against sexp_effects/effects/zoom.sexp.
  (let [active-zoom (+ 1.0 fade-amt)
        active-zoomed (zoom active-frame :amount active-zoom)
        ;; 0.1 at fade start, exactly 1.0 (natural size) at fade end.
        next-zoom (+ 0.1 (* fade-amt 0.9))
        next-zoomed (zoom next-frame :amount next-zoom)]
    ;; Opacity tracks the transition: fully active at 0, fully next at 1.
    (blend active-zoomed next-zoomed :opacity fade-amt)))
diff --git a/templates/cycle-crossfade.sexp b/templates/cycle-crossfade.sexp new file mode 100644 index 0000000..40a87ca --- /dev/null +++ b/templates/cycle-crossfade.sexp @@ -0,0 +1,65 @@
;; cycle-crossfade template
;;
;; Generalized cycling zoom-crossfade for any number of video layers.
;; Cycles through videos with smooth zoom-based crossfade transitions.
;;
;; Parameters:
;;   beat-data    - beat analysis node (drives timing)
;;   input-videos - list of video nodes to cycle through
;;   init-clen    - initial cycle length in beats
;;
;; NOTE: The parameter is named "input-videos" (not "videos") because
;; template substitution replaces param names everywhere in the AST.
;; The planner's _expand_slice_on injects env["videos"] at plan time,
;; so (len videos) inside the lambda references that injected value.
(deftemplate cycle-crossfade
  (beat-data input-videos init-clen)

  (slice-on beat-data
    :videos input-videos
    ;; Accumulator: which video is active, beat position within the cycle,
    ;; and the current cycle length in beats.
    :init {:cycle 0 :beat 0 :clen init-clen}
    :fn (lambda [acc i start end]
          (let [beat (get acc "beat")
                clen (get acc "clen")
                active (get acc "cycle")
                ;; `videos` is injected by the planner at expansion time
                ;; (see header NOTE) -- not a template parameter.
                n (len videos)
                ;; Transition clock running 3x the beat counter; this splits
                ;; each clen-beat cycle into thirds: hold (phase3 < clen),
                ;; crossfade (clen..2*clen), settled (>= 2*clen).
                ;; TODO confirm this third-splitting is the intended timing.
                phase3 (* beat 3)
                ;; wt: blend weight for layer p (1 = fully visible).
                ;; Active layer fades 1 -> 0 during the crossfade third;
                ;; its successor (whose "prev" is the active layer) fades
                ;; 0 -> 1; all other layers stay at 0.
                wt (lambda [p]
                     (let [prev (mod (+ p (- n 1)) n)]
                       (if (= active p)
                           (if (< phase3 clen) 1.0
                               (if (< phase3 (* clen 2))
                                   (- 1.0 (* (/ (- phase3 clen) clen) 1.0))
                                   0.0))
                           (if (= active prev)
                               (if (< phase3 clen) 0.0
                                   (if (< phase3 (* clen 2))
                                       (* (/ (- phase3 clen) clen) 1.0)
                                       1.0))
                               0.0))))
                ;; zm: zoom amount for layer p, mirroring wt's phases.
                zm (lambda [p]
                     (let [prev (mod (+ p (- n 1)) n)]
                       (if (= active p)
                           ;; Active video: normal -> zoom out during transition -> tiny
                           (if (< phase3 clen) 1.0
                               (if (< phase3 (* clen 2))
                                   (+ 1.0 (* (/ (- phase3 clen) clen) 1.0))
                                   0.1))
                           (if (= active prev)
                               ;; Incoming video: tiny -> zoom in during transition -> normal
                               (if (< phase3 clen) 0.1
                                   (if (< phase3 (* clen 2))
                                       (+ 0.1 (* (/ (- phase3 clen) clen) 0.9))
                                       1.0))
                               0.1))))
                ;; Advance within the cycle, or roll over to the next video
                ;; with a fresh pseudo-random cycle length of 40..80 beats
                ;; (deterministic in the slice index i).
                new-acc (if (< (+ beat 1) clen)
                            (dict :cycle active :beat (+ beat 1) :clen clen)
                            (dict :cycle (mod (+ active 1) n) :beat 0
                                  :clen (+ 40 (mod (* i 7) 41))))]
            ;; Emit one zoomed layer per video plus a weighted alpha compose.
            {:layers (map (lambda [p]
                            {:video p :effects [{:effect zoom :amount (zm p)}]})
                          (range 0 n))
             :compose {:effect blend_multi :mode "alpha"
                       :weights (map (lambda [p] (wt p)) (range 0 n))}
             :acc new-acc}))))
diff --git a/templates/process-pair.sexp b/templates/process-pair.sexp new file mode 100644 index 0000000..6720cd2 --- /dev/null +++ b/templates/process-pair.sexp @@ -0,0 +1,112 @@
;; process-pair template
;;
;; Reusable video-pair processor: takes a single video source, creates two
;; clips (A and B) with opposite rotations and sporadic effects, blends them,
;; and applies a per-pair slow rotation driven by a beat scan.
;;
;; All sporadic triggers (invert, hue-shift, ascii) and pair-level controls
;; (blend opacity, rotation) are defined internally using seed offsets.
;;
;; Parameters:
;;   video     - source video node
;;   energy    - energy analysis node (drives rotation/zoom amounts)
;;   beat-data - beat analysis node (drives sporadic triggers)
;;   rng       - RNG object from (make-rng seed) for auto-derived seeds
;;   rot-dir   - initial rotation direction: 1 (clockwise) or -1 (anti-clockwise)
;;   rot-a/b   - rotation ranges for clip A/B (e.g. [0 45])
;;   zoom-a/b  - zoom ranges for clip A/B (e.g. [1 1.5])

(deftemplate process-pair
  (video energy beat-data rng rot-dir rot-a rot-b zoom-a zoom-b)

  ;; --- Sporadic triggers for clip A ---
  ;; Each trigger is a beat-driven countdown scan: acc > 0 means the effect
  ;; is held on; at 0 it re-rolls the trigger chance each beat.

  ;; Invert: 10% chance per beat, lasts 1-5 beats
  (def inv-a (scan beat-data :rng rng :init 0
    :step (if (> acc 0) (- acc 1) (if (< (rand) 0.1) (rand-int 1 5) 0))
    :emit (if (> acc 0) 1 0)))

  ;; Hue shift: 10% chance, random hue 30-330 deg, lasts 1-5 beats
  ;; (:rem is the remaining-beats countdown; :hue is held for its duration)
  (def hue-a (scan beat-data :rng rng :init (dict :rem 0 :hue 0)
    :step (if (> rem 0)
              (dict :rem (- rem 1) :hue hue)
              (if (< (rand) 0.1)
                  (dict :rem (rand-int 1 5) :hue (rand-range 30 330))
                  (dict :rem 0 :hue 0)))
    :emit (if (> rem 0) hue 0)))

  ;; ASCII art: 5% chance, lasts 1-3 beats
  (def ascii-a (scan beat-data :rng rng :init 0
    :step (if (> acc 0) (- acc 1) (if (< (rand) 0.05) (rand-int 1 3) 0))
    :emit (if (> acc 0) 1 0)))

  ;; --- Sporadic triggers for clip B (offset seeds) ---
  ;; Same shapes as the A triggers; the :rng argument is presumed to derive
  ;; a distinct seed per scan so B fires independently of A -- TODO confirm
  ;; against the scan implementation.

  (def inv-b (scan beat-data :rng rng :init 0
    :step (if (> acc 0) (- acc 1) (if (< (rand) 0.1) (rand-int 1 5) 0))
    :emit (if (> acc 0) 1 0)))

  (def hue-b (scan beat-data :rng rng :init (dict :rem 0 :hue 0)
    :step (if (> rem 0)
              (dict :rem (- rem 1) :hue hue)
              (if (< (rand) 0.1)
                  (dict :rem (rand-int 1 5) :hue (rand-range 30 330))
                  (dict :rem 0 :hue 0)))
    :emit (if (> rem 0) hue 0)))

  (def ascii-b (scan beat-data :rng rng :init 0
    :step (if (> acc 0) (- acc 1) (if (< (rand) 0.05) (rand-int 1 3) 0))
    :emit (if (> acc 0) 1 0)))

  ;; --- Pair-level controls ---

  ;; Internal A/B blend: randomly show A (0), both (0.5), or B (1), every 1-11 beats
  (def pair-mix (scan beat-data :rng rng
    :init (dict :rem 0 :opacity 0.5)
    :step (if (> rem 0)
              (dict :rem (- rem 1) :opacity opacity)
              ;; (rand-int 0 2) * 0.5 picks one of {0, 0.5, 1}.
              (dict :rem (rand-int 1 11) :opacity (* (rand-int 0 2) 0.5)))
    :emit opacity))

  ;; Per-pair rotation: one full rotation every 20-30 beats, alternating direction
  ;; (360/clen degrees are accumulated per beat; at cycle end the direction
  ;; flips and a new cycle length is rolled)
  (def pair-rot (scan beat-data :rng rng
    :init (dict :beat 0 :clen 25 :dir rot-dir :angle 0)
    :step (if (< (+ beat 1) clen)
              (dict :beat (+ beat 1) :clen clen :dir dir
                    :angle (+ angle (* dir (/ 360 clen))))
              (dict :beat 0 :clen (rand-int 20 30) :dir (* dir -1)
                    :angle angle))
    :emit angle))

  ;; --- Clip A processing ---
  (def clip-a (-> video (segment :start 0 :duration (bind energy duration))))
  (def rotated-a (-> clip-a
    (effect rotate :angle (bind energy values :range rot-a))
    (effect zoom :amount (bind energy values :range zoom-a))
    (effect invert :amount (bind inv-a values))
    (effect hue_shift :degrees (bind hue-a values))
    ;; ASCII disabled - too slow without GPU
    ;; (effect ascii_art
    ;;   :char_size (bind energy values :range [4 32])
    ;;   :mix (bind ascii-a values))
    ))

  ;; --- Clip B processing ---
  (def clip-b (-> video (segment :start 0 :duration (bind energy duration))))
  (def rotated-b (-> clip-b
    (effect rotate :angle (bind energy values :range rot-b))
    (effect zoom :amount (bind energy values :range zoom-b))
    (effect invert :amount (bind inv-b values))
    (effect hue_shift :degrees (bind hue-b values))
    ;; ASCII disabled - too slow without GPU
    ;; (effect ascii_art
    ;;   :char_size (bind energy values :range [4 32])
    ;;   :mix (bind ascii-b values))
    ))

  ;; --- Blend A+B and apply pair rotation ---
  ;; The template's value is the fully-processed pair node.
  (-> rotated-a
    (effect blend rotated-b
      :mode "alpha" :opacity (bind pair-mix values) :resize_mode "fit")
    (effect rotate
      :angle (bind pair-rot values))))
diff --git a/templates/scan-oscillating-spin.sexp b/templates/scan-oscillating-spin.sexp new file mode 100644 index 0000000..051f079 --- /dev/null +++ b/templates/scan-oscillating-spin.sexp @@ -0,0 +1,28 @@
;; Oscillating Spin Scan
;;
;; Accumulates rotation angle on each beat, reversing direction
;; periodically for an oscillating effect.
;;
;; Required context:
;;   - music: audio analyzer from (streaming:make-audio-analyzer ...)
;;
;; Provides scan: spin
;;   Bind with: (bind spin :angle)  ;; cumulative rotation angle
;;
;; Behavior:
;;   - Rotates 14.4 degrees per beat (completes 360 in 25 beats)
;;   - After 20-30 beats, reverses direction
;;   - Creates a swinging/oscillating rotation effect
;;
;; Usage:
;;   (include :path "../templates/scan-oscillating-spin.sexp")
;;
;; In frame:
;;   (rotate frame :angle (bind spin :angle))

;; :left counts remaining beats in the current sweep; when it hits 0 the
;; direction flips (the flip beat itself accumulates no angle) and a new
;; sweep length of 20-30 beats is derived from the running beat count.
(scan spin (streaming:audio-beat music t)
  :init {:angle 0 :dir 1 :left 25}
  :step (if (> left 0)
            (dict :angle (+ angle (* dir 14.4)) :dir dir :left (- left 1))
            (dict :angle angle :dir (* dir -1)
                  :left (+ 20 (mod (streaming:audio-beat-count music t) 11)))))
diff --git a/templates/scan-ripple-drops.sexp b/templates/scan-ripple-drops.sexp new file mode 100644 index 0000000..7caf720 --- /dev/null +++ b/templates/scan-ripple-drops.sexp @@ -0,0 +1,41 @@
;; Beat-Triggered Ripple Drops Scan
;;
;; Creates random ripple drops triggered by audio beats.
;; Each drop has a random center position and duration.
;;
;; Required context:
;;   - music: audio analyzer from (streaming:make-audio-analyzer ...)
;;   - core primitives loaded
;;
;; Provides scan: ripple-state
;;   Bind with: (bind ripple-state :gate)  ;; 0 or 1
;;              (bind ripple-state :cx)    ;; center x (0-1)
;;              (bind ripple-state :cy)    ;; center y (0-1)
;;
;; Tunables (currently hardcoded in the scan below, not yet exposed as
;; parameters -- edit the scan to change them):
;;   trigger chance: probability per beat (0.15)
;;   min duration:   minimum beats (1)
;;   max duration:   maximum beats (15)
;;
;; Usage:
;;   (include :path "../templates/scan-ripple-drops.sexp")
;;   ;; Uses default: 15% chance, 1-15 beat duration
;;
;; In frame:
;;   (let [rip-gate (bind ripple-state :gate)
;;         rip-amp (* rip-gate (core:map-range e 0 1 5 50))]
;;     (ripple frame
;;       :amplitude rip-amp
;;       :center_x (bind ripple-state :cx)
;;       :center_y (bind ripple-state :cy)))

;; :left is the remaining-beats countdown for the current drop; while a drop
;; is live, its center (:cx/:cy, kept within 0.2-0.8 so ripples stay
;; on-screen) is held constant. When idle, each beat rolls a 15% trigger.
(scan ripple-state (streaming:audio-beat music t)
  :init {:gate 0 :cx 0.5 :cy 0.5 :left 0}
  :step (if (> left 0)
            (dict :gate 1 :cx cx :cy cy :left (- left 1))
            (if (< (core:rand) 0.15)
                (dict :gate 1
                      :cx (+ 0.2 (* (core:rand) 0.6))
                      :cy (+ 0.2 (* (core:rand) 0.6))
                      :left (+ 1 (mod (streaming:audio-beat-count music t) 15)))
                (dict :gate 0 :cx 0.5 :cy 0.5 :left 0))))
diff --git a/templates/standard-effects.sexp b/templates/standard-effects.sexp new file mode 100644 index 0000000..9e97f34 --- /dev/null +++ b/templates/standard-effects.sexp @@ -0,0 +1,22 @@
;; Standard Effects Bundle
;;
;; Loads commonly-used video effects.
;; Include after primitives are loaded.
;;
;; Effects provided:
;;   - rotate: rotation by angle
;;   - zoom: scale in/out
;;   - blend: alpha blend two frames
;;   - ripple: water ripple distortion
;;   - invert: color inversion
;;   - hue_shift: hue rotation
;;
;; Usage:
;;   (include :path "../templates/standard-effects.sexp")

;; Paths are relative to the including recipe's directory (one level above
;; sexp_effects/) -- presumably resolved by the include/effect loader;
;; TODO confirm the base-directory convention.
(effect rotate :path "../sexp_effects/effects/rotate.sexp")
(effect zoom :path "../sexp_effects/effects/zoom.sexp")
(effect blend :path "../sexp_effects/effects/blend.sexp")
(effect ripple :path "../sexp_effects/effects/ripple.sexp")
(effect invert :path "../sexp_effects/effects/invert.sexp")
(effect hue_shift :path "../sexp_effects/effects/hue_shift.sexp")
diff --git a/templates/standard-primitives.sexp b/templates/standard-primitives.sexp new file mode 100644 index 0000000..6e2c62d --- /dev/null +++ b/templates/standard-primitives.sexp @@ -0,0 +1,14 @@
;; Standard Primitives Bundle
;;
;; Loads all commonly-used primitive libraries.
;; Include this at the top of streaming recipes.
;;
;; Usage:
;;   (include :path "../templates/standard-primitives.sexp")

(require-primitives "geometry")
(require-primitives "core")
(require-primitives "image")
(require-primitives "blending")
(require-primitives "color_ops")
(require-primitives "streaming")
diff --git a/templates/stream-process-pair.sexp b/templates/stream-process-pair.sexp new file mode 100644 index 0000000..55f408e --- /dev/null +++ b/templates/stream-process-pair.sexp @@ -0,0 +1,72 @@
;; stream-process-pair template (streaming-compatible)
;;
;; Macro for processing a video source pair with full effects.
;; Reads source, applies A/B effects (rotate, zoom, invert, hue), blends,
;; and applies pair-level rotation.
+;; +;; Required context (must be defined in calling scope): +;; - sources: array of video sources +;; - pair-configs: array of {:dir :rot-a :rot-b :zoom-a :zoom-b} configs +;; - pair-states: array from (bind pairs :states) +;; - now: current time (t) +;; - e: audio energy (0-1) +;; +;; Required effects (must be loaded): +;; - rotate, zoom, invert, hue_shift, blend +;; +;; Usage: +;; (include :path "../templates/stream-process-pair.sexp") +;; ...in frame pipeline... +;; (let [pair-states (bind pairs :states) +;; now t +;; e (streaming:audio-energy music now)] +;; (process-pair 0)) ;; process source at index 0 + +(require-primitives "core") + +(defmacro process-pair (src-idx) + (let [src (nth sources src-idx) + frame (streaming:source-read src now) + cfg (nth pair-configs src-idx) + state (nth pair-states src-idx) + + ;; Get state values (invert uses countdown > 0) + inv-a-active (if (> (get state :inv-a) 0) 1 0) + inv-b-active (if (> (get state :inv-b) 0) 1 0) + ;; Hue is active only when countdown > 0 + hue-a-val (if (> (get state :hue-a) 0) (get state :hue-a-val) 0) + hue-b-val (if (> (get state :hue-b) 0) (get state :hue-b-val) 0) + mix-opacity (get state :mix) + pair-rot-angle (* (get state :angle) (get cfg :dir)) + + ;; Get config values for energy-mapped ranges + rot-a-max (get cfg :rot-a) + rot-b-max (get cfg :rot-b) + zoom-a-max (get cfg :zoom-a) + zoom-b-max (get cfg :zoom-b) + + ;; Energy-driven rotation and zoom + rot-a (core:map-range e 0 1 0 rot-a-max) + rot-b (core:map-range e 0 1 0 rot-b-max) + zoom-a (core:map-range e 0 1 1 zoom-a-max) + zoom-b (core:map-range e 0 1 1 zoom-b-max) + + ;; Apply effects to clip A + clip-a (-> frame + (rotate :angle rot-a) + (zoom :amount zoom-a) + (invert :amount inv-a-active) + (hue_shift :degrees hue-a-val)) + + ;; Apply effects to clip B + clip-b (-> frame + (rotate :angle rot-b) + (zoom :amount zoom-b) + (invert :amount inv-b-active) + (hue_shift :degrees hue-b-val)) + + ;; Blend A+B + blended (blend clip-a clip-b 
:opacity mix-opacity)] + + ;; Apply pair-level rotation + (rotate blended :angle pair-rot-angle))) diff --git a/test_effects_pipeline.py b/test_effects_pipeline.py new file mode 100644 index 0000000..d1c8870 --- /dev/null +++ b/test_effects_pipeline.py @@ -0,0 +1,258 @@ +#!/usr/bin/env python3 +""" +Test the full effects pipeline: segment -> effect -> output + +This tests that effects can be applied to video segments without +producing "No video stream found" errors. +""" + +import subprocess +import tempfile +import sys +from pathlib import Path + +# Add parent to path +sys.path.insert(0, str(Path(__file__).parent)) + +import numpy as np +from sexp_effects import ( + get_interpreter, + load_effects_dir, + run_effect, + list_effects, +) + + +def create_test_video(path: Path, duration: float = 1.0, size: str = "64x64") -> bool: + """Create a short test video using ffmpeg.""" + cmd = [ + "ffmpeg", "-y", + "-f", "lavfi", "-i", f"testsrc=duration={duration}:size={size}:rate=10", + "-c:v", "libx264", "-preset", "ultrafast", + str(path) + ] + result = subprocess.run(cmd, capture_output=True) + if result.returncode != 0: + print(f"Failed to create test video: {result.stderr.decode()}") + return False + return True + + +def segment_video(input_path: Path, output_path: Path, start: float, duration: float) -> bool: + """Segment a video file.""" + cmd = [ + "ffmpeg", "-y", "-i", str(input_path), + "-ss", str(start), "-t", str(duration), + "-c:v", "libx264", "-preset", "ultrafast", + "-c:a", "aac", + str(output_path) + ] + result = subprocess.run(cmd, capture_output=True) + if result.returncode != 0: + print(f"Failed to segment video: {result.stderr.decode()}") + return False + + # Verify output has video stream + probe_cmd = [ + "ffprobe", "-v", "quiet", "-print_format", "json", + "-show_streams", str(output_path) + ] + probe_result = subprocess.run(probe_cmd, capture_output=True, text=True) + import json + probe_data = json.loads(probe_result.stdout) + + has_video = any( + 
s.get("codec_type") == "video" + for s in probe_data.get("streams", []) + ) + if not has_video: + print(f"Segment has no video stream!") + return False + + return True + + +def run_effect_on_video(effect_name: str, input_path: Path, output_path: Path) -> bool: + """Run a sexp effect on a video file using frame processing.""" + import json + + # Get video info + probe_cmd = [ + "ffprobe", "-v", "quiet", "-print_format", "json", + "-show_streams", str(input_path) + ] + probe_result = subprocess.run(probe_cmd, capture_output=True, text=True) + probe_data = json.loads(probe_result.stdout) + + video_stream = None + for stream in probe_data.get("streams", []): + if stream.get("codec_type") == "video": + video_stream = stream + break + + if not video_stream: + print(f" Input has no video stream: {input_path}") + return False + + width = int(video_stream["width"]) + height = int(video_stream["height"]) + fps_str = video_stream.get("r_frame_rate", "10/1") + if "/" in fps_str: + num, den = fps_str.split("/") + fps = float(num) / float(den) + else: + fps = float(fps_str) + + # Read frames, process, write + read_cmd = ["ffmpeg", "-i", str(input_path), "-f", "rawvideo", "-pix_fmt", "rgb24", "-"] + write_cmd = [ + "ffmpeg", "-y", + "-f", "rawvideo", "-pix_fmt", "rgb24", + "-s", f"{width}x{height}", "-r", str(fps), + "-i", "-", + "-c:v", "libx264", "-preset", "ultrafast", + str(output_path) + ] + + read_proc = subprocess.Popen(read_cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) + write_proc = subprocess.Popen(write_cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE) + + frame_size = width * height * 3 + frame_count = 0 + state = {} + + while True: + frame_data = read_proc.stdout.read(frame_size) + if len(frame_data) < frame_size: + break + + frame = np.frombuffer(frame_data, dtype=np.uint8).reshape((height, width, 3)) + processed, state = run_effect(effect_name, frame, {'_time': frame_count / fps}, state) + write_proc.stdin.write(processed.tobytes()) + frame_count += 1 + 
+ read_proc.stdout.close() + write_proc.stdin.close() + read_proc.wait() + write_proc.wait() + + if write_proc.returncode != 0: + print(f" FFmpeg encode failed: {write_proc.stderr.read().decode()}") + return False + + return frame_count > 0 + + +def test_effect_pipeline(effect_name: str, tmpdir: Path) -> tuple: + """ + Test full pipeline: create video -> segment -> apply effect + + Returns (success, error_message) + """ + # Create test video + source_video = tmpdir / "source.mp4" + if not create_test_video(source_video, duration=1.0, size="64x64"): + return False, "Failed to create source video" + + # Segment it (simulate what the recipe does) + segment_video_path = tmpdir / "segment.mp4" + if not segment_video(source_video, segment_video_path, start=0.2, duration=0.5): + return False, "Failed to segment video" + + # Check segment file exists and has content + if not segment_video_path.exists(): + return False, "Segment file doesn't exist" + if segment_video_path.stat().st_size < 100: + return False, f"Segment file too small: {segment_video_path.stat().st_size} bytes" + + # Apply effect + output_video = tmpdir / "output.mp4" + try: + if not run_effect_on_video(effect_name, segment_video_path, output_video): + return False, "Effect processing failed" + except Exception as e: + return False, str(e) + + # Verify output + if not output_video.exists(): + return False, "Output file doesn't exist" + if output_video.stat().st_size < 100: + return False, f"Output file too small: {output_video.stat().st_size} bytes" + + return True, None + + +def main(): + print("=" * 60) + print("Effects Pipeline Test") + print("=" * 60) + + # Load effects + effects_dir = Path(__file__).parent / "sexp_effects" / "effects" + load_effects_dir(str(effects_dir)) + + effects = list_effects() + print(f"Testing {len(effects)} effects through segment->effect pipeline\n") + + passed = [] + failed = [] + + # Test multi-input effects separately + multi_input_effects = ("blend", "layer") + 
print("\nTesting multi-input effects...") + from sexp_effects.interpreter import get_interpreter + interp = get_interpreter() + frame_a = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8) + frame_b = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8) + + for name in multi_input_effects: + try: + interp.global_env.set('frame-a', frame_a.copy()) + interp.global_env.set('frame-b', frame_b.copy()) + interp.global_env.set('frame', frame_a.copy()) + result, state = interp.run_effect(name, frame_a.copy(), {'_time': 0.5}, {}) + if isinstance(result, np.ndarray) and result.shape == frame_a.shape: + passed.append(name) + print(f" {name}: OK") + else: + failed.append((name, f"Bad output shape: {result.shape if hasattr(result, 'shape') else type(result)}")) + print(f" {name}: FAILED - bad shape") + except Exception as e: + failed.append((name, str(e))) + print(f" {name}: FAILED - {e}") + + print("\nTesting single-input effects through pipeline...") + + # Test each effect + for effect_name in sorted(effects): + # Skip multi-input effects (already tested above) + if effect_name in multi_input_effects: + continue + + with tempfile.TemporaryDirectory() as tmpdir: + tmpdir = Path(tmpdir) + + success, error = test_effect_pipeline(effect_name, tmpdir) + + if success: + passed.append(effect_name) + print(f" {effect_name}: OK") + else: + failed.append((effect_name, error)) + print(f" {effect_name}: FAILED - {error}") + + print() + print("=" * 60) + print(f"Pipeline test: {len(passed)} passed, {len(failed)} failed") + if failed: + print("\nFailed effects:") + for name, error in failed: + print(f" {name}: {error}") + print("=" * 60) + + return len(failed) == 0 + + +if __name__ == "__main__": + success = main() + sys.exit(0 if success else 1)