Squashed 'test/' content from commit f2edc20

git-subtree-dir: test
git-subtree-split: f2edc20cba865a6ef67ca807c2ed6cee8e6c2836
This commit is contained in:
giles
2026-02-24 23:10:04 +00:00
commit c590f2e039
105 changed files with 19968 additions and 0 deletions

19
.gitignore vendored Normal file
View File

@@ -0,0 +1,19 @@
# Python cache
__pycache__/
*.pyc
# Media files
*.mp4
*.mkv
*.webm
*.mp3
# Output files
*.json
# Cache directories
.cache/
.stage_cache/
effects/.stage_cache/
local_server/.cache/
local_server/.data/

223
analyze.py Normal file
View File

@@ -0,0 +1,223 @@
#!/usr/bin/env python3
"""
Run analyzers from a recipe and output analysis data as S-expressions.
Usage:
analyze.py recipe.sexp [-o analysis.sexp]
Output format:
(analysis
(beats-data
:tempo 120.5
:times (0.0 0.5 1.0 1.5 ...)
:duration 10.0)
(bass-data
:times (0.0 0.1 0.2 ...)
:values (0.5 0.8 0.3 ...)))
"""
import sys
import tempfile
import subprocess
import importlib.util
from pathlib import Path
# Add artdag to path
sys.path.insert(0, str(Path(__file__).parent.parent / "artdag"))
from artdag.sexp import compile_string, parse
from artdag.sexp.parser import Symbol, Keyword, serialize
def load_analyzer(analyzer_path: Path):
    """Load an analyzer module from a file path.

    Args:
        analyzer_path: Path to a Python file exposing an
            ``analyze(input_path, params)`` function.

    Returns:
        The freshly executed module object.

    Raises:
        ImportError: If the path cannot be turned into a loadable module
            spec (e.g. missing file or unsupported extension).
    """
    spec = importlib.util.spec_from_file_location("analyzer", analyzer_path)
    if spec is None or spec.loader is None:
        # spec_from_file_location returns None for unloadable paths; fail
        # with a clear error instead of an AttributeError on None below.
        raise ImportError(f"Cannot load analyzer module from {analyzer_path}")
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
def run_analyzer(analyzer_path: Path, input_path: Path, params: dict) -> dict:
    """Load the analyzer at *analyzer_path* and invoke it on *input_path*.

    Returns whatever dict the analyzer's ``analyze`` function produces.
    """
    return load_analyzer(analyzer_path).analyze(input_path, params)
def pre_execute_segment(source_path: Path, start: float, duration: float, work_dir: Path) -> Path:
    """Pre-execute a segment to get audio for analysis.

    Cuts ``source_path`` down to the window [start, start+duration) with
    ffmpeg and writes the result into *work_dir*.  Audio-only inputs are
    re-encoded to AAC in an .m4a container; everything else is treated as
    video and encoded to H.264 + AAC in an .mp4 container.
    """
    audio_exts = ('.mp3', '.wav', '.flac', '.aac', '.ogg', '.m4a')
    is_audio = source_path.suffix.lower() in audio_exts
    output_path = work_dir / ("segment.m4a" if is_audio else "segment.mp4")
    cmd = ["ffmpeg", "-y", "-i", str(source_path)]
    # A falsy start/duration means "from the beginning" / "to the end".
    if start:
        cmd += ["-ss", str(start)]
    if duration:
        cmd += ["-t", str(duration)]
    if is_audio:
        cmd += ["-c:a", "aac", str(output_path)]
    else:
        cmd += ["-c:v", "libx264", "-preset", "fast", "-crf", "18",
                "-c:a", "aac", str(output_path)]
    subprocess.run(cmd, check=True, capture_output=True)
    return output_path
def to_sexp(value, indent=0):
    """Convert a Python value to an S-expression string.

    Dicts become plists ``(:key value ...)`` with underscores in keys
    turned into hyphens; lists become parenthesized sequences; strings are
    quoted with backslashes, quotes and newlines escaped; booleans map to
    true/false and None to nil; floats use up to 6 significant digits.
    *indent* is accepted for interface compatibility but unused.
    """
    if isinstance(value, dict):
        if not value:
            return "()"
        items = []
        for k, v in value.items():
            key = k.replace('_', '-')
            items.append(f":{key} {to_sexp(v)}")
        return "(" + " ".join(items) + ")"
    elif isinstance(value, list):
        if not value:
            return "()"
        items = [to_sexp(v) for v in value]
        return "(" + " ".join(items) + ")"
    elif isinstance(value, str):
        # Escape special characters so the output stays parseable
        # (consistent with plan.py's to_sexp, which already escapes).
        escaped = value.replace('\\', '\\\\').replace('"', '\\"').replace('\n', '\\n')
        return f'"{escaped}"'
    elif isinstance(value, bool):
        return "true" if value else "false"
    elif value is None:
        return "nil"
    elif isinstance(value, float):
        return f"{value:.6g}"
    else:
        return str(value)
def analyze_recipe(recipe_path: Path, output_file: Path = None):
    """Run all analyzers in a recipe and output S-expression analysis data.

    Args:
        recipe_path: Path to the recipe .sexp file.
        output_file: Optional output path; when None the analysis
            S-expression is printed to stdout instead.

    Side effects: compiles the recipe, pre-executes SOURCE/SEGMENT inputs
    into a temp directory (left on disk for debugging), and runs every
    ANALYZE node's analyzer module.
    """
    recipe_text = recipe_path.read_text()
    recipe_dir = recipe_path.parent
    print(f"Compiling: {recipe_path}", file=sys.stderr)
    compiled = compile_string(recipe_text)
    print(f"Recipe: {compiled.name} v{compiled.version}", file=sys.stderr)
    # Find all ANALYZE nodes and their dependencies
    nodes_by_id = {n["id"]: n for n in compiled.nodes}
    # Track source paths and segment outputs
    source_paths = {}       # node_id -> resolved SOURCE path
    segment_outputs = {}    # node_id -> pre-executed SEGMENT output path
    analysis_results = {}   # node_id -> analyzer result dict
    work_dir = Path(tempfile.mkdtemp(prefix="artdag_analyze_"))
    # Process nodes in dependency order
    def get_input_path(node_id: str) -> Path:
        """Resolve the input path for a node."""
        # Memoized results first (a node may feed several analyzers).
        if node_id in segment_outputs:
            return segment_outputs[node_id]
        if node_id in source_paths:
            return source_paths[node_id]
        node = nodes_by_id.get(node_id)
        if not node:
            return None
        if node["type"] == "SOURCE":
            # SOURCE paths are interpreted relative to the recipe file.
            path = recipe_dir / node["config"].get("path", "")
            source_paths[node_id] = path.resolve()
            return source_paths[node_id]
        if node["type"] == "SEGMENT":
            # SEGMENT: recursively resolve the upstream input, then cut it.
            inputs = node.get("inputs", [])
            if inputs:
                input_path = get_input_path(inputs[0])
                if input_path:
                    config = node.get("config", {})
                    start = config.get("start", 0)
                    duration = config.get("duration")
                    output = pre_execute_segment(input_path, start, duration, work_dir)
                    segment_outputs[node_id] = output
                    return output
        return None
    # Find and run all analyzers
    for node in compiled.nodes:
        if node["type"] == "ANALYZE":
            config = node.get("config", {})
            analyzer_name = config.get("analyzer", "unknown")
            analyzer_path = config.get("analyzer_path")
            if not analyzer_path:
                print(f" Skipping {analyzer_name}: no path", file=sys.stderr)
                continue
            # Get input
            inputs = node.get("inputs", [])
            if not inputs:
                print(f" Skipping {analyzer_name}: no inputs", file=sys.stderr)
                continue
            input_path = get_input_path(inputs[0])
            if not input_path or not input_path.exists():
                print(f" Skipping {analyzer_name}: input not found", file=sys.stderr)
                continue
            # Run analyzer
            full_path = recipe_dir / analyzer_path
            # Everything in the node config except bookkeeping keys is
            # forwarded to the analyzer as its params.
            params = {k: v for k, v in config.items()
                      if k not in ("analyzer", "analyzer_path", "cid")}
            print(f" Running analyzer: {analyzer_name}", file=sys.stderr)
            results = run_analyzer(full_path, input_path, params)
            # Store by node ID for uniqueness (multiple analyzers may have same type)
            node_id = node.get("id")
            analysis_results[node_id] = results
            times = results.get("times", [])
            print(f" {len(times)} times @ {results.get('tempo', 0):.1f} BPM", file=sys.stderr)
    # Generate S-expression output
    lines = ["(analysis"]
    for name, data in analysis_results.items():
        # Quote node IDs to prevent parser treating hex like "0e42..." as scientific notation
        lines.append(f' ("{name}"')
        for key, value in data.items():
            sexp_key = key.replace('_', '-')
            sexp_value = to_sexp(value)
            lines.append(f" :{sexp_key} {sexp_value}")
        lines.append(" )")
    lines.append(")")
    output = "\n".join(lines)
    if output_file:
        output_file.write_text(output)
        print(f"\nAnalysis written to: {output_file}", file=sys.stderr)
    else:
        print(output)
    print(f"Debug: temp files in {work_dir}", file=sys.stderr)
if __name__ == "__main__":
    import argparse
    # CLI entry point: analyze a recipe, optionally writing the analysis
    # S-expression to a file instead of stdout.
    cli = argparse.ArgumentParser(description="Run analyzers from recipe")
    cli.add_argument("recipe", type=Path, help="Recipe file (.sexp)")
    cli.add_argument("-o", "--output", type=Path, help="Output file (default: stdout)")
    ns = cli.parse_args()
    if not ns.recipe.exists():
        print(f"Recipe not found: {ns.recipe}", file=sys.stderr)
        sys.exit(1)
    analyze_recipe(ns.recipe, ns.output)

404
cache.py Normal file
View File

@@ -0,0 +1,404 @@
#!/usr/bin/env python3
"""
Unified content cache for artdag.
Design:
- IPNS (cache_id) = computation hash, known BEFORE execution
"What would be the result of running X with inputs Y?"
- CID = content hash, known AFTER execution
"What is this actual content?"
Structure:
.cache/
refs/ # IPNS → CID mappings (computation → result)
{cache_id} # Text file containing the CID of the result
content/ # Content-addressed storage
{cid[:2]}/{cid} # Actual content by CID
"""
import hashlib
import json
import os
from pathlib import Path
from typing import Optional, Dict, Any, Tuple
# Default cache location - can be overridden via ARTDAG_CACHE env var
DEFAULT_CACHE_DIR = Path(__file__).parent / ".cache"
def get_cache_dir() -> Path:
    """Return the cache root directory, creating it if needed.

    Honors the ARTDAG_CACHE environment variable; falls back to a
    .cache directory next to this module.
    """
    root = Path(os.environ.get("ARTDAG_CACHE", DEFAULT_CACHE_DIR))
    root.mkdir(parents=True, exist_ok=True)
    return root
def get_refs_dir() -> Path:
    """Return the refs directory (IPNS → CID mappings), creating it if needed."""
    path = get_cache_dir() / "refs"
    path.mkdir(parents=True, exist_ok=True)
    return path
def get_content_dir() -> Path:
    """Return the content-addressed storage directory, creating it if needed."""
    path = get_cache_dir() / "content"
    path.mkdir(parents=True, exist_ok=True)
    return path
# =============================================================================
# CID (Content Hash) Operations
# =============================================================================
def compute_cid(content: bytes) -> str:
    """Return the content ID (hex SHA-256 digest) of *content*."""
    digest = hashlib.sha256(content)
    return digest.hexdigest()
def compute_file_cid(file_path: Path) -> str:
    """Return the content ID for a file's bytes."""
    return compute_cid(file_path.read_bytes())
def compute_string_cid(text: str) -> str:
    """Return the content ID for a string (hashed over its UTF-8 bytes)."""
    return hashlib.sha256(text.encode('utf-8')).hexdigest()
# =============================================================================
# Content Storage (by CID)
# =============================================================================
def _content_path(cid: str) -> Path:
    """Path for content by CID, sharded into two-character subdirectories."""
    shard = cid[:2]
    return get_content_dir() / shard / cid
def content_exists_by_cid(cid: str) -> Optional[Path]:
    """Return the path of non-empty cached content for *cid*, else None.

    An empty file counts as missing (guards against interrupted writes).
    """
    candidate = _content_path(cid)
    return candidate if candidate.exists() and candidate.stat().st_size > 0 else None
def content_store_by_cid(cid: str, content: bytes) -> Path:
    """Write *content* at its CID-derived path and return that path."""
    dest = _content_path(cid)
    dest.parent.mkdir(parents=True, exist_ok=True)
    dest.write_bytes(content)
    return dest
def content_store_file(file_path: Path) -> Tuple[str, Path]:
    """Store a file's bytes by content hash. Returns (cid, path)."""
    data = file_path.read_bytes()
    cid = compute_cid(data)
    return cid, content_store_by_cid(cid, data)
def content_store_string(text: str) -> Tuple[str, Path]:
    """Store a UTF-8 string by content hash. Returns (cid, path)."""
    data = text.encode('utf-8')
    cid = compute_cid(data)
    return cid, content_store_by_cid(cid, data)
def content_get(cid: str) -> Optional[bytes]:
    """Return content bytes for *cid*, or None when not cached."""
    path = content_exists_by_cid(cid)
    return path.read_bytes() if path else None
def content_get_string(cid: str) -> Optional[str]:
    """Return cached content for *cid* decoded as UTF-8, or None."""
    raw = content_get(cid)
    return raw.decode('utf-8') if raw else None
# =============================================================================
# Refs (IPNS → CID mappings)
# =============================================================================
def _ref_path(cache_id: str) -> Path:
    """Path of the ref file recording the CID for *cache_id*."""
    refs = get_refs_dir()
    return refs / cache_id
def ref_exists(cache_id: str) -> Optional[str]:
    """Return the CID recorded for *cache_id*, or None if no ref exists."""
    ref = _ref_path(cache_id)
    return ref.read_text().strip() if ref.exists() else None
def ref_set(cache_id: str, cid: str) -> Path:
    """Record the IPNS → CID mapping for *cache_id*; returns the ref path."""
    ref = _ref_path(cache_id)
    ref.parent.mkdir(parents=True, exist_ok=True)
    ref.write_text(cid)
    return ref
def ref_get_content(cache_id: str) -> Optional[bytes]:
    """Resolve *cache_id* to a CID and return that content, or None."""
    cid = ref_exists(cache_id)
    return content_get(cid) if cid else None
def ref_get_string(cache_id: str) -> Optional[str]:
    """Resolve *cache_id* and return its content as UTF-8 text, or None."""
    raw = ref_get_content(cache_id)
    return raw.decode('utf-8') if raw else None
# =============================================================================
# High-level Cache Operations
# =============================================================================
def cache_store(cache_id: str, content: bytes) -> Tuple[str, Path]:
    """
    Store content with IPNS→CID indirection.

    The content is first written under its own hash, then the
    computation's cache_id is pointed at that hash.

    Args:
        cache_id: Computation hash (IPNS address)
        content: Content to store

    Returns:
        (cid, path) tuple
    """
    cid = compute_cid(content)
    stored_at = content_store_by_cid(cid, content)
    ref_set(cache_id, cid)
    return cid, stored_at
def cache_store_file(cache_id: str, file_path: Path) -> Tuple[str, Path]:
    """Store a file's bytes with IPNS→CID indirection."""
    return cache_store(cache_id, file_path.read_bytes())
def cache_store_string(cache_id: str, text: str) -> Tuple[str, Path]:
    """Store a string (UTF-8 encoded) with IPNS→CID indirection."""
    payload = text.encode('utf-8')
    return cache_store(cache_id, payload)
def cache_store_json(cache_id: str, data: Any) -> Tuple[str, Path]:
    """Serialize *data* as pretty-printed JSON and store it under cache_id."""
    return cache_store_string(cache_id, json.dumps(data, indent=2))
def cache_exists(cache_id: str) -> Optional[Path]:
    """Return the content path for a computation, or None if not cached."""
    cid = ref_exists(cache_id)
    return content_exists_by_cid(cid) if cid else None
def cache_get(cache_id: str) -> Optional[bytes]:
    """Get cached content by computation hash.

    Thin alias over ref_get_content: resolves cache_id → CID → bytes,
    returning None when either step fails.
    """
    return ref_get_content(cache_id)
def cache_get_string(cache_id: str) -> Optional[str]:
    """Get cached string by computation hash.

    Thin alias over ref_get_string (UTF-8 decode of the cached bytes);
    returns None when nothing is cached.
    """
    return ref_get_string(cache_id)
def cache_get_json(cache_id: str) -> Optional[Any]:
    """Get cached JSON by computation hash; None when absent."""
    text = cache_get_string(cache_id)
    return json.loads(text) if text else None
def cache_get_path(cache_id: str) -> Optional[Path]:
    """Path to cached content by computation hash (same lookup as cache_exists)."""
    cid = ref_exists(cache_id)
    return content_exists_by_cid(cid) if cid else None
# =============================================================================
# Plan Cache (convenience wrappers)
# =============================================================================
def _stable_hash_params(params: Dict[str, Any]) -> str:
"""Compute stable hash of params using JSON + SHA256 (consistent with CID)."""
params_str = json.dumps(params, sort_keys=True, default=str)
return hashlib.sha256(params_str.encode()).hexdigest()
def plan_cache_id(source_cid: str, params: Dict[str, Any] = None) -> str:
    """
    Compute the cache_id (IPNS address) for a plan.

    Derived from the source CID plus an optional stable hash of the
    params; name/version are just metadata and never enter the key.
    """
    key = f"plan:{source_cid}"
    if params:
        key = f"{key}:{_stable_hash_params(params)}"
    return hashlib.sha256(key.encode()).hexdigest()
def plan_exists(source_cid: str, params: Dict[str, Any] = None) -> Optional[str]:
    """Return the CID of a cached plan, or None when not cached."""
    return ref_exists(plan_cache_id(source_cid, params))
def plan_store(source_cid: str, params: Dict[str, Any], content: str) -> Tuple[str, str, Path]:
    """
    Store a plan in the cache.

    Returns:
        (cache_id, cid, path) tuple
    """
    cache_id = plan_cache_id(source_cid, params)
    cid, stored_at = cache_store_string(cache_id, content)
    return cache_id, cid, stored_at
def plan_load(source_cid: str, params: Dict[str, Any] = None) -> Optional[str]:
    """Load a cached plan's content string, or None when not cached."""
    return cache_get_string(plan_cache_id(source_cid, params))
def plan_get_path(source_cid: str, params: Dict[str, Any] = None) -> Optional[Path]:
    """Return the filesystem path of a cached plan, or None."""
    return cache_get_path(plan_cache_id(source_cid, params))
# =============================================================================
# Cache Listing
# =============================================================================
def list_cache(verbose: bool = False) -> Dict[str, Any]:
    """Collect a listing of all cached refs and content items.

    Returns a dict with "refs" (ref files, each annotated with its CID and
    resolved content size when present), "content" (stored blobs), and a
    "summary" with counts and total size.  *verbose* is accepted for
    interface compatibility but unused here.
    """
    from datetime import datetime
    cache_dir = get_cache_dir()
    refs_dir = get_refs_dir()
    content_dir = get_content_dir()
    def format_size(size):
        # Human-readable decimal units (GB/MB/KB/B).
        if size >= 1_000_000_000:
            return f"{size / 1_000_000_000:.1f}GB"
        if size >= 1_000_000:
            return f"{size / 1_000_000:.1f}MB"
        if size >= 1000:
            return f"{size / 1000:.1f}KB"
        return f"{size}B"
    def get_file_info(path: Path) -> Dict:
        st = path.stat()
        return {
            "path": path,
            "name": path.name,
            "size": st.st_size,
            "size_str": format_size(st.st_size),
            "mtime": datetime.fromtimestamp(st.st_mtime),
        }
    result = {
        "refs": [],
        "content": [],
        "summary": {"total_items": 0, "total_size": 0},
    }
    # Refs: each ref file maps a cache_id (its name) to a CID (its text).
    if refs_dir.exists():
        for ref_file in sorted(refs_dir.iterdir()):
            if not ref_file.is_file():
                continue
            entry = get_file_info(ref_file)
            entry["cache_id"] = ref_file.name
            entry["cid"] = ref_file.read_text().strip()
            # Annotate with the referenced content's size when it exists.
            blob = content_exists_by_cid(entry["cid"])
            if blob:
                entry["content_size"] = blob.stat().st_size
                entry["content_size_str"] = format_size(entry["content_size"])
            result["refs"].append(entry)
    # Content: blobs live under two-character shard directories.
    if content_dir.exists():
        for shard in sorted(content_dir.iterdir()):
            if not shard.is_dir():
                continue
            for blob in sorted(shard.iterdir()):
                if blob.is_file():
                    entry = get_file_info(blob)
                    entry["cid"] = blob.name
                    result["content"].append(entry)
    # Summary totals.
    summary = result["summary"]
    summary["total_refs"] = len(result["refs"])
    summary["total_content"] = len(result["content"])
    summary["total_size"] = sum(e["size"] for e in result["content"])
    summary["total_size_str"] = format_size(summary["total_size"])
    return result
def print_cache_listing(verbose: bool = False):
    """Render the cache listing from list_cache() to stdout."""
    info = list_cache(verbose)
    cache_dir = get_cache_dir()
    print(f"\nCache directory: {cache_dir}\n")
    # Refs section (capped at the first 20 entries).
    refs = info["refs"]
    if refs:
        print(f"=== Refs ({len(refs)}) ===")
        for ref in refs[:20]:
            content_info = f"{ref.get('content_size_str', '?')}" if 'content_size_str' in ref else ""
            print(f" {ref['cache_id'][:16]}... → {ref['cid'][:16]}...{content_info}")
        if len(refs) > 20:
            print(f" ... and {len(refs) - 20} more")
        print()
    # Content section (also capped at 20).
    content = info["content"]
    if content:
        print(f"=== Content ({len(content)} items, {info['summary']['total_size_str']}) ===")
        for item in content[:20]:
            print(f" {item['cid'][:16]}... {item['size_str']:>8} {item['mtime'].strftime('%Y-%m-%d %H:%M')}")
        if len(content) > 20:
            print(f" ... and {len(content) - 20} more")
        print()
    print(f"=== Summary ===")
    print(f" Refs: {info['summary']['total_refs']}")
    print(f" Content: {info['summary']['total_content']} ({info['summary']['total_size_str']})")
    if verbose:
        print(f"\nTo clear cache: rm -rf {cache_dir}/*")
if __name__ == "__main__":
    import sys
    # -v / --verbose additionally prints the cache-clearing hint.
    flags = set(sys.argv)
    print_cache_listing(bool(flags & {"-v", "--verbose"}))

17
configs/audio-dizzy.sexp Normal file
View File

@@ -0,0 +1,17 @@
;; Audio Configuration - dizzy.mp3
;;
;; Defines audio analyzer and playback for a recipe.
;; Pass to recipe with: --audio configs/audio-dizzy.sexp
;;
;; Provides:
;; - music: audio analyzer for beat/energy detection
;; - audio-playback: path for synchronized playback
(require-primitives "streaming")
;; Audio analyzer (provides beat detection and energy levels)
;; Paths relative to working directory (project root)
(def music (streaming:make-audio-analyzer "dizzy.mp3"))
;; Audio playback path (for sync with video output)
(audio-playback "dizzy.mp3")

View File

@@ -0,0 +1,17 @@
;; Audio Configuration - woods_half/halleluwah.webm
;;
;; Defines audio analyzer and playback for a recipe.
;; Pass to recipe with: --audio configs/audio-dizzy.sexp
;;
;; Provides:
;; - music: audio analyzer for beat/energy detection
;; - audio-playback: path for synchronized playback
(require-primitives "streaming")
;; Audio analyzer (provides beat detection and energy levels)
;; Paths relative to working directory (project root)
(def music (streaming:make-audio-analyzer "woods_half/halleluwah.webm"))
;; Audio playback path (for sync with video output)
(audio-playback "woods_half/halleluwah.webm")

View File

@@ -0,0 +1,38 @@
;; Default Sources Configuration
;;
;; Defines video sources and per-pair effect configurations.
;; Pass to recipe with: --sources configs/sources-default.sexp
;;
;; Required by recipes using process-pair macro:
;; - sources: array of video sources
;; - pair-configs: array of effect configurations per source
(require-primitives "streaming")
;; Video sources array
;; Paths relative to working directory (project root)
(def sources [
(streaming:make-video-source "monday.webm" 30)
(streaming:make-video-source "escher.webm" 30)
(streaming:make-video-source "2.webm" 30)
(streaming:make-video-source "disruptors.webm" 30)
(streaming:make-video-source "4.mp4" 30)
(streaming:make-video-source "ecstacy.mp4" 30)
(streaming:make-video-source "dopple.webm" 30)
(streaming:make-video-source "5.mp4" 30)
])
;; Per-pair effect config: rotation direction, rotation ranges, zoom ranges
;; :dir = rotation direction (1 or -1)
;; :rot-a, :rot-b = max rotation angles for clip A and B
;; :zoom-a, :zoom-b = max zoom amounts for clip A and B
(def pair-configs [
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 0: monday
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 1: escher
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 2: vid2
{:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 3: disruptors (reversed)
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 4: vid4
{:dir 1 :rot-a 30 :rot-b -30 :zoom-a 1.3 :zoom-b 0.7} ;; 5: ecstacy (smaller)
{:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 6: dopple (reversed)
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 7: vid5
])

View File

@@ -0,0 +1,19 @@
;; Half-resolution Woods Sources (960x540)
;;
;; Pass to recipe with: --sources configs/sources-woods-half.sexp
(require-primitives "streaming")
(def sources [
(streaming:make-video-source "woods_half/1.webm" 30)
(streaming:make-video-source "woods_half/2.webm" 30)
(streaming:make-video-source "woods_half/3.webm" 30)
(streaming:make-video-source "woods_half/4.webm" 30)
])
(def pair-configs [
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5}
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5}
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5}
{:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5}
])

View File

@@ -0,0 +1,39 @@
;; Woods Sources Configuration
;;
;; Defines video sources and per-pair effect configurations.
;; Pass to recipe with: --sources configs/sources-default.sexp
;;
;; Required by recipes using process-pair macro:
;; - sources: array of video sources
;; - pair-configs: array of effect configurations per source
(require-primitives "streaming")
;; Video sources array
;; Paths relative to working directory (project root)
(def sources [
(streaming:make-video-source "woods/1.webm" 10)
(streaming:make-video-source "woods/2.webm" 10)
(streaming:make-video-source "woods/3.webm" 10)
(streaming:make-video-source "woods/4.webm" 10)
(streaming:make-video-source "woods/5.webm" 10)
(streaming:make-video-source "woods/6.webm" 10)
(streaming:make-video-source "woods/7.webm" 10)
(streaming:make-video-source "woods/8.webm" 10)
])
;; Per-pair effect config: rotation direction, rotation ranges, zoom ranges
;; :dir = rotation direction (1 or -1)
;; :rot-a, :rot-b = max rotation angles for clip A and B
;; :zoom-a, :zoom-b = max zoom amounts for clip A and B
(def pair-configs [
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 0: woods/1
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 1: woods/2
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 2: woods/3
{:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 3: woods/4 (reversed)
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 4: woods/5
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 5: woods/6
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 6: woods/7
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 7: woods/8
])

View File

@@ -0,0 +1,150 @@
;; Quick Test - Fully Explicit Streaming Version
;;
;; The interpreter is completely generic - knows nothing about video/audio.
;; All domain logic is explicit via primitives.
;;
;; Run with built-in sources/audio:
;; python3 -m streaming.stream_sexp_generic effects/quick_test_explicit.sexp --fps 30
;;
;; Run with external config files:
;; python3 -m streaming.stream_sexp_generic effects/quick_test_explicit.sexp \
;; --sources configs/sources-default.sexp \
;; --audio configs/audio-dizzy.sexp \
;; --fps 30
(stream "quick_test_explicit"
:fps 30
:width 1920
:height 1080
:seed 42
;; Load standard primitives and effects
(include :path "../templates/standard-primitives.sexp")
(include :path "../templates/standard-effects.sexp")
;; Load reusable templates
(include :path "../templates/stream-process-pair.sexp")
(include :path "../templates/crossfade-zoom.sexp")
;; === SOURCES AS ARRAY ===
(def sources [
(streaming:make-video-source "monday.webm" 30)
(streaming:make-video-source "escher.webm" 30)
(streaming:make-video-source "2.webm" 30)
(streaming:make-video-source "disruptors.webm" 30)
(streaming:make-video-source "4.mp4" 30)
(streaming:make-video-source "ecstacy.mp4" 30)
(streaming:make-video-source "dopple.webm" 30)
(streaming:make-video-source "5.mp4" 30)
])
;; Per-pair config: [rot-dir, rot-a-max, rot-b-max, zoom-a-max, zoom-b-max]
;; Pairs 3,6: reversed (negative rot-a, positive rot-b, shrink zoom-a, grow zoom-b)
;; Pair 5: smaller ranges
(def pair-configs [
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 0: monday
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 1: escher
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 2: vid2
{:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 3: disruptors (reversed)
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 4: vid4
{:dir 1 :rot-a 30 :rot-b -30 :zoom-a 1.3 :zoom-b 0.7} ;; 5: ecstacy (smaller)
{:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 6: dopple (reversed)
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 7: vid5
])
;; Audio analyzer
(def music (streaming:make-audio-analyzer "dizzy.mp3"))
;; Audio playback
(audio-playback "../dizzy.mp3")
;; === GLOBAL SCANS ===
;; Cycle state: which source is active (recipe-specific)
;; clen = beats per source (8-24 beats = ~4-12 seconds)
(scan cycle (streaming:audio-beat music t)
:init {:active 0 :beat 0 :clen 16}
:step (if (< (+ beat 1) clen)
(dict :active active :beat (+ beat 1) :clen clen)
(dict :active (mod (+ active 1) (len sources)) :beat 0
:clen (+ 8 (mod (* (streaming:audio-beat-count music t) 7) 17)))))
;; Reusable scans from templates (require 'music' to be defined)
(include :path "../templates/scan-oscillating-spin.sexp")
(include :path "../templates/scan-ripple-drops.sexp")
;; === PER-PAIR STATE (dynamically sized based on sources) ===
;; Each pair has: inv-a, inv-b, hue-a, hue-b, mix, rot-angle
(scan pairs (streaming:audio-beat music t)
:init {:states (map (core:range (len sources)) (lambda (_)
{:inv-a 0 :inv-b 0 :hue-a 0 :hue-b 0 :hue-a-val 0 :hue-b-val 0 :mix 0.5 :mix-rem 5 :angle 0 :rot-beat 0 :rot-clen 25}))}
:step (dict :states (map states (lambda (p)
(let [;; Invert toggles (10% chance, lasts 1-4 beats)
new-inv-a (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- (get p :inv-a) 1)))
new-inv-b (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- (get p :inv-b) 1)))
;; Hue shifts (10% chance, lasts 1-4 beats) - use countdown like invert
old-hue-a (get p :hue-a)
old-hue-b (get p :hue-b)
new-hue-a (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- old-hue-a 1)))
new-hue-b (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- old-hue-b 1)))
;; Pick random hue value when triggering (stored separately)
new-hue-a-val (if (> new-hue-a old-hue-a) (+ 30 (* (core:rand) 300)) (get p :hue-a-val))
new-hue-b-val (if (> new-hue-b old-hue-b) (+ 30 (* (core:rand) 300)) (get p :hue-b-val))
;; Mix (holds for 1-10 beats, then picks 0, 0.5, or 1)
mix-rem (get p :mix-rem)
old-mix (get p :mix)
new-mix-rem (if (> mix-rem 0) (- mix-rem 1) (+ 1 (core:rand-int 1 10)))
new-mix (if (> mix-rem 0) old-mix (* (core:rand-int 0 2) 0.5))
;; Rotation (accumulates, reverses direction when cycle completes)
rot-beat (get p :rot-beat)
rot-clen (get p :rot-clen)
old-angle (get p :angle)
;; Note: dir comes from pair-configs, but we store rotation state here
new-rot-beat (if (< (+ rot-beat 1) rot-clen) (+ rot-beat 1) 0)
new-rot-clen (if (< (+ rot-beat 1) rot-clen) rot-clen (+ 20 (core:rand-int 0 10)))
new-angle (+ old-angle (/ 360 rot-clen))]
(dict :inv-a new-inv-a :inv-b new-inv-b
:hue-a new-hue-a :hue-b new-hue-b
:hue-a-val new-hue-a-val :hue-b-val new-hue-b-val
:mix new-mix :mix-rem new-mix-rem
:angle new-angle :rot-beat new-rot-beat :rot-clen new-rot-clen))))))
;; === FRAME PIPELINE ===
(frame
(let [now t
e (streaming:audio-energy music now)
;; Get cycle state
active (bind cycle :active)
beat-pos (bind cycle :beat)
clen (bind cycle :clen)
;; Transition logic: last third of cycle crossfades to next
phase3 (* beat-pos 3)
fading (and (>= phase3 (* clen 2)) (< phase3 (* clen 3)))
fade-amt (if fading (/ (- phase3 (* clen 2)) clen) 0)
next-idx (mod (+ active 1) (len sources))
;; Get pair states array (required by process-pair macro)
pair-states (bind pairs :states)
;; Process active pair using macro from template
active-frame (process-pair active)
;; Crossfade with zoom during transition (using macro)
result (if fading
(crossfade-zoom active-frame (process-pair next-idx) fade-amt)
active-frame)
;; Final: global spin + ripple
spun (rotate result :angle (bind spin :angle))
rip-gate (bind ripple-state :gate)
rip-amp (* rip-gate (core:map-range e 0 1 5 50))]
(ripple spun
:amplitude rip-amp
:center_x (bind ripple-state :cx)
:center_y (bind ripple-state :cy)
:frequency 8
:decay 2
:speed 5))))

2368
execute.py Normal file

File diff suppressed because it is too large Load Diff

415
plan.py Normal file
View File

@@ -0,0 +1,415 @@
#!/usr/bin/env python3
"""
Plan generator for S-expression recipes.
Expands dynamic nodes (SLICE_ON) into primitives using analysis data.
Outputs a plan that can be executed by execute.py.
Usage:
analyze.py recipe.sexp > analysis.sexp
plan.py recipe.sexp --analysis analysis.sexp --sexp > plan.sexp
execute.py plan.sexp --analysis analysis.sexp
"""
import sys
import json
from pathlib import Path
# Add artdag to path
sys.path.insert(0, str(Path(__file__).parent.parent / "artdag"))
from artdag.sexp import compile_string, parse
from artdag.sexp.planner import create_plan
from artdag.sexp.parser import Binding, serialize as sexp_serialize, Symbol, Keyword
def parse_analysis_sexp(content: str) -> dict:
    """Parse analysis S-expression into dict.

    Expects text of the form ``(analysis (name :key val ...) ...)`` and
    returns ``{name: {key: val, ...}, ...}`` with hyphens in keyword names
    converted back to underscores.

    Raises:
        ValueError: If the input is not a single (analysis ...) form.
    """
    sexp = parse(content)
    # parse() may wrap a single top-level form in a list; unwrap it.
    if isinstance(sexp, list) and len(sexp) == 1:
        sexp = sexp[0]
    if not isinstance(sexp, list) or not sexp:
        raise ValueError("Invalid analysis S-expression")
    # Should be (analysis (name ...) (name ...) ...)
    if not isinstance(sexp[0], Symbol) or sexp[0].name != "analysis":
        raise ValueError("Expected (analysis ...) S-expression")
    result = {}
    for item in sexp[1:]:
        if isinstance(item, list) and item:
            # Handle both Symbol names and quoted string names (node IDs)
            first = item[0]
            if isinstance(first, Symbol):
                name = first.name
            elif isinstance(first, str):
                name = first
            else:
                continue  # Skip malformed entries
            data = {}
            # Walk the plist-style tail: consume :keyword value pairs;
            # any non-keyword token where a keyword was expected is skipped.
            i = 1
            while i < len(item):
                if isinstance(item[i], Keyword):
                    key = item[i].name.replace("-", "_")
                    i += 1
                    if i < len(item):
                        data[key] = item[i]
                    i += 1
                else:
                    i += 1
            result[name] = data
    return result
def to_sexp(value, indent=0):
    """Convert a Python value to S-expression string.

    Handles planner-specific types (Binding, compiler binding dicts,
    Symbol, Keyword, Lambda) in addition to plain dicts/lists/scalars.
    NOTE: bool is tested before (int, float) below — bool is an int
    subclass in Python, so that branch order is load-bearing.
    *indent* is accepted but currently unused.
    """
    from artdag.sexp.parser import Lambda
    # Handle Binding objects
    if isinstance(value, Binding):
        # analysis_ref can be a string, node ID, or dict - serialize it properly
        if isinstance(value.analysis_ref, str):
            ref_str = f'"{value.analysis_ref}"'
        else:
            ref_str = to_sexp(value.analysis_ref, 0)
        s = f'(bind {ref_str} :range [{value.range_min} {value.range_max}]'
        if value.transform:
            s += f' :transform {value.transform}'
        return s + ')'
    # Handle binding dicts from compiler (convert to bind sexp format)
    if isinstance(value, dict) and value.get("_binding"):
        source = value.get("source", "")
        range_val = value.get("range", [0.0, 1.0])
        range_min = range_val[0] if isinstance(range_val, list) else 0.0
        range_max = range_val[1] if isinstance(range_val, list) and len(range_val) > 1 else 1.0
        transform = value.get("transform")
        offset = value.get("offset")
        s = f'(bind "{source}" :range [{range_min} {range_max}]'
        if offset:
            s += f' :offset {offset}'
        if transform:
            s += f' :transform {transform}'
        return s + ')'
    # Handle Symbol - serialize as bare identifier
    if isinstance(value, Symbol):
        return value.name
    # Handle Keyword - serialize with colon prefix
    if isinstance(value, Keyword):
        return f':{value.name}'
    # Handle Lambda
    if isinstance(value, Lambda):
        params = " ".join(value.params)
        body = to_sexp(value.body, 0)
        return f'(fn [{params}] {body})'
    prefix = " " * indent  # currently unused; kept for interface stability
    if isinstance(value, dict):
        if not value:
            return "()"
        items = []
        for k, v in value.items():
            if isinstance(k, str):
                # Keys starting with _ are internal markers - keep underscore to avoid :-foo
                if k.startswith('_'):
                    key_str = k  # Keep as-is: _binding -> :_binding
                else:
                    key_str = k.replace('_', '-')
            else:
                key_str = str(k)
            items.append(f":{key_str} {to_sexp(v, 0)}")
        return "(" + " ".join(items) + ")"
    elif isinstance(value, list):
        if not value:
            return "()"
        items = [to_sexp(v, 0) for v in value]
        return "(" + " ".join(items) + ")"
    elif isinstance(value, str):
        # Escape special characters in strings
        escaped = value.replace('\\', '\\\\').replace('"', '\\"').replace('\n', '\\n')
        return f'"{escaped}"'
    elif isinstance(value, bool):
        return "true" if value else "false"
    elif isinstance(value, (int, float)):
        return str(value)
    elif value is None:
        return "nil"
    else:
        # For any unknown type, convert to string and quote it
        return f'"{str(value)}"'
def plan_recipe(recipe_path: Path, output_format: str = "text", output_file: Path = None, analysis_path: Path = None, params: dict = None) -> None:
    """Compile recipe, expand dynamic nodes using analysis, output plan.
    All progress/log messages go to stderr so stdout stays clean for the
    plan itself (supports shell pipelines like analyze | plan | execute).
    Args:
        recipe_path: Path to recipe file
        output_format: Output format (text, json, sexp)
        output_file: Optional output file path
        analysis_path: Optional pre-computed analysis file
        params: Optional dict of name -> value bindings to inject into compilation
    """
    recipe_text = recipe_path.read_text()
    recipe_dir = recipe_path.parent
    print(f"Compiling: {recipe_path}", file=sys.stderr)
    if params:
        print(f"Parameters: {params}", file=sys.stderr)
    compiled = compile_string(recipe_text, params)
    print(f"Recipe: {compiled.name} v{compiled.version}", file=sys.stderr)
    print(f"Nodes: {len(compiled.nodes)}", file=sys.stderr)
    # Load pre-computed analysis if provided (file or stdin with -)
    pre_analysis = None
    if analysis_path:
        if str(analysis_path) == "-":
            # "-" is the conventional marker for "read from stdin".
            print(f"Loading analysis: stdin", file=sys.stderr)
            analysis_text = sys.stdin.read()
        else:
            print(f"Loading analysis: {analysis_path}", file=sys.stderr)
            analysis_text = analysis_path.read_text()
        pre_analysis = parse_analysis_sexp(analysis_text)
        print(f"  Tracks: {list(pre_analysis.keys())}", file=sys.stderr)
    # Track analysis results for embedding in plan
    analysis_data = {}
    def on_analysis(node_id, results):
        # Callback invoked by the planner for each ANALYZE node it runs.
        analysis_data[node_id] = results
        times = results.get("times", [])
        print(f"  Analysis complete: {len(times)} beat times", file=sys.stderr)
    # Create plan (uses pre_analysis or runs analyzers, expands SLICE_ON)
    print("\n--- Planning ---", file=sys.stderr)
    plan = create_plan(
        compiled,
        inputs={},
        recipe_dir=recipe_dir,
        on_analysis=on_analysis,
        pre_analysis=pre_analysis,
    )
    print(f"\nPlan ID: {plan.plan_id[:16]}...", file=sys.stderr)
    print(f"Steps: {len(plan.steps)}", file=sys.stderr)
    # Generate output in the requested format ("text" is the fallback).
    if output_format == "sexp":
        output = generate_sexp_output(compiled, plan, analysis_data)
    elif output_format == "json":
        output = generate_json_output(compiled, plan, analysis_data)
    else:
        output = generate_text_output(compiled, plan, analysis_data)
    # Write output to the file if given, otherwise to stdout.
    if output_file:
        output_file.write_text(output)
        print(f"\nPlan written to: {output_file}", file=sys.stderr)
    else:
        print(output)
class PlanJSONEncoder(json.JSONEncoder):
    """JSON encoder for plan objects.

    Knows how to serialize Binding, Symbol, and Keyword instances as
    tagged dicts (``"_type"`` discriminator); anything else is delegated
    to the base encoder, which raises TypeError as usual.
    """

    def default(self, obj):
        if isinstance(obj, Binding):
            payload = {"_type": "binding"}
            for attr in ("analysis_ref", "track", "range_min", "range_max", "transform"):
                payload[attr] = getattr(obj, attr)
            return payload
        # Symbol and Keyword both serialize as a tagged name.
        for tag, cls in (("symbol", Symbol), ("keyword", Keyword)):
            if isinstance(obj, cls):
                return {"_type": tag, "name": obj.name}
        return super().default(obj)
def generate_json_output(compiled, plan, analysis_data):
    """Generate JSON plan output.

    Args:
        compiled: Compiled recipe (provides ``name`` and ``encoding``).
        plan: Plan object (``plan_id``, ``recipe_hash``, ``output_step_id``,
            ``steps``).
        analysis_data: Mapping of ANALYZE step_id -> analysis results to
            embed into the corresponding step's config.

    Returns:
        Pretty-printed JSON string describing the plan.
    """
    output = {
        "plan_id": plan.plan_id,
        "recipe_id": compiled.name,
        "recipe_hash": plan.recipe_hash,
        "encoding": compiled.encoding,
        "output_step_id": plan.output_step_id,
        "steps": [],
    }
    for step in plan.steps:
        # Copy the config before embedding analysis results: the previous
        # code aliased step.config and mutated the caller's step objects.
        config = dict(step.config)
        if step.node_type == "ANALYZE" and step.step_id in analysis_data:
            config["analysis_results"] = analysis_data[step.step_id]
        output["steps"].append({
            "step_id": step.step_id,
            "node_type": step.node_type,
            "config": config,
            "inputs": step.inputs,
            "level": step.level,
            "cache_id": step.cache_id,
        })
    return json.dumps(output, indent=2, cls=PlanJSONEncoder)
def generate_sexp_output(compiled, plan, analysis_data):
    """Generate S-expression plan output.

    Emits a ``(plan ...)`` form containing encoding, any analysis tracks
    attached to the plan, every step, and the output step id.
    Note: ``analysis_data`` is accepted for signature parity with the other
    generators but is not used here; analysis comes from ``plan.analysis``.
    """
    lines = [
        f'(plan "{compiled.name}"',
        f'  :version "{compiled.version}"',
        f'  :plan-id "{plan.plan_id}"',
    ]
    if compiled.encoding:
        lines.append(f'  :encoding {to_sexp(compiled.encoding)}')
    # Include analysis data for effect parameter bindings
    if plan.analysis:
        lines.append('')
        lines.append('  (analysis')
        for name, data in plan.analysis.items():
            times = data.get("times", [])
            values = data.get("values", [])
            # Full data is emitted inline (no truncation), one track per form.
            times_str = " ".join(str(t) for t in times)
            values_str = " ".join(str(v) for v in values)
            lines.append(f'    ({name}')
            lines.append(f'      :times ({times_str})')
            lines.append(f'      :values ({values_str}))')
        lines.append('  )')
    lines.append('')
    for step in plan.steps:
        lines.append(f'  (step "{step.step_id}"')
        lines.append(f'    :type {step.node_type}')
        lines.append(f'    :level {step.level}')
        lines.append(f'    :cache "{step.cache_id}"')
        if step.inputs:
            inputs_str = " ".join(f'"{i}"' for i in step.inputs)
            lines.append(f'    :inputs ({inputs_str})')
        # Config keys use lisp-style dashes in the serialized form.
        for key, value in step.config.items():
            lines.append(f'    :{key.replace("_", "-")} {to_sexp(value)}')
        lines.append('  )')
        lines.append('')
    lines.append(f'  :output "{plan.output_step_id}")')
    return '\n'.join(lines)
def generate_text_output(compiled, plan, analysis_data):
    """Generate human-readable text output."""
    # Header: recipe identity, optional encoding, then plan summary.
    report = [f"Recipe: {compiled.name} v{compiled.version}"]
    if compiled.encoding:
        report.append(f"Encoding: {compiled.encoding}")
    report += [
        f"\nPlan ID: {plan.plan_id}",
        f"Output: {plan.output_step_id[:16]}...",
        f"\nSteps ({len(plan.steps)}):",
        "-" * 60,
    ]
    # One stanza per step; long ids are abbreviated for readability.
    for step in plan.steps:
        report.append(f"\n[{step.level}] {step.node_type}")
        report.append(f"  id: {step.step_id[:16]}...")
        report.append(f"  cache: {step.cache_id[:16]}...")
        if step.inputs:
            report.append(f"  inputs: {[i[:16] + '...' for i in step.inputs]}")
        for key, value in step.config.items():
            # Embedded analysis payloads are summarized, not dumped.
            if key == "analysis_results":
                report.append(f"  {key}: <{len(value.get('times', []))} times>")
            else:
                report.append(f"  {key}: {value}")
    return '\n'.join(report)
def parse_param(param_str: str) -> tuple:
    """Parse a key=value parameter string.

    The value is coerced to int first, then float; if neither conversion
    succeeds it is kept as a (stripped) string.

    Args:
        param_str: String in format "key=value"
    Returns:
        Tuple of (key, parsed_value) where value is converted to int/float if possible
    Raises:
        ValueError: If the string contains no '='.
    """
    if "=" not in param_str:
        raise ValueError(f"Invalid parameter format: {param_str} (expected key=value)")
    key, _, raw = param_str.partition("=")
    key = key.strip()
    raw = raw.strip()
    # Attempt numeric coercion in order of strictness: int, then float.
    for convert in (int, float):
        try:
            return (key, convert(raw))
        except ValueError:
            continue
    # Neither parse applied - return the raw string.
    return (key, raw)
if __name__ == "__main__":
    import argparse
    # CLI entry point: compile a recipe and emit a plan (sexp by default).
    parser = argparse.ArgumentParser(description="Generate execution plan from recipe")
    parser.add_argument("recipe", type=Path, help="Recipe file (.sexp)")
    parser.add_argument("-o", "--output", type=Path, help="Output file (default: stdout)")
    parser.add_argument("-a", "--analysis", type=Path, help="Pre-computed analysis file (.sexp)")
    parser.add_argument("-p", "--param", action="append", dest="params", metavar="KEY=VALUE",
                        help="Set recipe parameter (can be used multiple times)")
    parser.add_argument("--json", action="store_true", help="Output JSON format")
    parser.add_argument("--text", action="store_true", help="Output human-readable text format")
    args = parser.parse_args()
    if not args.recipe.exists():
        print(f"Recipe not found: {args.recipe}", file=sys.stderr)
        sys.exit(1)
    # "-" means the analysis comes from stdin, so skip the existence check.
    if args.analysis and str(args.analysis) != "-" and not args.analysis.exists():
        print(f"Analysis file not found: {args.analysis}", file=sys.stderr)
        sys.exit(1)
    # Parse parameters (repeatable -p KEY=VALUE flags)
    params = {}
    if args.params:
        for param_str in args.params:
            try:
                key, value = parse_param(param_str)
                params[key] = value
            except ValueError as e:
                print(f"Error: {e}", file=sys.stderr)
                sys.exit(1)
    # Format precedence: --json, then --text, otherwise sexp.
    if args.json:
        fmt = "json"
    elif args.text:
        fmt = "text"
    else:
        fmt = "sexp"
    # Pass None (not {}) when no params were given, per plan_recipe's API.
    plan_recipe(args.recipe, fmt, args.output, args.analysis, params or None)

122
run-effect.sh Normal file
View File

@@ -0,0 +1,122 @@
#!/bin/bash
# Run a single effect by number (0-41, one index per EFFECTS entry below)
# Usage: ./run-effect.sh <effect-number>
#
# Note: For effects with simple numeric params, you can also use native params:
#   python3 plan.py recipe-parametric.sexp -p strength=5 -p amount=30 | python3 execute.py - -d . -o output.mp4

# Default to effect 0 when no argument is given.
EFFECT_NUM=${1:-0}

# Effect definitions array (array index == effect number).
EFFECTS=(
"(effect invert)"
"(effect grayscale)"
"(effect sepia)"
"(effect brightness :amount 30)"
"(effect contrast :amount 1.5)"
"(effect saturation :amount 2.0)"
"(effect hue_shift :degrees 90)"
"(effect color_cycle :speed 2)"
"(effect threshold :level 128)"
"(effect posterize :levels 6)"
"(effect blur :radius 10)"
"(effect sharpen :amount 2)"
"(effect bloom :intensity 0.6 :radius 20)"
"(effect color-adjust :brightness 20 :contrast 1.2)"
"(effect swirl :strength 3)"
"(effect fisheye :strength 0.5)"
"(effect wave :amplitude 30 :wavelength 60)"
"(effect ripple :amplitude 20 :frequency 6)"
"(effect kaleidoscope :segments 6 :rotation_speed 30)"
"(effect zoom :factor 1.2)"
"(effect rotate :angle 15)"
"(effect mirror :direction \"horizontal\")"
"(effect pixelate :block_size 16)"
"(effect ascii_art :char_size 8 :color_mode \"color\")"
"(effect ascii_zones :char_size 10)"
"(effect edge_detect :low 50 :high 150)"
"(effect emboss :strength 1.5)"
"(effect outline :thickness 2)"
"(effect neon_glow :glow_radius 20 :glow_intensity 2)"
"(effect crt :line_spacing 3 :vignette_amount 0.3)"
"(effect scanlines :spacing 3 :intensity 0.4)"
"(effect film_grain :intensity 0.25)"
"(effect vignette :strength 0.6)"
"(effect noise :amount 40)"
"(effect rgb_split :offset_x 20)"
"(effect echo :num_echoes 4 :decay 0.5)"
"(effect trails :persistence 0.7)"
"(effect strobe :frequency 4)"
"(effect flip :direction \"horizontal\")"
"(effect tile_grid :rows 2 :cols 2)"
"(effect pixelsort :threshold_low 30 :threshold_high 220)"
"(effect datamosh :corruption 0.5 :block_size 24)"
)

# Bounds check against the actual array length, so the valid range stays
# correct if effects are added or removed above.
if [ "$EFFECT_NUM" -lt 0 ] || [ "$EFFECT_NUM" -ge ${#EFFECTS[@]} ]; then
echo "Effect number must be 0-$((${#EFFECTS[@]}-1))"
exit 1
fi

EFFECT="${EFFECTS[$EFFECT_NUM]}"
echo "Running effect $EFFECT_NUM: $EFFECT"

# Create temp recipe with selected effect.
# The heredoc delimiter is unquoted on purpose: $EFFECT expands inside it.
cat > /tmp/recipe-temp.sexp << EOF
(recipe "effect-test"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
(effect ascii_art :path "sexp_effects/effects/ascii_art.sexp")
(effect ascii_zones :path "sexp_effects/effects/ascii_zones.sexp")
(effect bloom :path "sexp_effects/effects/bloom.sexp")
(effect blur :path "sexp_effects/effects/blur.sexp")
(effect brightness :path "sexp_effects/effects/brightness.sexp")
(effect color-adjust :path "sexp_effects/effects/color-adjust.sexp")
(effect color_cycle :path "sexp_effects/effects/color_cycle.sexp")
(effect contrast :path "sexp_effects/effects/contrast.sexp")
(effect crt :path "sexp_effects/effects/crt.sexp")
(effect datamosh :path "sexp_effects/effects/datamosh.sexp")
(effect echo :path "sexp_effects/effects/echo.sexp")
(effect edge_detect :path "sexp_effects/effects/edge_detect.sexp")
(effect emboss :path "sexp_effects/effects/emboss.sexp")
(effect film_grain :path "sexp_effects/effects/film_grain.sexp")
(effect fisheye :path "sexp_effects/effects/fisheye.sexp")
(effect flip :path "sexp_effects/effects/flip.sexp")
(effect grayscale :path "sexp_effects/effects/grayscale.sexp")
(effect hue_shift :path "sexp_effects/effects/hue_shift.sexp")
(effect invert :path "sexp_effects/effects/invert.sexp")
(effect kaleidoscope :path "sexp_effects/effects/kaleidoscope.sexp")
(effect mirror :path "sexp_effects/effects/mirror.sexp")
(effect neon_glow :path "sexp_effects/effects/neon_glow.sexp")
(effect noise :path "sexp_effects/effects/noise.sexp")
(effect outline :path "sexp_effects/effects/outline.sexp")
(effect pixelate :path "sexp_effects/effects/pixelate.sexp")
(effect pixelsort :path "sexp_effects/effects/pixelsort.sexp")
(effect posterize :path "sexp_effects/effects/posterize.sexp")
(effect rgb_split :path "sexp_effects/effects/rgb_split.sexp")
(effect ripple :path "sexp_effects/effects/ripple.sexp")
(effect rotate :path "sexp_effects/effects/rotate.sexp")
(effect saturation :path "sexp_effects/effects/saturation.sexp")
(effect scanlines :path "sexp_effects/effects/scanlines.sexp")
(effect sepia :path "sexp_effects/effects/sepia.sexp")
(effect sharpen :path "sexp_effects/effects/sharpen.sexp")
(effect strobe :path "sexp_effects/effects/strobe.sexp")
(effect swirl :path "sexp_effects/effects/swirl.sexp")
(effect threshold :path "sexp_effects/effects/threshold.sexp")
(effect tile_grid :path "sexp_effects/effects/tile_grid.sexp")
(effect trails :path "sexp_effects/effects/trails.sexp")
(effect vignette :path "sexp_effects/effects/vignette.sexp")
(effect wave :path "sexp_effects/effects/wave.sexp")
(effect zoom :path "sexp_effects/effects/zoom.sexp")
(def video (source :path "monday.webm"))
(def audio (source :path "dizzy.mp3"))
(def clip (-> video (segment :start 0 :duration 10)))
(def audio-clip (-> audio (segment :start 0 :duration 10)))
(def result (-> clip $EFFECT))
(mux result audio-clip))
EOF

# Plan then execute the temp recipe; output named after the effect number.
python3 plan.py /tmp/recipe-temp.sexp | python3 execute.py - -d . -o "effect-${EFFECT_NUM}.mp4"
echo "Output: effect-${EFFECT_NUM}.mp4"

7
run-file.sh Executable file
View File

@@ -0,0 +1,7 @@
#!/bin/bash
# Run the full pipeline: analyze -> plan -> execute, writing output.mp4
# (unlike run.sh, this variant does NOT launch a player afterwards)
# Usage: ./run-file.sh recipe.sexp
RECIPE="${1:-recipe-bound.sexp}"
# Use python3 consistently (the other pipeline scripts invoke python3;
# bare "python" may be missing or point at Python 2 on some systems).
python3 analyze.py "$RECIPE" | python3 plan.py "$RECIPE" -a - | python3 execute.py - -d "$(dirname "$RECIPE")" -o output.mp4

127
run.py Executable file
View File

@@ -0,0 +1,127 @@
#!/usr/bin/env python3
"""
Run a recipe: plan then execute.
This is a convenience wrapper that:
1. Generates a plan (runs analyzers, expands SLICE_ON)
2. Executes the plan (produces video output)
"""
import json
import sys
import tempfile
from pathlib import Path
# Add artdag to path
sys.path.insert(0, str(Path(__file__).parent.parent / "artdag"))
from artdag.sexp import compile_string
from artdag.sexp.planner import create_plan
from artdag.sexp.parser import Binding
# Import execute functionality
from execute import execute_plan
class PlanEncoder(json.JSONEncoder):
    """JSON encoder that handles Binding objects.

    A Binding is flattened to its analysis reference plus the value range
    it maps onto; every other unknown type defers to the base encoder.
    """

    def default(self, obj):
        if not isinstance(obj, Binding):
            return super().default(obj)
        return {
            "_bind": obj.analysis_ref,
            "range_min": obj.range_min,
            "range_max": obj.range_max,
        }
def run_recipe(recipe_path: Path, output_path: Path = None):
    """Run a recipe file: plan then execute.

    Compiles the recipe, generates an execution plan (running any
    analyzers), serializes the plan to a temp JSON file, and hands it to
    execute_plan. Returns whatever execute_plan returns (the output path).
    """
    recipe_text = recipe_path.read_text()
    recipe_dir = recipe_path.parent
    print(f"=== COMPILE ===")
    print(f"Recipe: {recipe_path}")
    compiled = compile_string(recipe_text)
    print(f"Name: {compiled.name} v{compiled.version}")
    print(f"Nodes: {len(compiled.nodes)}")
    # Track analysis results
    analysis_data = {}
    def on_analysis(node_id, results):
        # Planner callback: record results keyed by ANALYZE node id.
        analysis_data[node_id] = results
        times = results.get("times", [])
        print(f"  Analysis: {len(times)} beat times @ {results.get('tempo', 0):.1f} BPM")
    # Generate plan
    print(f"\n=== PLAN ===")
    plan = create_plan(
        compiled,
        inputs={},
        recipe_dir=recipe_dir,
        on_analysis=on_analysis,
    )
    print(f"Plan ID: {plan.plan_id[:16]}...")
    print(f"Steps: {len(plan.steps)}")
    # Write plan to temp file for execute
    plan_dict = {
        "plan_id": plan.plan_id,
        "recipe_id": compiled.name,
        "recipe_hash": plan.recipe_hash,
        "encoding": compiled.encoding,
        "output_step_id": plan.output_step_id,
        "steps": [],
    }
    for step in plan.steps:
        step_dict = {
            "step_id": step.step_id,
            "node_type": step.node_type,
            "config": step.config,
            "inputs": step.inputs,
            "level": step.level,
            "cache_id": step.cache_id,
        }
        # NOTE(review): step_dict["config"] aliases step.config, so embedding
        # analysis_results below mutates the plan's step object — confirm
        # nothing reuses the plan after this serialization.
        if step.node_type == "ANALYZE" and step.step_id in analysis_data:
            step_dict["config"]["analysis_results"] = analysis_data[step.step_id]
        plan_dict["steps"].append(step_dict)
    # Save plan to a fresh temp directory (not cleaned up here).
    work_dir = Path(tempfile.mkdtemp(prefix="artdag_run_"))
    plan_file = work_dir / "plan.json"
    with open(plan_file, "w") as f:
        json.dump(plan_dict, f, indent=2, cls=PlanEncoder)
    print(f"Plan saved: {plan_file}")
    # Execute plan
    print(f"\n=== EXECUTE ===")
    result = execute_plan(plan_file, output_path, recipe_dir)
    print(f"\n=== DONE ===")
    print(f"Output: {result}")
    return result
if __name__ == "__main__":
    # CLI entry point: run.py <recipe.sexp> [output.mp4]
    if len(sys.argv) < 2:
        print("Usage: run.py <recipe.sexp> [output.mp4]")
        print()
        print("Commands:")
        print("  run.py <recipe>     - Plan and execute recipe")
        print("  plan.py <recipe>    - Generate plan only")
        print("  execute.py <plan>   - Execute pre-generated plan")
        sys.exit(1)
    recipe_path = Path(sys.argv[1])
    # Output path is optional; execute_plan picks a default when None.
    output_path = Path(sys.argv[2]) if len(sys.argv) > 2 else None
    if not recipe_path.exists():
        print(f"Recipe not found: {recipe_path}")
        sys.exit(1)
    run_recipe(recipe_path, output_path)

7
run.sh Executable file
View File

@@ -0,0 +1,7 @@
#!/bin/bash
# Run the full pipeline: analyze -> plan -> execute -> play (fullscreen mpv)
# Usage: ./run.sh recipe.sexp
RECIPE="${1:-recipe-bound.sexp}"
# Use python3 for every stage (the original mixed python3 and bare
# "python", which breaks on systems without a "python" alias).
python3 analyze.py "$RECIPE" | python3 plan.py "$RECIPE" -a - | python3 execute.py - -d "$(dirname "$RECIPE")" | xargs mpv --fs

528
run_staged.py Normal file
View File

@@ -0,0 +1,528 @@
#!/usr/bin/env python3
"""
Run a staged recipe through analyze -> plan -> execute pipeline.
This script demonstrates stage-level caching: analysis stages can be
skipped on re-run if the inputs haven't changed.
Usage:
python3 run_staged.py recipe.sexp [-o output.mp4]
python3 run_staged.py effects/ascii_art_staged.sexp -o ascii_out.mp4
The script:
1. Compiles the recipe and extracts stage information
2. For each stage in topological order:
- Check stage cache (skip if hit)
- Run stage (analyze, plan, execute)
- Cache stage outputs
3. Produce final output
"""
import os
import sys
import json
import tempfile
import shutil
import subprocess
from pathlib import Path
from typing import Dict, List, Optional, Any
# Add artdag to path
sys.path.insert(0, str(Path(__file__).parent.parent / "artdag"))
from artdag.sexp import compile_string, parse
from artdag.sexp.parser import Symbol, Keyword, serialize
from artdag.sexp.planner import create_plan
# Import unified cache
import cache as unified_cache
import hashlib
def _cache_analysis_tracks(plan):
    """Cache each analysis track individually, replace data with cache-id refs.

    Each track is serialized to canonical (sorted-key) JSON so identical
    data always hashes to the same SHA-256 content id; the full payload is
    stored in the unified cache and the in-plan entry is replaced with a
    small ``{"_cache_id": ...}`` reference.
    """
    # Uses the module-level `json` import directly; the previous body
    # re-imported it locally as `_json` for no reason.
    for name, data in plan.analysis.items():
        canonical = json.dumps(data, sort_keys=True)
        content_cid = hashlib.sha256(canonical.encode()).hexdigest()
        unified_cache.cache_store_json(content_cid, data)
        # Reassigning an existing key while iterating items() is safe.
        plan.analysis[name] = {"_cache_id": content_cid}
def _resolve_analysis_refs(analysis_dict):
    """Resolve cache-id refs back to full analysis data."""
    out = {}
    for name, entry in analysis_dict.items():
        resolved = entry
        if isinstance(entry, dict) and "_cache_id" in entry:
            cached = unified_cache.cache_get_json(entry["_cache_id"])
            # NOTE(review): a falsy payload (e.g. empty dict) falls through
            # to the raw ref — presumably cache_get_json returns None on a
            # miss; confirm empty tracks can't occur here.
            if cached:
                resolved = cached
        out[name] = resolved
    return out
def run_staged_recipe(
    recipe_path: Path,
    output_path: Optional[Path] = None,
    cache_dir: Optional[Path] = None,
    params: Optional[Dict[str, Any]] = None,
    verbose: bool = True,
    force_replan: bool = False,
) -> Path:
    """
    Run a staged recipe with stage-level caching.
    Args:
        recipe_path: Path to the .sexp recipe file
        output_path: Optional output file path
        cache_dir: Optional cache directory for stage results
            (NOTE(review): currently unused — the unified cache's content
            dir is always used; confirm whether this should override it)
        params: Optional parameter overrides
        verbose: Print progress information
        force_replan: Ignore any cached plan and re-plan from scratch
    Returns:
        Path to the final output file
    """
    recipe_text = recipe_path.read_text()
    recipe_dir = recipe_path.parent
    # Use unified cache
    content_cache_dir = unified_cache.get_content_dir()
    def log(msg: str):
        # All progress output goes to stderr; stdout is for the result path.
        if verbose:
            print(msg, file=sys.stderr)
    # Store recipe source by CID
    recipe_cid, _ = unified_cache.content_store_string(recipe_text)
    log(f"Recipe CID: {recipe_cid[:16]}...")
    # Compile recipe
    log(f"Compiling: {recipe_path}")
    compiled = compile_string(recipe_text, params, recipe_dir=recipe_dir)
    log(f"Recipe: {compiled.name} v{compiled.version}")
    log(f"Nodes: {len(compiled.nodes)}")
    # Store effects by CID (warn when the stored hash disagrees with the
    # CID recorded at compile time — the file changed underneath us).
    for effect_name, effect_info in compiled.registry.get("effects", {}).items():
        effect_path = effect_info.get("path")
        effect_cid = effect_info.get("cid")
        if effect_path and effect_cid:
            effect_file = Path(effect_path)
            if effect_file.exists():
                stored_cid, _ = unified_cache.content_store_file(effect_file)
                if stored_cid == effect_cid:
                    log(f"Effect '{effect_name}' CID: {effect_cid[:16]}...")
                else:
                    log(f"Warning: Effect '{effect_name}' CID mismatch")
    # Store analyzers by CID (paths may be relative to the recipe dir)
    for analyzer_name, analyzer_info in compiled.registry.get("analyzers", {}).items():
        analyzer_path = analyzer_info.get("path")
        analyzer_cid = analyzer_info.get("cid")
        if analyzer_path:
            analyzer_file = Path(analyzer_path) if Path(analyzer_path).is_absolute() else recipe_dir / analyzer_path
            if analyzer_file.exists():
                stored_cid, _ = unified_cache.content_store_file(analyzer_file)
                log(f"Analyzer '{analyzer_name}' CID: {stored_cid[:16]}...")
    # Store included files by CID
    for include_path, include_cid in compiled.registry.get("includes", {}).items():
        include_file = Path(include_path)
        if include_file.exists():
            stored_cid, _ = unified_cache.content_store_file(include_file)
            if stored_cid == include_cid:
                log(f"Include '{include_file.name}' CID: {include_cid[:16]}...")
            else:
                log(f"Warning: Include '{include_file.name}' CID mismatch")
    # Check for stages
    if not compiled.stages:
        log("No stages found - running as regular recipe")
        return _run_non_staged(compiled, recipe_dir, output_path, verbose)
    log(f"\nStages: {len(compiled.stages)}")
    log(f"Stage order: {compiled.stage_order}")
    # Display stage info
    for stage in compiled.stages:
        log(f"\n  Stage: {stage.name}")
        log(f"    Requires: {stage.requires or '(none)'}")
        log(f"    Inputs: {stage.inputs or '(none)'}")
        log(f"    Outputs: {stage.outputs}")
    # Create plan with analysis
    log("\n--- Planning ---")
    analysis_data = {}
    def on_analysis(node_id: str, results: dict):
        # Planner callback: collect analyzer results keyed by node id.
        analysis_data[node_id] = results
        times = results.get("times", [])
        log(f"  Analysis complete: {node_id[:16]}... ({len(times)} times)")
    # Check for cached plan using unified cache
    plan_cid = unified_cache.plan_exists(recipe_cid, params)
    if plan_cid and not force_replan:
        # Fast path: reuse the plan keyed by (recipe CID, params).
        plan_cache_path = unified_cache.plan_get_path(recipe_cid, params)
        log(f"\nFound cached plan: {plan_cid[:16]}...")
        plan_sexp_str = unified_cache.plan_load(recipe_cid, params)
        # Parse the cached plan
        from execute import parse_plan_input
        plan_dict = parse_plan_input(plan_sexp_str)
        # Resolve cache-id refs in plan's embedded analysis
        if "analysis" in plan_dict:
            plan_dict["analysis"] = _resolve_analysis_refs(plan_dict["analysis"])
        # Load analysis data from unified cache
        analysis_data = {}
        for step in plan_dict.get("steps", []):
            if step.get("node_type") == "ANALYZE":
                step_id = step.get("step_id")
                cached_analysis = unified_cache.cache_get_json(step_id)
                if cached_analysis:
                    analysis_data[step_id] = cached_analysis
                    log(f"  Loaded analysis: {step_id[:16]}...")
        log(f"Plan ID: {plan_dict.get('plan_id', 'unknown')[:16]}...")
        log(f"Steps: {len(plan_dict.get('steps', []))}")
        log(f"Analysis tracks: {list(analysis_data.keys())}")
        # Execute directly from cached plan
        log("\n--- Execution (from cached plan) ---")
        from execute import execute_plan
        result_path = execute_plan(
            plan_path=plan_cache_path,
            output_path=output_path,
            recipe_dir=recipe_dir,
            external_analysis=analysis_data,
            cache_dir=content_cache_dir,
        )
        log(f"\n--- Complete ---")
        log(f"Output: {result_path}")
        return result_path
    # No cached plan - create new one
    plan = create_plan(
        compiled,
        inputs={},
        recipe_dir=recipe_dir,
        on_analysis=on_analysis,
    )
    log(f"\nPlan ID: {plan.plan_id[:16]}...")
    log(f"Steps: {len(plan.steps)}")
    log(f"Analysis tracks: {list(analysis_data.keys())}")
    # Cache analysis tracks individually and replace with cache-id refs
    _cache_analysis_tracks(plan)
    # Save plan to unified cache
    plan_sexp_str = plan.to_string(pretty=True)
    plan_cache_id, plan_cid, plan_cache_path = unified_cache.plan_store(recipe_cid, params, plan_sexp_str)
    log(f"Saved plan: {plan_cache_id[:16]}... → {plan_cid[:16]}...")
    # Execute the plan using execute.py logic
    log("\n--- Execution ---")
    from execute import execute_plan
    # Resolve cache-id refs back to full data for execution
    resolved_analysis = _resolve_analysis_refs(plan.analysis)
    plan_dict = {
        "plan_id": plan.plan_id,
        "source_hash": plan.source_hash,
        "encoding": compiled.encoding,
        "output_step_id": plan.output_step_id,
        # Freshly-run analyzer results win over resolved refs on key clash.
        "analysis": {**resolved_analysis, **analysis_data},
        "effects_registry": plan.effects_registry,
        "minimal_primitives": plan.minimal_primitives,
        "steps": [],
    }
    for step in plan.steps:
        step_dict = {
            "step_id": step.step_id,
            "node_type": step.node_type,
            "config": step.config,
            "inputs": step.inputs,
            "level": step.level,
            "cache_id": step.cache_id,
        }
        # Tag with stage info if present
        if step.stage:
            step_dict["stage"] = step.stage
        plan_dict["steps"].append(step_dict)
    # Execute using unified cache
    result_path = execute_plan(
        plan_path=None,
        output_path=output_path,
        recipe_dir=recipe_dir,
        plan_data=plan_dict,
        external_analysis=analysis_data,
        cache_dir=content_cache_dir,
    )
    log(f"\n--- Complete ---")
    log(f"Output: {result_path}")
    return result_path
def _run_non_staged(compiled, recipe_dir: Path, output_path: Optional[Path], verbose: bool) -> Path:
"""Run a non-staged recipe using the standard pipeline."""
from execute import execute_plan
from plan import plan_recipe
# This is a fallback for recipes without stages
# Just run through regular plan -> execute
raise NotImplementedError("Non-staged recipes should use plan.py | execute.py")
def list_cache(verbose: bool = False):
    """List all cached items using the unified cache.

    Thin wrapper over the unified cache's own listing routine.

    Args:
        verbose: Passed through; enables per-entry detail in the listing.
    """
    unified_cache.print_cache_listing(verbose)
def list_params(recipe_path: Path):
    """List available parameters for a recipe and its effects.

    Walks the recipe's top-level forms for a ``:params`` block and for
    ``(effect name :path "...")`` declarations, loads each effect's param
    definitions, and prints everything as tables with usage hints.

    Args:
        recipe_path: Path to the .sexp recipe file.
    """
    from artdag.sexp import parse
    from artdag.sexp.parser import Symbol, Keyword
    from artdag.sexp.compiler import _parse_params
    from artdag.sexp.effect_loader import load_sexp_effect_file
    recipe_text = recipe_path.read_text()
    sexp = parse(recipe_text)
    # A file containing a single top-level form parses as a 1-element list.
    if isinstance(sexp, list) and len(sexp) == 1:
        sexp = sexp[0]
    # Find recipe name
    recipe_name = sexp[1] if len(sexp) > 1 and isinstance(sexp[1], str) else recipe_path.stem
    # Find :params block and effect declarations
    recipe_params = []
    effect_declarations = {}  # name -> path
    i = 2
    while i < len(sexp):
        item = sexp[i]
        if isinstance(item, Keyword) and item.name == "params":
            if i + 1 < len(sexp):
                recipe_params = _parse_params(sexp[i + 1])
            i += 2  # skip the keyword and its value
        elif isinstance(item, list) and item:
            # Check for effect declaration: (effect name :path "...")
            if isinstance(item[0], Symbol) and item[0].name == "effect":
                if len(item) >= 2:
                    effect_name = item[1].name if isinstance(item[1], Symbol) else item[1]
                    # Find :path
                    j = 2
                    while j < len(item):
                        if isinstance(item[j], Keyword) and item[j].name == "path":
                            if j + 1 < len(item):
                                effect_declarations[effect_name] = item[j + 1]
                            break
                        j += 1
            i += 1
        else:
            i += 1
    # Load effect params (best-effort: a broken effect file only warns)
    effect_params = {}  # effect_name -> list of ParamDef
    recipe_dir = recipe_path.parent
    for effect_name, effect_rel_path in effect_declarations.items():
        effect_path = recipe_dir / effect_rel_path
        if effect_path.exists() and effect_path.suffix == ".sexp":
            try:
                _, _, _, param_defs = load_sexp_effect_file(effect_path)
                if param_defs:
                    effect_params[effect_name] = param_defs
            except Exception as e:
                print(f"Warning: Could not load params from effect {effect_name}: {e}", file=sys.stderr)
    # Print results
    def print_params(params, header_prefix=""):
        # Fixed-width table: name, type, default, range/choices, description.
        print(f"{header_prefix}{'Name':<20} {'Type':<8} {'Default':<12} {'Range/Choices':<20} Description")
        print(f"{header_prefix}{'-' * 88}")
        for p in params:
            range_str = ""
            if p.range_min is not None and p.range_max is not None:
                range_str = f"[{p.range_min}, {p.range_max}]"
            elif p.choices:
                range_str = ", ".join(p.choices[:3])
                if len(p.choices) > 3:
                    range_str += "..."
            default_str = str(p.default) if p.default is not None else "-"
            if len(default_str) > 10:
                # Truncate long defaults and mark the cut with an ellipsis
                # (the marker had been lost, leaving a silent 9-char cut).
                default_str = default_str[:9] + "…"
            print(f"{header_prefix}{p.name:<20} {p.param_type:<8} {default_str:<12} {range_str:<20} {p.description}")
    if recipe_params:
        print(f"\nRecipe parameters for '{recipe_name}':\n")
        print_params(recipe_params)
    else:
        print(f"\nRecipe '{recipe_name}' has no declared parameters.")
    if effect_params:
        for effect_name, params in effect_params.items():
            print(f"\n\nEffect '{effect_name}' parameters:\n")
            print_params(params)
    if not recipe_params and not effect_params:
        # Nothing declared anywhere: show how to declare params and stop.
        print("\nParameters can be declared using :params block:")
        print("""
  :params (
    (color_mode :type string :default "color" :desc "Character color")
    (char_size :type int :default 12 :range [4 32] :desc "Cell size")
  )
""")
        return
    print("\n\nUsage:")
    print(f"  python3 run_staged.py {recipe_path} -p <name>=<value> [-p <name>=<value> ...]")
    print(f"\nExample:")
    all_params = recipe_params + [p for params in effect_params.values() for p in params]
    if all_params:
        p = all_params[0]
        example_val = p.default if p.default else ("value" if p.param_type == "string" else "1")
        print(f"  python3 run_staged.py {recipe_path} -p {p.name}={example_val}")
def main():
    """CLI entry point for run_staged.py: parse args and dispatch."""
    import argparse
    parser = argparse.ArgumentParser(
        description="Run a staged recipe with stage-level caching",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python3 run_staged.py effects/ascii_art_fx_staged.sexp --list-params
  python3 run_staged.py effects/ascii_art_fx_staged.sexp -o output.mp4
  python3 run_staged.py recipe.sexp -p color_mode=lime -p char_jitter=5
"""
    )
    parser.add_argument("recipe", type=Path, nargs="?", help="Recipe file (.sexp)")
    parser.add_argument("-o", "--output", type=Path, help="Output file path")
    parser.add_argument("-p", "--param", action="append", dest="params",
                        metavar="KEY=VALUE", help="Set recipe parameter")
    parser.add_argument("-q", "--quiet", action="store_true", help="Suppress progress output")
    parser.add_argument("--list-params", action="store_true", help="List available parameters and exit")
    parser.add_argument("--list-cache", action="store_true", help="List cached items and exit")
    parser.add_argument("--no-cache", action="store_true", help="Ignore cached plan, force re-planning")
    parser.add_argument("--show-plan", action="store_true", help="Show the plan S-expression and exit (don't execute)")
    parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output")
    parser.add_argument("-j", "--jobs", type=int, default=None,
                        help="Max parallel workers (default: 4, or ARTDAG_WORKERS env)")
    parser.add_argument("--pipelines", type=int, default=None,
                        help="Max concurrent video pipelines (default: 1, or ARTDAG_VIDEO_PIPELINES env)")
    args = parser.parse_args()
    # Apply concurrency limits before any execution
    if args.jobs is not None:
        os.environ["ARTDAG_WORKERS"] = str(args.jobs)
    if args.pipelines is not None:
        os.environ["ARTDAG_VIDEO_PIPELINES"] = str(args.pipelines)
        # Import deferred so the env var above is set before execute loads.
        from execute import set_max_video_pipelines
        set_max_video_pipelines(args.pipelines)
    # List cache mode - doesn't require recipe
    if args.list_cache:
        list_cache(verbose=args.verbose)
        sys.exit(0)
    # All other modes require a recipe
    if not args.recipe:
        print("Error: recipe file required", file=sys.stderr)
        sys.exit(1)
    if not args.recipe.exists():
        print(f"Recipe not found: {args.recipe}", file=sys.stderr)
        sys.exit(1)
    # List params mode
    if args.list_params:
        list_params(args.recipe)
        sys.exit(0)
    # Parse parameters (repeatable -p KEY=VALUE; values coerced int->float->str)
    params = {}
    if args.params:
        for param_str in args.params:
            if "=" not in param_str:
                print(f"Invalid parameter format: {param_str}", file=sys.stderr)
                sys.exit(1)
            key, value = param_str.split("=", 1)
            # Try to parse as number
            try:
                value = int(value)
            except ValueError:
                try:
                    value = float(value)
                except ValueError:
                    pass  # Keep as string
            params[key] = value
    # Show plan mode - generate plan and display without executing
    if args.show_plan:
        recipe_text = args.recipe.read_text()
        recipe_dir = args.recipe.parent
        # Compute recipe CID (content hash)
        recipe_cid, _ = unified_cache.content_store_string(recipe_text)
        # NOTE(review): `compiled` is unused on the cached-plan path below —
        # presumably compiled here for validation side effects; confirm.
        compiled = compile_string(recipe_text, params if params else None, recipe_dir=recipe_dir)
        # Check for cached plan using unified cache (keyed by source CID + params)
        plan_cid = unified_cache.plan_exists(recipe_cid, params if params else None)
        if plan_cid and not args.no_cache:
            print(f";; Cached plan CID: {plan_cid}", file=sys.stderr)
            plan_sexp_str = unified_cache.plan_load(recipe_cid, params if params else None)
            print(plan_sexp_str)
        else:
            print(f";; Generating new plan...", file=sys.stderr)
            analysis_data = {}
            def on_analysis(node_id: str, results: dict):
                analysis_data[node_id] = results
            plan = create_plan(
                compiled,
                inputs={},
                recipe_dir=recipe_dir,
                on_analysis=on_analysis,
            )
            # Cache analysis tracks individually before serialization
            _cache_analysis_tracks(plan)
            plan_sexp_str = plan.to_string(pretty=True)
            # Save to unified cache
            cache_id, plan_cid, plan_path = unified_cache.plan_store(recipe_cid, params if params else None, plan_sexp_str)
            print(f";; Saved: {cache_id[:16]}... → {plan_cid}", file=sys.stderr)
            print(plan_sexp_str)
        sys.exit(0)
    # Default mode: full staged run (plan + execute with caching).
    result = run_staged_recipe(
        recipe_path=args.recipe,
        output_path=args.output,
        params=params if params else None,
        verbose=not args.quiet,
        force_replan=args.no_cache,
    )
    # Print final output path (stdout only, so it can be piped/xargs'd)
    print(result)


if __name__ == "__main__":
    main()

32
sexp_effects/__init__.py Normal file
View File

@@ -0,0 +1,32 @@
"""
S-Expression Effects System
Safe, shareable effects defined in S-expressions.
"""
from .parser import parse, parse_file, Symbol, Keyword
from .interpreter import (
Interpreter,
get_interpreter,
load_effect,
load_effects_dir,
run_effect,
list_effects,
make_process_frame,
)
from .primitives import PRIMITIVES
__all__ = [
'parse',
'parse_file',
'Symbol',
'Keyword',
'Interpreter',
'get_interpreter',
'load_effect',
'load_effects_dir',
'run_effect',
'list_effects',
'make_process_frame',
'PRIMITIVES',
]

View File

@@ -0,0 +1,17 @@
;; ASCII Art effect - converts image to ASCII characters.
;; Pipeline: sample the frame into char_size cells, map each cell's
;; luminance to a character from the chosen alphabet, then render the
;; character grid back over the frame.
(require-primitives "ascii")
(define-effect ascii_art
  :params (
    (char_size :type int :default 8 :range [4 32]
               :desc "Size of each character cell in pixels")
    (alphabet :type string :default "standard"
              :desc "Character set to use")
    ;; FIX: the original :desc contained unescaped nested double quotes
    ;; (""color", "mono", ...") which corrupted the string literal;
    ;; reworded without inner quotes (matches ascii_art_fx's desc).
    (color_mode :type string :default "color"
                :desc "Color mode: color, mono, invert, or any color name/hex")
    (background_color :type string :default "black"
                      :desc "background color name/hex")
    (invert_colors :type int :default 0
                   :desc "swap foreground and background colors")
    (contrast :type float :default 1.5 :range [1 3]
              :desc "Character selection contrast")
  )
  (let* ((sample (cell-sample frame char_size))
         (colors (nth sample 0))
         (luminances (nth sample 1))
         (chars (luminance-to-chars luminances alphabet contrast)))
    (render-char-grid frame chars colors char_size color_mode background_color invert_colors)))

View File

@@ -0,0 +1,52 @@
;; ASCII Art FX - converts image to ASCII characters with per-character effects
;; Pipeline: cell-sample the frame, map luminance to characters, then render
;; with optional per-character jitter/scale/rotation/hue-shift, each driven by
;; a selectable modulation source (see *_source params below).
(require-primitives "ascii")
(define-effect ascii_art_fx
:params (
;; Basic parameters
(char_size :type int :default 8 :range [4 32]
:desc "Size of each character cell in pixels")
(alphabet :type string :default "standard"
:desc "Character set to use")
(color_mode :type string :default "color"
:choices [color mono invert]
:desc "Color mode: color, mono, invert, or any color name/hex")
(background_color :type string :default "black"
:desc "Background color name or hex value")
(invert_colors :type int :default 0 :range [0 1]
:desc "Swap foreground and background colors (0/1)")
(contrast :type float :default 1.5 :range [1 3]
:desc "Character selection contrast")
;; Per-character effects
(char_jitter :type float :default 0 :range [0 20]
:desc "Position jitter amount in pixels")
(char_scale :type float :default 1.0 :range [0.5 2.0]
:desc "Character scale factor")
(char_rotation :type float :default 0 :range [0 180]
:desc "Rotation amount in degrees")
(char_hue_shift :type float :default 0 :range [0 360]
:desc "Hue shift in degrees")
;; Modulation sources
(jitter_source :type string :default "none"
:choices [none luminance inv_luminance saturation position_x position_y position_diag random center_dist]
:desc "What drives jitter modulation")
(scale_source :type string :default "none"
:choices [none luminance inv_luminance saturation position_x position_y position_diag random center_dist]
:desc "What drives scale modulation")
(rotation_source :type string :default "none"
:choices [none luminance inv_luminance saturation position_x position_y position_diag random center_dist]
:desc "What drives rotation modulation")
(hue_source :type string :default "none"
:choices [none luminance inv_luminance saturation position_x position_y position_diag random center_dist]
:desc "What drives hue shift modulation")
)
;; sample is a 2-element list: (colors luminances) per cell.
(let* ((sample (cell-sample frame char_size))
(colors (nth sample 0))
(luminances (nth sample 1))
(chars (luminance-to-chars luminances alphabet contrast)))
;; render-char-grid-fx also receives the raw luminances so the
;; luminance-driven modulation sources can be evaluated per cell.
(render-char-grid-fx frame chars colors luminances char_size
color_mode background_color invert_colors
char_jitter char_scale char_rotation char_hue_shift
jitter_source scale_source rotation_source hue_source)))

View File

@@ -0,0 +1,102 @@
;; Composable ASCII Art with Per-Zone Expression-Driven Effects
;; Requires ascii primitive library for the ascii-fx-zone primitive
(require-primitives "ascii")
;; Two modes of operation:
;;
;; 1. EXPRESSION MODE: Use zone-* variables in expression parameters
;; Zone variables available:
;; zone-row, zone-col: Grid position (integers)
;; zone-row-norm, zone-col-norm: Normalized position (0-1)
;; zone-lum: Cell luminance (0-1)
;; zone-sat: Cell saturation (0-1)
;; zone-hue: Cell hue (0-360)
;; zone-r, zone-g, zone-b: RGB components (0-1)
;;
;; Example:
;; (ascii-fx-zone frame
;; :cols 80
;; :char_hue (* zone-lum 180)
;; :char_rotation (* zone-col-norm 30))
;;
;; 2. CELL EFFECT MODE: Pass a lambda to apply arbitrary effects per-cell
;; The lambda receives (cell-image zone-dict) and returns modified cell.
;; Zone dict contains: row, col, row-norm, col-norm, lum, sat, hue, r, g, b,
;; char, color, cell_size, plus any bound analysis values.
;;
;; Any loaded sexp effect can be called on cells - each cell is just a small frame:
;; (blur cell radius) - Gaussian blur
;; (rotate cell angle) - Rotate by angle degrees
;; (brightness cell factor) - Adjust brightness
;; (contrast cell factor) - Adjust contrast
;; (saturation cell factor) - Adjust saturation
;; (hue_shift cell degrees) - Shift hue
;; (rgb_split cell offset_x offset_y) - RGB channel split
;; (invert cell) - Invert colors
;; (pixelate cell block_size) - Pixelate
;; (wave cell amplitude freq) - Wave distortion
;; ... and any other loaded effect
;;
;; Example:
;; (ascii-fx-zone frame
;; :cols 60
;; :cell_effect (lambda [cell zone]
;; (blur (rotate cell (* (get zone "energy") 45))
;; (if (> (get zone "lum") 0.5) 3 0))))
(define-effect ascii_fx_zone
:params (
;; NOTE: params defaulting to nil are treated as "disabled" and simply
;; passed through to the ascii-fx-zone special form below.
(cols :type int :default 80 :range [20 200]
:desc "Number of character columns")
(char_size :type int :default nil :range [4 32]
:desc "Character cell size in pixels (overrides cols if set)")
(alphabet :type string :default "standard"
:desc "Character set: standard, blocks, simple, digits, or custom string")
(color_mode :type string :default "color"
:desc "Color mode: color, mono, invert, or any color name/hex")
(background :type string :default "black"
:desc "Background color name or hex value")
(contrast :type float :default 1.5 :range [0.5 3.0]
:desc "Contrast for character selection")
(char_hue :type any :default nil
:desc "Hue shift expression (evaluated per-zone with zone-* vars)")
(char_saturation :type any :default nil
:desc "Saturation multiplier expression (1.0 = unchanged)")
(char_brightness :type any :default nil
:desc "Brightness multiplier expression (1.0 = unchanged)")
(char_scale :type any :default nil
:desc "Character scale expression (1.0 = normal size)")
(char_rotation :type any :default nil
:desc "Character rotation expression (degrees)")
(char_jitter :type any :default nil
:desc "Position jitter expression (pixels)")
(cell_effect :type any :default nil
:desc "Lambda (cell zone) -> cell for arbitrary per-cell effects")
;; Convenience params for staged recipes (avoids compile-time expression issues)
(energy :type float :default nil
:desc "Energy multiplier (0-1) from audio analysis bind")
(rotation_scale :type float :default 0
:desc "Max rotation at top-right when energy=1 (degrees)")
)
;; The ascii-fx-zone special form handles expression params
;; If energy + rotation_scale provided, it builds: energy * scale * position_factor
;; where position_factor = 0 at bottom-left, 3 at top-right
;; If cell_effect provided, each character is rendered to a cell image,
;; passed to the lambda, and the result composited back
(ascii-fx-zone frame
:cols cols
:char_size char_size
:alphabet alphabet
:color_mode color_mode
:background background
:contrast contrast
:char_hue char_hue
:char_saturation char_saturation
:char_brightness char_brightness
:char_scale char_scale
:char_rotation char_rotation
:char_jitter char_jitter
:cell_effect cell_effect
:energy energy
:rotation_scale rotation_scale))

View File

@@ -0,0 +1,30 @@
;; ASCII Zones effect - different character sets for different brightness zones
;; Dark areas use simple chars, mid uses standard, bright uses blocks
(require-primitives "ascii")
(define-effect ascii_zones
:params (
;; Thresholds are in 0-255 luminance space.
(char_size :type int :default 8 :range [4 32])
(dark_threshold :type int :default 80 :range [0 128])
(bright_threshold :type int :default 180 :range [128 255])
(color_mode :type string :default "color")
)
(let* ((sample (cell-sample frame char_size))
(colors (nth sample 0))
(luminances (nth sample 1))
;; Start with simple chars as base
(base-chars (luminance-to-chars luminances "simple" 1.2))
;; Map each cell to appropriate alphabet based on brightness zone
;; (lambda args: row, col, current char, cell luminance)
(zoned-chars (map-char-grid base-chars luminances
(lambda (r c ch lum)
(cond
;; Bright zones: use block characters
((> lum bright_threshold)
(alphabet-char "blocks" (floor (/ (- lum bright_threshold) 15))))
;; Dark zones: use simple sparse chars
((< lum dark_threshold)
(alphabet-char " .-" (floor (/ lum 30))))
;; Mid zones: use standard ASCII
(else
(alphabet-char "standard" (floor (/ lum 4)))))))))
;; NOTE(review): background is passed as an RGB list here, while sibling
;; effects (ascii_art) pass a color-name string — confirm render-char-grid
;; accepts both forms.
(render-char-grid frame zoned-chars colors char_size color_mode (list 0 0 0))))

View File

@@ -0,0 +1,31 @@
;; Blend effect - combines two video frames
;; Streaming-compatible: frame is background, overlay is second frame
;; Usage: (blend background overlay :opacity 0.5 :mode "alpha")
;;
;; Params:
;; mode - blend mode (add, multiply, screen, overlay, difference, lighten, darken, alpha)
;; opacity - blend amount (0-1)
(require-primitives "image" "blending" "core")
(define-effect blend
:params (
(overlay :type frame :default nil)
(mode :type string :default "alpha")
(opacity :type float :default 0.5)
)
;; No overlay supplied -> pass the background frame through untouched.
(if (core:is-nil overlay)
frame
(let [a frame
b overlay
a-h (image:height a)
a-w (image:width a)
b-h (image:height b)
b-w (image:width b)
;; Resize b to match a if needed
b-sized (if (and (= a-w b-w) (= a-h b-h))
b
(image:resize b a-w a-h "linear"))]
;; "alpha" is a plain weighted mix; any other mode applies the blend
;; mode first and then mixes the result back in by opacity.
(if (= mode "alpha")
(blending:blend-images a b-sized opacity)
(blending:blend-images a (blending:blend-mode a b-sized mode) opacity)))))

View File

@@ -0,0 +1,58 @@
;; N-way weighted blend effect
;; Streaming-compatible: pass inputs as a list of frames
;; Usage: (blend_multi :inputs [(read a) (read b) (read c)] :weights [0.3 0.4 0.3])
;;
;; Parameters:
;; inputs - list of N frames to blend
;; weights - list of N floats, one per input (resolved per-frame)
;; mode - blend mode applied when folding each frame in:
;; "alpha" — pure weighted average (default)
;; "multiply" — darken by multiplication
;; "screen" — lighten (inverse multiply)
;; "overlay" — contrast-boosting midtone blend
;; "soft-light" — gentle dodge/burn
;; "hard-light" — strong dodge/burn
;; "color-dodge" — brightens towards white
;; "color-burn" — darkens towards black
;; "difference" — absolute pixel difference
;; "exclusion" — softer difference
;; "add" — additive (clamped)
;; "subtract" — subtractive (clamped)
;; "darken" — per-pixel minimum
;; "lighten" — per-pixel maximum
;; resize_mode - how to match frame dimensions (fit, crop, stretch)
;;
;; Uses a left-fold over inputs[1..N-1]. At each step the running
;; opacity is: w[i] / (w[0] + w[1] + ... + w[i])
;; which produces the correct normalised weighted result.
(require-primitives "image" "blending")
(define-effect blend_multi
:params (
(inputs :type list :default [])
(weights :type list :default [])
(mode :type string :default "alpha")
(resize_mode :type string :default "fit")
)
(let [n (len inputs)
;; Target dimensions from first frame
target-w (image:width (nth inputs 0))
target-h (image:height (nth inputs 0))
;; Fold over indices 1..n-1
;; Accumulator is (list blended-frame running-weight-sum)
seed (list (nth inputs 0) (nth weights 0))
result (reduce (range 1 n) seed
(lambda (pair i)
(let [acc (nth pair 0)
running (nth pair 1)
w (nth weights i)
new-running (+ running w)
;; max guards against a zero weight sum (division by ~0)
opacity (/ w (max new-running 0.001))
f (image:resize (nth inputs i) target-w target-h "linear")
;; Apply blend mode then mix with opacity
blended (if (= mode "alpha")
(blending:blend-images acc f opacity)
(blending:blend-images acc (blending:blend-mode acc f mode) opacity))]
(list blended new-running))))]
;; NOTE(review): resize_mode is declared but the fold always uses
;; "linear" resize to the first frame's dimensions — confirm intended.
(nth result 0)))

View File

@@ -0,0 +1,16 @@
;; Bloom effect - glow on bright areas
;; Isolates pixels above the luminance threshold, blurs them, and
;; additively composites the blurred highlights back over the frame.
(require-primitives "image" "blending")
(define-effect bloom
:params (
(intensity :type float :default 0.5 :range [0 2])
(threshold :type int :default 200 :range [0 255])
(radius :type int :default 15 :range [1 50])
)
;; bright: copy of frame with sub-threshold pixels blacked out
(let* ((bright (map-pixels frame
(lambda (x y c)
(if (> (luminance c) threshold)
c
(rgb 0 0 0)))))
(blurred (image:blur bright radius)))
;; NOTE(review): `intensity` is declared but unused in the body —
;; confirm whether it should scale `blurred` before the add.
(blending:blend-mode frame blurred "add")))

View File

@@ -0,0 +1,8 @@
;; Blur effect - gaussian blur
;; (max 1 radius) guards against a zero/negative radius reaching the primitive.
(require-primitives "image")
(define-effect blur
:params (
(radius :type int :default 5 :range [1 50])
)
(image:blur frame (max 1 radius)))

View File

@@ -0,0 +1,9 @@
;; Brightness effect - adjusts overall brightness
;; Uses vectorized adjust primitive for fast processing
;; amount is an additive offset applied to each channel (-255..255).
(require-primitives "color_ops")
(define-effect brightness
:params (
(amount :type int :default 0 :range [-255 255])
)
(color_ops:adjust-brightness frame amount))

View File

@@ -0,0 +1,13 @@
;; Color adjustment effect - replaces TRANSFORM node
;; Applies brightness, then contrast, then saturation via a thread-first pipe.
(require-primitives "color_ops")
(define-effect color-adjust
:params (
(brightness :type int :default 0 :range [-255 255] :desc "Brightness adjustment")
(contrast :type float :default 1 :range [0 3] :desc "Contrast multiplier")
(saturation :type float :default 1 :range [0 2] :desc "Saturation multiplier")
)
(-> frame
(color_ops:adjust-brightness brightness)
(color_ops:adjust-contrast contrast)
(color_ops:adjust-saturation saturation)))

View File

@@ -0,0 +1,13 @@
;; Color Cycle effect - animated hue rotation
;; Rotates every pixel's hue by (t * speed * 360) degrees, wrapping at 360.
(require-primitives "color_ops")
(define-effect color_cycle
:params (
(speed :type int :default 1 :range [0 10])
)
;; NOTE(review): uses bare `t` for time while kaleidoscope/wave use
;; (or _time 0) — confirm both names are bound in the interpreter.
(let ((shift (* t speed 360)))
(map-pixels frame
(lambda (x y c)
(let* ((hsv (rgb->hsv c))
(new-h (mod (+ (first hsv) shift) 360)))
(hsv->rgb (list new-h (nth hsv 1) (nth hsv 2))))))))

View File

@@ -0,0 +1,9 @@
;; Contrast effect - adjusts image contrast
;; Uses vectorized adjust primitive for fast processing
(require-primitives "color_ops")
(define-effect contrast
  :params (
    ;; FIX: declared float — the range [0.5 3] allows fractional
    ;; multipliers, which :type int would truncate/reject.
    (amount :type float :default 1 :range [0.5 3])
  )
  (color_ops:adjust-contrast frame amount))

View File

@@ -0,0 +1,30 @@
;; CRT effect - old monitor simulation
;; Combines horizontal scanline darkening with a radial vignette.
(require-primitives "image")
(define-effect crt
:params (
(line_spacing :type int :default 2 :range [1 10])
(line_opacity :type float :default 0.3 :range [0 1])
(vignette_amount :type float :default 0.2)
)
(let* ((w (image:width frame))
(h (image:height frame))
(cx (/ w 2))
(cy (/ h 2))
;; distance from center to a corner, used to normalize the vignette
(max-dist (sqrt (+ (* cx cx) (* cy cy)))))
(map-pixels frame
(lambda (x y c)
(let* (;; Scanline darkening
(scanline-factor (if (= 0 (mod y line_spacing))
(- 1 line_opacity)
1))
;; Vignette
(dx (- x cx))
(dy (- y cy))
(dist (sqrt (+ (* dx dx) (* dy dy))))
(vignette-factor (- 1 (* (/ dist max-dist) vignette_amount)))
;; Combined
(factor (* scanline-factor vignette-factor)))
(rgb (* (red c) factor)
(* (green c) factor)
(* (blue c) factor)))))))

View File

@@ -0,0 +1,14 @@
;; Datamosh effect - glitch block corruption
;; Keeps the previous frame in effect state and feeds both frames to the
;; datamosh primitive, which displaces/corrupts blocks between them.
(define-effect datamosh
:params (
(block_size :type int :default 32 :range [8 128])
(corruption :type float :default 0.3 :range [0 1])
(max_offset :type int :default 50 :range [0 200])
(color_corrupt :type bool :default true)
)
;; Get previous frame from state, or use current frame if none
(let ((prev (state-get "prev_frame" frame)))
(begin
;; copy before storing so later in-place edits can't mutate the stored frame
(state-set "prev_frame" (copy frame))
;; NOTE(review): `datamosh` here presumably resolves to the built-in
;; primitive, not this effect recursively — confirm name resolution order.
(datamosh frame prev block_size corruption max_offset color_corrupt))))

View File

@@ -0,0 +1,19 @@
;; Echo effect - motion trails using frame buffer
;; Keeps up to num_echoes+1 recent frames in effect state and blends the
;; current frame with the previous one, weighted by decay.
(require-primitives "blending")
(define-effect echo
  :params (
    (num_echoes :type int :default 4 :range [1 20])
    (decay :type float :default 0.5 :range [0 1])
  )
  (let* ((buffer (state-get 'buffer (list)))
         ;; newest frame first; truncated to num_echoes+1 entries
         (new-buffer (take (cons frame buffer) (+ num_echoes 1))))
    (begin
      (state-set 'buffer new-buffer)
      ;; Blend frames with decay
      (if (< (length new-buffer) 2)
          frame
          ;; Simple blend of first two frames for now;
          ;; full version would fold over all frames.
          ;; FIX: removed an unused (result (copy frame)) let-binding —
          ;; the copy was never referenced.
          (blending:blend-images frame (nth new-buffer 1) (* decay 0.5))))))

View File

@@ -0,0 +1,9 @@
;; Edge detection effect - highlights edges
;; low/high are the hysteresis thresholds passed to the edge-detect primitive.
(require-primitives "image")
(define-effect edge_detect
:params (
(low :type int :default 50 :range [10 100])
(high :type int :default 150 :range [50 300])
)
(image:edge-detect frame low high))

View File

@@ -0,0 +1,13 @@
;; Emboss effect - creates raised/3D appearance
;; Convolves the frame with a directional emboss kernel built from
;; `strength`, then mixes the original back in by `blend`.
(require-primitives "blending")
(define-effect emboss
  :params (
    ;; FIX: declared float — the range [0.5 3] allows fractional
    ;; strengths, which :type int would truncate/reject.
    (strength :type float :default 1 :range [0.5 3])
    (blend :type float :default 0.3 :range [0 1])
  )
  ;; 3x3 kernel: negative weights top-left, positive bottom-right
  (let* ((kernel (list (list (- strength) (- strength) 0)
                       (list (- strength) 1 strength)
                       (list 0 strength strength)))
         (embossed (convolve frame kernel)))
    (blending:blend-images embossed frame blend)))

View File

@@ -0,0 +1,19 @@
;; Film Grain effect - adds film grain texture
;; Adds per-pixel noise; colored grain draws an independent sample per
;; channel, mono grain adds the same sample to all three channels.
(require-primitives "core")
(define-effect film_grain
:params (
(intensity :type float :default 0.2 :range [0 1])
(colored :type bool :default false)
)
;; grain-amount presumably acts as the noise standard deviation
;; (gaussian mean sigma) — TODO confirm gaussian's signature.
(let ((grain-amount (* intensity 50)))
(map-pixels frame
(lambda (x y c)
(if colored
(rgb (clamp (+ (red c) (gaussian 0 grain-amount)) 0 255)
(clamp (+ (green c) (gaussian 0 grain-amount)) 0 255)
(clamp (+ (blue c) (gaussian 0 grain-amount)) 0 255))
(let ((n (gaussian 0 grain-amount)))
(rgb (clamp (+ (red c) n) 0 255)
(clamp (+ (green c) n) 0 255)
(clamp (+ (blue c) n) 0 255))))))))

View File

@@ -0,0 +1,16 @@
;; Fisheye effect - barrel/pincushion lens distortion
;; Positive strength bulges outward (barrel); negative pinches (pincushion).
(require-primitives "geometry" "image")
(define-effect fisheye
:params (
(strength :type float :default 0.3 :range [-1 1])
(center_x :type float :default 0.5 :range [0 1])
(center_y :type float :default 0.5 :range [0 1])
(zoom_correct :type bool :default true)
)
;; center params are normalized (0-1) and scaled to pixel coordinates here
(let* ((w (image:width frame))
(h (image:height frame))
(cx (* w center_x))
(cy (* h center_y))
(coords (geometry:fisheye-coords w h strength cx cy zoom_correct)))
(geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords))))

View File

@@ -0,0 +1,16 @@
;; Flip effect - flips image horizontally or vertically
;; Both flags may be true, applying a horizontal then a vertical flip
;; (equivalent to a 180-degree rotation).
(require-primitives "geometry")
(define-effect flip
:params (
(horizontal :type bool :default true)
(vertical :type bool :default false)
)
;; imperative style: result is rebound by set! for each requested flip
(let ((result frame))
(if horizontal
(set! result (geometry:flip-img result "horizontal"))
nil)
(if vertical
(set! result (geometry:flip-img result "vertical"))
nil)
result))

View File

@@ -0,0 +1,7 @@
;; Grayscale effect - converts to grayscale
;; Uses vectorized mix-gray primitive for fast processing
;; No parameters: a straight delegation to the image primitive.
(require-primitives "image")
(define-effect grayscale
:params ()
(image:grayscale frame))

View File

@@ -0,0 +1,12 @@
;; Hue shift effect - rotates hue values
;; Uses vectorized shift-hsv primitive for fast processing
;; Total shift = static degrees + (speed * t) for animated rotation.
(require-primitives "color_ops")
(define-effect hue_shift
:params (
(degrees :type int :default 0 :range [0 360])
(speed :type int :default 0 :desc "rotation per second")
)
;; shift-hsv's trailing 1 1 leave saturation and value unchanged
(let ((shift (+ degrees (* speed t))))
(color_ops:shift-hsv frame shift 1 1)))

View File

@@ -0,0 +1,9 @@
;; Invert effect - inverts all colors
;; Uses vectorized invert-img primitive for fast processing
;; amount param: 0 = no invert, 1 = full invert (threshold at 0.5)
;; i.e. the float param acts as an on/off switch, not a partial blend.
(require-primitives "color_ops")
(define-effect invert
:params ((amount :type float :default 1 :range [0 1]))
(if (> amount 0.5) (color_ops:invert-img frame) frame))

View File

@@ -0,0 +1,20 @@
;; Kaleidoscope effect - mandala-like symmetry patterns
;; Mirrors the frame into `segments` radial wedges around the chosen
;; center; rotation can be static or animated via rotation_speed.
(require-primitives "geometry" "image")
(define-effect kaleidoscope
  :params (
    (segments :type int :default 6 :range [3 16])
    (rotation :type int :default 0 :range [0 360])
    (rotation_speed :type int :default 0 :range [-180 180])
    (center_x :type float :default 0.5 :range [0 1])
    (center_y :type float :default 0.5 :range [0 1])
    ;; FIX: declared float — the range [0.5 3] allows fractional zoom
    ;; factors, which :type int would truncate/reject.
    (zoom :type float :default 1 :range [0.5 3])
  )
  (let* ((w (image:width frame))
         (h (image:height frame))
         (cx (* w center_x))
         (cy (* h center_y))
         ;; Total rotation including time-based animation
         (total_rot (+ rotation (* rotation_speed (or _time 0))))
         (coords (geometry:kaleidoscope-coords w h segments total_rot cx cy zoom)))
    (geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords))))

View File

@@ -0,0 +1,36 @@
;; Layer effect - composite overlay over background at position
;; Streaming-compatible: frame is background, overlay is foreground
;; Usage: (layer background overlay :x 10 :y 20 :opacity 0.8)
;;
;; Params:
;; overlay - frame to composite on top
;; x, y - position to place overlay
;; opacity - blend amount (0-1)
;; mode - blend mode (alpha, multiply, screen, etc.)
(require-primitives "image" "blending" "core")
(define-effect layer
:params (
(overlay :type frame :default nil)
(x :type int :default 0)
(y :type int :default 0)
(opacity :type float :default 1.0)
(mode :type string :default "alpha")
)
;; No overlay supplied -> background passes through untouched.
(if (core:is-nil overlay)
frame
(let [bg (copy frame)
fg overlay
fg-w (image:width fg)
fg-h (image:height fg)]
(if (= opacity 1.0)
;; Simple paste
(paste bg fg x y)
;; Blend with opacity: only the overlay-sized region of bg is
;; cropped, blended with fg, and pasted back in place.
(let [blended (if (= mode "alpha")
(blending:blend-images (image:crop bg x y fg-w fg-h) fg opacity)
(blending:blend-images (image:crop bg x y fg-w fg-h)
(blending:blend-mode (image:crop bg x y fg-w fg-h) fg mode)
opacity))]
(paste bg blended x y))))))

View File

@@ -0,0 +1,33 @@
;; Mirror effect - mirrors half of image
;; mode names read as source_destination: e.g. "left_right" copies the
;; left half, flips it, and pastes it over the right half.
(require-primitives "geometry" "image")
(define-effect mirror
:params (
(mode :type string :default "left_right")
)
(let* ((w (image:width frame))
(h (image:height frame))
(hw (floor (/ w 2)))
(hh (floor (/ h 2))))
(cond
((= mode "left_right")
(let ((left (image:crop frame 0 0 hw h))
(result (copy frame)))
(paste result (geometry:flip-img left "horizontal") hw 0)))
((= mode "right_left")
(let ((right (image:crop frame hw 0 hw h))
(result (copy frame)))
(paste result (geometry:flip-img right "horizontal") 0 0)))
((= mode "top_bottom")
(let ((top (image:crop frame 0 0 w hh))
(result (copy frame)))
(paste result (geometry:flip-img top "vertical") 0 hh)))
((= mode "bottom_top")
(let ((bottom (image:crop frame 0 hh w hh))
(result (copy frame)))
(paste result (geometry:flip-img bottom "vertical") 0 0)))
;; unrecognized mode -> frame unchanged
(else frame))))

View File

@@ -0,0 +1,23 @@
;; Neon Glow effect - glowing edge effect
;; Detects edges, blurs and intensifies them, darkens the original frame
;; toward black by (1 - background), then screens the glow on top.
(require-primitives "image" "blending")
(define-effect neon_glow
  :params (
    (edge_low :type int :default 50 :range [10 200])
    (edge_high :type int :default 150 :range [50 300])
    (glow_radius :type int :default 15 :range [1 50])
    ;; FIX: declared float — the range [0.5 5] allows fractional
    ;; intensities, which :type int would truncate/reject.
    (glow_intensity :type float :default 2 :range [0.5 5])
    (background :type float :default 0.3 :range [0 1])
  )
  (let* ((edge-img (image:edge-detect frame edge_low edge_high))
         (glow (image:blur edge-img glow_radius))
         ;; Intensify the glow
         (bright-glow (map-pixels glow
                        (lambda (x y c)
                          (rgb (clamp (* (red c) glow_intensity) 0 255)
                               (clamp (* (green c) glow_intensity) 0 255)
                               (clamp (* (blue c) glow_intensity) 0 255))))))
    (blending:blend-mode (blending:blend-images frame (make-image (image:width frame) (image:height frame) (list 0 0 0))
                                                (- 1 background))
                         bright-glow
                         "screen")))

View File

@@ -0,0 +1,8 @@
;; Noise effect - adds random noise
;; Uses vectorized add-noise primitive for fast processing
;; Thin delegation; amount controls the noise magnitude (0-100).
(define-effect noise
:params (
(amount :type int :default 20 :range [0 100])
)
(add-noise frame amount))

View File

@@ -0,0 +1,24 @@
;; Outline effect - shows only edges
;; Detects edges (low threshold = threshold/2), optionally dilates them to
;; `thickness`, and paints `color` wherever the dilated edge map is bright,
;; over a base chosen by fill_mode (original | white | black).
(require-primitives "image")
(define-effect outline
  :params (
    (thickness :type int :default 2 :range [1 10])
    (threshold :type int :default 100 :range [20 300])
    ;; FIX: the original params list was malformed — `color` was missing
    ;; its closing paren and `fill_mode` lacked :type/:default keywords,
    ;; leaving the whole :params form unbalanced.
    (color :type list :default (list 0 0 0))
    (fill_mode :type string :default "original")
  )
  (let* ((edge-img (image:edge-detect frame (/ threshold 2) threshold))
         (dilated (if (> thickness 1)
                      (dilate edge-img thickness)
                      edge-img))
         (base (cond
                 ((= fill_mode "original") (copy frame))
                 ((= fill_mode "white") (make-image (image:width frame) (image:height frame) (list 255 255 255)))
                 (else (make-image (image:width frame) (image:height frame) (list 0 0 0))))))
    (map-pixels base
      (lambda (x y c)
        ;; paint the outline color where the dilated edge map is bright
        (let ((edge-val (luminance (pixel dilated x y))))
          (if (> edge-val 128)
              color
              c))))))

View File

@@ -0,0 +1,13 @@
;; Pixelate effect - creates blocky pixels
;; Downscales by block_size with area averaging, then upscales back with
;; nearest-neighbor so each small pixel becomes a solid block.
(require-primitives "image")
(define-effect pixelate
:params (
(block_size :type int :default 8 :range [2 64])
)
(let* ((w (image:width frame))
(h (image:height frame))
;; max 1 guards against a zero-size intermediate image
(small-w (max 1 (floor (/ w block_size))))
(small-h (max 1 (floor (/ h block_size))))
(small (image:resize frame small-w small-h "area")))
(image:resize small w h "nearest")))

View File

@@ -0,0 +1,11 @@
;; Pixelsort effect - glitch art pixel sorting
;; Thin wrapper over the pixelsort primitive: sorts pixel runs whose
;; sort_by value falls between the two thresholds, along `angle`.
(define-effect pixelsort
:params (
(sort_by :type string :default "lightness")
(threshold_low :type int :default 50 :range [0 255])
(threshold_high :type int :default 200 :range [0 255])
(angle :type int :default 0 :range [0 180])
(reverse :type bool :default false)
)
;; NOTE(review): `pixelsort` here presumably resolves to the built-in
;; primitive, not this effect recursively — confirm name resolution order.
(pixelsort frame sort_by threshold_low threshold_high angle reverse))

View File

@@ -0,0 +1,8 @@
;; Posterize effect - reduces color levels
;; Quantizes each channel to `levels` discrete values.
(require-primitives "color_ops")
(define-effect posterize
:params (
(levels :type int :default 8 :range [2 32])
)
(color_ops:posterize frame levels))

View File

@@ -0,0 +1,11 @@
;; Resize effect - replaces RESIZE node
;; Note: uses target-w/target-h to avoid conflict with width/height primitives
(require-primitives "image")
(define-effect resize-frame
:params (
(target-w :type int :default 640 :desc "Target width in pixels")
(target-h :type int :default 480 :desc "Target height in pixels")
(mode :type string :default "linear" :choices [linear nearest area] :desc "Interpolation mode")
)
;; straight delegation; no aspect-ratio preservation is attempted
(image:resize frame target-w target-h mode))

View File

@@ -0,0 +1,13 @@
;; RGB Split effect - chromatic aberration
;; Translates the red channel by (+offset_x, +offset_y) and the blue
;; channel by the opposite offset, leaving green in place, then remerges.
(define-effect rgb_split
:params (
(offset_x :type int :default 10 :range [-50 50])
(offset_y :type int :default 0 :range [-50 50])
)
;; each channel is expanded to a 3-channel image so translate can be
;; applied, then the shifted single channel is extracted again
(let* ((r (channel frame 0))
(g (channel frame 1))
(b (channel frame 2))
(r-shifted (translate (merge-channels r r r) offset_x offset_y))
(b-shifted (translate (merge-channels b b b) (- offset_x) (- offset_y))))
(merge-channels (channel r-shifted 0) g (channel b-shifted 0))))

View File

@@ -0,0 +1,19 @@
;; Ripple effect - radial wave distortion from center
;; Displaces pixels radially with a sine wave emanating from the chosen
;; center; speed animates the phase over time, decay dampens with distance.
(require-primitives "geometry" "image" "math")
(define-effect ripple
:params (
(frequency :type int :default 5 :range [1 20])
(amplitude :type int :default 10 :range [0 50])
(center_x :type float :default 0.5 :range [0 1])
(center_y :type float :default 0.5 :range [0 1])
(decay :type int :default 1 :range [0 5])
(speed :type int :default 1 :range [0 10])
)
(let* ((w (image:width frame))
(h (image:height frame))
(cx (* w center_x))
(cy (* h center_y))
;; NOTE(review): uses (or t 0) while wave/kaleidoscope use _time —
;; confirm both time variables are bound.
(phase (* (or t 0) speed 2 pi))
(coords (geometry:ripple-displace w h frequency amplitude cx cy decay phase)))
(geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords))))

View File

@@ -0,0 +1,11 @@
;; Rotate effect - rotates image
;; Total angle = static angle + (speed * t) for continuous rotation.
(require-primitives "geometry")
(define-effect rotate
:params (
(angle :type int :default 0 :range [-360 360])
(speed :type int :default 0 :desc "rotation per second")
)
(let ((total-angle (+ angle (* speed t))))
(geometry:rotate-img frame total-angle)))

View File

@@ -0,0 +1,9 @@
;; Saturation effect - adjusts color saturation
;; Uses vectorized shift-hsv primitive for fast processing
(require-primitives "color_ops")
(define-effect saturation
  :params (
    ;; FIX: declared float — amount is a multiplier (0 = grayscale,
    ;; 1 = unchanged); :type int would forbid fractional values like 0.5,
    ;; and sibling effects (color-adjust) declare it float.
    (amount :type float :default 1 :range [0 3])
  )
  (color_ops:adjust-saturation frame amount))

View File

@@ -0,0 +1,15 @@
;; Scanlines effect - VHS-style horizontal line shifting
;; Shifts each row horizontally; the shift interpolates between a smooth
;; sine wave and a uniform random offset, controlled by `randomness`.
(require-primitives "core")
(define-effect scanlines
:params (
(amplitude :type int :default 10 :range [0 100])
(frequency :type int :default 10 :range [1 100])
(randomness :type float :default 0.5 :range [0 1])
)
(map-rows frame
(lambda (y row)
;; 6.28 ~= 2*pi; max 1 guards against division by zero frequency
(let* ((sine-shift (* amplitude (sin (/ (* y 6.28) (max 1 frequency)))))
(rand-shift (core:rand-range (- amplitude) amplitude))
(shift (floor (lerp sine-shift rand-shift randomness))))
(roll row shift 0)))))

View File

@@ -0,0 +1,7 @@
;; Sepia effect - applies sepia tone
;; Classic warm vintage look
;; No parameters: a straight delegation to the color_ops primitive.
(require-primitives "color_ops")
(define-effect sepia
:params ()
(color_ops:sepia frame))

View File

@@ -0,0 +1,8 @@
;; Sharpen effect - sharpens edges
;; Thin delegation; amount controls sharpening strength (0 = none).
(require-primitives "image")
(define-effect sharpen
:params (
(amount :type int :default 1 :range [0 5])
)
(image:sharpen frame amount))

View File

@@ -0,0 +1,16 @@
;; Strobe effect - holds frames for choppy look
;; Captures a frame and replays it until 1/frame_rate seconds have elapsed
;; (tracked via effect state), producing a lower effective frame rate.
(require-primitives "core")
(define-effect strobe
:params (
(frame_rate :type int :default 12 :range [1 60])
)
(let* ((held (state-get 'held nil))
(held-until (state-get 'held-until 0))
(frame-duration (/ 1 frame_rate)))
;; time to refresh: capture the current frame and schedule the next hold
(if (or (core:is-nil held) (>= t held-until))
(begin
(state-set 'held (copy frame))
(state-set 'held-until (+ t frame-duration))
frame)
held)))

View File

@@ -0,0 +1,17 @@
;; Swirl effect - spiral vortex distortion
;; Twists pixels around the chosen center; strength sets twist direction
;; and magnitude, radius (fraction of frame) bounds the affected area.
(require-primitives "geometry" "image")
(define-effect swirl
:params (
(strength :type int :default 1 :range [-10 10])
(radius :type float :default 0.5 :range [0.1 2])
(center_x :type float :default 0.5 :range [0 1])
(center_y :type float :default 0.5 :range [0 1])
(falloff :type string :default "quadratic")
)
(let* ((w (image:width frame))
(h (image:height frame))
(cx (* w center_x))
(cy (* h center_y))
(coords (geometry:swirl-coords w h strength radius cx cy falloff)))
(geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords))))

View File

@@ -0,0 +1,9 @@
;; Threshold effect - converts to black and white
;; Pixels above `level` become white (or black when invert is true).
(require-primitives "color_ops")
(define-effect threshold
:params (
(level :type int :default 128 :range [0 255])
(invert :type bool :default false)
)
(color_ops:threshold frame level invert))

View File

@@ -0,0 +1,29 @@
;; Tile Grid effect - tiles image in grid
;; Shrinks the frame to tile size (accounting for gaps) and pastes copies
;; onto a black canvas. Placement is hand-unrolled, so at most a 2x2
;; arrangement is actually drawn regardless of rows/cols.
(require-primitives "geometry" "image")
(define-effect tile_grid
:params (
(rows :type int :default 2 :range [1 10])
(cols :type int :default 2 :range [1 10])
(gap :type int :default 0 :range [0 50])
)
(let* ((w (image:width frame))
(h (image:height frame))
;; tile size = (dimension minus total gap space) / count
(tile-w (floor (/ (- w (* gap (- cols 1))) cols)))
(tile-h (floor (/ (- h (* gap (- rows 1))) rows)))
(tile (image:resize frame tile-w tile-h "area"))
(result (make-image w h (list 0 0 0))))
(begin
;; Manually place tiles using nested iteration
;; This is a simplified version - full version would loop
(paste result tile 0 0)
(if (> cols 1)
(paste result tile (+ tile-w gap) 0)
nil)
(if (> rows 1)
(paste result tile 0 (+ tile-h gap))
nil)
(if (and (> cols 1) (> rows 1))
(paste result tile (+ tile-w gap) (+ tile-h gap))
nil)
result)))

View File

@@ -0,0 +1,20 @@
;; Trails effect - persistent motion trails
;; Keeps an accumulation buffer in effect state: each frame the buffer is
;; faded toward black by (1 - persistence) and the current frame is
;; lighten-blended on top, so bright moving areas leave decaying trails.
(require-primitives "image" "blending")
(define-effect trails
:params (
(persistence :type float :default 0.8 :range [0 0.99])
)
(let* ((buffer (state-get 'buffer nil))
(current frame))
;; NOTE(review): sibling effects test nil via core:is-nil; confirm
;; (= buffer nil) behaves the same here.
(if (= buffer nil)
(begin
;; first frame: seed the buffer and pass through
(state-set 'buffer (copy frame))
frame)
(let* ((faded (blending:blend-images buffer
(make-image (image:width frame) (image:height frame) (list 0 0 0))
(- 1 persistence)))
(result (blending:blend-mode faded current "lighten")))
(begin
(state-set 'buffer result)
result)))))

View File

@@ -0,0 +1,23 @@
;; Vignette effect - darkens corners
;; Per-pixel darkening that grows with distance from the image centre.
;; `strength` controls how dark the corners get; `radius` scales the
;; distance at which the maximum falloff is reached.
(require-primitives "image")
(define-effect vignette
  :params (
    (strength :type float :default 0.5 :range [0 1])
    ;; Declared float (was int): the range [0.5 2] is continuous, and an
    ;; int type would reject fractional radii such as 0.75.
    (radius :type float :default 1.0 :range [0.5 2])
  )
  (let* ((w (image:width frame))
         (h (image:height frame))
         (cx (/ w 2))
         (cy (/ h 2))
         ;; Corner distance scaled by radius = distance of full falloff.
         (max-dist (* (sqrt (+ (* cx cx) (* cy cy))) radius)))
    (map-pixels frame
      (lambda (x y c)
        (let* ((dx (- x cx))
               (dy (- y cy))
               (dist (sqrt (+ (* dx dx) (* dy dy))))
               (factor (- 1 (* (/ dist max-dist) strength)))
               (factor (clamp factor 0 1)))
          (rgb (* (red c) factor)
               (* (green c) factor)
               (* (blue c) factor)))))))

View File

@@ -0,0 +1,22 @@
;; Wave effect - sine wave displacement distortion
;; Displaces pixels along the chosen axis with a sine wave whose phase
;; advances with _time, so the wave animates at `speed` cycles per second.
(require-primitives "geometry" "image")
(define-effect wave
  :params (
    (amplitude :type int :default 10 :range [0 100])
    (wavelength :type int :default 50 :range [10 500])
    (speed :type int :default 1 :range [0 10])
    ;; direction: "horizontal", "vertical", or anything else for both axes
    (direction :type string :default "horizontal")
  )
  (let* ((w (image:width frame))
         (h (image:height frame))
         ;; Use _time for animation phase
         (phase (* (or _time 0) speed 2 pi))
         ;; Calculate frequency: waves per dimension
         (freq (/ (if (= direction "vertical") w h) wavelength))
         (axis (cond
                 ((= direction "horizontal") "x")
                 ((= direction "vertical") "y")
                 (else "both")))
         (coords (geometry:wave-coords w h axis freq amplitude phase)))
    (geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords))))

View File

@@ -0,0 +1,8 @@
;; Zoom effect - zooms in/out from centre via uniform scaling.
;; `amount` is a continuous scale factor (< 1 zooms out, > 1 zooms in),
;; so it is declared float (was int, which contradicted the [0.1 5] range
;; and would reject values like 0.5).
(require-primitives "geometry")
(define-effect zoom
  :params (
    (amount :type float :default 1.0 :range [0.1 5])
  )
  (geometry:scale-img frame amount amount))

1016
sexp_effects/interpreter.py Normal file

File diff suppressed because it is too large Load Diff

168
sexp_effects/parser.py Normal file
View File

@@ -0,0 +1,168 @@
"""
S-Expression Parser
Parses S-expressions into Python data structures:
- Lists become Python lists
- Symbols become Symbol objects
- Numbers become int/float
- Strings become str
- Keywords (:foo) become Keyword objects
"""
import re
from dataclasses import dataclass
from typing import Any, List, Union
@dataclass(frozen=True)
class Symbol:
    """An identifier token parsed from S-expression source.

    Frozen so instances are hashable and usable as dict keys.
    """
    name: str

    def __repr__(self):
        # Symbols print bare (no quotes) so nested structures read as sexprs.
        return str(self.name)
@dataclass(frozen=True)
class Keyword:
    """A keyword token such as :foo from the S-expression source.

    Frozen so instances are hashable and usable as dict keys.
    """
    name: str

    def __repr__(self):
        # Keywords print with their leading colon restored.
        return ":" + self.name
# Token patterns
# Order matters: earlier alternatives win, so FLOAT must precede INT and
# BOOL must precede SYMBOL. The bare words true/false carry \b so that a
# longer symbol such as `truename` is not split into BOOL 'true' +
# SYMBOL 'name' (the previous patterns had exactly that bug).
TOKEN_PATTERNS = [
    (r'\s+', None),          # Whitespace (skip)
    (r';[^\n]*', None),      # Comments (skip)
    (r'\(', 'LPAREN'),
    (r'\)', 'RPAREN'),
    (r'\[', 'LBRACKET'),
    (r'\]', 'RBRACKET'),
    (r"'", 'QUOTE'),
    (r'"([^"\\]|\\.)*"', 'STRING'),
    (r':[a-zA-Z_][a-zA-Z0-9_\-]*', 'KEYWORD'),
    (r'-?[0-9]+\.[0-9]+', 'FLOAT'),
    (r'-?[0-9]+', 'INT'),
    (r'#t|#f|true\b|false\b', 'BOOL'),
    (r'[a-zA-Z_+\-*/<>=!?][a-zA-Z0-9_+\-*/<>=!?]*', 'SYMBOL'),
]

# Combined alternation: real tokens get a named group; skipped tokens
# (whitespace, comments) get an anonymous group so lastgroup is None.
TOKEN_REGEX = '|'.join(f'(?P<{name}>{pattern})' if name else f'(?:{pattern})'
                       for pattern, name in TOKEN_PATTERNS)
def tokenize(source: str) -> List[tuple]:
    """Tokenize S-expression source into (kind, text) pairs.

    Whitespace and comment alternatives have no group name (lastgroup is
    None for them), so they are dropped here.
    """
    return [(match.lastgroup, match.group())
            for match in re.finditer(TOKEN_REGEX, source)
            if match.lastgroup is not None]
def parse(source: str) -> Any:
    """Parse S-expression source into Python data structures.

    Lists/vectors become Python lists, numbers become int/float, strings
    become str, :keywords become Keyword, identifiers become Symbol.
    Returns a single expression, or a list of expressions when the source
    contains more than one top-level form.

    Raises SyntaxError on unbalanced delimiters or truncated input.
    """
    tokens = tokenize(source)
    pos = [0]  # Use list for mutability in nested function

    def unescape(body: str) -> str:
        # Decode escapes left-to-right so '\\n' (escaped backslash, then
        # the letter n) is not mistaken for a newline escape — the old
        # chained str.replace calls got that (and bare '\\') wrong.
        # Known escapes: \" \\ \n; anything else is kept verbatim.
        known = {'n': '\n', '"': '"', '\\': '\\'}
        out = []
        i = 0
        while i < len(body):
            ch = body[i]
            if ch == '\\' and i + 1 < len(body) and body[i + 1] in known:
                out.append(known[body[i + 1]])
                i += 2
            else:
                out.append(ch)
                i += 1
        return ''.join(out)

    def parse_expr():
        if pos[0] >= len(tokens):
            raise SyntaxError("Unexpected end of input")
        kind, value = tokens[pos[0]]
        if kind == 'LPAREN':
            pos[0] += 1
            items = []
            while pos[0] < len(tokens) and tokens[pos[0]][0] != 'RPAREN':
                items.append(parse_expr())
            if pos[0] >= len(tokens):
                raise SyntaxError("Missing closing parenthesis")
            pos[0] += 1  # Skip RPAREN
            return items
        if kind == 'LBRACKET':
            pos[0] += 1
            items = []
            while pos[0] < len(tokens) and tokens[pos[0]][0] != 'RBRACKET':
                items.append(parse_expr())
            if pos[0] >= len(tokens):
                raise SyntaxError("Missing closing bracket")
            pos[0] += 1  # Skip RBRACKET
            return items
        elif kind == 'RPAREN':
            raise SyntaxError("Unexpected closing parenthesis")
        elif kind == 'QUOTE':
            pos[0] += 1
            # 'x expands to (quote x)
            return [Symbol('quote'), parse_expr()]
        elif kind == 'STRING':
            pos[0] += 1
            # Remove surrounding quotes, then decode escapes.
            return unescape(value[1:-1])
        elif kind == 'INT':
            pos[0] += 1
            return int(value)
        elif kind == 'FLOAT':
            pos[0] += 1
            return float(value)
        elif kind == 'BOOL':
            pos[0] += 1
            return value in ('#t', 'true')
        elif kind == 'KEYWORD':
            pos[0] += 1
            return Keyword(value[1:])  # Remove leading :
        elif kind == 'SYMBOL':
            pos[0] += 1
            return Symbol(value)
        else:
            raise SyntaxError(f"Unknown token: {kind} {value}")

    result = parse_expr()
    # Check for multiple top-level expressions
    if pos[0] < len(tokens):
        # Allow multiple top-level expressions, return as list
        results = [result]
        while pos[0] < len(tokens):
            results.append(parse_expr())
        return results
    return result
def parse_file(path: str) -> Any:
    """Read *path* and parse its entire contents as S-expressions."""
    with open(path, 'r') as handle:
        source = handle.read()
    return parse(source)
# Convenience for pretty-printing
def to_sexp(obj: Any) -> str:
    """Convert a Python object back to an S-expression string.

    Inverse of parse() for the supported types. Strings now escape
    embedded backslashes and double quotes (previously they were emitted
    raw, producing unreadable output for e.g. 'he"llo'). Primitive checks
    run before Symbol/Keyword; bool must precede int because bool is an
    int subclass.
    """
    if isinstance(obj, list):
        return '(' + ' '.join(to_sexp(x) for x in obj) + ')'
    if isinstance(obj, bool):
        return '#t' if obj else '#f'
    if isinstance(obj, (int, float)):
        return str(obj)
    if isinstance(obj, str):
        # Backslash first, then quotes, so escapes are not double-escaped.
        escaped = obj.replace('\\', '\\\\').replace('"', '\\"')
        return f'"{escaped}"'
    if isinstance(obj, Symbol):
        return obj.name
    if isinstance(obj, Keyword):
        return f':{obj.name}'
    return repr(obj)

View File

@@ -0,0 +1,102 @@
"""
Primitive Libraries System
Provides modular loading of primitives. Core primitives are always available,
additional primitive libraries can be loaded on-demand with scoped availability.
Usage in sexp:
;; Load at recipe level - available throughout
(primitives math :path "primitive_libs/math.py")
;; Or use with-primitives for scoped access
(with-primitives "image"
(blur frame 3)) ;; blur only available inside
;; Nested scopes work
(with-primitives "math"
(with-primitives "color"
(hue-shift frame (* (sin t) 30))))
Library file format (primitive_libs/math.py):
import math
def prim_sin(x): return math.sin(x)
def prim_cos(x): return math.cos(x)
PRIMITIVES = {
'sin': prim_sin,
'cos': prim_cos,
}
"""
import importlib.util
from pathlib import Path
from typing import Dict, Callable, Any, Optional
# Cache of loaded primitive libraries, keyed by explicit path (when given)
# or by library name.
_library_cache: Dict[str, Dict[str, Any]] = {}

# Core primitives - always available, cannot be overridden
CORE_PRIMITIVES: Dict[str, Any] = {}


def register_core_primitive(name: str, fn: Callable):
    """Register a core primitive that's always available."""
    CORE_PRIMITIVES[name] = fn


def load_primitive_library(name: str, path: Optional[str] = None) -> Dict[str, Any]:
    """
    Load a primitive library by name or path.

    Args:
        name: Library name (e.g., "math", "image", "color")
        path: Optional explicit path to library file

    Returns:
        Dict of primitive name -> function

    Raises:
        ValueError: if the library file does not exist or defines no
            PRIMITIVES dict.
    """
    # Check cache first
    cache_key = path or name
    if cache_key in _library_cache:
        return _library_cache[cache_key]
    # Find library file
    if path:
        lib_path = Path(path)
    else:
        # Look in the standard location next to this module. The
        # name-specific error is scoped to this branch (previously the
        # same exists() check appeared twice in a row, leaving dead code).
        lib_dir = Path(__file__).parent
        lib_path = lib_dir / f"{name}.py"
        if not lib_path.exists():
            raise ValueError(f"Primitive library '{name}' not found at {lib_path}")
    if not lib_path.exists():
        raise ValueError(f"Primitive library file not found: {lib_path}")
    # Load the module under a unique synthetic name.
    spec = importlib.util.spec_from_file_location(f"prim_lib_{name}", lib_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    # Get PRIMITIVES dict from module
    if not hasattr(module, 'PRIMITIVES'):
        raise ValueError(f"Primitive library '{name}' missing PRIMITIVES dict")
    primitives = module.PRIMITIVES
    # Cache and return
    _library_cache[cache_key] = primitives
    return primitives
def get_library_names() -> list:
    """Names of primitive-library modules next to this file.

    Scans this module's directory for ``*.py`` files, excluding
    ``__init__``.
    """
    here = Path(__file__).parent
    return [candidate.stem for candidate in here.glob("*.py")
            if candidate.stem != "__init__"]
def clear_cache():
    """Forget every previously loaded primitive library.

    Primarily useful in tests, where a library file may change between
    loads and a stale cached PRIMITIVES dict would be returned otherwise.
    """
    _library_cache.clear()

View File

@@ -0,0 +1,196 @@
"""
Array Primitives Library
Vectorized operations on numpy arrays for coordinate transformations.
"""
import numpy as np
# Arithmetic
def prim_arr_add(a, b):
    """Element-wise a + b."""
    return np.add(a, b)


def prim_arr_sub(a, b):
    """Element-wise a - b."""
    return np.subtract(a, b)


def prim_arr_mul(a, b):
    """Element-wise a * b."""
    return np.multiply(a, b)


def prim_arr_div(a, b):
    """Element-wise a / b (no zero guard; numpy's divide semantics apply)."""
    return np.divide(a, b)


def prim_arr_mod(a, b):
    """Element-wise a mod b."""
    return np.mod(a, b)


def prim_arr_neg(a):
    """Element-wise negation."""
    return np.negative(a)


# Math functions
def prim_arr_sin(a):
    """Element-wise sine (radians)."""
    return np.sin(a)


def prim_arr_cos(a):
    """Element-wise cosine (radians)."""
    return np.cos(a)


def prim_arr_tan(a):
    """Element-wise tangent (radians)."""
    return np.tan(a)


def prim_arr_sqrt(a):
    """Element-wise square root; negatives are clamped to 0 to avoid NaNs."""
    return np.sqrt(np.maximum(a, 0))


def prim_arr_pow(a, b):
    """Element-wise a ** b."""
    return np.power(a, b)


def prim_arr_abs(a):
    """Element-wise absolute value."""
    return np.abs(a)


def prim_arr_exp(a):
    """Element-wise e ** a."""
    return np.exp(a)


def prim_arr_log(a):
    """Element-wise natural log; inputs clamped to 1e-10 to avoid -inf."""
    return np.log(np.maximum(a, 1e-10))


def prim_arr_atan2(y, x):
    """Element-wise atan2(y, x)."""
    return np.arctan2(y, x)


# Comparison / selection
def prim_arr_min(a, b):
    """Element-wise minimum."""
    return np.minimum(a, b)


def prim_arr_max(a, b):
    """Element-wise maximum."""
    return np.maximum(a, b)


def prim_arr_clip(a, lo, hi):
    """Clamp values into [lo, hi]."""
    return np.clip(a, lo, hi)


def prim_arr_where(cond, a, b):
    """Element-wise select: a where cond is truthy, else b."""
    return np.where(cond, a, b)


def prim_arr_floor(a):
    """Element-wise floor."""
    return np.floor(a)


def prim_arr_ceil(a):
    """Element-wise ceiling."""
    return np.ceil(a)


def prim_arr_round(a):
    """Element-wise rounding (numpy's round-half-to-even)."""
    return np.round(a)


# Interpolation
def prim_arr_lerp(a, b, t):
    """Linear interpolation a + (b - a) * t."""
    return a + (b - a) * t


def prim_arr_smoothstep(edge0, edge1, x):
    """Hermite smoothstep of x between edge0 and edge1 (clamped to [0, 1])."""
    t = prim_arr_clip((x - edge0) / (edge1 - edge0), 0.0, 1.0)
    return t * t * (3 - 2 * t)


# Creation
def prim_arr_zeros(shape):
    """float32 array of zeros."""
    return np.zeros(shape, dtype=np.float32)


def prim_arr_ones(shape):
    """float32 array of ones."""
    return np.ones(shape, dtype=np.float32)


def prim_arr_full(shape, value):
    """float32 array filled with value."""
    return np.full(shape, value, dtype=np.float32)


def prim_arr_arange(start, stop, step=1):
    """float32 equivalent of range(start, stop, step)."""
    return np.arange(start, stop, step, dtype=np.float32)


def prim_arr_linspace(start, stop, num):
    """num float32 values evenly spaced from start to stop inclusive."""
    return np.linspace(start, stop, num, dtype=np.float32)


def prim_arr_meshgrid(x, y):
    """Coordinate matrices from coordinate vectors (numpy meshgrid)."""
    return np.meshgrid(x, y)


# Coordinate transforms
def prim_polar_from_center(map_x, map_y, cx, cy):
    """Convert Cartesian to polar coordinates centered at (cx, cy)."""
    dx = map_x - cx
    dy = map_y - cy
    r = np.sqrt(dx**2 + dy**2)
    theta = np.arctan2(dy, dx)
    return (r, theta)


def prim_cart_from_polar(r, theta, cx, cy):
    """Convert polar to Cartesian, adding center offset."""
    x = r * np.cos(theta) + cx
    y = r * np.sin(theta) + cy
    return (x, y)


# Export table consumed by the primitive-library loader.
PRIMITIVES = {
    # Arithmetic
    'arr+': prim_arr_add,
    'arr-': prim_arr_sub,
    'arr*': prim_arr_mul,
    'arr/': prim_arr_div,
    'arr-mod': prim_arr_mod,
    'arr-neg': prim_arr_neg,
    # Math
    'arr-sin': prim_arr_sin,
    'arr-cos': prim_arr_cos,
    'arr-tan': prim_arr_tan,
    'arr-sqrt': prim_arr_sqrt,
    'arr-pow': prim_arr_pow,
    'arr-abs': prim_arr_abs,
    'arr-exp': prim_arr_exp,
    'arr-log': prim_arr_log,
    'arr-atan2': prim_arr_atan2,
    # Selection
    'arr-min': prim_arr_min,
    'arr-max': prim_arr_max,
    'arr-clip': prim_arr_clip,
    'arr-where': prim_arr_where,
    'arr-floor': prim_arr_floor,
    'arr-ceil': prim_arr_ceil,
    'arr-round': prim_arr_round,
    # Interpolation
    'arr-lerp': prim_arr_lerp,
    'arr-smoothstep': prim_arr_smoothstep,
    # Creation
    'arr-zeros': prim_arr_zeros,
    'arr-ones': prim_arr_ones,
    'arr-full': prim_arr_full,
    'arr-arange': prim_arr_arange,
    'arr-linspace': prim_arr_linspace,
    'arr-meshgrid': prim_arr_meshgrid,
    # Coordinates
    'polar-from-center': prim_polar_from_center,
    'cart-from-polar': prim_cart_from_polar,
}

View File

@@ -0,0 +1,388 @@
"""
ASCII Art Primitives Library
ASCII art rendering with per-zone expression evaluation and cell effects.
"""
import numpy as np
import cv2
from PIL import Image, ImageDraw, ImageFont
from typing import Any, Dict, List, Optional, Callable
import colorsys
# Character sets
CHAR_SETS = {
"standard": " .:-=+*#%@",
"blocks": " ░▒▓█",
"simple": " .:oO@",
"digits": "0123456789",
"binary": "01",
"ascii": " `.-':_,^=;><+!rc*/z?sLTv)J7(|Fi{C}fI31tlu[neoZ5Yxjya]2ESwqkP6h9d4VpOGbUAKXHm8RD#$Bg0MNWQ%&@",
}
# Default font
_default_font = None
def _get_font(size: int):
"""Get monospace font at given size."""
global _default_font
try:
return ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", size)
except:
return ImageFont.load_default()
def _parse_color(color_str: str) -> tuple:
"""Parse color string to RGB tuple."""
if color_str.startswith('#'):
hex_color = color_str[1:]
if len(hex_color) == 3:
hex_color = ''.join(c*2 for c in hex_color)
return tuple(int(hex_color[i:i+2], 16) for i in (0, 2, 4))
colors = {
'black': (0, 0, 0), 'white': (255, 255, 255),
'red': (255, 0, 0), 'green': (0, 255, 0), 'blue': (0, 0, 255),
'yellow': (255, 255, 0), 'cyan': (0, 255, 255), 'magenta': (255, 0, 255),
'gray': (128, 128, 128), 'grey': (128, 128, 128),
}
return colors.get(color_str.lower(), (0, 0, 0))
def _cell_sample(frame: np.ndarray, cell_size: int):
"""Sample frame into cells, returning colors and luminances.
Uses cv2.resize with INTER_AREA (pixel-area averaging) which is
~25x faster than numpy reshape+mean for block downsampling.
"""
h, w = frame.shape[:2]
rows = h // cell_size
cols = w // cell_size
# Crop to exact grid then block-average via cv2 area interpolation.
cropped = frame[:rows * cell_size, :cols * cell_size]
colors = cv2.resize(cropped, (cols, rows), interpolation=cv2.INTER_AREA)
luminances = ((0.299 * colors[:, :, 0] +
0.587 * colors[:, :, 1] +
0.114 * colors[:, :, 2]) / 255.0).astype(np.float32)
return colors, luminances
def _luminance_to_char(lum: float, alphabet: str, contrast: float) -> str:
"""Map luminance to character."""
chars = CHAR_SETS.get(alphabet, alphabet)
lum = ((lum - 0.5) * contrast + 0.5)
lum = max(0, min(1, lum))
idx = int(lum * (len(chars) - 1))
return chars[idx]
def _render_char_cell(char: str, cell_size: int, color: tuple, bg_color: tuple) -> np.ndarray:
    """Render a single character to a cell_size x cell_size RGB array."""
    img = Image.new('RGB', (cell_size, cell_size), bg_color)
    draw = ImageDraw.Draw(img)
    font = _get_font(cell_size)
    # Center the character using its rendered bounding box; subtracting
    # bbox[1] compensates for the glyph's vertical bearing so the visible
    # ink (not the nominal box) is centered.
    bbox = draw.textbbox((0, 0), char, font=font)
    text_w = bbox[2] - bbox[0]
    text_h = bbox[3] - bbox[1]
    x = (cell_size - text_w) // 2
    y = (cell_size - text_h) // 2 - bbox[1]
    draw.text((x, y), char, fill=color, font=font)
    return np.array(img)
def prim_ascii_fx_zone(
    frame: np.ndarray,
    cols: int = 80,
    char_size: int = None,
    alphabet: str = "standard",
    color_mode: str = "color",
    background: str = "black",
    contrast: float = 1.5,
    char_hue = None,
    char_saturation = None,
    char_brightness = None,
    char_scale = None,
    char_rotation = None,
    char_jitter = None,
    cell_effect = None,
    energy: float = None,
    rotation_scale: float = 0,
    _interp = None,
    _env = None,
    **extra_params
) -> np.ndarray:
    """
    Render frame as ASCII art with per-zone effects.

    Args:
        frame: Input image
        cols: Number of character columns
        char_size: Cell size in pixels (overrides cols if set)
        alphabet: Character set name or custom string
        color_mode: "color", "mono", "invert", or color name
        background: Background color name or hex
        contrast: Contrast for character selection
        char_hue/saturation/brightness/scale/rotation/jitter: Per-zone expressions
        cell_effect: Lambda (cell, zone) -> cell for per-cell effects
        energy: Energy value from audio analysis
        rotation_scale: Max rotation degrees
        _interp: Interpreter (auto-injected)
        _env: Environment (auto-injected)
        **extra_params: Additional params passed to zone dict
    """
    h, w = frame.shape[:2]
    # Calculate cell size: either derived from the requested column count
    # or taken directly from char_size; never below 4px.
    if char_size is None or char_size == 0:
        cell_size = max(4, w // cols)
    else:
        cell_size = max(4, int(char_size))
    # Sample cells (block-averaged colors + normalized luminance grid).
    colors, luminances = _cell_sample(frame, cell_size)
    rows, cols_actual = luminances.shape
    # Parse background color
    bg_color = _parse_color(background)
    # Create output image sized to the exact cell grid (resized at the end).
    out_h = rows * cell_size
    out_w = cols_actual * cell_size
    output = np.full((out_h, out_w, 3), bg_color, dtype=np.uint8)
    # Check if we have cell_effect
    has_cell_effect = cell_effect is not None
    # Process each cell
    for r in range(rows):
        for c in range(cols_actual):
            lum = luminances[r, c]
            cell_color = tuple(colors[r, c])
            # Build zone context — the per-cell variable set exposed to
            # zone expressions and the cell_effect lambda.
            zone = {
                'row': r,
                'col': c,
                'row-norm': r / max(1, rows - 1),
                'col-norm': c / max(1, cols_actual - 1),
                'lum': float(lum),
                'r': cell_color[0] / 255,
                'g': cell_color[1] / 255,
                'b': cell_color[2] / 255,
                'cell_size': cell_size,
            }
            # Add HSV (hue in degrees, saturation 0-1)
            r_f, g_f, b_f = cell_color[0]/255, cell_color[1]/255, cell_color[2]/255
            hsv = colorsys.rgb_to_hsv(r_f, g_f, b_f)
            zone['hue'] = hsv[0] * 360
            zone['sat'] = hsv[1]
            # Add energy and rotation_scale (only when audio data present)
            if energy is not None:
                zone['energy'] = energy
                zone['rotation_scale'] = rotation_scale
            # Add extra params (scalars only; complex objects are skipped)
            for k, v in extra_params.items():
                if isinstance(v, (int, float, str, bool)) or v is None:
                    zone[k] = v
            # Get character for this cell's luminance
            char = _luminance_to_char(lum, alphabet, contrast)
            zone['char'] = char
            # Determine cell color based on mode
            if color_mode == "mono":
                render_color = (255, 255, 255)
            elif color_mode == "invert":
                render_color = tuple(255 - c for c in cell_color)
            elif color_mode == "color":
                render_color = cell_color
            else:
                # Any other value is treated as a fixed color name/hex.
                render_color = _parse_color(color_mode)
            zone['color'] = render_color
            # Render character to cell
            cell_img = _render_char_cell(char, cell_size, render_color, bg_color)
            # Apply cell_effect if provided (needs the interpreter)
            if has_cell_effect and _interp is not None:
                cell_img = _apply_cell_effect(cell_img, zone, cell_effect, _interp, _env, extra_params)
            # Paste cell to output
            y1, y2 = r * cell_size, (r + 1) * cell_size
            x1, x2 = c * cell_size, (c + 1) * cell_size
            output[y1:y2, x1:x2] = cell_img
    # Resize to match input dimensions (grid cropping may have shrunk it)
    if output.shape[:2] != frame.shape[:2]:
        output = cv2.resize(output, (w, h), interpolation=cv2.INTER_LINEAR)
    return output
def _apply_cell_effect(cell_img, zone, cell_effect, interp, env, extra_params):
    """Apply cell_effect lambda to a cell image.

    cell_effect is a Lambda object with params and body.
    We create a child environment with zone variables and cell,
    then evaluate the lambda body.

    Accepts three shapes of cell_effect: an interpreter Lambda object,
    a raw (lambda [cell zone] body) S-expression list, or a plain Python
    callable. Returns the (possibly resized) resulting image array.
    """
    # Get Environment class from the interpreter's module — avoids a
    # direct import of the interpreter here.
    Environment = type(env)
    # Create child environment with zone variables
    cell_env = Environment(env)
    # Bind zone variables
    for k, v in zone.items():
        cell_env.set(k, v)
    # Also bind with zone- prefix for consistency
    cell_env.set('zone-row', zone.get('row', 0))
    cell_env.set('zone-col', zone.get('col', 0))
    cell_env.set('zone-row-norm', zone.get('row-norm', 0))
    cell_env.set('zone-col-norm', zone.get('col-norm', 0))
    cell_env.set('zone-lum', zone.get('lum', 0))
    cell_env.set('zone-sat', zone.get('sat', 0))
    cell_env.set('zone-hue', zone.get('hue', 0))
    cell_env.set('zone-r', zone.get('r', 0))
    cell_env.set('zone-g', zone.get('g', 0))
    cell_env.set('zone-b', zone.get('b', 0))
    # Inject loaded effects as callable functions so the lambda can call
    # e.g. (blur cell 3) directly.
    if hasattr(interp, 'effects'):
        for effect_name in interp.effects:
            def make_effect_fn(name):
                # Factory binds `name` per iteration (avoids the classic
                # late-binding-closure bug in the loop).
                def effect_fn(frame, *args):
                    # Map positional args onto each known effect's named
                    # parameters before dispatching through run_effect.
                    params = {}
                    if name == 'blur' and len(args) >= 1:
                        params['radius'] = args[0]
                    elif name == 'rotate' and len(args) >= 1:
                        params['angle'] = args[0]
                    elif name == 'brightness' and len(args) >= 1:
                        params['amount'] = args[0]
                    elif name == 'contrast' and len(args) >= 1:
                        params['amount'] = args[0]
                    elif name == 'saturation' and len(args) >= 1:
                        params['amount'] = args[0]
                    elif name == 'hue_shift' and len(args) >= 1:
                        params['degrees'] = args[0]
                    elif name == 'rgb_split' and len(args) >= 2:
                        params['offset_x'] = args[0]
                        params['offset_y'] = args[1]
                    elif name == 'pixelate' and len(args) >= 1:
                        params['size'] = args[0]
                    elif name == 'invert':
                        pass
                    result, _ = interp.run_effect(name, frame, params, {})
                    return result
                return effect_fn
            cell_env.set(effect_name, make_effect_fn(effect_name))
    # Bind cell image and zone dict
    cell_env.set('cell', cell_img)
    cell_env.set('zone', zone)
    # Evaluate the cell_effect lambda
    # Lambda has params and body - we need to bind the params then evaluate
    if hasattr(cell_effect, 'params') and hasattr(cell_effect, 'body'):
        # Bind lambda parameters: (lambda [cell zone] body)
        if len(cell_effect.params) >= 1:
            cell_env.set(cell_effect.params[0], cell_img)
        if len(cell_effect.params) >= 2:
            cell_env.set(cell_effect.params[1], zone)
        result = interp.eval(cell_effect.body, cell_env)
    elif isinstance(cell_effect, list):
        # Raw S-expression lambda like (lambda [cell zone] body) or (fn [cell zone] body)
        # Check if it's a lambda expression
        head = cell_effect[0] if cell_effect else None
        head_name = head.name if head and hasattr(head, 'name') else str(head) if head else None
        is_lambda = head_name in ('lambda', 'fn')
        if is_lambda:
            # (lambda [params...] body)
            params = cell_effect[1] if len(cell_effect) > 1 else []
            body = cell_effect[2] if len(cell_effect) > 2 else None
            # Bind lambda parameters
            if isinstance(params, list) and len(params) >= 1:
                param_name = params[0].name if hasattr(params[0], 'name') else str(params[0])
                cell_env.set(param_name, cell_img)
            if isinstance(params, list) and len(params) >= 2:
                param_name = params[1].name if hasattr(params[1], 'name') else str(params[1])
                cell_env.set(param_name, zone)
            result = interp.eval(body, cell_env) if body else cell_img
        else:
            # Some other expression - just evaluate it
            result = interp.eval(cell_effect, cell_env)
    elif callable(cell_effect):
        # It's a callable
        result = cell_effect(cell_img, zone)
    else:
        raise ValueError(f"cell_effect must be a Lambda, list, or callable, got {type(cell_effect)}")
    if isinstance(result, np.ndarray) and result.shape == cell_img.shape:
        return result
    elif isinstance(result, np.ndarray):
        # Shape mismatch - resize to fit the cell
        result = cv2.resize(result, (cell_img.shape[1], cell_img.shape[0]))
        return result
    raise ValueError(f"cell_effect must return an image array, got {type(result)}")
def _get_legacy_ascii_primitives():
    """Import ASCII primitives from legacy primitives module.

    These are loaded lazily to avoid import issues during module loading.
    By the time a primitive library is loaded, sexp_effects.primitives
    is already in sys.modules (imported by sexp_effects.__init__).

    Returns a dict mapping sexp primitive names to the legacy functions.
    """
    from sexp_effects.primitives import (
        prim_cell_sample,
        prim_luminance_to_chars,
        prim_render_char_grid,
        prim_render_char_grid_fx,
        prim_alphabet_char,
        prim_alphabet_length,
        prim_map_char_grid,
        prim_map_colors,
        prim_make_char_grid,
        prim_set_char,
        prim_get_char,
        prim_char_grid_dimensions,
        cell_sample_extended,
    )
    return {
        'cell-sample': prim_cell_sample,
        'cell-sample-extended': cell_sample_extended,
        'luminance-to-chars': prim_luminance_to_chars,
        'render-char-grid': prim_render_char_grid,
        'render-char-grid-fx': prim_render_char_grid_fx,
        'alphabet-char': prim_alphabet_char,
        'alphabet-length': prim_alphabet_length,
        'map-char-grid': prim_map_char_grid,
        'map-colors': prim_map_colors,
        'make-char-grid': prim_make_char_grid,
        'set-char': prim_set_char,
        'get-char': prim_get_char,
        'char-grid-dimensions': prim_char_grid_dimensions,
    }
# Export table: the zone-based renderer plus the legacy ASCII primitives
# re-exported from sexp_effects.primitives (resolved lazily above).
PRIMITIVES = {
    'ascii-fx-zone': prim_ascii_fx_zone,
    **_get_legacy_ascii_primitives(),
}

View File

@@ -0,0 +1,116 @@
"""
Blending Primitives Library
Image blending and compositing operations.
"""
import numpy as np
def prim_blend_images(a, b, alpha):
    """Linear cross-fade a*(1-alpha) + b*alpha; alpha clamped to [0, 1]."""
    t = min(1.0, max(0.0, alpha))
    mixed = a.astype(float) * (1 - t) + b.astype(float) * t
    return mixed.astype(np.uint8)
def prim_blend_mode(a, b, mode):
    """Blend using Photoshop-style blend modes.

    Both inputs are normalized to [0, 1]; an unrecognized mode falls
    back to "normal" (just b).
    """
    base = a.astype(float) / 255
    top = b.astype(float) / 255
    modes = {
        "multiply": lambda: base * top,
        "screen": lambda: 1 - (1 - base) * (1 - top),
        "overlay": lambda: np.where(base < 0.5,
                                    2 * base * top,
                                    1 - 2 * (1 - base) * (1 - top)),
        "soft-light": lambda: np.where(top < 0.5,
                                       base - (1 - 2 * top) * base * (1 - base),
                                       base + (2 * top - 1) * (np.sqrt(base) - base)),
        "hard-light": lambda: np.where(top < 0.5,
                                       2 * base * top,
                                       1 - 2 * (1 - base) * (1 - top)),
        # Small epsilon avoids division by zero in dodge/burn.
        "color-dodge": lambda: np.clip(base / (1 - top + 0.001), 0, 1),
        "color-burn": lambda: 1 - np.clip((1 - base) / (top + 0.001), 0, 1),
        "difference": lambda: np.abs(base - top),
        "exclusion": lambda: base + top - 2 * base * top,
        "add": lambda: np.clip(base + top, 0, 1),
        "subtract": lambda: np.clip(base - top, 0, 1),
        "darken": lambda: np.minimum(base, top),
        "lighten": lambda: np.maximum(base, top),
    }
    result = modes.get(mode, lambda: top)()
    return (result * 255).astype(np.uint8)
def prim_mask(img, mask_img):
    """Apply grayscale mask to image (white=opaque, black=transparent).

    Accepts a 2D mask or a 3D image (first channel used).
    """
    channel = mask_img[:, :, 0] if mask_img.ndim == 3 else mask_img
    weights = (channel.astype(float) / 255)[:, :, np.newaxis]
    return (img.astype(float) * weights).astype(np.uint8)


def prim_alpha_composite(base, overlay, alpha_channel):
    """Composite overlay onto base using a 0-255 alpha channel.

    Accepts a 2D alpha or a 3D image (first channel used).
    """
    channel = alpha_channel[:, :, 0] if alpha_channel.ndim == 3 else alpha_channel
    alpha = (channel.astype(float) / 255)[:, :, np.newaxis]
    composed = base.astype(float) * (1 - alpha) + overlay.astype(float) * alpha
    return composed.astype(np.uint8)
def prim_overlay(base, overlay, x, y, alpha=1.0):
    """Overlay image at position (x, y) with optional alpha.

    (x, y) may be negative or push the overlay partially off-frame; the
    source and destination rectangles are clipped to the visible
    intersection. Returns a new image; `base` is not modified.
    """
    result = base.copy()
    x, y = int(x), int(y)
    oh, ow = overlay.shape[:2]
    bh, bw = base.shape[:2]
    # Clip to bounds: s* index into the overlay (source), d* into the base
    # (destination); both rectangles have identical extents by construction.
    sx1 = max(0, -x)
    sy1 = max(0, -y)
    dx1 = max(0, x)
    dy1 = max(0, y)
    sx2 = min(ow, bw - x)
    sy2 = min(oh, bh - y)
    if sx2 > sx1 and sy2 > sy1:  # skip entirely off-frame overlays
        src = overlay[sy1:sy2, sx1:sx2]
        dst = result[dy1:dy1+(sy2-sy1), dx1:dx1+(sx2-sx1)]
        # Uniform alpha blend over the overlapping region only.
        blended = (dst.astype(float) * (1 - alpha) + src.astype(float) * alpha)
        result[dy1:dy1+(sy2-sy1), dx1:dx1+(sx2-sx1)] = blended.astype(np.uint8)
    return result
# Export table consumed by the primitive-library loader.
PRIMITIVES = {
    # Basic blending
    'blend-images': prim_blend_images,
    'blend-mode': prim_blend_mode,
    # Masking
    'mask': prim_mask,
    'alpha-composite': prim_alpha_composite,
    # Overlay
    'overlay': prim_overlay,
}

View File

@@ -0,0 +1,137 @@
"""
Color Primitives Library
Color manipulation: RGB, HSV, blending, luminance.
"""
import numpy as np
import colorsys
def prim_rgb(r, g, b):
    """Build an [r, g, b] list, clamping each channel to an int in 0-255."""
    def clamp(value):
        return int(max(0, min(255, value)))
    return [clamp(r), clamp(g), clamp(b)]


def prim_red(c):
    """Red channel of color c."""
    return c[0]


def prim_green(c):
    """Green channel of color c."""
    return c[1]


def prim_blue(c):
    """Blue channel of color c."""
    return c[2]


def prim_luminance(c):
    """Perceived luminance (0-1) using standard Rec. 601 weights."""
    return (0.299 * c[0] + 0.587 * c[1] + 0.114 * c[2]) / 255
def prim_rgb_to_hsv(c):
    """Convert RGB [0-255] to HSV [h:0-360, s:0-1, v:0-1]."""
    h, s, v = colorsys.rgb_to_hsv(c[0] / 255, c[1] / 255, c[2] / 255)
    return [h * 360, s, v]


def prim_hsv_to_rgb(hsv):
    """Convert HSV [h:0-360, s:0-1, v:0-1] to RGB [0-255]."""
    r, g, b = colorsys.hsv_to_rgb(hsv[0] / 360, hsv[1], hsv[2])
    return [int(r * 255), int(g * 255), int(b * 255)]


def prim_rgb_to_hsl(c):
    """Convert RGB [0-255] to HSL [h:0-360, s:0-1, l:0-1]."""
    # colorsys works in HLS order; we expose HSL.
    h, l, s = colorsys.rgb_to_hls(c[0] / 255, c[1] / 255, c[2] / 255)
    return [h * 360, s, l]


def prim_hsl_to_rgb(hsl):
    """Convert HSL [h:0-360, s:0-1, l:0-1] to RGB [0-255]."""
    # colorsys expects (h, l, s).
    r, g, b = colorsys.hls_to_rgb(hsl[0] / 360, hsl[2], hsl[1])
    return [int(r * 255), int(g * 255), int(b * 255)]


def prim_blend_color(c1, c2, alpha):
    """Blend two colors: c1 * (1-alpha) + c2 * alpha, truncated to ints."""
    return [int((1 - alpha) * c1[i] + alpha * c2[i]) for i in range(3)]


def prim_average_color(img):
    """Average color of an image as an [r, g, b] int list."""
    mean_rgb = np.mean(img, axis=(0, 1))
    return [int(mean_rgb[i]) for i in range(3)]


def prim_dominant_color(img, k=1):
    """Dominant color (currently approximated by the average color)."""
    return prim_average_color(img)


def prim_invert_color(c):
    """Invert a color channel-wise."""
    return [255 - c[0], 255 - c[1], 255 - c[2]]


def prim_grayscale_color(c):
    """Collapse a color to its gray equivalent using Rec. 601 weights."""
    gray = int(0.299 * c[0] + 0.587 * c[1] + 0.114 * c[2])
    return [gray, gray, gray]


def prim_saturate(c, amount):
    """Scale saturation: 0 is grayscale, 1 unchanged, >1 more saturated."""
    hsv = prim_rgb_to_hsv(c)
    hsv[1] = max(0, min(1, hsv[1] * amount))
    return prim_hsv_to_rgb(hsv)


def prim_brighten(c, amount):
    """Scale brightness: 0 is black, 1 unchanged, >1 brighter (clamped)."""
    return [int(max(0, min(255, c[i] * amount))) for i in range(3)]


def prim_shift_hue(c, degrees):
    """Rotate hue by the given number of degrees (wraps at 360)."""
    hsv = prim_rgb_to_hsv(c)
    hsv[0] = (hsv[0] + degrees) % 360
    return prim_hsv_to_rgb(hsv)
# Export table consumed by the primitive-library loader.
PRIMITIVES = {
    # Construction
    'rgb': prim_rgb,
    # Component access
    'red': prim_red,
    'green': prim_green,
    'blue': prim_blue,
    'luminance': prim_luminance,
    # Color space conversion
    'rgb->hsv': prim_rgb_to_hsv,
    'hsv->rgb': prim_hsv_to_rgb,
    'rgb->hsl': prim_rgb_to_hsl,
    'hsl->rgb': prim_hsl_to_rgb,
    # Blending
    'blend-color': prim_blend_color,
    # Analysis
    'average-color': prim_average_color,
    'dominant-color': prim_dominant_color,
    # Manipulation
    'invert-color': prim_invert_color,
    'grayscale-color': prim_grayscale_color,
    'saturate': prim_saturate,
    'brighten': prim_brighten,
    'shift-hue': prim_shift_hue,
}

View File

@@ -0,0 +1,90 @@
"""
Color Operations Primitives Library
Vectorized color adjustments: brightness, contrast, saturation, invert, HSV.
These operate on entire images for fast processing.
"""
import numpy as np
import cv2
def prim_adjust(img, brightness=0, contrast=1):
    """Brightness/contrast around mid-gray.

    Formula: (x - 128) * contrast + 128 + brightness.
    Brightness: -255 to 255, Contrast: 0 to 3+.
    """
    shifted = (img.astype(np.float32) - 128) * contrast + 128 + brightness
    return np.clip(shifted, 0, 255).astype(np.uint8)


def prim_mix_gray(img, amount):
    """Mix image with its grayscale version. 0=original, 1=grayscale."""
    lum = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
    gray_rgb = np.stack((lum, lum, lum), axis=-1)
    mixed = img.astype(np.float32) * (1 - amount) + gray_rgb * amount
    return np.clip(mixed, 0, 255).astype(np.uint8)


def prim_invert_img(img):
    """Invert all pixel values (255 - x per channel)."""
    return np.subtract(255, img).astype(np.uint8)
def prim_shift_hsv(img, h=0, s=1, v=1):
    """Shift HSV: h is a degrees offset; s and v are multipliers."""
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.float32)
    # OpenCV stores hue as 0-179 (degrees / 2) for 8-bit images.
    hsv[:, :, 0] = (hsv[:, :, 0] + h / 2) % 180
    hsv[:, :, 1] = np.clip(hsv[:, :, 1] * s, 0, 255)
    hsv[:, :, 2] = np.clip(hsv[:, :, 2] * v, 0, 255)
    return cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2RGB)


def prim_add_noise(img, amount):
    """Add zero-mean gaussian noise with standard deviation `amount`."""
    noisy = img.astype(np.float32) + np.random.normal(0, amount, img.shape)
    return np.clip(noisy, 0, 255).astype(np.uint8)


def prim_quantize(img, levels):
    """Reduce to N color levels per channel (N forced to at least 2)."""
    levels = max(2, int(levels))
    step = 256 / levels
    posterized = (img // step) * step + step // 2
    return np.clip(posterized, 0, 255).astype(np.uint8)


def prim_sepia(img, intensity=1.0):
    """Classic sepia tone, linearly mixed with the original by intensity."""
    sepia_matrix = np.array([
        [0.393, 0.769, 0.189],
        [0.349, 0.686, 0.168],
        [0.272, 0.534, 0.131]
    ])
    toned = np.dot(img, sepia_matrix.T)
    blended = img.astype(np.float32) * (1 - intensity) + toned * intensity
    return np.clip(blended, 0, 255).astype(np.uint8)


def prim_grayscale(img):
    """Convert to grayscale using Rec. 601 weights (still RGB output)."""
    lum = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
    return np.stack((lum, lum, lum), axis=-1).astype(np.uint8)
# Export table consumed by the primitive-library loader.
PRIMITIVES = {
    # Brightness/Contrast
    'adjust': prim_adjust,
    # Saturation
    'mix-gray': prim_mix_gray,
    'grayscale': prim_grayscale,
    # HSV manipulation
    'shift-hsv': prim_shift_hsv,
    # Inversion
    'invert-img': prim_invert_img,
    # Effects
    'add-noise': prim_add_noise,
    'quantize': prim_quantize,
    'sepia': prim_sepia,
}

View File

@@ -0,0 +1,271 @@
"""
Core Primitives - Always available, minimal essential set.
These are the primitives that form the foundation of the language.
They cannot be overridden by libraries.
"""
# Arithmetic
import math  # hoisted: floor/ceil previously re-imported math on every call


def prim_add(*args):
    """Variadic +; returns 0 for no args. Works for any type supporting +."""
    if not args:
        return 0
    total = args[0]
    for term in args[1:]:
        total = total + term
    return total


def prim_sub(a, b=None):
    """(- a) negates a; (- a b) subtracts."""
    return -a if b is None else a - b


def prim_mul(*args):
    """Variadic *; returns 1 for no args."""
    if not args:
        return 1
    product = args[0]
    for factor in args[1:]:
        product = product * factor
    return product


def prim_div(a, b):
    """True division a / b (raises ZeroDivisionError when b is 0)."""
    return a / b


def prim_mod(a, b):
    """Python modulo (result takes the sign of b)."""
    return a % b


def prim_abs(x):
    """Absolute value."""
    return abs(x)


def prim_min(*args):
    """Minimum of the arguments."""
    return min(args)


def prim_max(*args):
    """Maximum of the arguments."""
    return max(args)


def prim_round(x):
    """Round to nearest (Python's banker's rounding on ties)."""
    return round(x)


def prim_floor(x):
    """Largest integer <= x."""
    return math.floor(x)


def prim_ceil(x):
    """Smallest integer >= x."""
    return math.ceil(x)
# Comparison
def prim_lt(a, b):
    """a < b"""
    return a < b
def prim_gt(a, b):
    """a > b"""
    return a > b
def prim_le(a, b):
    """a <= b"""
    return a <= b
def prim_ge(a, b):
    """a >= b"""
    return a >= b
def prim_eq(a, b):
    """Equality; floats compare with absolute tolerance 1e-9."""
    if isinstance(a, float) or isinstance(b, float):
        return abs(a - b) < 1e-9
    return a == b
def prim_ne(a, b):
    """Negation of prim_eq (same float tolerance)."""
    return not prim_eq(a, b)
# Logic
def prim_not(x):
    """Logical negation (truthiness-based)."""
    return not x
def prim_and(*args):
    """True iff every argument is truthy (empty call -> True)."""
    return all(args)
def prim_or(*args):
    """True iff any argument is truthy (empty call -> False)."""
    return any(args)
# Basic data access
def prim_get(obj, key, default=None):
    """Get a value from a dict (by key) or a list/tuple (by integer index).

    Returns `default` for missing keys, out-of-range indices, indices that
    cannot be converted to int (e.g. None or "abc"), unhashable dict keys,
    and unsupported container types.  The original only caught IndexError
    and ValueError, so prim_get([1, 2], None) raised TypeError instead of
    falling back to the default.
    """
    if isinstance(obj, dict):
        try:
            return obj.get(key, default)
        except TypeError:
            # Unhashable key (e.g. a list) — treat as missing.
            return default
    if isinstance(obj, (list, tuple)):
        try:
            return obj[int(key)]
        except (IndexError, ValueError, TypeError):
            # TypeError: key not convertible to int (e.g. None).
            return default
    return default
def prim_nth(seq, i):
    """Element at index i, or None when out of range (no negative wrap)."""
    i = int(i)
    if 0 <= i < len(seq):
        return seq[i]
    return None
def prim_first(seq):
    """First element, or None for an empty sequence."""
    return seq[0] if seq else None
def prim_length(seq):
    """Number of elements."""
    return len(seq)
def prim_list(*args):
    """Build a list from the arguments."""
    return list(args)
# Type checking
def prim_is_number(x):
    """True for int or float (note: Python bools are ints, so they pass)."""
    return isinstance(x, (int, float))
def prim_is_string(x):
    """True for str."""
    return isinstance(x, str)
def prim_is_list(x):
    """True for list or tuple."""
    return isinstance(x, (list, tuple))
def prim_is_dict(x):
    """True for dict."""
    return isinstance(x, dict)
def prim_is_nil(x):
    """True only for None."""
    return x is None
# Higher-order / iteration
def prim_reduce(seq, init, fn):
    """(reduce seq init fn) — left fold over seq starting from init."""
    result = init
    for element in seq:
        result = fn(result, element)
    return result
def prim_map(seq, fn):
    """(map seq fn) — apply fn to each element, return a new list."""
    return list(map(fn, seq))
def prim_range(*args):
    """(range end), (range start end), or (range start end step) — ints."""
    bounds = [int(a) for a in args[:3]]
    if not bounds:
        return []
    if len(bounds) == 1:
        return list(range(bounds[0]))
    if len(bounds) == 2:
        return list(range(bounds[0], bounds[1]))
    return list(range(bounds[0], bounds[1], bounds[2]))
# Random
import random
# Module-private RNG so effect randomness is isolated from the global seed.
_rng = random.Random()
def prim_rand():
    """Random float in [0, 1)."""
    return _rng.random()
def prim_rand_int(lo, hi):
    """Random integer in [lo, hi] (inclusive on both ends)."""
    return _rng.randint(int(lo), int(hi))
def prim_rand_range(lo, hi):
    """Random float in [lo, hi)."""
    return lo + (hi - lo) * _rng.random()
def prim_map_range(val, from_lo, from_hi, to_lo, to_hi):
    """Linearly remap val from [from_lo, from_hi] to [to_lo, to_hi]."""
    if from_hi == from_lo:
        # Degenerate source range: avoid division by zero.
        return to_lo
    t = (val - from_lo) / (from_hi - from_lo)
    return to_lo + t * (to_hi - to_lo)
# Core primitives dict
# Maps effect-language symbol names to the implementations above.  A few
# names are aliases bound to the same function: 'len'/'length',
# 'fold'/'reduce', 'nil?'/'is-nil'.
PRIMITIVES = {
    # Arithmetic
    '+': prim_add,
    '-': prim_sub,
    '*': prim_mul,
    '/': prim_div,
    'mod': prim_mod,
    'abs': prim_abs,
    'min': prim_min,
    'max': prim_max,
    'round': prim_round,
    'floor': prim_floor,
    'ceil': prim_ceil,
    # Comparison
    '<': prim_lt,
    '>': prim_gt,
    '<=': prim_le,
    '>=': prim_ge,
    '=': prim_eq,
    '!=': prim_ne,
    # Logic
    'not': prim_not,
    'and': prim_and,
    'or': prim_or,
    # Data access
    'get': prim_get,
    'nth': prim_nth,
    'first': prim_first,
    'length': prim_length,
    'len': prim_length,
    'list': prim_list,
    # Type predicates
    'number?': prim_is_number,
    'string?': prim_is_string,
    'list?': prim_is_list,
    'dict?': prim_is_dict,
    'nil?': prim_is_nil,
    'is-nil': prim_is_nil,
    # Higher-order / iteration
    'reduce': prim_reduce,
    'fold': prim_reduce,
    'map': prim_map,
    'range': prim_range,
    # Random
    'rand': prim_rand,
    'rand-int': prim_rand_int,
    'rand-range': prim_rand_range,
    'map-range': prim_map_range,
}

View File

@@ -0,0 +1,136 @@
"""
Drawing Primitives Library
Draw shapes, text, and characters on images.
"""
import numpy as np
import cv2
from PIL import Image, ImageDraw, ImageFont
# Default font cache (loaded lazily). _default_font_size records the size
# the cached font was requested at: the bitmap font returned by
# ImageFont.load_default() has no usable .size attribute, so the original
# cache check `_default_font.size != size` raised AttributeError whenever
# the fallback font was cached.
_default_font = None
_default_font_size = None
def _get_default_font(size=16):
    """Return a cached DejaVu Sans Mono font at `size`, loading on demand.

    Falls back to PIL's built-in bitmap font when the TrueType file is
    missing; the bitmap fallback has a fixed size, so `size` cannot be
    honored in that case.
    """
    global _default_font, _default_font_size
    if _default_font is None or _default_font_size != size:
        try:
            _default_font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", size)
        except OSError:
            # Font file missing/unreadable (truetype raises OSError);
            # the original bare `except:` also swallowed unrelated errors.
            _default_font = ImageFont.load_default()
        _default_font_size = size
    return _default_font
def prim_draw_char(img, char, x, y, font_size=16, color=None):
    """Draw a single character at (x, y); identical to prim_draw_text."""
    return prim_draw_text(img, char, x, y, font_size, color)
def prim_draw_text(img, text, x, y, font_size=16, color=None):
    """Draw a text string at (x, y); color defaults to white."""
    fill = (255, 255, 255) if color is None else tuple(color)
    canvas = Image.fromarray(img)
    pen = ImageDraw.Draw(canvas)
    pen.text((x, y), text, fill=fill, font=_get_default_font(font_size))
    return np.array(canvas)
def prim_fill_rect(img, x, y, w, h, color=None):
    """Return a copy of img with an axis-aligned rectangle filled.

    color defaults to white.  Coordinates are clipped to the image, so
    rectangles partly (or wholly) outside the bounds are handled safely.
    The original assigned `result[y:y+h, x:x+w]`, so a negative x/y
    silently wrapped around to the far edge via negative-index slicing.
    """
    if color is None:
        color = [255, 255, 255]
    result = img.copy()
    x, y, w, h = int(x), int(y), int(w), int(h)
    ih, iw = result.shape[:2]
    x0, y0 = max(0, x), max(0, y)
    x1, y1 = min(iw, x + w), min(ih, y + h)
    if x0 < x1 and y0 < y1:
        result[y0:y1, x0:x1] = color
    return result
def _color_or_white(color):
    """Default helper: the given color, or white when color is None."""
    return [255, 255, 255] if color is None else color
def prim_draw_rect(img, x, y, w, h, color=None, thickness=1):
    """Draw a rectangle outline on a copy of the image."""
    out = img.copy()
    cv2.rectangle(out, (int(x), int(y)), (int(x + w), int(y + h)),
                  tuple(_color_or_white(color)), thickness)
    return out
def prim_draw_line(img, x1, y1, x2, y2, color=None, thickness=1):
    """Draw a straight line from (x1, y1) to (x2, y2)."""
    out = img.copy()
    cv2.line(out, (int(x1), int(y1)), (int(x2), int(y2)),
             tuple(_color_or_white(color)), thickness)
    return out
def prim_draw_circle(img, cx, cy, radius, color=None, thickness=1, fill=False):
    """Draw a circle outline, or a filled disc when fill is true."""
    out = img.copy()
    cv2.circle(out, (int(cx), int(cy)), int(radius),
               tuple(_color_or_white(color)), -1 if fill else thickness)
    return out
def prim_draw_ellipse(img, cx, cy, rx, ry, angle=0, color=None, thickness=1, fill=False):
    """Draw a full ellipse, rotated by `angle` degrees, optionally filled."""
    out = img.copy()
    cv2.ellipse(out, (int(cx), int(cy)), (int(rx), int(ry)), angle, 0, 360,
                tuple(_color_or_white(color)), -1 if fill else thickness)
    return out
def prim_draw_polygon(img, points, color=None, thickness=1, fill=False):
    """Draw a closed polygon from a list of [x, y] points, optionally filled."""
    out = img.copy()
    pts = np.array(points, dtype=np.int32).reshape((-1, 1, 2))
    if fill:
        cv2.fillPoly(out, [pts], tuple(_color_or_white(color)))
    else:
        cv2.polylines(out, [pts], True, tuple(_color_or_white(color)), thickness)
    return out
# Registry mapping effect-language names to the drawing primitives above.
PRIMITIVES = {
    # Text
    'draw-char': prim_draw_char,
    'draw-text': prim_draw_text,
    # Rectangles
    'fill-rect': prim_fill_rect,
    'draw-rect': prim_draw_rect,
    # Lines and shapes
    'draw-line': prim_draw_line,
    'draw-circle': prim_draw_circle,
    'draw-ellipse': prim_draw_ellipse,
    'draw-polygon': prim_draw_polygon,
}

View File

@@ -0,0 +1,119 @@
"""
Filters Primitives Library
Image filters: blur, sharpen, edges, convolution.
"""
import numpy as np
import cv2
def prim_blur(img, radius):
    """Gaussian blur; kernel side is 2*radius + 1 (radius clamped to >= 1)."""
    side = 2 * max(1, int(radius)) + 1
    return cv2.GaussianBlur(img, (side, side), 0)
def prim_box_blur(img, radius):
    """Box (mean) blur with kernel side 2*radius + 1."""
    side = 2 * max(1, int(radius)) + 1
    return cv2.blur(img, (side, side))
def prim_median_blur(img, radius):
    """Median blur (good at removing salt-and-pepper noise)."""
    side = 2 * max(1, int(radius)) + 1
    return cv2.medianBlur(img, side)
def prim_bilateral(img, d=9, sigma_color=75, sigma_space=75):
    """Edge-preserving bilateral filter."""
    return cv2.bilateralFilter(img, d, sigma_color, sigma_space)
def prim_sharpen(img, amount=1.0):
    """Unsharp-mask sharpening: original + amount * (original - blurred)."""
    soft = cv2.GaussianBlur(img, (0, 0), 3)
    return cv2.addWeighted(img, 1.0 + amount, soft, -amount, 0)
def prim_edges(img, low=50, high=150):
    """Canny edge detection; returns a 3-channel edge image."""
    mono = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    return cv2.cvtColor(cv2.Canny(mono, low, high), cv2.COLOR_GRAY2RGB)
def prim_sobel(img, ksize=3):
    """Sobel gradient magnitude as a 3-channel image."""
    mono = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    gx = cv2.Sobel(mono, cv2.CV_64F, 1, 0, ksize=ksize)
    gy = cv2.Sobel(mono, cv2.CV_64F, 0, 1, ksize=ksize)
    magnitude = np.clip(np.sqrt(gx * gx + gy * gy), 0, 255).astype(np.uint8)
    return cv2.cvtColor(magnitude, cv2.COLOR_GRAY2RGB)
def prim_laplacian(img, ksize=3):
    """Absolute Laplacian response as a 3-channel image."""
    mono = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    response = np.abs(cv2.Laplacian(mono, cv2.CV_64F, ksize=ksize))
    response = np.clip(response, 0, 255).astype(np.uint8)
    return cv2.cvtColor(response, cv2.COLOR_GRAY2RGB)
def prim_emboss(img):
    """Emboss effect: directional convolution re-centered around mid-gray.

    The filter response is widened to int16 before adding the 128 bias.
    The original added 128 to the uint8 array returned by cv2.filter2D,
    which wraps modulo 256 under NumPy's uint8 arithmetic, so bright
    responses flipped to dark and the subsequent clip was a no-op.
    """
    kernel = np.array([[-2, -1, 0],
                       [-1, 1, 1],
                       [0, 1, 2]])
    response = cv2.filter2D(img, -1, kernel).astype(np.int16)
    return np.clip(response + 128, 0, 255).astype(np.uint8)
def prim_dilate(img, size=1):
    """Morphological dilation with a square (2*size+1) kernel."""
    side = size * 2 + 1
    return cv2.dilate(img, np.ones((side, side), np.uint8))
def prim_erode(img, size=1):
    """Morphological erosion with a square (2*size+1) kernel."""
    side = size * 2 + 1
    return cv2.erode(img, np.ones((side, side), np.uint8))
def prim_convolve(img, kernel):
    """Convolve with an arbitrary kernel (nested list or array-like)."""
    return cv2.filter2D(img, -1, np.array(kernel, dtype=np.float32))
# Registry mapping effect-language names to the filter primitives above.
PRIMITIVES = {
    # Blur
    'blur': prim_blur,
    'box-blur': prim_box_blur,
    'median-blur': prim_median_blur,
    'bilateral': prim_bilateral,
    # Sharpen
    'sharpen': prim_sharpen,
    # Edges
    'edges': prim_edges,
    'sobel': prim_sobel,
    'laplacian': prim_laplacian,
    # Effects
    'emboss': prim_emboss,
    # Morphology
    'dilate': prim_dilate,
    'erode': prim_erode,
    # Custom
    'convolve': prim_convolve,
}

View File

@@ -0,0 +1,143 @@
"""
Geometry Primitives Library
Geometric transforms: rotate, scale, flip, translate, remap.
"""
import numpy as np
import cv2
def prim_translate(img, dx, dy):
    """Shift the image by (dx, dy) pixels; uncovered area is left black."""
    rows, cols = img.shape[:2]
    matrix = np.float32([[1, 0, dx], [0, 1, dy]])
    return cv2.warpAffine(img, matrix, (cols, rows))
def prim_rotate(img, angle, cx=None, cy=None):
    """Rotate by `angle` degrees about (cx, cy), defaulting to the center."""
    rows, cols = img.shape[:2]
    center = (cols / 2 if cx is None else cx,
              rows / 2 if cy is None else cy)
    matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
    return cv2.warpAffine(img, matrix, (cols, rows))
def prim_scale(img, sx, sy, cx=None, cy=None):
    """Scale by (sx, sy) about (cx, cy), defaulting to the image center."""
    rows, cols = img.shape[:2]
    px = cols / 2 if cx is None else cx
    py = rows / 2 if cy is None else cy
    # Affine matrix that scales about (px, py) rather than the origin.
    matrix = np.float32([
        [sx, 0, px * (1 - sx)],
        [0, sy, py * (1 - sy)],
    ])
    return cv2.warpAffine(img, matrix, (cols, rows))
def prim_flip_h(img):
    """Mirror the image left-right."""
    return cv2.flip(img, 1)
def prim_flip_v(img):
    """Mirror the image top-bottom."""
    return cv2.flip(img, 0)
def prim_flip(img, direction="horizontal"):
    """Flip by direction name: h/horizontal, v/vertical, both/hv/vh.

    Unknown directions return the image unchanged.
    """
    codes = {"horizontal": 1, "h": 1,
             "vertical": 0, "v": 0,
             "both": -1, "hv": -1, "vh": -1}
    code = codes.get(direction)
    if code is None:
        return img
    return cv2.flip(img, code)
def prim_transpose(img):
    """Swap the x and y axes (channel axis stays last)."""
    return img.transpose(1, 0, 2)
def prim_remap(img, map_x, map_y):
    """Sample img at the float coordinates given by (map_x, map_y)."""
    return cv2.remap(img, map_x.astype(np.float32), map_y.astype(np.float32),
                     cv2.INTER_LINEAR)
def prim_make_coords(w, h):
    """Identity coordinate grids (map_x, map_y), each shaped (h, w)."""
    xs = np.arange(w, dtype=np.float32)
    ys = np.arange(h, dtype=np.float32)
    grid_x, grid_y = np.meshgrid(xs, ys)
    return (grid_x, grid_y)
def prim_perspective(img, src_pts, dst_pts):
    """Warp with the perspective transform mapping src_pts -> dst_pts (4 pairs)."""
    matrix = cv2.getPerspectiveTransform(np.float32(src_pts), np.float32(dst_pts))
    rows, cols = img.shape[:2]
    return cv2.warpPerspective(img, matrix, (cols, rows))
def prim_affine(img, src_pts, dst_pts):
    """Warp with the affine transform mapping src_pts -> dst_pts (3 pairs)."""
    matrix = cv2.getAffineTransform(np.float32(src_pts), np.float32(dst_pts))
    rows, cols = img.shape[:2]
    return cv2.warpAffine(img, matrix, (cols, rows))
def _get_legacy_geometry_primitives():
    """Pull displacement/coordinate primitives still living in the legacy module."""
    from sexp_effects import primitives as legacy
    names = ('coords-x', 'coords-y', 'ripple-displace',
             'fisheye-displace', 'kaleidoscope-displace')
    # Effect names map to functions named prim_<name with dashes as underscores>.
    return {name: getattr(legacy, 'prim_' + name.replace('-', '_'))
            for name in names}
# Registry mapping effect-language names to the geometry primitives above.
# NOTE: the legacy displacement primitives are imported (and the legacy
# module loaded) at module import time via the ** expansion below.
PRIMITIVES = {
    # Basic transforms
    'translate': prim_translate,
    'rotate-img': prim_rotate,
    'scale-img': prim_scale,
    # Flips
    'flip-h': prim_flip_h,
    'flip-v': prim_flip_v,
    'flip': prim_flip,
    'transpose': prim_transpose,
    # Remapping
    'remap': prim_remap,
    'make-coords': prim_make_coords,
    # Advanced transforms
    'perspective': prim_perspective,
    'affine': prim_affine,
    # Displace / coordinate ops (from legacy primitives)
    **_get_legacy_geometry_primitives(),
}

View File

@@ -0,0 +1,144 @@
"""
Image Primitives Library
Basic image operations: dimensions, pixels, resize, crop, paste.
"""
import numpy as np
import cv2
def prim_width(img):
    """Image width in pixels."""
    return img.shape[1]
def prim_height(img):
    """Image height in pixels."""
    return img.shape[0]
def prim_make_image(w, h, color=None):
    """New h x w RGB image filled with `color` (default black)."""
    fill = [0, 0, 0] if color is None else color
    canvas = np.zeros((h, w, 3), dtype=np.uint8)
    canvas[:] = fill
    return canvas
def prim_copy(img):
    """Independent copy of the image."""
    return img.copy()
def prim_pixel(img, x, y):
    """Pixel color at (x, y) as [r, g, b]; out of bounds -> [0, 0, 0]."""
    h, w = img.shape[:2]
    if not (0 <= x < w and 0 <= y < h):
        return [0, 0, 0]
    return list(img[int(y), int(x)])
def prim_set_pixel(img, x, y, color):
    """Copy of img with the pixel at (x, y) set; out of bounds is a no-op."""
    out = img.copy()
    h, w = out.shape[:2]
    if 0 <= x < w and 0 <= y < h:
        out[int(y), int(x)] = color
    return out
def prim_sample(img, x, y):
    """Bilinearly interpolated color at float (x, y) as [r, g, b] floats.

    Coordinates are clamped just inside the image so the 2x2 neighborhood
    used for interpolation is always in bounds.
    """
    h, w = img.shape[:2]
    x = max(0, min(w - 1.001, x))
    y = max(0, min(h - 1.001, y))
    x0, y0 = int(x), int(y)
    x1 = min(x0 + 1, w - 1)
    y1 = min(y0 + 1, h - 1)
    fx, fy = x - x0, y - y0
    # Blend along x on both rows, then along y between the rows.
    row0 = img[y0, x0].astype(float) * (1 - fx) + img[y0, x1].astype(float) * fx
    row1 = img[y1, x0].astype(float) * (1 - fx) + img[y1, x1].astype(float) * fx
    return list(row0 * (1 - fy) + row1 * fy)
def prim_channel(img, c):
    """Single channel plane (0=R, 1=G, 2=B) as a 2-D array (a view)."""
    return img[:, :, c]
def prim_merge_channels(r, g, b):
    """Combine three 2-D planes into one uint8 RGB image."""
    return np.stack((r, g, b), axis=2).astype(np.uint8)
def prim_resize(img, w, h, mode="linear"):
    """Resize to w x h; mode picks interpolation (linear/nearest/cubic/area).

    Unknown modes fall back to linear.
    """
    modes = {
        "nearest": cv2.INTER_NEAREST,
        "cubic": cv2.INTER_CUBIC,
        "area": cv2.INTER_AREA,
    }
    interp = modes.get(mode, cv2.INTER_LINEAR)
    return cv2.resize(img, (int(w), int(h)), interpolation=interp)
def prim_crop(img, x, y, w, h):
    """Copy of the rectangle at (x, y) of size (w, h), clipped to the image."""
    x, y, w, h = int(x), int(y), int(w), int(h)
    ih, iw = img.shape[:2]
    # Clamp the origin inside the image, then shrink the extent to fit.
    x = min(max(x, 0), iw - 1)
    y = min(max(y, 0), ih - 1)
    w = min(w, iw - x)
    h = min(h, ih - y)
    return img[y:y + h, x:x + w].copy()
def prim_paste(dst, src, x, y):
    """Copy of dst with src pasted at (x, y); any overhang is clipped."""
    out = dst.copy()
    x, y = int(x), int(y)
    src_h, src_w = src.shape[:2]
    dst_h, dst_w = dst.shape[:2]
    # Portion of src that actually lands inside dst.
    sx0, sy0 = max(0, -x), max(0, -y)
    sx1, sy1 = min(src_w, dst_w - x), min(src_h, dst_h - y)
    if sx1 > sx0 and sy1 > sy0:
        dx0, dy0 = max(0, x), max(0, y)
        out[dy0:dy0 + (sy1 - sy0), dx0:dx0 + (sx1 - sx0)] = src[sy0:sy1, sx0:sx1]
    return out
# Registry mapping effect-language names to the image primitives above.
PRIMITIVES = {
    # Dimensions
    'width': prim_width,
    'height': prim_height,
    # Creation
    'make-image': prim_make_image,
    'copy': prim_copy,
    # Pixel access
    'pixel': prim_pixel,
    'set-pixel': prim_set_pixel,
    'sample': prim_sample,
    # Channels
    'channel': prim_channel,
    'merge-channels': prim_merge_channels,
    # Geometry
    'resize': prim_resize,
    'crop': prim_crop,
    'paste': prim_paste,
}

View File

@@ -0,0 +1,164 @@
"""
Math Primitives Library
Trigonometry, rounding, clamping, random numbers, etc.
"""
import math
import random as rand_module
def prim_sin(x):
    """Sine (radians)."""
    return math.sin(x)
def prim_cos(x):
    """Cosine (radians)."""
    return math.cos(x)
def prim_tan(x):
    """Tangent (radians)."""
    return math.tan(x)
def prim_asin(x):
    """Arcsine in radians."""
    return math.asin(x)
def prim_acos(x):
    """Arccosine in radians."""
    return math.acos(x)
def prim_atan(x):
    """Arctangent in radians."""
    return math.atan(x)
def prim_atan2(y, x):
    """Quadrant-aware arctangent of y/x in radians."""
    return math.atan2(y, x)
def prim_sqrt(x):
    """Square root."""
    return math.sqrt(x)
def prim_pow(x, y):
    """x raised to the power y (always returns a float)."""
    return math.pow(x, y)
def prim_exp(x):
    """e ** x."""
    return math.exp(x)
def prim_log(x, base=None):
    """Natural log by default, or log in the given base."""
    return math.log(x) if base is None else math.log(x, base)
def prim_abs(x):
    """Absolute value."""
    return abs(x)
def prim_floor(x):
    """Largest integer <= x."""
    return math.floor(x)
def prim_ceil(x):
    """Smallest integer >= x."""
    return math.ceil(x)
def prim_round(x):
    """Round half-to-even to the nearest integer."""
    return round(x)
def prim_min(*args):
    """Minimum; a single iterable argument is treated as the value list."""
    if len(args) == 1 and hasattr(args[0], '__iter__'):
        return min(args[0])
    return min(args)
def prim_max(*args):
    """Maximum; a single iterable argument is treated as the value list."""
    if len(args) == 1 and hasattr(args[0], '__iter__'):
        return max(args[0])
    return max(args)
def prim_clamp(x, lo, hi):
    """Constrain x to the closed interval [lo, hi]."""
    return max(lo, min(hi, x))
def prim_lerp(a, b, t):
    """Linear interpolation between a and b by factor t."""
    return a + (b - a) * t
def prim_smoothstep(edge0, edge1, x):
    """Hermite smoothstep: 0 below edge0, 1 above edge1, smooth in between."""
    t = prim_clamp((x - edge0) / (edge1 - edge0), 0.0, 1.0)
    return t * t * (3 - 2 * t)
def prim_random(lo=0.0, hi=1.0):
    """Uniform random float between lo and hi."""
    return rand_module.uniform(lo, hi)
def prim_randint(lo, hi):
    """Random integer in [lo, hi], inclusive on both ends."""
    return rand_module.randint(lo, hi)
def prim_gaussian(mean=0.0, std=1.0):
    """Normally distributed random float with the given mean and std."""
    return rand_module.gauss(mean, std)
def prim_sign(x):
    """-1, 0, or 1 according to the sign of x."""
    # Bool arithmetic: (x > 0) - (x < 0) yields exactly -1/0/1.
    return (x > 0) - (x < 0)
def prim_fract(x):
    """Fractional part x - floor(x); always in [0, 1)."""
    return x - math.floor(x)
# Registry mapping effect-language names to the math primitives above.
# NOTE: 'pi', 'tau' and 'e' map to plain float constants, not callables.
PRIMITIVES = {
    # Trigonometry
    'sin': prim_sin,
    'cos': prim_cos,
    'tan': prim_tan,
    'asin': prim_asin,
    'acos': prim_acos,
    'atan': prim_atan,
    'atan2': prim_atan2,
    # Powers and roots
    'sqrt': prim_sqrt,
    'pow': prim_pow,
    'exp': prim_exp,
    'log': prim_log,
    # Rounding
    'abs': prim_abs,
    'floor': prim_floor,
    'ceil': prim_ceil,
    'round': prim_round,
    'sign': prim_sign,
    'fract': prim_fract,
    # Min/max/clamp
    'min': prim_min,
    'max': prim_max,
    'clamp': prim_clamp,
    'lerp': prim_lerp,
    'smoothstep': prim_smoothstep,
    # Random
    'random': prim_random,
    'randint': prim_randint,
    'gaussian': prim_gaussian,
    # Constants
    'pi': math.pi,
    'tau': math.tau,
    'e': math.e,
}

View File

@@ -0,0 +1,462 @@
"""
Streaming primitives for video/audio processing.
These primitives handle video source reading and audio analysis,
keeping the interpreter completely generic.
GPU Acceleration:
- Set STREAMING_GPU_PERSIST=1 to output CuPy arrays (frames stay on GPU)
- Hardware video decoding (NVDEC) is used when available
- Dramatically improves performance on GPU nodes
"""
import os
import numpy as np
import subprocess
import json
from pathlib import Path
# Try to import CuPy for GPU acceleration
try:
import cupy as cp
CUPY_AVAILABLE = True
except ImportError:
cp = None
CUPY_AVAILABLE = False
# GPU persistence mode - output CuPy arrays instead of numpy
# Disabled by default until all primitives support GPU frames
GPU_PERSIST = os.environ.get("STREAMING_GPU_PERSIST", "0") == "1" and CUPY_AVAILABLE
# Check for hardware decode support (cached)
# Tri-state: None = not yet probed, True/False = cached probe result.
_HWDEC_AVAILABLE = None
def _check_hwdec():
    """Probe (once) whether NVIDIA CUDA hardware decode is usable."""
    global _HWDEC_AVAILABLE
    if _HWDEC_AVAILABLE is None:
        try:
            smi = subprocess.run(["nvidia-smi"], capture_output=True, timeout=2)
            if smi.returncode != 0:
                _HWDEC_AVAILABLE = False
                return False
            hwaccels = subprocess.run(["ffmpeg", "-hwaccels"],
                                      capture_output=True, text=True, timeout=5)
            _HWDEC_AVAILABLE = "cuda" in hwaccels.stdout
        except Exception:
            # Missing binaries or timeouts mean no hardware decode.
            _HWDEC_AVAILABLE = False
    return _HWDEC_AVAILABLE
class VideoSource:
    """Video source with persistent streaming pipe for fast sequential reads.

    A long-lived ffmpeg process decodes raw RGB24 frames to a pipe at a
    fixed output fps.  Sequential reads pull the next frame from the pipe;
    read_at() only restarts the pipe (a real seek) when the requested time
    moves backward or jumps more than 2 seconds ahead.
    """
    def __init__(self, path: str, fps: float = 30):
        self.path = Path(path)
        self.fps = fps  # Output fps for the stream
        self._frame_size = None  # (width, height), filled from ffprobe
        self._duration = None  # seconds, or None if unknown
        self._proc = None  # Persistent ffmpeg process
        self._stream_time = 0.0  # Current position in stream
        self._frame_time = 1.0 / fps  # Time per frame at output fps
        self._last_read_time = -1  # t of the cached frame
        self._cached_frame = None
        # Check if file exists
        if not self.path.exists():
            raise FileNotFoundError(f"Video file not found: {self.path}")
        # Get video info
        cmd = ["ffprobe", "-v", "quiet", "-print_format", "json",
               "-show_streams", str(self.path)]
        result = subprocess.run(cmd, capture_output=True, text=True)
        if result.returncode != 0:
            raise RuntimeError(f"Failed to probe video '{self.path}': {result.stderr}")
        try:
            info = json.loads(result.stdout)
        except json.JSONDecodeError:
            raise RuntimeError(f"Invalid video file or ffprobe failed: {self.path}")
        for stream in info.get("streams", []):
            if stream.get("codec_type") == "video":
                self._frame_size = (stream.get("width", 720), stream.get("height", 720))
                # Try direct duration field first
                if "duration" in stream:
                    self._duration = float(stream["duration"])
                # Fall back to tags.DURATION (webm format: "00:01:00.124000000")
                elif "tags" in stream and "DURATION" in stream["tags"]:
                    dur_str = stream["tags"]["DURATION"]
                    parts = dur_str.split(":")
                    if len(parts) == 3:
                        h, m, s = parts
                        self._duration = int(h) * 3600 + int(m) * 60 + float(s)
                break
        # Fallback: check format duration if stream duration not found
        if self._duration is None and "format" in info and "duration" in info["format"]:
            self._duration = float(info["format"]["duration"])
        if not self._frame_size:
            self._frame_size = (720, 720)
        import sys
        print(f"VideoSource: {self.path.name} duration={self._duration} size={self._frame_size}", file=sys.stderr)
    def _start_stream(self, seek_time: float = 0):
        """Start or restart the ffmpeg streaming process at seek_time.

        Uses NVIDIA hardware decoding (NVDEC) when available.  Kills any
        previous pipe first and resets _stream_time to seek_time.
        """
        if self._proc:
            self._proc.kill()
            self._proc = None
        # Check file exists before trying to open
        if not self.path.exists():
            raise FileNotFoundError(f"Video file not found: {self.path}")
        w, h = self._frame_size
        # Build ffmpeg command with optional hardware decode
        cmd = ["ffmpeg", "-v", "error"]
        # Use hardware decode if available (significantly faster)
        if _check_hwdec():
            cmd.extend(["-hwaccel", "cuda"])
        cmd.extend([
            "-ss", f"{seek_time:.3f}",
            "-i", str(self.path),
            "-f", "rawvideo", "-pix_fmt", "rgb24",
            "-s", f"{w}x{h}",
            "-r", str(self.fps),  # Output at specified fps
            "-"
        ])
        self._proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self._stream_time = seek_time
        # Check if process started successfully by reading first bit of stderr
        import select
        import sys
        readable, _, _ = select.select([self._proc.stderr], [], [], 0.5)
        if readable:
            err = self._proc.stderr.read(4096).decode('utf-8', errors='ignore')
            if err:
                print(f"ffmpeg error for {self.path.name}: {err}", file=sys.stderr)
    def _read_frame_from_stream(self):
        """Read one raw RGB24 frame from the pipe, or None on EOF/failure.

        Returns CuPy array if GPU_PERSIST is enabled, numpy array otherwise.
        """
        w, h = self._frame_size
        frame_size = w * h * 3
        if not self._proc or self._proc.poll() is not None:
            return None
        data = self._proc.stdout.read(frame_size)
        if len(data) < frame_size:
            # Short read: stream ended mid-frame.
            return None
        frame = np.frombuffer(data, dtype=np.uint8).reshape((h, w, 3)).copy()
        # Transfer to GPU if persistence mode enabled
        if GPU_PERSIST:
            return cp.asarray(frame)
        return frame
    def read(self) -> np.ndarray:
        """Read frame (uses last cached or t=0)."""
        if self._cached_frame is not None:
            return self._cached_frame
        return self.read_at(0)
    def read_at(self, t: float) -> np.ndarray:
        """Read frame at specific time using streaming with smart seeking."""
        # Cache check - return same frame for same time
        if t == self._last_read_time and self._cached_frame is not None:
            return self._cached_frame
        w, h = self._frame_size
        # Loop time if video is shorter
        seek_time = t
        if self._duration and self._duration > 0:
            seek_time = t % self._duration
            # If we're within 0.1s of the end, wrap to beginning to avoid EOF issues
            if seek_time > self._duration - 0.1:
                seek_time = 0.0
        # Decide whether to seek or continue streaming
        # Seek if: no stream, going backwards (more than 1 frame), or jumping more than 2 seconds ahead
        # Allow small backward tolerance to handle floating point and timing jitter
        need_seek = (
            self._proc is None or
            self._proc.poll() is not None or
            seek_time < self._stream_time - self._frame_time or  # More than 1 frame backward
            seek_time > self._stream_time + 2.0
        )
        if need_seek:
            import sys
            reason = "no proc" if self._proc is None else "proc dead" if self._proc.poll() is not None else "backward" if seek_time < self._stream_time else "jump"
            print(f"SEEK {self.path.name}: t={t:.4f} seek={seek_time:.4f} stream={self._stream_time:.4f} ({reason})", file=sys.stderr)
            self._start_stream(seek_time)
        # Skip frames to reach target time
        skip_retries = 0
        while self._stream_time + self._frame_time <= seek_time:
            frame = self._read_frame_from_stream()
            if frame is None:
                # Stream ended or failed - restart from seek point
                import time
                skip_retries += 1
                if skip_retries > 3:
                    # Give up skipping, just start fresh at seek_time
                    self._start_stream(seek_time)
                    time.sleep(0.1)
                    break
                self._start_stream(seek_time)
                time.sleep(0.05)
                continue
            self._stream_time += self._frame_time
            skip_retries = 0  # Reset on successful read
        # Read the target frame with retry logic
        frame = None
        max_retries = 3
        for attempt in range(max_retries):
            frame = self._read_frame_from_stream()
            if frame is not None:
                break
            # Stream failed - try restarting
            import sys
            import time
            print(f"RETRY {self.path.name}: attempt {attempt+1}/{max_retries} at t={t:.2f}", file=sys.stderr)
            # Check for ffmpeg errors
            if self._proc and self._proc.stderr:
                try:
                    import select
                    readable, _, _ = select.select([self._proc.stderr], [], [], 0.1)
                    if readable:
                        err = self._proc.stderr.read(4096).decode('utf-8', errors='ignore')
                        if err:
                            print(f"ffmpeg error: {err}", file=sys.stderr)
                except:
                    pass
            # Wait a bit and restart
            time.sleep(0.1)
            self._start_stream(seek_time)
            # Give ffmpeg time to start
            time.sleep(0.1)
        if frame is None:
            import sys
            raise RuntimeError(f"Failed to read video frame from {self.path.name} at t={t:.2f} after {max_retries} retries")
        else:
            self._stream_time += self._frame_time
        self._last_read_time = t
        self._cached_frame = frame
        return frame
    def skip(self):
        """No-op for seek-based reading."""
        pass
    @property
    def size(self):
        """(width, height) of the decoded frames."""
        return self._frame_size
    def close(self):
        """Kill the ffmpeg pipe, if running."""
        if self._proc:
            self._proc.kill()
            self._proc = None
class AudioAnalyzer:
    """Audio analyzer for energy and beat detection.

    Decodes the whole file to a mono float32 buffer via ffmpeg once, then
    answers per-timestamp queries from that buffer.  Beat detection is a
    stateful spectral-flux onset detector, so get_beat() is expected to be
    called with (mostly) increasing timestamps.
    """
    def __init__(self, path: str, sample_rate: int = 22050):
        self.path = Path(path)
        self.sample_rate = sample_rate
        # Check if file exists
        if not self.path.exists():
            raise FileNotFoundError(f"Audio file not found: {self.path}")
        # Load audio via ffmpeg (mono, float32 little-endian)
        cmd = ["ffmpeg", "-v", "error", "-i", str(self.path),
               "-f", "f32le", "-ac", "1", "-ar", str(sample_rate), "-"]
        result = subprocess.run(cmd, capture_output=True)
        if result.returncode != 0:
            raise RuntimeError(f"Failed to load audio '{self.path}': {result.stderr.decode()}")
        self._audio = np.frombuffer(result.stdout, dtype=np.float32)
        if len(self._audio) == 0:
            raise RuntimeError(f"Audio file is empty or invalid: {self.path}")
        # Get duration
        cmd = ["ffprobe", "-v", "quiet", "-print_format", "json",
               "-show_format", str(self.path)]
        result = subprocess.run(cmd, capture_output=True, text=True)
        if result.returncode != 0:
            raise RuntimeError(f"Failed to probe audio '{self.path}': {result.stderr}")
        info = json.loads(result.stdout)
        # Falls back to 60 seconds when ffprobe reports no duration.
        self.duration = float(info.get("format", {}).get("duration", 60))
        # Beat detection state
        self._flux_history = []  # recent (t, flux) pairs for the adaptive threshold
        self._last_beat_time = -1
        self._beat_count = 0
        self._last_beat_check_time = -1
        # Cache beat result for current time (so multiple scans see same result)
        self._beat_cache_time = -1
        self._beat_cache_result = False
    def get_energy(self, t: float) -> float:
        """Get energy level at time t (0-1).

        RMS over a ~1024-sample window centered at t, scaled by 3 and
        clipped to 1.0.
        """
        idx = int(t * self.sample_rate)
        start = max(0, idx - 512)
        end = min(len(self._audio), idx + 512)
        if start >= end:
            return 0.0
        return min(1.0, np.sqrt(np.mean(self._audio[start:end] ** 2)) * 3.0)
    def get_beat(self, t: float) -> bool:
        """Check if there's a beat at time t.

        Computes spectral flux between a 2048-sample window at t and the
        window 512 samples earlier; a beat fires when flux exceeds
        mean + 1.5*std of recent history and the last beat was > 0.1s ago.
        """
        # Return cached result if same time (multiple scans query same frame)
        if t == self._beat_cache_time:
            return self._beat_cache_result
        idx = int(t * self.sample_rate)
        size = 2048
        start, end = max(0, idx - size//2), min(len(self._audio), idx + size//2)
        if end - start < size/2:
            self._beat_cache_time = t
            self._beat_cache_result = False
            return False
        curr = self._audio[start:end]
        pstart, pend = max(0, start - 512), max(0, end - 512)
        if pend <= pstart:
            self._beat_cache_time = t
            self._beat_cache_result = False
            return False
        prev = self._audio[pstart:pend]
        curr_spec = np.abs(np.fft.rfft(curr * np.hanning(len(curr))))
        prev_spec = np.abs(np.fft.rfft(prev * np.hanning(len(prev))))
        n = min(len(curr_spec), len(prev_spec))
        # Positive spectral flux: energy that appeared since the previous window.
        flux = np.sum(np.maximum(0, curr_spec[:n] - prev_spec[:n])) / (n + 1)
        self._flux_history.append((t, flux))
        if len(self._flux_history) > 50:
            self._flux_history = self._flux_history[-50:]
        if len(self._flux_history) < 5:
            # Not enough history for a meaningful threshold yet.
            self._beat_cache_time = t
            self._beat_cache_result = False
            return False
        recent = [f for _, f in self._flux_history[-20:]]
        threshold = np.mean(recent) + 1.5 * np.std(recent)
        is_beat = flux > threshold and (t - self._last_beat_time) > 0.1
        if is_beat:
            self._last_beat_time = t
            # Only count each forward-moving timestamp once.
            if t > self._last_beat_check_time:
                self._beat_count += 1
                self._last_beat_check_time = t
        # Cache result for this time
        self._beat_cache_time = t
        self._beat_cache_result = is_beat
        return is_beat
    def get_beat_count(self, t: float) -> int:
        """Get cumulative beat count up to time t."""
        # Ensure beat detection has run up to this time
        self.get_beat(t)
        return self._beat_count
# === Primitives ===
def prim_make_video_source(path: str, fps: float = 30):
    """Create a VideoSource for the file at `path`, decoding at `fps`."""
    return VideoSource(path, fps)
def prim_source_read(source: VideoSource, t: float = None):
    """Read a frame from a video source.

    With t given, streams/seeks to that time; without t, returns the
    source's cached frame (or the frame at t=0).
    """
    import sys
    if t is not None:
        frame = source.read_at(t)
        # Debug: show source and time
        if int(t * 10) % 10 == 0:  # Fires for t in [k, k+0.1) — roughly once per second of timeline
            print(f"READ {source.path.name}: t={t:.2f} stream={source._stream_time:.2f}", file=sys.stderr)
        return frame
    return source.read()
def prim_source_skip(source: VideoSource):
    """Skip a frame (keep pipe in sync). No-op: VideoSource.skip does nothing."""
    source.skip()
def prim_source_size(source: VideoSource):
    """Get (width, height) of source."""
    return source.size
def prim_make_audio_analyzer(path: str):
    """Create an AudioAnalyzer from a file path (decodes the whole file)."""
    return AudioAnalyzer(path)
def prim_audio_energy(analyzer: AudioAnalyzer, t: float) -> float:
    """Get energy level (0-1) at time t."""
    return analyzer.get_energy(t)
def prim_audio_beat(analyzer: AudioAnalyzer, t: float) -> bool:
    """Check if there's a beat at time t (stateful; expects increasing t)."""
    return analyzer.get_beat(t)
def prim_audio_beat_count(analyzer: AudioAnalyzer, t: float) -> int:
    """Get cumulative beat count up to time t."""
    return analyzer.get_beat_count(t)
def prim_audio_duration(analyzer: AudioAnalyzer) -> float:
    """Get audio duration in seconds."""
    return analyzer.duration
# Export primitives
# Registry mapping effect-language names to the streaming primitives above.
PRIMITIVES = {
    # Video source
    'make-video-source': prim_make_video_source,
    'source-read': prim_source_read,
    'source-skip': prim_source_skip,
    'source-size': prim_source_size,
    # Audio analyzer
    'make-audio-analyzer': prim_make_audio_analyzer,
    'audio-energy': prim_audio_energy,
    'audio-beat': prim_audio_beat,
    'audio-beat-count': prim_audio_beat_count,
    'audio-duration': prim_audio_duration,
}

3043
sexp_effects/primitives.py Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,236 @@
#!/usr/bin/env python3
"""
Test the S-expression effect interpreter.
"""
import numpy as np
import sys
from pathlib import Path
# Add parent to path
sys.path.insert(0, str(Path(__file__).parent.parent))
from sexp_effects import (
get_interpreter,
load_effects_dir,
run_effect,
list_effects,
parse,
)
def test_parser():
    """Test S-expression parser: atoms, lists, and nested forms.

    Symbols parse to objects exposing a .name attribute.
    """
    print("Testing parser...")
    # Simple expressions
    assert parse("42") == 42
    assert parse("3.14") == 3.14
    assert parse('"hello"') == "hello"
    assert parse("true") == True
    # Lists
    assert parse("(+ 1 2)")[0].name == "+"
    assert parse("(+ 1 2)")[1] == 1
    # Nested
    expr = parse("(define x (+ 1 2))")
    assert expr[0].name == "define"
    print(" Parser OK")
def test_interpreter_basics():
    """Test basic interpreter operations."""
    print("Testing interpreter basics...")
    interp = get_interpreter()
    def ev(src):
        # Parse and evaluate a source string in one step.
        return interp.eval(parse(src))
    # Arithmetic
    assert ev("(+ 1 2)") == 3
    assert ev("(* 3 4)") == 12
    assert ev("(- 10 3)") == 7
    # Comparison
    assert ev("(< 1 2)") == True
    assert ev("(> 1 2)") == False
    # Let bindings
    assert ev("(let ((x 5)) x)") == 5
    assert ev("(let ((x 5) (y 3)) (+ x y))") == 8
    # Lambda application
    assert ev("((lambda (x) (* x 2)) 5)") == 10
    # Conditionals
    assert ev("(if true 1 2)") == 1
    assert ev("(if false 1 2)") == 2
    print(" Interpreter basics OK")
def test_primitives():
    """Test image primitives."""
    print("Testing primitives...")
    interp = get_interpreter()
    # Register a 100x100 black image with one colored pixel at (50, 50).
    img = np.zeros((100, 100, 3), dtype=np.uint8)
    img[50, 50] = [255, 128, 64]
    interp.global_env.set('test_img', img)
    def ev(src):
        return interp.eval(parse(src))
    # Dimensions
    assert ev("(width test_img)") == 100
    assert ev("(height test_img)") == 100
    # Pixel access
    assert ev("(pixel test_img 50 50)") == [255, 128, 64]
    # Color construction
    assert ev("(rgb 100 150 200)") == [100, 150, 200]
    # Luminance of a neutral grey should be close to its channel value.
    lum = ev("(luminance (rgb 100 100 100))")
    assert abs(lum - 100) < 1
    print(" Primitives OK")
def test_effect_loading():
    """Test loading effects from .sexp files."""
    print("Testing effect loading...")
    # Load every effect shipped next to this test file.
    effects_dir = Path(__file__).parent / "effects"
    load_effects_dir(str(effects_dir))
    loaded = list_effects()
    print(f" Loaded {len(loaded)} effects: {', '.join(sorted(loaded))}")
    assert len(loaded) > 0
    print(" Effect loading OK")
def test_effect_execution():
    """Test running effects on images; returns (passed, failed) summary."""
    print("Testing effect execution...")
    # Random noise frame exercises every effect path.
    img = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
    load_effects_dir(str(Path(__file__).parent / "effects"))
    effects = list_effects()
    passed = 0
    failed = []
    for name in sorted(effects):
        try:
            result, state = run_effect(name, img.copy(), {'_time': 0.5}, {})
            assert isinstance(result, np.ndarray)
            assert result.shape == img.shape
        except Exception as e:
            failed.append((name, str(e)))
            print(f" {name}: FAILED - {e}")
        else:
            passed += 1
            print(f" {name}: OK")
    print(f" Passed: {passed}/{len(effects)}")
    if failed:
        print(f" Failed: {[f[0] for f in failed]}")
    return passed, failed
def test_ascii_fx_zone():
    """Test ascii_fx_zone effect with zone expressions."""
    print("Testing ascii_fx_zone...")
    interp = get_interpreter()
    load_effects_dir(str(Path(__file__).parent / "effects"))
    # Build a horizontal grayscale gradient test frame (120x160, 3 channels).
    frame = np.zeros((120, 160, 3), dtype=np.uint8)
    for x in range(160):
        frame[:, x] = int(x / 160 * 255)
    frame = np.stack([frame[:, :, 0]] * 3, axis=2)
    def check(params, label):
        # Run the effect and confirm the frame shape is preserved.
        out, _ = run_effect('ascii_fx_zone', frame, params, {})
        assert out.shape == frame.shape
        print(f" {label}: OK")
    # 1: Basic run without expressions
    check({'cols': 20}, "Basic run")
    # 2: Single zone-lum expression
    check({'cols': 20, 'char_hue': parse('(* zone-lum 180)')},
          "Zone-lum expression")
    # 3: Multiple expressions at once
    check({'cols': 20,
           'char_scale': parse('(+ 0.5 (* zone-lum 0.5))'),
           'char_rotation': parse('(* zone-row-norm 30)')},
          "Multiple expressions")
    # 4: Plain numeric literals
    check({'cols': 20, 'char_hue': 90, 'char_scale': 1.2},
          "Numeric literals")
    # 5: Zone position expression
    check({'cols': 20, 'char_hue': parse('(* zone-col-norm 360)')},
          "Zone position expression")
    print(" ascii_fx_zone OK")
def main():
    """Run the full test suite and print a summary banner."""
    banner = "=" * 60
    print(banner)
    print("S-Expression Effect Interpreter Tests")
    print(banner)
    test_parser()
    test_interpreter_basics()
    test_primitives()
    test_effect_loading()
    test_ascii_fx_zone()
    passed, failed = test_effect_execution()
    print(banner)
    if failed:
        print(f"Tests completed with {len(failed)} failures")
    else:
        print("All tests passed!")
    print(banner)
# Script entry point: run the full test suite.
if __name__ == "__main__":
    main()

44
streaming/__init__.py Normal file
View File

@@ -0,0 +1,44 @@
"""
Streaming video compositor for real-time effect processing.
This module provides a frame-by-frame streaming architecture that:
- Reads from multiple video sources with automatic looping
- Applies effects inline (no intermediate files)
- Composites layers with time-varying weights
- Outputs to display, file, or stream
Usage:
from streaming import StreamingCompositor, VideoSource, AudioAnalyzer
compositor = StreamingCompositor(
sources=["video1.mp4", "video2.mp4"],
effects_per_source=[...],
compositor_config={...},
)
# With live audio
audio = AudioAnalyzer(device=0)
compositor.run(output="output.mp4", duration=60, audio=audio)
# With preview window
compositor.run(output="preview", duration=60)
Backends:
- numpy: Works everywhere, ~3-5 fps (default)
- glsl: Requires GPU, 30+ fps real-time (future)
"""
from .sources import VideoSource, ImageSource
from .compositor import StreamingCompositor
from .backends import NumpyBackend, get_backend
from .output import DisplayOutput, FileOutput
# Public API of the streaming package (what `from streaming import *` exports).
__all__ = [
    "StreamingCompositor",
    "VideoSource",
    "ImageSource",
    "NumpyBackend",
    "get_backend",
    "DisplayOutput",
    "FileOutput",
]

486
streaming/audio.py Normal file
View File

@@ -0,0 +1,486 @@
"""
Live audio analysis for reactive effects.
Provides real-time audio features:
- Energy (RMS amplitude)
- Beat detection
- Frequency bands (bass, mid, high)
"""
import numpy as np
from typing import Optional
import threading
import time
class AudioAnalyzer:
    """
    Real-time audio analyzer using sounddevice.
    Captures audio from microphone/line-in into a ring buffer and computes
    features in real-time for effect parameter bindings.
    Example:
        analyzer = AudioAnalyzer(device=0)
        analyzer.start()
        # In compositor loop:
        energy = analyzer.get_energy()
        beat = analyzer.get_beat()
        analyzer.stop()
    """
    def __init__(
        self,
        device: Optional[int] = None,
        sample_rate: int = 44100,
        block_size: int = 1024,
        buffer_seconds: float = 0.5,
    ):
        """
        Initialize audio analyzer.
        Args:
            device: Audio input device index (None = default)
            sample_rate: Audio sample rate
            block_size: Samples per block
            buffer_seconds: Ring buffer duration
        """
        self.sample_rate = sample_rate
        self.block_size = block_size
        self.device = device
        # Ring buffer holding the most recent `buffer_seconds` of mono samples.
        buffer_size = int(sample_rate * buffer_seconds)
        self._buffer = np.zeros(buffer_size, dtype=np.float32)
        self._buffer_pos = 0
        # Guards the ring buffer: written by the sounddevice callback thread,
        # read by get_energy()/get_spectrum() on the caller's thread.
        self._lock = threading.Lock()
        # Beat detection state
        self._last_energy = 0
        self._energy_history = []
        self._last_beat_time = 0
        self._beat_threshold = 1.5  # Energy ratio for beat detection
        self._min_beat_interval = 0.1  # Min seconds between beats
        # Stream state
        self._stream = None
        self._running = False
    def _audio_callback(self, indata, frames, time_info, status):
        """Called by sounddevice for each audio block; appends to the ring buffer."""
        with self._lock:
            # Take the first channel when the input is multi-channel.
            data = indata[:, 0] if len(indata.shape) > 1 else indata
            n = len(data)
            if self._buffer_pos + n <= len(self._buffer):
                self._buffer[self._buffer_pos:self._buffer_pos + n] = data
            else:
                # Wrap around the end of the ring buffer.
                first = len(self._buffer) - self._buffer_pos
                self._buffer[self._buffer_pos:] = data[:first]
                self._buffer[:n - first] = data[first:]
            self._buffer_pos = (self._buffer_pos + n) % len(self._buffer)
    def start(self):
        """Start audio capture (no-op with a warning if sounddevice is missing)."""
        try:
            import sounddevice as sd
        except ImportError:
            print("Warning: sounddevice not installed. Audio analysis disabled.")
            print("Install with: pip install sounddevice")
            return
        self._stream = sd.InputStream(
            device=self.device,
            channels=1,
            samplerate=self.sample_rate,
            blocksize=self.block_size,
            callback=self._audio_callback,
        )
        self._stream.start()
        self._running = True
    def stop(self):
        """Stop audio capture and release the stream."""
        if self._stream:
            self._stream.stop()
            self._stream.close()
            self._stream = None
        self._running = False
    def get_energy(self) -> float:
        """
        Get current audio energy (RMS amplitude).
        Returns:
            Energy value normalized to 0-1 range (approximately)
        """
        with self._lock:
            # Use the most recent ~2048 samples ending at the write position.
            recent = 2048
            if self._buffer_pos >= recent:
                data = self._buffer[self._buffer_pos - recent:self._buffer_pos]
            else:
                # Window wraps: tail of the buffer plus the head.
                data = np.concatenate([
                    self._buffer[-(recent - self._buffer_pos):],
                    self._buffer[:self._buffer_pos]
                ])
            # RMS energy
            rms = np.sqrt(np.mean(data ** 2))
            # Normalize (typical mic input is quite low)
            normalized = min(1.0, rms * 10)
            return normalized
    def get_beat(self) -> bool:
        """
        Detect if current moment is a beat.
        Simple onset detection based on energy spikes.
        Returns:
            True if beat detected, False otherwise
        """
        current_energy = self.get_energy()
        now = time.time()
        # Update energy history (bounded to the last 20 readings).
        self._energy_history.append(current_energy)
        if len(self._energy_history) > 20:
            self._energy_history.pop(0)
        # Need enough history before deciding.
        if len(self._energy_history) < 5:
            self._last_energy = current_energy
            return False
        # Average recent energy, excluding the current reading.
        avg_energy = np.mean(self._energy_history[:-1])
        # Beat if current energy is significantly above average, enough time
        # passed since the last beat, and the energy is rising.
        is_beat = (
            current_energy > avg_energy * self._beat_threshold and
            now - self._last_beat_time > self._min_beat_interval and
            current_energy > self._last_energy  # Rising edge
        )
        if is_beat:
            self._last_beat_time = now
        self._last_energy = current_energy
        return is_beat
    def get_spectrum(self, bands: int = 3) -> np.ndarray:
        """
        Get frequency spectrum divided into bands.
        Args:
            bands: Number of frequency bands (default 3: bass, mid, high)
        Returns:
            Array of band energies, normalized to 0-1
        """
        with self._lock:
            # Use recent samples for FFT
            n = 2048
            if self._buffer_pos >= n:
                data = self._buffer[self._buffer_pos - n:self._buffer_pos]
            else:
                data = np.concatenate([
                    self._buffer[-(n - self._buffer_pos):],
                    self._buffer[:self._buffer_pos]
                ])
            # FFT magnitude with a Hann window to reduce spectral leakage.
            fft = np.abs(np.fft.rfft(data * np.hanning(len(data))))
        # Divide the spectrum into equal-width bands.
        band_size = len(fft) // bands
        result = np.zeros(bands)
        for i in range(bands):
            start = i * band_size
            end = start + band_size
            result[i] = np.mean(fft[start:end])
        # Normalize so the strongest band is 1.0.
        max_val = np.max(result)
        if max_val > 0:
            result = result / max_val
        return result
    @property
    def is_running(self) -> bool:
        # True while the capture stream is active.
        return self._running
    def __enter__(self):
        self.start()
        return self
    def __exit__(self, *args):
        self.stop()
class FileAudioAnalyzer:
    """
    Audio analyzer that reads from a file (for testing/development).
    Pre-computes analysis and plays back in sync with video.
    """
    def __init__(self, path: str, analysis_data: dict = None):
        """
        Initialize from audio file.
        Args:
            path: Path to audio file
            analysis_data: Pre-computed analysis (times, values, etc.)
        """
        self.path = path
        self.analysis_data = {} if analysis_data is None else analysis_data
        self._current_time = 0
    def set_time(self, t: float):
        """Set current playback time."""
        self._current_time = t
    def get_energy(self) -> float:
        """Get energy at current time from pre-computed data."""
        energy_track = self.analysis_data.get("energy", {})
        return self._interpolate(energy_track, self._current_time)
    def get_beat(self) -> bool:
        """Check if the current time is within 50ms of a pre-computed beat."""
        beat_times = self.analysis_data.get("beats", {}).get("times", [])
        return any(abs(bt - self._current_time) < 0.05 for bt in beat_times)
    def _interpolate(self, track: dict, t: float) -> float:
        """Linearly interpolate the track value at time t, clamped at both ends."""
        times = track.get("times", [])
        values = track.get("values", [])
        if not times or not values:
            return 0.0
        # Clamp outside the sampled range.
        if t <= times[0]:
            return values[0]
        if t >= times[-1]:
            return values[-1]
        # Scan for the bracketing pair and blend linearly.
        for idx in range(1, len(times)):
            lo_t, hi_t = times[idx - 1], times[idx]
            if lo_t <= t <= hi_t:
                frac = (t - lo_t) / (hi_t - lo_t)
                return values[idx - 1] * (1 - frac) + values[idx] * frac
        return values[-1]
    @property
    def is_running(self) -> bool:
        # A file-backed analyzer has no live stream to stop.
        return True
class StreamingAudioAnalyzer:
    """
    Real-time audio analyzer that streams from a file.
    Reads audio in sync with video time and computes features on-the-fly.
    No pre-computation needed - analysis happens as frames are processed.
    Requires ffprobe/ffmpeg on PATH; __init__ probes the file's duration.
    """
    def __init__(self, path: str, sample_rate: int = 22050, hop_length: int = 512):
        """
        Initialize streaming audio analyzer.
        Args:
            path: Path to audio file
            sample_rate: Sample rate for analysis
            hop_length: Hop length for feature extraction
        """
        import subprocess
        import json
        self.path = path
        self.sample_rate = sample_rate
        self.hop_length = hop_length
        self._current_time = 0.0
        # Probe the container for total duration via ffprobe (JSON output).
        cmd = ["ffprobe", "-v", "quiet", "-print_format", "json",
               "-show_format", str(path)]
        result = subprocess.run(cmd, capture_output=True, text=True)
        info = json.loads(result.stdout)
        self.duration = float(info["format"]["duration"])
        # Audio buffer and state
        self._audio_data = None
        self._energy_history = []  # (time, flux) pairs for the adaptive threshold
        self._last_energy = 0
        self._last_beat_time = -1
        self._beat_threshold = 1.5
        self._min_beat_interval = 0.15
        # Load audio lazily (decoding the whole file is deferred to first use).
        self._loaded = False
    def _load_audio(self):
        """Decode the whole file to mono float32 PCM on first use."""
        if self._loaded:
            return
        import subprocess
        # Use ffmpeg to decode audio to raw PCM
        cmd = [
            "ffmpeg", "-v", "quiet",
            "-i", str(self.path),
            "-f", "f32le",  # 32-bit float, little-endian
            "-ac", "1",  # mono
            "-ar", str(self.sample_rate),
            "-"
        ]
        result = subprocess.run(cmd, capture_output=True)
        self._audio_data = np.frombuffer(result.stdout, dtype=np.float32)
        self._loaded = True
    def set_time(self, t: float):
        """Set current playback time."""
        self._current_time = t
    def get_energy(self) -> float:
        """Compute RMS energy in a small window around the current time."""
        self._load_audio()
        if self._audio_data is None or len(self._audio_data) == 0:
            return 0.0
        # Get sample index for current time
        sample_idx = int(self._current_time * self.sample_rate)
        window_size = self.hop_length * 2
        start = max(0, sample_idx - window_size // 2)
        end = min(len(self._audio_data), sample_idx + window_size // 2)
        if start >= end:
            return 0.0
        # RMS energy
        chunk = self._audio_data[start:end]
        rms = np.sqrt(np.mean(chunk ** 2))
        # Normalize to 0-1 range (approximate)
        energy = min(1.0, rms * 3.0)
        self._last_energy = energy
        return energy
    def get_beat(self) -> bool:
        """Detect beat using spectral flux (change in frequency content)."""
        self._load_audio()
        if self._audio_data is None or len(self._audio_data) == 0:
            return False
        # Get audio chunks for current and previous frame
        sample_idx = int(self._current_time * self.sample_rate)
        chunk_size = self.hop_length * 2
        # Current chunk
        start = max(0, sample_idx - chunk_size // 2)
        end = min(len(self._audio_data), sample_idx + chunk_size // 2)
        if end - start < chunk_size // 2:
            return False
        current_chunk = self._audio_data[start:end]
        # Previous chunk (one hop back)
        prev_start = max(0, start - self.hop_length)
        prev_end = max(0, end - self.hop_length)
        if prev_end <= prev_start:
            return False
        prev_chunk = self._audio_data[prev_start:prev_end]
        # Compute windowed magnitude spectra for both chunks.
        current_spec = np.abs(np.fft.rfft(current_chunk * np.hanning(len(current_chunk))))
        prev_spec = np.abs(np.fft.rfft(prev_chunk * np.hanning(len(prev_chunk))))
        # Spectral flux: sum of positive differences (onset = new frequencies appearing)
        min_len = min(len(current_spec), len(prev_spec))
        diff = current_spec[:min_len] - prev_spec[:min_len]
        flux = np.sum(np.maximum(0, diff))  # Only count increases
        # Normalize by spectrum size
        flux = flux / (min_len + 1)
        # Update flux history, keeping only the last 1.5 seconds.
        self._energy_history.append((self._current_time, flux))
        while self._energy_history and self._energy_history[0][0] < self._current_time - 1.5:
            self._energy_history.pop(0)
        if len(self._energy_history) < 3:
            return False
        # Adaptive threshold based on recent flux values
        flux_values = [f for t, f in self._energy_history]
        mean_flux = np.mean(flux_values)
        std_flux = np.std(flux_values) + 0.001  # Avoid division by zero
        # Beat if flux is above mean (more sensitive threshold)
        threshold = mean_flux + std_flux * 0.3  # Lower = more sensitive
        # NOTE(review): this local min_interval (0.1) shadows the instance's
        # _min_beat_interval (0.15) - confirm which is intended.
        min_interval = 0.1  # Allow up to 600 BPM
        time_ok = self._current_time - self._last_beat_time > min_interval
        is_beat = flux > threshold and time_ok
        if is_beat:
            self._last_beat_time = self._current_time
        return is_beat
    def get_spectrum(self, bands: int = 3) -> np.ndarray:
        """Get frequency spectrum at current time."""
        self._load_audio()
        if self._audio_data is None or len(self._audio_data) == 0:
            return np.zeros(bands)
        sample_idx = int(self._current_time * self.sample_rate)
        n = 2048
        start = max(0, sample_idx - n // 2)
        end = min(len(self._audio_data), sample_idx + n // 2)
        if end - start < n // 2:
            return np.zeros(bands)
        chunk = self._audio_data[start:end]
        # FFT magnitude with a Hann window.
        fft = np.abs(np.fft.rfft(chunk * np.hanning(len(chunk))))
        # Divide into equal-width bands.
        band_size = len(fft) // bands
        result = np.zeros(bands)
        for i in range(bands):
            s, e = i * band_size, (i + 1) * band_size
            result[i] = np.mean(fft[s:e])
        # Normalize so the strongest band is 1.0.
        max_val = np.max(result)
        if max_val > 0:
            result = result / max_val
        return result
    @property
    def is_running(self) -> bool:
        # File-backed analyzers are always "running".
        return True

308
streaming/backends.py Normal file
View File

@@ -0,0 +1,308 @@
"""
Effect processing backends.
Provides abstraction over different rendering backends:
- numpy: CPU-based, works everywhere, ~3-5 fps
- glsl: GPU-based, requires OpenGL, 30+ fps (future)
"""
import numpy as np
from abc import ABC, abstractmethod
from typing import List, Dict, Any, Optional
from pathlib import Path
class Backend(ABC):
    """Abstract interface for effect-processing backends."""
    @abstractmethod
    def load_effect(self, effect_path: Path) -> Any:
        """Load an effect definition from disk."""
        pass
    @abstractmethod
    def process_frame(
        self,
        frames: List[np.ndarray],
        effects_per_frame: List[List[Dict]],
        compositor_config: Dict,
        t: float,
        analysis_data: Dict,
    ) -> np.ndarray:
        """
        Run each input frame through its effect chain and composite the layers.
        Args:
            frames: One input frame per source
            effects_per_frame: One effect chain per source
            compositor_config: How the layers are blended
            t: Current time in seconds
            analysis_data: Analysis tracks for binding resolution
        Returns:
            The composited output frame
        """
        pass
class NumpyBackend(Backend):
    """
    CPU-based effect processing using NumPy.
    Uses existing sexp_effects interpreter for effect execution.
    Works on any system, but limited to ~3-5 fps for complex effects.
    """
    def __init__(self, recipe_dir: Optional[Path] = None, minimal_primitives: bool = True):
        """
        Args:
            recipe_dir: Base directory for resolving effect paths (default: cwd)
            minimal_primitives: Forwarded to the sexp interpreter factory
        """
        self.recipe_dir = recipe_dir or Path(".")
        self.minimal_primitives = minimal_primitives
        self._interpreter = None  # created lazily by _get_interpreter()
        self._loaded_effects = {}  # path string -> effect name (file stem)
    def _get_interpreter(self):
        """Lazy-load the sexp interpreter."""
        if self._interpreter is None:
            from sexp_effects import get_interpreter
            self._interpreter = get_interpreter(minimal_primitives=self.minimal_primitives)
        return self._interpreter
    def load_effect(self, effect_path: Path) -> Any:
        """Load an effect from sexp file (cached); returns the effect name."""
        effect_key = str(effect_path)
        if effect_key not in self._loaded_effects:
            interp = self._get_interpreter()
            interp.load_effect(str(effect_path))
            self._loaded_effects[effect_key] = effect_path.stem
        return self._loaded_effects[effect_key]
    def _resolve_binding(self, value: Any, t: float, analysis_data: Dict) -> Any:
        """
        Resolve a parameter binding to its value at time t.
        Plain values pass through unchanged; dicts tagged with "_binding" or
        "_bind" are looked up in analysis_data, linearly interpolated at t,
        and optionally mapped through a [lo, hi] range.
        """
        if not isinstance(value, dict):
            return value
        if "_binding" in value or "_bind" in value:
            source = value.get("source") or value.get("_bind")
            # NOTE(review): 'feature' is read but never used below - confirm intent.
            feature = value.get("feature", "values")
            range_map = value.get("range")
            track = analysis_data.get(source, {})
            times = track.get("times", [])
            values = track.get("values", [])
            if not times or not values:
                return 0.0
            # Find value at time t (linear interpolation, clamped at the ends)
            if t <= times[0]:
                val = values[0]
            elif t >= times[-1]:
                val = values[-1]
            else:
                # Linear scan for the bracketing pair of samples.
                for i in range(len(times) - 1):
                    if times[i] <= t <= times[i + 1]:
                        alpha = (t - times[i]) / (times[i + 1] - times[i])
                        val = values[i] * (1 - alpha) + values[i + 1] * alpha
                        break
                else:
                    val = values[-1]
            # Apply range mapping
            if range_map and len(range_map) == 2:
                val = range_map[0] + val * (range_map[1] - range_map[0])
            return val
        return value
    def _apply_effect(
        self,
        frame: np.ndarray,
        effect_name: str,
        params: Dict,
        t: float,
        analysis_data: Dict,
    ) -> np.ndarray:
        """Apply a single effect to a frame; unknown effects pass through."""
        # Resolve bindings in params
        resolved_params = {"_time": t}
        for key, value in params.items():
            # Skip bookkeeping keys that are not effect parameters.
            if key in ("effect", "effect_path", "cid", "analysis_refs"):
                continue
            resolved_params[key] = self._resolve_binding(value, t, analysis_data)
        # Try fast native effects first
        result = self._apply_native_effect(frame, effect_name, resolved_params)
        if result is not None:
            return result
        # Fall back to sexp interpreter for complex effects
        interp = self._get_interpreter()
        if effect_name in interp.effects:
            result, _ = interp.run_effect(effect_name, frame, resolved_params, {})
            return result
        # Unknown effect - pass through
        return frame
    def _apply_native_effect(
        self,
        frame: np.ndarray,
        effect_name: str,
        params: Dict,
    ) -> Optional[np.ndarray]:
        """
        Fast native numpy/OpenCV effects for real-time streaming.
        Returns None when effect_name has no native implementation here.
        """
        import cv2
        if effect_name == "zoom":
            amount = float(params.get("amount", 1.0))
            if abs(amount - 1.0) < 0.01:
                return frame  # effectively a no-op zoom
            h, w = frame.shape[:2]
            # Crop center and resize
            new_w, new_h = int(w / amount), int(h / amount)
            x1, y1 = (w - new_w) // 2, (h - new_h) // 2
            cropped = frame[y1:y1+new_h, x1:x1+new_w]
            return cv2.resize(cropped, (w, h))
        elif effect_name == "rotate":
            angle = float(params.get("angle", 0))
            if abs(angle) < 0.5:
                return frame  # below the visible threshold
            h, w = frame.shape[:2]
            center = (w // 2, h // 2)
            matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
            return cv2.warpAffine(frame, matrix, (w, h))
        elif effect_name == "brightness":
            amount = float(params.get("amount", 1.0))
            return np.clip(frame * amount, 0, 255).astype(np.uint8)
        elif effect_name == "invert":
            amount = float(params.get("amount", 1.0))
            if amount < 0.5:
                return frame
            return 255 - frame
        # Not a native effect
        return None
    def process_frame(
        self,
        frames: List[np.ndarray],
        effects_per_frame: List[List[Dict]],
        compositor_config: Dict,
        t: float,
        analysis_data: Dict,
    ) -> np.ndarray:
        """
        Process frames through effects and composite.
        Returns a black 720p frame when no sources are provided.
        """
        if not frames:
            return np.zeros((720, 1280, 3), dtype=np.uint8)
        processed = []
        # Apply effects to each input frame
        for i, (frame, effects) in enumerate(zip(frames, effects_per_frame)):
            result = frame.copy()
            for effect_config in effects:
                effect_name = effect_config.get("effect", "")
                if effect_name:
                    result = self._apply_effect(
                        result, effect_name, effect_config, t, analysis_data
                    )
            processed.append(result)
        # Composite layers (single layer needs no blending).
        if len(processed) == 1:
            return processed[0]
        return self._composite(processed, compositor_config, t, analysis_data)
    def _composite(
        self,
        frames: List[np.ndarray],
        config: Dict,
        t: float,
        analysis_data: Dict,
    ) -> np.ndarray:
        """Composite multiple frames into one weighted blend."""
        # NOTE(review): 'mode' is read but only weighted blending is implemented.
        mode = config.get("mode", "alpha")
        weights = config.get("weights", [1.0 / len(frames)] * len(frames))
        # Resolve weight bindings (weights may be time-varying bindings).
        resolved_weights = []
        for w in weights:
            resolved_weights.append(self._resolve_binding(w, t, analysis_data))
        # Normalize weights to sum to 1; fall back to equal weights.
        total = sum(resolved_weights)
        if total > 0:
            resolved_weights = [w / total for w in resolved_weights]
        else:
            resolved_weights = [1.0 / len(frames)] * len(frames)
        # Resize frames to match first frame
        target_h, target_w = frames[0].shape[:2]
        resized = []
        for frame in frames:
            if frame.shape[:2] != (target_h, target_w):
                import cv2
                frame = cv2.resize(frame, (target_w, target_h))
            resized.append(frame.astype(np.float32))
        # Weighted blend in float32 to avoid uint8 overflow, clipped back.
        result = np.zeros_like(resized[0])
        for frame, weight in zip(resized, resolved_weights):
            result += frame * weight
        return np.clip(result, 0, 255).astype(np.uint8)
class GLSLBackend(Backend):
    """
    GPU-based effect processing using OpenGL/GLSL.
    Requires GPU with OpenGL 3.3+ support (or Mesa software renderer).
    Achieves 30+ fps real-time processing.
    TODO: Implement when ready for GPU acceleration.
    """
    def __init__(self):
        # Constructing this backend is an explicit error until it is implemented.
        raise NotImplementedError(
            "GLSL backend not yet implemented. Use NumpyBackend for now."
        )
    def load_effect(self, effect_path: Path) -> Any:
        # Stub: satisfies the abstract interface; unreachable since __init__ raises.
        pass
    def process_frame(
        self,
        frames: List[np.ndarray],
        effects_per_frame: List[List[Dict]],
        compositor_config: Dict,
        t: float,
        analysis_data: Dict,
    ) -> np.ndarray:
        # Stub: satisfies the abstract interface; unreachable since __init__ raises.
        pass
def get_backend(name: str = "numpy", **kwargs) -> Backend:
    """
    Look up a backend implementation by name.
    Args:
        name: "numpy" or "glsl"
        **kwargs: Forwarded to the backend constructor
    Returns:
        Backend instance
    Raises:
        ValueError: If the name is not recognized
    """
    if name == "glsl":
        return GLSLBackend(**kwargs)
    if name == "numpy":
        return NumpyBackend(**kwargs)
    raise ValueError(f"Unknown backend: {name}")

595
streaming/compositor.py Normal file
View File

@@ -0,0 +1,595 @@
"""
Streaming video compositor.
Main entry point for the streaming pipeline. Combines:
- Multiple video sources (with looping)
- Per-source effect chains
- Layer compositing
- Optional live audio analysis
- Output to display/file/stream
"""
import time
import sys
import numpy as np
from typing import List, Dict, Any, Optional, Union
from pathlib import Path
from .sources import Source, VideoSource
from .backends import Backend, NumpyBackend, get_backend
from .output import Output, DisplayOutput, FileOutput, MultiOutput
class StreamingCompositor:
"""
Real-time streaming video compositor.
Reads frames from multiple sources, applies effects, composites layers,
and outputs the result - all frame-by-frame without intermediate files.
Example:
compositor = StreamingCompositor(
sources=["video1.mp4", "video2.mp4"],
effects_per_source=[
[{"effect": "rotate", "angle": 45}],
[{"effect": "zoom", "amount": 1.5}],
],
compositor_config={"mode": "alpha", "weights": [0.5, 0.5]},
)
compositor.run(output="preview", duration=60)
"""
def __init__(
self,
sources: List[Union[str, Source]],
effects_per_source: List[List[Dict]] = None,
compositor_config: Dict = None,
analysis_data: Dict = None,
backend: str = "numpy",
recipe_dir: Path = None,
fps: float = 30,
audio_source: str = None,
):
"""
Initialize the streaming compositor.
Args:
sources: List of video paths or Source objects
effects_per_source: List of effect chains, one per source
compositor_config: How to blend layers (mode, weights)
analysis_data: Pre-computed analysis data for bindings
backend: "numpy" or "glsl"
recipe_dir: Directory for resolving relative effect paths
fps: Output frame rate
audio_source: Path to audio file for streaming analysis
"""
self.fps = fps
self.recipe_dir = recipe_dir or Path(".")
self.analysis_data = analysis_data or {}
# Initialize streaming audio analyzer if audio source provided
self._audio_analyzer = None
self._audio_source = audio_source
if audio_source:
from .audio import StreamingAudioAnalyzer
self._audio_analyzer = StreamingAudioAnalyzer(audio_source)
print(f"Streaming audio: {audio_source}", file=sys.stderr)
# Initialize sources
self.sources: List[Source] = []
for src in sources:
if isinstance(src, Source):
self.sources.append(src)
elif isinstance(src, (str, Path)):
self.sources.append(VideoSource(str(src), target_fps=fps))
else:
raise ValueError(f"Unknown source type: {type(src)}")
# Effect chains (default: no effects)
self.effects_per_source = effects_per_source or [[] for _ in self.sources]
if len(self.effects_per_source) != len(self.sources):
raise ValueError(
f"effects_per_source length ({len(self.effects_per_source)}) "
f"must match sources length ({len(self.sources)})"
)
# Compositor config (default: equal blend)
self.compositor_config = compositor_config or {
"mode": "alpha",
"weights": [1.0 / len(self.sources)] * len(self.sources),
}
# Initialize backend
self.backend: Backend = get_backend(
backend,
recipe_dir=self.recipe_dir,
)
# Load effects
self._load_effects()
def _load_effects(self):
"""Pre-load all effect definitions."""
for effects in self.effects_per_source:
for effect_config in effects:
effect_path = effect_config.get("effect_path")
if effect_path:
full_path = self.recipe_dir / effect_path
if full_path.exists():
self.backend.load_effect(full_path)
def _create_output(
self,
output: Union[str, Output],
size: tuple,
) -> Output:
"""Create output target from string or Output object."""
if isinstance(output, Output):
return output
if output == "preview":
return DisplayOutput("Streaming Preview", size,
audio_source=self._audio_source, fps=self.fps)
elif output == "null":
from .output import NullOutput
return NullOutput()
elif isinstance(output, str):
return FileOutput(output, size, fps=self.fps, audio_source=self._audio_source)
else:
raise ValueError(f"Unknown output type: {output}")
def run(
self,
output: Union[str, Output] = "preview",
duration: float = None,
audio_analyzer=None,
show_fps: bool = True,
recipe_executor=None,
):
"""
Run the streaming compositor.
Args:
output: Output target - "preview", filename, or Output object
duration: Duration in seconds (None = run until quit)
audio_analyzer: Optional AudioAnalyzer for live audio reactivity
show_fps: Show FPS counter in console
recipe_executor: Optional StreamingRecipeExecutor for full recipe logic
"""
# Determine output size from first source
output_size = self.sources[0].size
# Create output
out = self._create_output(output, output_size)
# Determine duration
if duration is None:
# Run until stopped (or min source duration if not looping)
duration = min(s.duration for s in self.sources)
if duration == float('inf'):
duration = 3600 # 1 hour max for live sources
total_frames = int(duration * self.fps)
frame_time = 1.0 / self.fps
print(f"Streaming: {len(self.sources)} sources -> {output}", file=sys.stderr)
print(f"Duration: {duration:.1f}s, {total_frames} frames @ {self.fps}fps", file=sys.stderr)
print(f"Output size: {output_size[0]}x{output_size[1]}", file=sys.stderr)
print(f"Press 'q' to quit (if preview)", file=sys.stderr)
# Frame loop
start_time = time.time()
frame_count = 0
fps_update_interval = 30 # Update FPS display every N frames
last_fps_time = start_time
last_fps_count = 0
try:
for frame_num in range(total_frames):
if not out.is_open:
print(f"\nOutput closed at frame {frame_num}", file=sys.stderr)
break
t = frame_num * frame_time
try:
# Update analysis data from streaming audio (file-based)
energy = 0.0
is_beat = False
if self._audio_analyzer:
self._update_from_audio(self._audio_analyzer, t)
energy = self.analysis_data.get("live_energy", {}).get("values", [0])[0]
is_beat = self.analysis_data.get("live_beat", {}).get("values", [0])[0] > 0.5
elif audio_analyzer:
self._update_from_audio(audio_analyzer, t)
energy = self.analysis_data.get("live_energy", {}).get("values", [0])[0]
is_beat = self.analysis_data.get("live_beat", {}).get("values", [0])[0] > 0.5
# Read frames from all sources
frames = [src.read_frame(t) for src in self.sources]
# Process through recipe executor if provided
if recipe_executor:
result = self._process_with_executor(
frames, recipe_executor, energy, is_beat, t
)
else:
# Simple backend processing
result = self.backend.process_frame(
frames,
self.effects_per_source,
self.compositor_config,
t,
self.analysis_data,
)
# Output
out.write(result, t)
frame_count += 1
# FPS display
if show_fps and frame_count % fps_update_interval == 0:
now = time.time()
elapsed = now - last_fps_time
if elapsed > 0:
current_fps = (frame_count - last_fps_count) / elapsed
progress = frame_num / total_frames * 100
print(
f"\r {progress:5.1f}% | {current_fps:5.1f} fps | "
f"frame {frame_num}/{total_frames}",
end="", file=sys.stderr
)
last_fps_time = now
last_fps_count = frame_count
except Exception as e:
print(f"\nError at frame {frame_num}, t={t:.1f}s: {e}", file=sys.stderr)
import traceback
traceback.print_exc()
break
except KeyboardInterrupt:
print("\nInterrupted", file=sys.stderr)
finally:
out.close()
for src in self.sources:
if hasattr(src, 'close'):
src.close()
# Final stats
elapsed = time.time() - start_time
avg_fps = frame_count / elapsed if elapsed > 0 else 0
print(f"\nCompleted: {frame_count} frames in {elapsed:.1f}s ({avg_fps:.1f} fps avg)", file=sys.stderr)
def _process_with_executor(
    self,
    frames: List[np.ndarray],
    executor,
    energy: float,
    is_beat: bool,
    t: float,
) -> np.ndarray:
    """
    Process frames using the recipe executor for full pipeline.

    Implements:
    1. process-pair: two clips per source with effects, blended
    2. cycle-crossfade: dynamic composition with zoom and weights
    3. Final effects: whole-spin, ripple

    Args:
        frames: One RGB frame per source (sizes may differ; all are
            letterboxed to the first source's size).
        executor: Recipe executor driving per-clip effect params,
            cycle weights/zooms, and final-effect params.
        energy: Current audio energy value.
        is_beat: Whether a beat falls on this frame.
        t: Current time in seconds.

    Returns:
        Fully composited uint8 RGB frame at the first source's size.
    """
    import cv2
    # Target size from first source
    target_h, target_w = frames[0].shape[:2]
    # Resize all frames to target size (letterbox to preserve aspect ratio)
    resized_frames = []
    for frame in frames:
        fh, fw = frame.shape[:2]
        if (fh, fw) != (target_h, target_w):
            # Calculate scale to fit while preserving aspect ratio
            scale = min(target_w / fw, target_h / fh)
            new_w, new_h = int(fw * scale), int(fh * scale)
            resized = cv2.resize(frame, (new_w, new_h))
            # Center on black canvas
            canvas = np.zeros((target_h, target_w, 3), dtype=np.uint8)
            x_off = (target_w - new_w) // 2
            y_off = (target_h - new_h) // 2
            canvas[y_off:y_off+new_h, x_off:x_off+new_w] = resized
            resized_frames.append(canvas)
        else:
            resized_frames.append(frame)
    frames = resized_frames
    # Update executor state
    executor.on_frame(energy, is_beat, t)
    # Get weights to know which sources are active
    weights = executor.get_cycle_weights()
    # Process each source as a "pair" (clip A and B with different effects)
    processed_pairs = []
    for i, frame in enumerate(frames):
        # Skip sources with zero weight (but still need placeholder)
        if i < len(weights) and weights[i] < 0.001:
            processed_pairs.append(None)
            continue
        # Get effect params for clip A and B
        params_a = executor.get_effect_params(i, "a", energy)
        params_b = executor.get_effect_params(i, "b", energy)
        pair_params = executor.get_pair_params(i)
        # Process clip A
        clip_a = self._apply_clip_effects(frame.copy(), params_a, t)
        # Process clip B
        clip_b = self._apply_clip_effects(frame.copy(), params_b, t)
        # Blend A and B using pair_mix opacity
        opacity = pair_params["blend_opacity"]
        blended = cv2.addWeighted(
            clip_a, 1 - opacity,
            clip_b, opacity,
            0
        )
        # Apply pair rotation
        h, w = blended.shape[:2]
        center = (w // 2, h // 2)
        angle = pair_params["pair_rotation"]
        if abs(angle) > 0.5:
            matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
            blended = cv2.warpAffine(blended, matrix, (w, h))
        processed_pairs.append(blended)
    # Cycle-crossfade composition
    # NOTE(review): weights re-fetched after on_frame-driven processing;
    # presumably identical to the fetch above - confirm get_cycle_weights()
    # has no side effects.
    weights = executor.get_cycle_weights()
    zooms = executor.get_cycle_zooms()
    # Apply zoom per pair and composite
    h, w = target_h, target_w
    result = np.zeros((h, w, 3), dtype=np.float32)
    for idx, (pair, weight, zoom) in enumerate(zip(processed_pairs, weights, zooms)):
        # Skip zero-weight sources
        if pair is None or weight < 0.001:
            continue
        orig_shape = pair.shape  # (unused)
        # Apply zoom
        if zoom > 1.01:
            # Zoom in: crop center and resize up
            new_w, new_h = int(w / zoom), int(h / zoom)
            if new_w > 0 and new_h > 0:
                x1, y1 = (w - new_w) // 2, (h - new_h) // 2
                cropped = pair[y1:y1+new_h, x1:x1+new_w]
                pair = cv2.resize(cropped, (w, h))
        elif zoom < 0.99:
            # Zoom out: shrink video and center on black
            scaled_w, scaled_h = int(w * zoom), int(h * zoom)
            if scaled_w > 0 and scaled_h > 0:
                shrunk = cv2.resize(pair, (scaled_w, scaled_h))
                canvas = np.zeros((h, w, 3), dtype=np.uint8)
                x_off, y_off = (w - scaled_w) // 2, (h - scaled_h) // 2
                canvas[y_off:y_off+scaled_h, x_off:x_off+scaled_w] = shrunk
                pair = canvas.copy()
        # Draw colored border - size indicates zoom level
        border_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0)]
        color = border_colors[idx % 4]
        thickness = max(3, int(10 * weight))  # Thicker border = higher weight
        # Writing rows/columns in place requires a contiguous buffer
        pair = np.ascontiguousarray(pair)
        pair[:thickness, :] = color
        pair[-thickness:, :] = color
        pair[:, :thickness] = color
        pair[:, -thickness:] = color
        # Weighted accumulation in float32 to avoid uint8 overflow
        result += pair.astype(np.float32) * weight
    result = np.clip(result, 0, 255).astype(np.uint8)
    # Apply final effects (whole-spin, ripple)
    final_params = executor.get_final_effects(energy)
    # Whole spin
    spin_angle = final_params["whole_spin_angle"]
    if abs(spin_angle) > 0.5:
        center = (w // 2, h // 2)
        matrix = cv2.getRotationMatrix2D(center, spin_angle, 1.0)
        result = cv2.warpAffine(result, matrix, (w, h))
    # Ripple effect
    amp = final_params["ripple_amplitude"]
    if amp > 1:
        result = self._apply_ripple(result, amp,
                                    final_params["ripple_cx"],
                                    final_params["ripple_cy"],
                                    t)
    return result
def _apply_clip_effects(self, frame: np.ndarray, params: dict, t: float) -> np.ndarray:
    """Apply per-clip effects in order: rotate, zoom, invert, hue_shift, ascii."""
    import cv2
    height, width = frame.shape[:2]
    # Rotation about the frame centre (skip near-zero angles).
    rot = params["rotate_angle"]
    if abs(rot) > 0.5:
        rot_mat = cv2.getRotationMatrix2D((width // 2, height // 2), rot, 1.0)
        frame = cv2.warpAffine(frame, rot_mat, (width, height))
    # Centre zoom implemented as crop-and-resize.
    factor = params["zoom_amount"]
    if abs(factor - 1.0) > 0.01:
        crop_w, crop_h = int(width / factor), int(height / factor)
        if crop_w > 0 and crop_h > 0:
            left = max(0, (width - crop_w) // 2)
            top = max(0, (height - crop_h) // 2)
            right = min(width, left + crop_w)
            bottom = min(height, top + crop_h)
            if right > left and bottom > top:
                frame = cv2.resize(frame[top:bottom, left:right], (width, height))
    # Full colour inversion when enabled.
    if params["invert_amount"] > 0.5:
        frame = 255 - frame
    # Hue rotation via HSV (OpenCV hue channel spans 0-179, i.e. degrees / 2).
    degrees = params["hue_degrees"]
    if abs(degrees) > 1:
        hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
        hsv[:, :, 0] = (hsv[:, :, 0].astype(np.int32) + int(degrees / 2)) % 180
        frame = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
    # Optional ASCII-art rendering pass.
    if params["ascii_mix"] > 0.5:
        frame = self._apply_ascii(frame, max(4, int(params["ascii_char_size"])))
    return frame
def _apply_ascii(self, frame: np.ndarray, char_size: int) -> np.ndarray:
    """
    Render the frame as colored ASCII art.

    The frame is split into char_size x char_size cells; each cell is
    replaced by a glyph (denser glyphs for brighter cells) drawn in the
    cell's average color on a black background.

    Args:
        frame: RGB uint8 frame.
        char_size: Cell/glyph size in pixels.

    Returns:
        RGB uint8 frame with the same dimensions as the input.
    """
    import cv2
    from PIL import Image, ImageDraw, ImageFont
    h, w = frame.shape[:2]
    chars = " .:-=+*#%@"  # darkest -> brightest
    # ImageFont.truetype raises OSError when the font file is missing or
    # unreadable; fall back to PIL's built-in bitmap font in that case.
    try:
        font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", char_size)
    except OSError:
        font = ImageFont.load_default()
    # Sample cells using area interpolation (fast block average)
    rows = h // char_size
    cols = w // char_size
    if rows < 1 or cols < 1:
        # Frame smaller than a single cell - nothing sensible to draw.
        return frame
    # Crop to exact grid and downsample so each pixel is one cell's mean
    cropped = frame[:rows * char_size, :cols * char_size]
    cell_colors = cv2.resize(cropped, (cols, rows), interpolation=cv2.INTER_AREA)
    # Rec. 601 luma weights, normalized to [0, 1]
    luminances = (0.299 * cell_colors[:, :, 0] +
                  0.587 * cell_colors[:, :, 1] +
                  0.114 * cell_colors[:, :, 2]) / 255.0
    # Create output image
    out_h = rows * char_size
    out_w = cols * char_size
    output = Image.new('RGB', (out_w, out_h), (0, 0, 0))
    draw = ImageDraw.Draw(output)
    # Draw one character per cell
    max_idx = len(chars) - 1
    for r in range(rows):
        y = r * char_size
        for c in range(cols):
            lum = luminances[r, c]
            # PIL fill tuples expect plain ints, not numpy scalars.
            color = tuple(int(v) for v in cell_colors[r, c])
            char = chars[int(lum * max_idx)]
            draw.text((c * char_size, y), char, fill=color, font=font)
    # Convert back to numpy and resize to the original frame size
    result = np.array(output)
    if result.shape[:2] != (h, w):
        result = cv2.resize(result, (w, h), interpolation=cv2.INTER_LINEAR)
    return result
def _apply_ripple(self, frame: np.ndarray, amplitude: float,
                  cx: float, cy: float, t: float = 0) -> np.ndarray:
    """Apply a radial ripple distortion centred at normalized (cx, cy)."""
    import cv2
    h, w = frame.shape[:2]
    # Fixed wave parameters (matching recipe: frequency=8, decay=2, speed=5).
    freq = 8
    decay = 2
    speed = 5
    origin_x, origin_y = cx * w, cy * h
    longest = max(w, h)
    # Pixel coordinate grids.
    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    # Offsets and radial distance from the ripple centre.
    off_x = xs - origin_x
    off_y = ys - origin_y
    radius = np.sqrt(off_x * off_x + off_y * off_y)
    phase = t * speed * 2 * np.pi
    # Sinusoidal displacement along the radius (original formula).
    wave = np.sin(2 * np.pi * freq * radius / longest + phase) * amplitude
    if decay > 0:
        # Exponential falloff away from the centre.
        wave = wave * np.exp(-radius * decay / longest)
    # Unit radial direction; centre pixel maps to zero displacement.
    with np.errstate(divide='ignore', invalid='ignore'):
        unit_x = np.where(radius > 0, off_x / radius, 0)
        unit_y = np.where(radius > 0, off_y / radius, 0)
    map_x = (xs + wave * unit_x).astype(np.float32)
    map_y = (ys + wave * unit_y).astype(np.float32)
    return cv2.remap(frame, map_x, map_y, cv2.INTER_LINEAR,
                     borderMode=cv2.BORDER_REFLECT)
def _update_from_audio(self, analyzer, t: float):
"""Update analysis data from audio analyzer (streaming or live)."""
# Set time for file-based streaming analyzers
if hasattr(analyzer, 'set_time'):
analyzer.set_time(t)
# Get current audio features
energy = analyzer.get_energy() if hasattr(analyzer, 'get_energy') else 0
beat = analyzer.get_beat() if hasattr(analyzer, 'get_beat') else False
# Update analysis tracks - these can be referenced by effect bindings
self.analysis_data["live_energy"] = {
"times": [t],
"values": [energy],
"duration": float('inf'),
}
self.analysis_data["live_beat"] = {
"times": [t],
"values": [1.0 if beat else 0.0],
"duration": float('inf'),
}
def quick_preview(
    sources: List[str],
    effects: List[List[Dict]] = None,
    duration: float = 10,
    fps: float = 30,
):
    """
    Quick preview helper - show sources with optional effects.

    Example:
        quick_preview(["video1.mp4", "video2.mp4"], duration=30)
    """
    preview = StreamingCompositor(
        sources=sources,
        effects_per_source=effects,
        fps=fps,
    )
    preview.run(output="preview", duration=duration)

125
streaming/demo.py Normal file
View File

@@ -0,0 +1,125 @@
#!/usr/bin/env python3
"""
Demo script for streaming compositor.
Usage:
# Preview two videos blended
python -m streaming.demo preview video1.mp4 video2.mp4
# Record output to file
python -m streaming.demo record video1.mp4 video2.mp4 -o output.mp4
# Benchmark (no output)
python -m streaming.demo benchmark video1.mp4 --duration 10
"""
import argparse
import sys
from pathlib import Path
# Add parent to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent))
from streaming import StreamingCompositor, VideoSource
from streaming.output import NullOutput
def demo_preview(sources: list, duration: float, effects: bool = False):
    """Preview sources, optionally applying a couple of canned effects."""
    chains = None
    if effects:
        # One simple effect chain per source, truncated to the source count.
        chains = [
            [{"effect": "rotate", "angle": 15}],
            [{"effect": "zoom", "amount": 1.2}],
        ][:len(sources)]
    comp = StreamingCompositor(
        sources=sources,
        effects_per_source=chains,
        recipe_dir=Path(__file__).parent.parent,
    )
    comp.run(output="preview", duration=duration)
def demo_record(sources: list, output_path: str, duration: float):
    """Record the blended composition to a file."""
    recorder = StreamingCompositor(
        sources=sources,
        recipe_dir=Path(__file__).parent.parent,
    )
    recorder.run(output=output_path, duration=duration)
def demo_benchmark(sources: list, duration: float):
    """Benchmark processing speed by discarding all output frames."""
    bench = StreamingCompositor(
        sources=sources,
        recipe_dir=Path(__file__).parent.parent,
    )
    bench.run(output="null", duration=duration)
def demo_audio_reactive(sources: list, duration: float):
    """Preview with live audio reactivity (zoom bound to microphone energy)."""
    from streaming.audio import AudioAnalyzer

    def _reactive_chain():
        # Fresh dicts per source so chains never share mutable state.
        return [{
            "effect": "zoom",
            "amount": {"_binding": True, "source": "live_energy", "feature": "values", "range": [1.0, 1.5]},
        }]

    compositor = StreamingCompositor(
        sources=sources,
        effects_per_source=[_reactive_chain() for _ in sources],
        recipe_dir=Path(__file__).parent.parent,
    )
    # Run with audio when available; otherwise degrade to a silent preview.
    try:
        with AudioAnalyzer() as audio:
            print("Audio analyzer started. Make some noise!", file=sys.stderr)
            compositor.run(output="preview", duration=duration, audio_analyzer=audio)
    except Exception as e:
        print(f"Audio not available: {e}", file=sys.stderr)
        print("Running without audio...", file=sys.stderr)
        compositor.run(output="preview", duration=duration)
def main():
    """Parse CLI arguments and dispatch to the selected demo mode."""
    parser = argparse.ArgumentParser(description="Streaming compositor demo")
    parser.add_argument("mode", choices=["preview", "record", "benchmark", "audio"],
                        help="Demo mode")
    parser.add_argument("sources", nargs="+", help="Video source files")
    parser.add_argument("-o", "--output", help="Output file (for record mode)")
    parser.add_argument("-d", "--duration", type=float, default=30,
                        help="Duration in seconds")
    parser.add_argument("--effects", action="store_true",
                        help="Apply simple effects (for preview)")
    args = parser.parse_args()
    # Bail out on the first missing source file.
    for candidate in args.sources:
        if Path(candidate).exists():
            continue
        print(f"Error: Source not found: {candidate}", file=sys.stderr)
        sys.exit(1)
    mode = args.mode
    if mode == "record":
        if not args.output:
            print("Error: --output required for record mode", file=sys.stderr)
            sys.exit(1)
        demo_record(args.sources, args.output, args.duration)
    elif mode == "preview":
        demo_preview(args.sources, args.duration, args.effects)
    elif mode == "benchmark":
        demo_benchmark(args.sources, args.duration)
    elif mode == "audio":
        demo_audio_reactive(args.sources, args.duration)
if __name__ == "__main__":
main()

369
streaming/output.py Normal file
View File

@@ -0,0 +1,369 @@
"""
Output targets for streaming compositor.
Supports:
- Display window (preview)
- File output (recording)
- Stream output (RTMP, etc.) - future
"""
import numpy as np
import subprocess
from abc import ABC, abstractmethod
from typing import Tuple, Optional
from pathlib import Path
class Output(ABC):
    """Abstract base class for output targets (display, file, multi, null)."""

    @abstractmethod
    def write(self, frame: np.ndarray, t: float):
        """Write a frame to the output.

        Args:
            frame: RGB frame array (subclasses pipe it as raw rgb24).
            t: Presentation time of the frame in seconds.
        """
        pass

    @abstractmethod
    def close(self):
        """Close the output and clean up resources."""
        pass

    @property
    @abstractmethod
    def is_open(self) -> bool:
        """Check if output is still open/valid."""
        pass
class DisplayOutput(Output):
    """
    Display frames using mpv (handles Wayland properly).

    Frames are piped to mpv's stdin as raw rgb24; an optional audio source
    is played alongside via ffplay. Useful for live preview. Press 'q' to quit.
    """

    def __init__(self, title: str = "Streaming Preview", size: Tuple[int, int] = None,
                 audio_source: str = None, fps: float = 30):
        """
        Args:
            title: Window title passed to mpv.
            size: Declared (width, height); the mpv window is actually
                sized from the first frame written.
            audio_source: Optional audio file/URL played through ffplay.
            fps: Frame rate advertised to mpv's rawvideo demuxer.
        """
        self.title = title
        self.size = size
        self.audio_source = audio_source
        self.fps = fps
        self._is_open = True
        self._process = None
        self._audio_process = None

    def _start_mpv(self, frame_size: Tuple[int, int]):
        """Start mpv process for display, sized to the first frame."""
        import sys
        w, h = frame_size
        cmd = [
            "mpv",
            "--no-cache",
            "--demuxer=rawvideo",
            f"--demuxer-rawvideo-w={w}",
            f"--demuxer-rawvideo-h={h}",
            "--demuxer-rawvideo-mp-format=rgb24",
            f"--demuxer-rawvideo-fps={self.fps}",
            f"--title={self.title}",
            "-",
        ]
        print(f"Starting mpv: {' '.join(cmd)}", file=sys.stderr)
        # Discard mpv's stderr: with PIPE and no reader, the OS pipe buffer
        # can fill and block mpv (and then our writes) mid-stream.
        self._process = subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
        )
        # Start audio playback if we have an audio source
        if self.audio_source:
            audio_cmd = [
                "ffplay", "-nodisp", "-autoexit", "-loglevel", "quiet",
                str(self.audio_source)
            ]
            print(f"Starting audio: {self.audio_source}", file=sys.stderr)
            self._audio_process = subprocess.Popen(
                audio_cmd,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )

    def write(self, frame: np.ndarray, t: float):
        """Display one frame; lazily launches mpv on the first call."""
        if not self._is_open:
            return
        # Normalize to contiguous uint8 - mpv expects exactly w*h*3 raw bytes.
        if frame.dtype != np.uint8:
            frame = np.clip(frame, 0, 255).astype(np.uint8)
        if not frame.flags['C_CONTIGUOUS']:
            frame = np.ascontiguousarray(frame)
        # Start mpv on first frame
        if self._process is None:
            self._start_mpv((frame.shape[1], frame.shape[0]))
        # Check if mpv is still running (user may have quit)
        if self._process.poll() is not None:
            self._is_open = False
            return
        try:
            self._process.stdin.write(frame.tobytes())
            self._process.stdin.flush()  # Prevent buffering
        except BrokenPipeError:
            self._is_open = False

    def close(self):
        """Close the display and audio processes."""
        if self._process:
            try:
                self._process.stdin.close()
            except OSError:
                # Pipe already broken (mpv exited) - nothing left to flush.
                pass
            self._process.terminate()
            self._process.wait()
        if self._audio_process:
            self._audio_process.terminate()
            self._audio_process.wait()
        self._is_open = False

    @property
    def is_open(self) -> bool:
        """Open until close() is called or the mpv process exits."""
        if self._process and self._process.poll() is not None:
            self._is_open = False
        return self._is_open
class FileOutput(Output):
    """
    Write frames to a video file using ffmpeg.

    Frames are piped as raw rgb24 to an ffmpeg subprocess, which encodes
    them (optionally muxing an audio track) into the target file.
    """

    def __init__(
        self,
        path: str,
        size: Tuple[int, int],
        fps: float = 30,
        codec: str = "libx264",
        crf: int = 18,
        preset: str = "fast",
        audio_source: str = None,
    ):
        """
        Args:
            path: Destination video file path.
            size: (width, height) of the encoded video.
            fps: Output frame rate.
            codec: ffmpeg video codec name.
            crf: Constant rate factor (lower = higher quality).
            preset: Encoder speed/quality preset.
            audio_source: Optional audio file muxed alongside the video.
        """
        self.path = Path(path)
        self.size = size
        self.fps = fps
        self._is_open = True
        # Build ffmpeg command: raw RGB frames arrive on stdin.
        cmd = [
            "ffmpeg", "-y",
            "-f", "rawvideo",
            "-vcodec", "rawvideo",
            "-pix_fmt", "rgb24",
            "-s", f"{size[0]}x{size[1]}",
            "-r", str(fps),
            "-i", "-",
        ]
        # Add audio input if provided
        if audio_source:
            cmd.extend(["-i", str(audio_source)])
            # Explicitly map: video from input 0 (rawvideo), audio from input 1
            cmd.extend(["-map", "0:v", "-map", "1:a"])
        cmd.extend([
            "-c:v", codec,
            "-preset", preset,
            "-crf", str(crf),
            "-pix_fmt", "yuv420p",
        ])
        # Add audio codec if we have audio; -shortest ends at the shorter stream.
        if audio_source:
            cmd.extend(["-c:a", "aac", "-b:a", "192k", "-shortest"])
        cmd.append(str(self.path))
        import sys
        print(f"FileOutput cmd: {' '.join(cmd)}", file=sys.stderr)
        self._process = subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stderr=None,  # Show errors for debugging
        )

    def write(self, frame: np.ndarray, t: float):
        """Append one frame, resized/normalized to the configured format."""
        if not self._is_open or self._process.poll() is not None:
            self._is_open = False
            return
        # Resize if needed
        if frame.shape[1] != self.size[0] or frame.shape[0] != self.size[1]:
            import cv2
            frame = cv2.resize(frame, self.size)
        # ffmpeg expects exactly w*h*3 uint8 bytes per frame; a float or
        # non-contiguous array would silently corrupt the encoded stream.
        if frame.dtype != np.uint8:
            frame = np.clip(frame, 0, 255).astype(np.uint8)
        if not frame.flags['C_CONTIGUOUS']:
            frame = np.ascontiguousarray(frame)
        try:
            self._process.stdin.write(frame.tobytes())
        except BrokenPipeError:
            self._is_open = False

    def close(self):
        """Flush remaining frames and finalize the video file."""
        if self._process:
            try:
                self._process.stdin.close()
            except OSError:
                # ffmpeg already exited; closing the pipe can raise on flush.
                pass
            self._process.wait()
        self._is_open = False

    @property
    def is_open(self) -> bool:
        """Open while close() hasn't been called and ffmpeg is alive."""
        return self._is_open and self._process.poll() is None
class MultiOutput(Output):
    """
    Fan frames out to several outputs simultaneously.

    Useful for recording while showing a live preview.
    """

    def __init__(self, outputs: list):
        self.outputs = outputs

    def write(self, frame: np.ndarray, t: float):
        """Forward the frame to every sink that is still open."""
        for sink in self.outputs:
            if sink.is_open:
                sink.write(frame, t)

    def close(self):
        """Close all wrapped outputs."""
        for sink in self.outputs:
            sink.close()

    @property
    def is_open(self) -> bool:
        """Open while at least one wrapped output remains open."""
        return any(sink.is_open for sink in self.outputs)
class NullOutput(Output):
    """
    Discard frames while counting them (for benchmarking).
    """

    def __init__(self):
        self._is_open = True
        self.frame_count = 0

    def write(self, frame: np.ndarray, t: float):
        """Count the frame and drop it."""
        self.frame_count += 1

    def close(self):
        """Mark the sink as closed."""
        self._is_open = False

    @property
    def is_open(self) -> bool:
        """Open until close() is called."""
        return self._is_open
class PipeOutput(Output):
    """
    Pipe frames directly to mpv.

    Launches mpv with the rawvideo demuxer on the first write and streams
    rgb24 frames to its stdin; an optional audio source plays via ffplay.
    """

    def __init__(self, size: Tuple[int, int], fps: float = 30, audio_source: str = None):
        """
        Args:
            size: (width, height) every frame is resized to before piping.
            fps: Frame rate advertised to mpv's rawvideo demuxer.
            audio_source: Optional audio file/URL played through ffplay.
        """
        self.size = size
        self.fps = fps
        self.audio_source = audio_source
        self._is_open = True
        self._process = None
        self._audio_process = None
        self._started = False

    def _start(self):
        """Start mpv and audio on first frame (idempotent)."""
        if self._started:
            return
        self._started = True
        import sys
        w, h = self.size
        # Start mpv
        cmd = [
            "mpv", "--no-cache",
            "--demuxer=rawvideo",
            f"--demuxer-rawvideo-w={w}",
            f"--demuxer-rawvideo-h={h}",
            "--demuxer-rawvideo-mp-format=rgb24",
            f"--demuxer-rawvideo-fps={self.fps}",
            "--title=Streaming",
            "-"
        ]
        print(f"Starting mpv: {w}x{h} @ {self.fps}fps", file=sys.stderr)
        self._process = subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
        )
        # Start audio
        if self.audio_source:
            audio_cmd = [
                "ffplay", "-nodisp", "-autoexit", "-loglevel", "quiet",
                str(self.audio_source)
            ]
            print(f"Starting audio: {self.audio_source}", file=sys.stderr)
            self._audio_process = subprocess.Popen(
                audio_cmd,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )

    def write(self, frame: np.ndarray, t: float):
        """Write one frame to mpv, resizing/normalizing as needed."""
        if not self._is_open:
            return
        self._start()
        # Check mpv still running (user may have quit)
        if self._process.poll() is not None:
            self._is_open = False
            return
        # Resize if needed
        if frame.shape[1] != self.size[0] or frame.shape[0] != self.size[1]:
            import cv2
            frame = cv2.resize(frame, self.size)
        # Ensure contiguous uint8 - mpv expects exactly w*h*3 raw bytes.
        if frame.dtype != np.uint8:
            frame = np.clip(frame, 0, 255).astype(np.uint8)
        if not frame.flags['C_CONTIGUOUS']:
            frame = np.ascontiguousarray(frame)
        try:
            self._process.stdin.write(frame.tobytes())
            self._process.stdin.flush()
        except BrokenPipeError:
            self._is_open = False

    def close(self):
        """Close mpv and audio processes."""
        if self._process:
            try:
                self._process.stdin.close()
            except OSError:
                # Pipe already broken (mpv exited) - nothing left to flush.
                pass
            self._process.terminate()
            self._process.wait()
        if self._audio_process:
            self._audio_process.terminate()
            self._audio_process.wait()
        self._is_open = False

    @property
    def is_open(self) -> bool:
        """Open until close() is called or the mpv process exits."""
        if self._process and self._process.poll() is not None:
            self._is_open = False
        return self._is_open

846
streaming/pipeline.py Normal file
View File

@@ -0,0 +1,846 @@
"""
Streaming pipeline executor.
Directly executes compiled sexp recipes frame-by-frame.
No adapter layer - frames and analysis flow through the DAG.
"""
import sys
import time
import numpy as np
from pathlib import Path
from typing import Dict, List, Any, Optional
from dataclasses import dataclass, field
from .sources import VideoSource
from .audio import StreamingAudioAnalyzer
from .output import DisplayOutput, FileOutput
from .sexp_interp import SexpInterpreter
@dataclass
class FrameContext:
    """Context passed through the pipeline for each frame."""
    t: float  # Current time in seconds
    energy: float = 0.0  # Current audio energy level
    is_beat: bool = False  # True on frames where a beat is detected
    beat_count: int = 0  # Running count of beats seen so far
    analysis: Dict[str, Any] = field(default_factory=dict)  # Named analysis tracks for bindings
class StreamingPipeline:
"""
Executes a compiled sexp recipe as a streaming pipeline.
Frames flow through the DAG directly - no adapter needed.
Each node is evaluated lazily when its output is requested.
"""
def __init__(self, compiled_recipe, recipe_dir: Path = None, fps: float = 30, seed: int = 42,
             output_size: tuple = None):
    """
    Build a streaming pipeline from a compiled sexp recipe.

    Args:
        compiled_recipe: Compiled recipe exposing a ``nodes`` list of dicts.
        recipe_dir: Base directory for resolving relative media paths.
        fps: Target frame rate for sources and playback.
        seed: Base RNG seed; each SCAN node derives its own from it.
        output_size: (width, height) override; defaults to the first
            video source's size once sources are opened.
    """
    self.recipe = compiled_recipe
    self.recipe_dir = recipe_dir or Path(".")
    self.fps = fps
    self.seed = seed
    # Build node lookup
    self.nodes = {n['id']: n for n in compiled_recipe.nodes}
    # Runtime state
    self.sources: Dict[str, VideoSource] = {}
    self.audio_analyzer: Optional[StreamingAudioAnalyzer] = None
    self.audio_source_path: Optional[str] = None
    # Sexp interpreter for expressions
    self.interp = SexpInterpreter()
    # Scan state (node_id -> current value)
    self.scan_state: Dict[str, Any] = {}
    self.scan_emit: Dict[str, Any] = {}
    # SLICE_ON state
    self.slice_on_acc: Dict[str, Any] = {}
    self.slice_on_result: Dict[str, Any] = {}
    # Frame cache for current timestep (cleared each frame)
    self._frame_cache: Dict[str, np.ndarray] = {}
    # Context for current frame
    self.ctx = FrameContext(t=0.0)
    # Output size (w, h) - set after sources are initialized
    self._output_size = output_size
    # Initialize in dependency order: sources must exist before the
    # output-size fallback below can inspect them.
    self._init_sources()
    self._init_scans()
    self._init_slice_on()
    # Set output size from first source if not specified
    if self._output_size is None and self.sources:
        first_source = next(iter(self.sources.values()))
        self._output_size = first_source._size
def _init_sources(self):
"""Initialize video and audio sources."""
for node in self.recipe.nodes:
if node.get('type') == 'SOURCE':
config = node.get('config', {})
path = config.get('path')
if path:
full_path = (self.recipe_dir / path).resolve()
suffix = full_path.suffix.lower()
if suffix in ('.mp4', '.webm', '.mov', '.avi', '.mkv'):
if not full_path.exists():
print(f"Warning: video not found: {full_path}", file=sys.stderr)
continue
self.sources[node['id']] = VideoSource(
str(full_path),
target_fps=self.fps
)
elif suffix in ('.mp3', '.wav', '.flac', '.ogg', '.m4a', '.aac'):
if not full_path.exists():
print(f"Warning: audio not found: {full_path}", file=sys.stderr)
continue
self.audio_source_path = str(full_path)
self.audio_analyzer = StreamingAudioAnalyzer(str(full_path))
def _init_scans(self):
"""Initialize scan nodes with their initial state."""
import random
seed_offset = 0
for node in self.recipe.nodes:
if node.get('type') == 'SCAN':
config = node.get('config', {})
# Create RNG for this scan
scan_seed = config.get('seed', self.seed + seed_offset)
rng = random.Random(scan_seed)
seed_offset += 1
# Evaluate initial value
init_expr = config.get('init', 0)
init_value = self.interp.eval(init_expr, {})
self.scan_state[node['id']] = {
'value': init_value,
'rng': rng,
'config': config,
}
# Compute initial emit
self._update_scan_emit(node['id'])
def _update_scan_emit(self, node_id: str):
"""Update the emit value for a scan."""
state = self.scan_state[node_id]
config = state['config']
emit_expr = config.get('emit_expr', config.get('emit', None))
if emit_expr is None:
# No emit expression - emit the value directly
self.scan_emit[node_id] = state['value']
return
# Build environment from state
env = {}
if isinstance(state['value'], dict):
env.update(state['value'])
else:
env['acc'] = state['value']
env['beat_count'] = self.ctx.beat_count
env['time'] = self.ctx.t
# Set RNG for interpreter
self.interp.rng = state['rng']
self.scan_emit[node_id] = self.interp.eval(emit_expr, env)
def _step_scan(self, node_id: str):
"""Step a scan forward on beat."""
state = self.scan_state[node_id]
config = state['config']
step_expr = config.get('step_expr', config.get('step', None))
if step_expr is None:
return
# Build environment
env = {}
if isinstance(state['value'], dict):
env.update(state['value'])
else:
env['acc'] = state['value']
env['beat_count'] = self.ctx.beat_count
env['time'] = self.ctx.t
# Set RNG
self.interp.rng = state['rng']
# Evaluate step
new_value = self.interp.eval(step_expr, env)
state['value'] = new_value
# Update emit
self._update_scan_emit(node_id)
def _init_slice_on(self):
"""Initialize SLICE_ON nodes."""
for node in self.recipe.nodes:
if node.get('type') == 'SLICE_ON':
config = node.get('config', {})
init = config.get('init', {})
self.slice_on_acc[node['id']] = dict(init)
# Evaluate initial state
self._eval_slice_on(node['id'])
def _eval_slice_on(self, node_id: str):
"""Evaluate a SLICE_ON node's Lambda."""
node = self.nodes[node_id]
config = node.get('config', {})
fn = config.get('fn')
videos = config.get('videos', [])
if not fn:
return
acc = self.slice_on_acc[node_id]
n_videos = len(videos)
# Set up environment
self.interp.globals['videos'] = list(range(n_videos))
try:
from .sexp_interp import eval_slice_on_lambda
result = eval_slice_on_lambda(
fn, acc, self.ctx.beat_count, 0, 1,
list(range(n_videos)), self.interp
)
self.slice_on_result[node_id] = result
# Update accumulator
if 'acc' in result:
self.slice_on_acc[node_id] = result['acc']
except Exception as e:
print(f"SLICE_ON eval error: {e}", file=sys.stderr)
def _on_beat(self):
"""Called when a beat is detected."""
self.ctx.beat_count += 1
# Step all scans
for node_id in self.scan_state:
self._step_scan(node_id)
# Step all SLICE_ON nodes
for node_id in self.slice_on_acc:
self._eval_slice_on(node_id)
def _get_frame(self, node_id: str) -> Optional[np.ndarray]:
"""
Get the output frame for a node at current time.
Recursively evaluates inputs as needed.
Results are cached for the current timestep.
"""
if node_id in self._frame_cache:
return self._frame_cache[node_id]
node = self.nodes.get(node_id)
if not node:
return None
node_type = node.get('type')
if node_type == 'SOURCE':
frame = self._eval_source(node)
elif node_type == 'SEGMENT':
frame = self._eval_segment(node)
elif node_type == 'EFFECT':
frame = self._eval_effect(node)
elif node_type == 'SLICE_ON':
frame = self._eval_slice_on_frame(node)
else:
# Unknown node type - try to pass through input
inputs = node.get('inputs', [])
frame = self._get_frame(inputs[0]) if inputs else None
self._frame_cache[node_id] = frame
return frame
def _eval_source(self, node: dict) -> Optional[np.ndarray]:
"""Evaluate a SOURCE node."""
source = self.sources.get(node['id'])
if source:
return source.read_frame(self.ctx.t)
return None
def _eval_segment(self, node: dict) -> Optional[np.ndarray]:
"""Evaluate a SEGMENT node (time segment of source)."""
inputs = node.get('inputs', [])
if not inputs:
return None
config = node.get('config', {})
start = config.get('start', 0)
duration = config.get('duration')
# Resolve any bindings
if isinstance(start, dict):
start = self._resolve_binding(start) if start.get('_binding') else 0
if isinstance(duration, dict):
duration = self._resolve_binding(duration) if duration.get('_binding') else None
# Adjust time for segment
t_local = self.ctx.t + (start if isinstance(start, (int, float)) else 0)
if duration and isinstance(duration, (int, float)):
t_local = t_local % duration # Loop within segment
# Get source frame at adjusted time
source_id = inputs[0]
source = self.sources.get(source_id)
if source:
return source.read_frame(t_local)
return self._get_frame(source_id)
def _eval_effect(self, node: dict) -> Optional[np.ndarray]:
    """
    Evaluate an EFFECT node: apply a named effect to its input frame(s).

    Supported effects: rotate, zoom, invert, hue_shift, blend,
    blend_multi, ripple. An unrecognized effect name passes the first
    input frame through unchanged.

    Returns:
        The processed frame, or None when no input produced a frame.
    """
    import cv2
    inputs = node.get('inputs', [])
    config = node.get('config', {})
    effect_name = config.get('effect')
    # Get input frame(s)
    input_frames = [self._get_frame(inp) for inp in inputs]
    input_frames = [f for f in input_frames if f is not None]
    if not input_frames:
        return None
    frame = input_frames[0]
    # Resolve bindings in config
    params = self._resolve_config(config)
    # Apply effect based on name
    if effect_name == 'rotate':
        angle = params.get('angle', 0)
        if abs(angle) > 0.5:  # skip near-zero rotations
            h, w = frame.shape[:2]
            center = (w // 2, h // 2)
            matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
            frame = cv2.warpAffine(frame, matrix, (w, h))
    elif effect_name == 'zoom':
        amount = params.get('amount', 1.0)
        if abs(amount - 1.0) > 0.01:
            frame = self._apply_zoom(frame, amount)
    elif effect_name == 'invert':
        amount = params.get('amount', 0)
        if amount > 0.01:
            # Partial inversion: crossfade between original and negative
            inverted = 255 - frame
            frame = cv2.addWeighted(frame, 1 - amount, inverted, amount, 0)
    elif effect_name == 'hue_shift':
        degrees = params.get('degrees', 0)
        if abs(degrees) > 1:
            # OpenCV's hue channel spans 0-179, i.e. degrees / 2
            hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
            hsv[:, :, 0] = (hsv[:, :, 0].astype(int) + int(degrees / 2)) % 180
            frame = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
    elif effect_name == 'blend':
        # Two-input crossfade; falls through to input 0 if only one input.
        if len(input_frames) >= 2:
            opacity = params.get('opacity', 0.5)
            frame = cv2.addWeighted(input_frames[0], 1 - opacity,
                                    input_frames[1], opacity, 0)
    elif effect_name == 'blend_multi':
        weights = params.get('weights', [])
        if len(input_frames) > 1 and weights:
            # Weighted sum in float32 to avoid uint8 overflow
            h, w = input_frames[0].shape[:2]
            result = np.zeros((h, w, 3), dtype=np.float32)
            for f, wt in zip(input_frames, weights):
                if f is not None and wt > 0.001:
                    if f.shape[:2] != (h, w):
                        f = cv2.resize(f, (w, h))
                    result += f.astype(np.float32) * wt
            frame = np.clip(result, 0, 255).astype(np.uint8)
    elif effect_name == 'ripple':
        amp = params.get('amplitude', 0)
        if amp > 1:
            frame = self._apply_ripple(frame, amp,
                                       params.get('center_x', 0.5),
                                       params.get('center_y', 0.5),
                                       params.get('frequency', 8),
                                       params.get('decay', 2),
                                       params.get('speed', 5))
    return frame
def _eval_slice_on_frame(self, node: dict) -> Optional[np.ndarray]:
    """
    Evaluate a SLICE_ON node: composite its videos per the lambda result.

    Uses the most recent result stored by _eval_slice_on (layers plus
    per-layer compose weights); falls back to the first video while no
    result exists yet.

    Returns:
        Composited uint8 frame, or None when nothing can be rendered.
    """
    import cv2
    config = node.get('config', {})
    video_ids = config.get('videos', [])
    result = self.slice_on_result.get(node['id'], {})
    if not result:
        # No result yet - return first video
        if video_ids:
            return self._get_frame(video_ids[0])
        return None
    # Get layers and compose info
    layers = result.get('layers', [])
    compose = result.get('compose', {})
    weights = compose.get('weights', [])
    if not layers or not weights:
        if video_ids:
            return self._get_frame(video_ids[0])
        return None
    # Get frames for each layer
    frames = []
    for i, layer in enumerate(layers):
        video_idx = layer.get('video', i)
        if video_idx < len(video_ids):
            frame = self._get_frame(video_ids[video_idx])
            # Apply layer effects (zoom is the only one handled here)
            effects = layer.get('effects', [])
            for eff in effects:
                eff_name = eff.get('effect')
                # Effect name may arrive as a sexp Symbol; unwrap to str
                if hasattr(eff_name, 'name'):
                    eff_name = eff_name.name
                if eff_name == 'zoom':
                    zoom_amt = eff.get('amount', 1.0)
                    if frame is not None:
                        frame = self._apply_zoom(frame, zoom_amt)
            frames.append(frame)
        else:
            # Out-of-range video index: keep list aligned with weights
            frames.append(None)
    # Composite with weights - use consistent output size
    if self._output_size:
        w, h = self._output_size
    else:
        # Fallback to first non-None frame size
        for f in frames:
            if f is not None:
                h, w = f.shape[:2]
                break
        else:
            return None
    # Weighted sum in float32 to avoid uint8 overflow
    output = np.zeros((h, w, 3), dtype=np.float32)
    for frame, weight in zip(frames, weights):
        if frame is None or weight < 0.001:
            continue
        # Resize to output size
        if frame.shape[1] != w or frame.shape[0] != h:
            frame = cv2.resize(frame, (w, h))
        output += frame.astype(np.float32) * weight
    # Normalize only when weights don't already sum to ~1
    total_weight = sum(wt for wt in weights if wt > 0.001)
    if total_weight > 0 and abs(total_weight - 1.0) > 0.01:
        output /= total_weight
    return np.clip(output, 0, 255).astype(np.uint8)
def _resolve_config(self, config: dict) -> dict:
"""Resolve bindings in effect config to actual values."""
resolved = {}
for key, value in config.items():
if key in ('effect', 'effect_path', 'effect_cid', 'effects_registry',
'analysis_refs', 'inputs', 'cid'):
continue
if isinstance(value, dict) and value.get('_binding'):
resolved[key] = self._resolve_binding(value)
elif isinstance(value, dict) and value.get('_expr'):
resolved[key] = self._resolve_expr(value)
else:
resolved[key] = value
return resolved
def _resolve_binding(self, binding: dict) -> Any:
    """Resolve a binding to its current value.

    Lookup order: live scan emissions, then pre-computed analysis
    tracks, then the current audio energy as a fallback.  An optional
    ``range`` [lo, hi] linearly remaps a numeric value (assumes the raw
    value is normalized 0-1 -- TODO confirm for analysis tracks).

    Args:
        binding: Dict with 'source', optional 'feature' (default
            'values') and optional 'range' [lo, hi].

    Returns:
        The resolved (and possibly range-mapped) value.
    """
    source_id = binding.get('source')
    feature = binding.get('feature', 'values')
    range_map = binding.get('range')
    # Get raw value from scan or analysis
    if source_id in self.scan_emit:
        value = self.scan_emit[source_id]
    elif source_id in self.ctx.analysis:
        data = self.ctx.analysis[source_id]
        if isinstance(data, dict):
            raw = data.get(feature, data.get('values', [0]))
            # Robustness fix: the feature may be a scalar or an empty
            # sequence; the previous unconditional [0] raised TypeError
            # on scalars and IndexError on empty lists mid-frame.
            if isinstance(raw, (list, tuple)):
                value = raw[0] if raw else 0
            else:
                value = raw
        else:
            value = data
    else:
        # Fallback to live audio energy
        value = self.ctx.energy
    # Scans can emit dicts (e.g. hue state); pull the requested feature
    if isinstance(value, dict) and feature in value:
        value = value[feature]
    # Apply linear range mapping to numeric values only
    if range_map and isinstance(value, (int, float)):
        lo, hi = range_map
        value = lo + value * (hi - lo)
    return value
def _resolve_expr(self, expr: dict) -> Any:
    """Evaluate a compiled expression against the live context.

    The environment exposes the audio-driven context variables plus the
    current emission of every scan; scan ids overwrite the base names
    on collision (same precedence as before).
    """
    env = {
        'energy': self.ctx.energy,
        'beat_count': self.ctx.beat_count,
        't': self.ctx.t,
    }
    # Scan emissions are addressable by their (short-form) scan id.
    env.update(self.scan_emit)
    # The compiled form is wrapped under '_expr'; fall back to the dict
    # itself when the wrapper is absent.
    return self.interp.eval(expr.get('_expr', expr), env)
def _apply_zoom(self, frame: np.ndarray, amount: float) -> np.ndarray:
    """Zoom a frame in (amount > 1) or out (amount < 1) about its center.

    Amounts within ~1% of 1.0 are treated as a no-op, as are degenerate
    amounts whose computed crop/scale size would collapse to zero.
    """
    import cv2
    h, w = frame.shape[:2]
    if amount > 1.01:
        # Zoom in: crop a centered window and scale it back up.
        crop_w = int(w / amount)
        crop_h = int(h / amount)
        if crop_w > 0 and crop_h > 0:
            left = (w - crop_w) // 2
            top = (h - crop_h) // 2
            window = frame[top:top + crop_h, left:left + crop_w]
            return cv2.resize(window, (w, h))
    elif amount < 0.99:
        # Zoom out: shrink the frame and letterbox it on a black canvas.
        small_w = int(w * amount)
        small_h = int(h * amount)
        if small_w > 0 and small_h > 0:
            small = cv2.resize(frame, (small_w, small_h))
            canvas = np.zeros((h, w, 3), dtype=np.uint8)
            left = (w - small_w) // 2
            top = (h - small_h) // 2
            canvas[top:top + small_h, left:left + small_w] = small
            return canvas
    # No-op zone or degenerate size: return the frame untouched.
    return frame
def _apply_ripple(self, frame: np.ndarray, amplitude: float,
                  cx: float, cy: float, frequency: float,
                  decay: float, speed: float) -> np.ndarray:
    """Displace pixels radially with a time-animated sine ripple.

    The ripple emanates from the normalized center (cx, cy), attenuates
    exponentially with distance (scaled by the larger frame dimension)
    and animates using the context clock ``self.ctx.t``.
    """
    import cv2
    h, w = frame.shape[:2]
    # Per-pixel coordinate grids
    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    # Offsets from the ripple center, in pixels
    dx = xs - w * cx
    dy = ys - h * cy
    dist = np.sqrt(dx ** 2 + dy ** 2)
    # Radial sine displacement, damped exponentially with distance
    phase = self.ctx.t * speed
    ripple = amplitude * np.sin(dist / frequency - phase) * np.exp(-dist * decay / max(w, h))
    # Push each pixel along its radial direction
    theta = np.arctan2(dy, dx)
    map_x = (xs + ripple * np.cos(theta)).astype(np.float32)
    map_y = (ys + ripple * np.sin(theta)).astype(np.float32)
    return cv2.remap(frame, map_x, map_y, cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)
def _find_output_node(self) -> Optional[str]:
    """Return the id of the final output node.

    A MUX node, when present, always wins.  Otherwise the last EFFECT
    node appearing after a SLICE_ON is used; None when neither exists.
    """
    nodes = self.recipe.nodes
    for node in nodes:
        if node.get('type') == 'MUX':
            return node['id']
    # No MUX: take the last EFFECT that follows a SLICE_ON.
    seen_slice_on = False
    candidate = None
    for node in nodes:
        kind = node.get('type')
        if kind == 'SLICE_ON':
            seen_slice_on = True
        elif seen_slice_on and kind == 'EFFECT':
            candidate = node['id']
    return candidate
def render_frame(self, t: float) -> Optional[np.ndarray]:
    """Render a single frame at time t.

    Refreshes the live audio context (energy and rising-edge beat
    detection), then evaluates the recipe's output node and resizes the
    result to the configured output size.

    Args:
        t: Timeline position in seconds.

    Returns:
        Rendered frame, or None when no output node produced one.
    """
    # Clear frame cache
    self._frame_cache.clear()
    # Update context
    self.ctx.t = t
    # Update audio analysis
    if self.audio_analyzer:
        self.audio_analyzer.set_time(t)
        energy = self.audio_analyzer.get_energy()
        is_beat = self.audio_analyzer.get_beat()
        # Beat edge detection: fire handlers only on the rising edge
        was_beat = self.ctx.is_beat
        self.ctx.energy = energy
        self.ctx.is_beat = is_beat
        if is_beat and not was_beat:
            self._on_beat()
        # Store in analysis dict so bindings can reference live values
        self.ctx.analysis['live_energy'] = {'values': [energy]}
        self.ctx.analysis['live_beat'] = {'values': [1.0 if is_beat else 0.0]}
    # Find output node and render
    output_node = self._find_output_node()
    if output_node:
        frame = self._get_frame(output_node)
        # Normalize to output size
        if frame is not None and self._output_size:
            w, h = self._output_size
            if frame.shape[1] != w or frame.shape[0] != h:
                import cv2
                frame = cv2.resize(frame, (w, h))
        return frame
    return None
def run(self, output: str = "preview", duration: float = None):
    """
    Run the pipeline.

    Renders frames sequentially at the pipeline fps and writes them to
    the chosen output, printing progress to stderr.  Output and sources
    are always closed, even on KeyboardInterrupt.

    Args:
        output: "preview", filename, or Output object
        duration: Duration in seconds (default: audio duration or 60s)
    """
    # Determine duration
    if duration is None:
        if self.audio_analyzer:
            duration = self.audio_analyzer.duration
        else:
            duration = 60.0
    # Create output
    if output == "preview":
        # Get frame size from first source (square default as fallback)
        first_source = next(iter(self.sources.values()), None)
        if first_source:
            w, h = first_source._size
        else:
            w, h = 720, 720
        out = DisplayOutput(size=(w, h), fps=self.fps, audio_source=self.audio_source_path)
    elif isinstance(output, str):
        first_source = next(iter(self.sources.values()), None)
        if first_source:
            w, h = first_source._size
        else:
            w, h = 720, 720
        out = FileOutput(output, size=(w, h), fps=self.fps, audio_source=self.audio_source_path)
    else:
        # Caller supplied a ready-made Output object
        out = output
    frame_time = 1.0 / self.fps
    n_frames = int(duration * self.fps)
    print(f"Streaming: {len(self.sources)} sources -> {output}", file=sys.stderr)
    print(f"Duration: {duration:.1f}s, {n_frames} frames @ {self.fps}fps", file=sys.stderr)
    start_time = time.time()
    frame_count = 0
    try:
        for frame_num in range(n_frames):
            t = frame_num * frame_time
            frame = self.render_frame(t)
            if frame is not None:
                out.write(frame, t)
                frame_count += 1
            # Progress report every 50 frames
            if frame_num % 50 == 0:
                elapsed = time.time() - start_time
                fps = frame_count / elapsed if elapsed > 0 else 0
                pct = 100 * frame_num / n_frames
                print(f"\r{pct:5.1f}% | {fps:5.1f} fps | frame {frame_num}/{n_frames}",
                      end="", file=sys.stderr)
    except KeyboardInterrupt:
        print("\nInterrupted", file=sys.stderr)
    finally:
        # Always release the output and every video source
        out.close()
        for src in self.sources.values():
            src.close()
    elapsed = time.time() - start_time
    avg_fps = frame_count / elapsed if elapsed > 0 else 0
    print(f"\nCompleted: {frame_count} frames in {elapsed:.1f}s ({avg_fps:.1f} fps avg)",
          file=sys.stderr)
def run_pipeline(recipe_path: str, output: str = "preview",
                 duration: float = None, fps: float = None):
    """
    Compile a sexp recipe and execute it through the streaming pipeline.

    No adapter layer - directly executes the compiled recipe.

    Args:
        recipe_path: Path to the .sexp recipe file.
        output: "preview", a filename, or an Output object.
        duration: Seconds to render (None = pipeline default).
        fps: Frame rate override (None = recipe's encoding fps, or 30).
    """
    import sys
    from pathlib import Path
    # Make the artdag package importable relative to this file
    sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag"))
    from artdag.sexp.compiler import compile_string

    path = Path(recipe_path)
    compiled = compile_string(path.read_text(), {}, recipe_dir=path.parent)
    frame_rate = fps or compiled.encoding.get('fps', 30)
    pipeline = StreamingPipeline(compiled, recipe_dir=path.parent, fps=frame_rate)
    pipeline.run(output=output, duration=duration)
def run_pipeline_piped(recipe_path: str, duration: float = None, fps: float = None):
    """
    Run pipeline and pipe directly to mpv with audio.

    Raw RGB24 frames are written to mpv's stdin; the recipe's audio (if
    any) is played in parallel via ffplay, so A/V sync is best-effort.

    Args:
        recipe_path: Path to .sexp recipe file.
        duration: Seconds to render (None = audio duration or 60s).
        fps: Frame rate override (None = recipe fps, or 30).
    """
    import subprocess
    from pathlib import Path
    import sys
    # Make the artdag package importable relative to this file
    sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag"))
    from artdag.sexp.compiler import compile_string
    recipe_path = Path(recipe_path)
    recipe_text = recipe_path.read_text()
    compiled = compile_string(recipe_text, {}, recipe_dir=recipe_path.parent)
    pipeline = StreamingPipeline(
        compiled,
        recipe_dir=recipe_path.parent,
        fps=fps or compiled.encoding.get('fps', 30),
    )
    # Get frame info (square default when no sources)
    first_source = next(iter(pipeline.sources.values()), None)
    if first_source:
        w, h = first_source._size
    else:
        w, h = 720, 720
    # Determine duration
    if duration is None:
        if pipeline.audio_analyzer:
            duration = pipeline.audio_analyzer.duration
        else:
            duration = 60.0
    actual_fps = fps or compiled.encoding.get('fps', 30)
    n_frames = int(duration * actual_fps)
    frame_time = 1.0 / actual_fps
    print(f"Streaming {n_frames} frames @ {actual_fps}fps to mpv", file=sys.stderr)
    # Start mpv reading raw RGB24 frames from stdin
    mpv_cmd = [
        "mpv", "--no-cache",
        "--demuxer=rawvideo",
        f"--demuxer-rawvideo-w={w}",
        f"--demuxer-rawvideo-h={h}",
        "--demuxer-rawvideo-mp-format=rgb24",
        f"--demuxer-rawvideo-fps={actual_fps}",
        "--title=Streaming Pipeline",
        "-"
    ]
    mpv = subprocess.Popen(mpv_cmd, stdin=subprocess.PIPE, stderr=subprocess.DEVNULL)
    # Start audio playback in parallel if available
    audio_proc = None
    if pipeline.audio_source_path:
        audio_cmd = ["ffplay", "-nodisp", "-autoexit", "-loglevel", "quiet",
                     pipeline.audio_source_path]
        audio_proc = subprocess.Popen(audio_cmd, stdout=subprocess.DEVNULL,
                                      stderr=subprocess.DEVNULL)
    try:
        import cv2
        for frame_num in range(n_frames):
            if mpv.poll() is not None:
                break  # mpv closed
            t = frame_num * frame_time
            frame = pipeline.render_frame(t)
            if frame is not None:
                # Ensure consistent frame size
                if frame.shape[1] != w or frame.shape[0] != h:
                    frame = cv2.resize(frame, (w, h))
                # mpv expects one contiguous raw buffer per frame
                if not frame.flags['C_CONTIGUOUS']:
                    frame = np.ascontiguousarray(frame)
                try:
                    mpv.stdin.write(frame.tobytes())
                    mpv.stdin.flush()
                except BrokenPipeError:
                    # Viewer window was closed mid-stream
                    break
    except KeyboardInterrupt:
        pass
    finally:
        # Tear down viewer, audio player and video sources
        if mpv.stdin:
            mpv.stdin.close()
        mpv.terminate()
        if audio_proc:
            audio_proc.terminate()
        for src in pipeline.sources.values():
            src.close()
if __name__ == "__main__":
    import argparse
    # CLI entry point: choose between piped-mpv mode and pipeline output.
    parser = argparse.ArgumentParser(description="Run sexp recipe through streaming pipeline")
    parser.add_argument("recipe", help="Path to .sexp recipe file")
    parser.add_argument("-o", "--output", default="pipe",
                        help="Output: 'pipe' (mpv), 'preview', or filename (default: pipe)")
    parser.add_argument("-d", "--duration", type=float, default=None,
                        help="Duration in seconds (default: audio duration)")
    parser.add_argument("--fps", type=float, default=None,
                        help="Frame rate (default: from recipe)")
    args = parser.parse_args()
    if args.output == "pipe":
        # "pipe" streams raw frames straight into mpv with parallel audio.
        run_pipeline_piped(args.recipe, duration=args.duration, fps=args.fps)
    else:
        run_pipeline(args.recipe, output=args.output, duration=args.duration, fps=args.fps)

470
streaming/recipe_adapter.py Normal file
View File

@@ -0,0 +1,470 @@
"""
Adapter to run sexp recipes through the streaming compositor.
Bridges the gap between:
- Existing recipe format (sexp files with stages, effects, analysis)
- Streaming compositor (sources, effect chains, compositor config)
"""
import sys
from pathlib import Path
from typing import Dict, List, Any, Optional
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag"))
from .compositor import StreamingCompositor
from .sources import VideoSource
from .audio import FileAudioAnalyzer
class RecipeAdapter:
    """
    Adapts a compiled sexp recipe to run through the streaming compositor.

    Compiles the recipe on construction, extracts its sources/effects/
    analyzers, and can build a StreamingCompositor that replays the
    recipe's effect pipeline with live audio bindings.

    Example:
        adapter = RecipeAdapter("effects/quick_test.sexp")
        adapter.run(output="preview", duration=60)
    """

    def __init__(
        self,
        recipe_path: str,
        params: Dict[str, Any] = None,
        backend: str = "numpy",
    ):
        """
        Load and prepare a recipe for streaming.

        Args:
            recipe_path: Path to .sexp recipe file
            params: Parameter overrides
            backend: "numpy" or "glsl"
        """
        self.recipe_path = Path(recipe_path)
        self.recipe_dir = self.recipe_path.parent
        self.params = params or {}
        self.backend = backend
        # Compile recipe eagerly so errors surface at construction time
        self._compile()

    def _compile(self):
        """Compile the recipe and extract structure."""
        from artdag.sexp.compiler import compile_string
        recipe_text = self.recipe_path.read_text()
        self.compiled = compile_string(recipe_text, self.params, recipe_dir=self.recipe_dir)
        # Extract key info
        self.sources = {}           # node id -> source file path
        self.effects_registry = {}  # effect_name -> effect file path
        self.analyzers = {}         # node id -> analyzer info
        # Walk nodes to find sources and structure
        # nodes is a list in CompiledRecipe
        for node in self.compiled.nodes:
            node_type = node.get("type", "")
            if node_type == "SOURCE":
                config = node.get("config", {})
                path = config.get("path")
                if path:
                    # Source paths are relative to the recipe directory
                    self.sources[node["id"]] = self.recipe_dir / path
            elif node_type == "ANALYZE":
                config = node.get("config", {})
                self.analyzers[node["id"]] = {
                    "analyzer": config.get("analyzer"),
                    "path": config.get("analyzer_path"),
                }
        # Get effects registry from compiled recipe
        # registry has 'effects' sub-dict
        effects_dict = self.compiled.registry.get("effects", {})
        for name, info in effects_dict.items():
            if info.get("path"):
                self.effects_registry[name] = Path(info["path"])

    def run_analysis(self) -> Dict[str, Any]:
        """
        Run analysis phase (energy, beats, etc.).

        Returns:
            Dict of analysis track name -> {times, values, duration}
        """
        print(f"Running analysis...", file=sys.stderr)
        # Use existing planner's analysis execution
        from artdag.sexp.planner import create_plan
        analysis_data = {}

        def on_analysis(node_id: str, results: dict):
            # Callback invoked by the planner per analysis node
            analysis_data[node_id] = results
            print(f" {node_id[:16]}...: {len(results.get('times', []))} samples", file=sys.stderr)

        # Create plan (runs analysis as side effect)
        plan = create_plan(
            self.compiled,
            inputs={},
            recipe_dir=self.recipe_dir,
            on_analysis=on_analysis,
        )
        # Also store named analysis tracks
        for name, data in plan.analysis.items():
            analysis_data[name] = data
        return analysis_data

    def build_compositor(
        self,
        analysis_data: Dict[str, Any] = None,
        fps: float = None,
    ) -> StreamingCompositor:
        """
        Build a streaming compositor from the recipe.

        This is a simplified version that handles common patterns.
        Complex recipes may need manual configuration.

        Args:
            analysis_data: Pre-computed analysis data
            fps: Frame rate override (None = recipe encoding fps, or 30)

        Returns:
            Configured StreamingCompositor
        """
        # Extract video and audio sources in SLICE_ON input order
        video_sources = []
        audio_source = None
        # Find audio source first (first audio-suffixed source wins)
        for node_id, path in self.sources.items():
            suffix = path.suffix.lower()
            if suffix in ('.mp3', '.wav', '.flac', '.ogg', '.m4a', '.aac'):
                audio_source = str(path)
                break
        # Find SLICE_ON node to get correct video order
        slice_on_inputs = None
        for node in self.compiled.nodes:
            if node.get('type') == 'SLICE_ON':
                # Use 'videos' config key which has the correct order
                config = node.get('config', {})
                slice_on_inputs = config.get('videos', [])
                break
        if slice_on_inputs:
            # Trace each SLICE_ON input back to its SOURCE
            node_lookup = {n['id']: n for n in self.compiled.nodes}

            def trace_to_source(node_id, visited=None):
                """Trace a node back to its SOURCE, return source path."""
                if visited is None:
                    visited = set()
                if node_id in visited:
                    # Cycle guard
                    return None
                visited.add(node_id)
                node = node_lookup.get(node_id)
                if not node:
                    return None
                if node.get('type') == 'SOURCE':
                    return self.sources.get(node_id)
                # Recurse through inputs (depth-first, first hit wins)
                for inp in node.get('inputs', []):
                    result = trace_to_source(inp, visited)
                    if result:
                        return result
                return None

            # Build video_sources in SLICE_ON input order
            for inp_id in slice_on_inputs:
                source_path = trace_to_source(inp_id)
                if source_path:
                    suffix = source_path.suffix.lower()
                    if suffix in ('.mp4', '.webm', '.mov', '.avi', '.mkv'):
                        video_sources.append(str(source_path))
        # Fallback to definition order if no SLICE_ON
        if not video_sources:
            for node_id, path in self.sources.items():
                suffix = path.suffix.lower()
                if suffix in ('.mp4', '.webm', '.mov', '.avi', '.mkv'):
                    video_sources.append(str(path))
        if not video_sources:
            raise ValueError("No video sources found in recipe")
        # Build effect chains - use live audio bindings (matching video_sources count)
        effects_per_source = self._build_streaming_effects(n_sources=len(video_sources))
        # Build compositor config from recipe
        compositor_config = self._extract_compositor_config(analysis_data)
        return StreamingCompositor(
            sources=video_sources,
            effects_per_source=effects_per_source,
            compositor_config=compositor_config,
            analysis_data=analysis_data or {},
            backend=self.backend,
            recipe_dir=self.recipe_dir,
            fps=fps or self.compiled.encoding.get("fps", 30),
            audio_source=audio_source,
        )

    def _build_streaming_effects(self, n_sources: int = None) -> List[List[Dict]]:
        """
        Build effect chains for streaming with live audio bindings.

        Replicates the recipe's effect pipeline:
        - Per source: rotate, zoom, invert, hue_shift, ascii_art
        - All driven by live_energy and live_beat
        """
        if n_sources is None:
            # Default to the number of video sources in the recipe
            n_sources = len([p for p in self.sources.values()
                             if p.suffix.lower() in ('.mp4', '.webm', '.mov', '.avi', '.mkv')])
        effects_per_source = []
        for i in range(n_sources):
            # Alternate rotation direction per source
            rot_dir = 1 if i % 2 == 0 else -1
            effects = [
                # Rotate - energy drives angle
                {
                    "effect": "rotate",
                    "effect_path": str(self.effects_registry.get("rotate", "")),
                    "angle": {
                        "_binding": True,
                        "source": "live_energy",
                        "feature": "values",
                        "range": [0, 45 * rot_dir],
                    },
                },
                # Zoom - energy drives amount (alternate in/out per source)
                {
                    "effect": "zoom",
                    "effect_path": str(self.effects_registry.get("zoom", "")),
                    "amount": {
                        "_binding": True,
                        "source": "live_energy",
                        "feature": "values",
                        "range": [1.0, 1.5] if i % 2 == 0 else [1.0, 0.7],
                    },
                },
                # Invert - beat triggers
                {
                    "effect": "invert",
                    "effect_path": str(self.effects_registry.get("invert", "")),
                    "amount": {
                        "_binding": True,
                        "source": "live_beat",
                        "feature": "values",
                        "range": [0, 1],
                    },
                },
                # Hue shift - energy drives hue
                {
                    "effect": "hue_shift",
                    "effect_path": str(self.effects_registry.get("hue_shift", "")),
                    "degrees": {
                        "_binding": True,
                        "source": "live_energy",
                        "feature": "values",
                        "range": [0, 180],
                    },
                },
                # ASCII art - energy drives char size, beat triggers mix
                {
                    "effect": "ascii_art",
                    "effect_path": str(self.effects_registry.get("ascii_art", "")),
                    "char_size": {
                        "_binding": True,
                        "source": "live_energy",
                        "feature": "values",
                        "range": [4, 32],
                    },
                    "mix": {
                        "_binding": True,
                        "source": "live_beat",
                        "feature": "values",
                        "range": [0, 1],
                    },
                },
            ]
            effects_per_source.append(effects)
        return effects_per_source

    def _extract_effects(self) -> List[List[Dict]]:
        """Extract effect chains for each source (legacy, pre-computed analysis)."""
        # Simplified: find EFFECT nodes and their configs
        effects_per_source = []
        for node_id, path in self.sources.items():
            if path.suffix.lower() not in ('.mp4', '.webm', '.mov', '.avi', '.mkv'):
                continue
            # Find effects that depend on this source
            # This is simplified - real implementation would trace the DAG
            effects = []
            for node in self.compiled.nodes:
                if node.get("type") == "EFFECT":
                    config = node.get("config", {})
                    effect_name = config.get("effect")
                    if effect_name and effect_name in self.effects_registry:
                        effect_config = {
                            "effect": effect_name,
                            "effect_path": str(self.effects_registry[effect_name]),
                        }
                        # Copy only effect params (filter out internal fields)
                        internal_fields = (
                            "effect", "effect_path", "cid", "effect_cid",
                            "effects_registry", "analysis_refs", "inputs",
                        )
                        for k, v in config.items():
                            if k not in internal_fields:
                                effect_config[k] = v
                        effects.append(effect_config)
                        break  # One effect per source for now
            effects_per_source.append(effects)
        return effects_per_source

    def _extract_compositor_config(self, analysis_data: Dict) -> Dict:
        """Extract compositor configuration.

        NOTE(review): analysis_data is currently unused here - confirm
        whether weights were meant to come from it.
        """
        # Look for blend_multi or similar composition nodes
        for node in self.compiled.nodes:
            if node.get("type") == "EFFECT":
                config = node.get("config", {})
                if config.get("effect") == "blend_multi":
                    return {
                        "mode": config.get("mode", "alpha"),
                        "weights": config.get("weights", []),
                    }
        # Default: equal blend across all video sources
        n_sources = len([p for p in self.sources.values()
                         if p.suffix.lower() in ('.mp4', '.webm', '.mov', '.avi', '.mkv')])
        return {
            "mode": "alpha",
            "weights": [1.0 / n_sources] * n_sources if n_sources > 0 else [1.0],
        }

    def run(
        self,
        output: str = "preview",
        duration: float = None,
        fps: float = None,
    ):
        """
        Run the recipe through streaming compositor.

        Everything streams: video frames read on-demand, audio analyzed in real-time.
        No pre-computation.

        Args:
            output: "preview", filename, or Output object
            duration: Duration in seconds (default: audio duration)
            fps: Frame rate (default from recipe, or 30)
        """
        # Build compositor with recipe executor for full pipeline
        # NOTE(review): StreamingRecipeExecutor is imported but unused;
        # the SexpStreamingExecutor below is used instead.
        from .recipe_executor import StreamingRecipeExecutor
        compositor = self.build_compositor(analysis_data={}, fps=fps)
        # Use audio duration if not specified
        if duration is None:
            if compositor._audio_analyzer:
                duration = compositor._audio_analyzer.duration
                print(f"Using audio duration: {duration:.1f}s", file=sys.stderr)
            else:
                # Live mode - run until quit
                print("Live mode - press 'q' to quit", file=sys.stderr)
        # Create sexp executor that interprets the recipe
        from .sexp_executor import SexpStreamingExecutor
        executor = SexpStreamingExecutor(self.compiled, seed=42)
        compositor.run(output=output, duration=duration, recipe_executor=executor)
def run_recipe(
    recipe_path: str,
    output: str = "preview",
    duration: float = None,
    params: Dict = None,
    fps: float = None,
):
    """
    Run a recipe through the streaming compositor.

    Everything streams in real time (video frames, audio analysis);
    nothing is pre-computed, so playback starts immediately.

    Example:
        run_recipe("effects/quick_test.sexp", output="preview", duration=30)
        run_recipe("effects/quick_test.sexp", output="preview", fps=5)  # lower fps for slow systems
    """
    RecipeAdapter(recipe_path, params=params).run(
        output=output, duration=duration, fps=fps)
def run_recipe_piped(
    recipe_path: str,
    duration: float = None,
    params: Dict = None,
    fps: float = None,
):
    """
    Run recipe and pipe directly to mpv.

    Args:
        recipe_path: Path to .sexp recipe file.
        duration: Seconds to render (None = audio duration / live mode).
        params: Recipe parameter overrides.
        fps: Frame rate override (None = recipe fps, or 30).
    """
    from .output import PipeOutput
    adapter = RecipeAdapter(recipe_path, params=params)
    compositor = adapter.build_compositor(analysis_data={}, fps=fps)
    # Get frame size from the first source (square default as fallback)
    if compositor.sources:
        first_source = compositor.sources[0]
        w, h = first_source._size
    else:
        w, h = 720, 720
    actual_fps = fps or adapter.compiled.encoding.get('fps', 30)
    # Create pipe output (raw frames to mpv, audio alongside)
    pipe_out = PipeOutput(
        size=(w, h),
        fps=actual_fps,
        audio_source=compositor._audio_source
    )
    # Create executor that interprets the compiled recipe per-frame
    from .sexp_executor import SexpStreamingExecutor
    executor = SexpStreamingExecutor(adapter.compiled, seed=42)
    # Run with pipe output
    compositor.run(output=pipe_out, duration=duration, recipe_executor=executor)
if __name__ == "__main__":
    import argparse
    # CLI entry point: choose between piped-mpv mode and compositor output.
    parser = argparse.ArgumentParser(description="Run sexp recipe with streaming compositor")
    parser.add_argument("recipe", help="Path to .sexp recipe file")
    parser.add_argument("-o", "--output", default="pipe",
                        help="Output: 'pipe' (mpv), 'preview', or filename (default: pipe)")
    parser.add_argument("-d", "--duration", type=float, default=None,
                        help="Duration in seconds (default: audio duration)")
    parser.add_argument("--fps", type=float, default=None,
                        help="Frame rate (default: from recipe)")
    args = parser.parse_args()
    if args.output == "pipe":
        # "pipe" streams raw frames straight into mpv with parallel audio.
        run_recipe_piped(args.recipe, duration=args.duration, fps=args.fps)
    else:
        run_recipe(args.recipe, output=args.output, duration=args.duration, fps=args.fps)

View File

@@ -0,0 +1,415 @@
"""
Streaming recipe executor.
Implements the full recipe logic for real-time streaming:
- Scans (state machines that evolve on beats)
- Process-pair template (two clips with sporadic effects, blended)
- Cycle-crossfade (dynamic composition cycling through video pairs)
"""
import random
import numpy as np
from typing import Dict, List, Any, Optional
from dataclasses import dataclass, field
@dataclass
class ScanState:
    """State for a scan (beat-driven state machine).

    value holds the scan's current payload - an int counter or a dict of
    sub-state depending on the scan type; rng is a private random stream
    so each scan evolves independently and deterministically per seed.
    """
    # Current payload (int counter or dict of sub-state)
    value: Any = 0
    # Per-scan RNG; callers seed it explicitly for determinism
    rng: random.Random = field(default_factory=random.Random)
class StreamingScans:
    """
    Real-time scan executor.

    Scans are state machines that evolve on each beat.
    They drive effect parameters like invert triggers, hue shifts, etc.
    Each scan has its own deterministically-seeded RNG, so a given
    master seed reproduces the same sequence of visual events.
    """

    def __init__(self, seed: int = 42, n_sources: int = 4):
        # Seed from which each scan derives its own RNG stream
        self.master_seed = seed
        self.n_sources = n_sources
        self.scans: Dict[str, ScanState] = {}
        self.beat_count = 0        # total beats seen so far
        self.current_time = 0.0    # frame clock, set externally each frame
        self.last_beat_time = 0.0  # time of previous beat (interval estimate)
        self._init_scans()

    def _init_scans(self):
        """Initialize all scans with their own RNG seeds."""
        scan_names = []
        # Per-pair scans (dynamic based on n_sources)
        for i in range(self.n_sources):
            scan_names.extend([
                f"inv_a_{i}", f"inv_b_{i}", f"hue_a_{i}", f"hue_b_{i}",
                f"ascii_a_{i}", f"ascii_b_{i}", f"pair_mix_{i}", f"pair_rot_{i}",
            ])
        # Global scans
        scan_names.extend(["whole_spin", "ripple_gate", "cycle"])
        for i, name in enumerate(scan_names):
            # Offset the seed by list position so every scan gets an
            # independent but reproducible random stream.
            rng = random.Random(self.master_seed + i)
            self.scans[name] = ScanState(value=self._init_value(name), rng=rng)

    def _init_value(self, name: str) -> Any:
        """Get initial value for a scan."""
        if name.startswith("inv_") or name.startswith("ascii_"):
            return 0  # Counter for remaining beats
        elif name.startswith("hue_"):
            return {"rem": 0, "hue": 0}
        elif name.startswith("pair_mix"):
            return {"rem": 0, "opacity": 0.5}
        elif name.startswith("pair_rot"):
            # Even pairs rotate clockwise, odd pairs counter-clockwise
            pair_idx = int(name.split("_")[-1])
            rot_dir = 1 if pair_idx % 2 == 0 else -1
            return {"beat": 0, "clen": 25, "dir": rot_dir, "angle": 0}
        elif name == "whole_spin":
            return {
                "phase": 0,               # 0 = waiting, 1 = spinning
                "beat": 0,                # beats into current phase
                "plen": 20,               # beats in this phase
                "dir": 1,                 # spin direction
                "total_angle": 0.0,       # cumulative angle after all spins
                "spin_start_angle": 0.0,  # angle when current spin started
                "spin_start_time": 0.0,   # time when current spin started
                "spin_end_time": 0.0,     # estimated time when spin ends
            }
        elif name == "ripple_gate":
            return {"rem": 0, "cx": 0.5, "cy": 0.5}
        elif name == "cycle":
            return {"cycle": 0, "beat": 0, "clen": 60}
        return 0

    def on_beat(self):
        """Update all scans on a beat."""
        self.beat_count += 1
        # Estimate beat interval from last two beats (0.5s default)
        beat_interval = self.current_time - self.last_beat_time if self.last_beat_time > 0 else 0.5
        self.last_beat_time = self.current_time
        for name, state in self.scans.items():
            state.value = self._step_scan(name, state.value, state.rng, beat_interval)

    def _step_scan(self, name: str, value: Any, rng: random.Random, beat_interval: float = 0.5) -> Any:
        """Step a scan forward by one beat.

        Pure with respect to ``value`` (always returns a fresh value);
        consumes draws from ``rng`` - draw order is part of the behavior.
        """
        # Invert scan: 10% chance, lasts 1-5 beats
        if name.startswith("inv_"):
            if value > 0:
                return value - 1
            elif rng.random() < 0.1:
                return rng.randint(1, 5)
            return 0
        # Hue scan: 10% chance, random hue 30-330, lasts 1-5 beats
        elif name.startswith("hue_"):
            if value["rem"] > 0:
                return {"rem": value["rem"] - 1, "hue": value["hue"]}
            elif rng.random() < 0.1:
                return {"rem": rng.randint(1, 5), "hue": rng.uniform(30, 330)}
            return {"rem": 0, "hue": 0}
        # ASCII scan: 5% chance, lasts 1-3 beats
        elif name.startswith("ascii_"):
            if value > 0:
                return value - 1
            elif rng.random() < 0.05:
                return rng.randint(1, 3)
            return 0
        # Pair mix: changes every 1-11 beats
        elif name.startswith("pair_mix"):
            if value["rem"] > 0:
                return {"rem": value["rem"] - 1, "opacity": value["opacity"]}
            return {"rem": rng.randint(1, 11), "opacity": rng.choice([0, 0.5, 1.0])}
        # Pair rotation: full rotation every 20-30 beats
        elif name.startswith("pair_rot"):
            beat = value["beat"]
            clen = value["clen"]
            dir_ = value["dir"]
            angle = value["angle"]
            if beat + 1 < clen:
                # Advance one step of a full 360-degree rotation
                new_angle = angle + dir_ * (360 / clen)
                return {"beat": beat + 1, "clen": clen, "dir": dir_, "angle": new_angle}
            else:
                # Cycle done: new random length, reversed direction
                return {"beat": 0, "clen": rng.randint(20, 30), "dir": -dir_, "angle": angle}
        # Whole spin: sporadic 720 degree spins (cumulative - stays rotated)
        elif name == "whole_spin":
            phase = value["phase"]
            beat = value["beat"]
            plen = value["plen"]
            dir_ = value["dir"]
            total_angle = value.get("total_angle", 0.0)
            spin_start_angle = value.get("spin_start_angle", 0.0)
            spin_start_time = value.get("spin_start_time", 0.0)
            spin_end_time = value.get("spin_end_time", 0.0)
            if phase == 1:
                # Currently spinning
                if beat + 1 < plen:
                    return {
                        "phase": 1, "beat": beat + 1, "plen": plen, "dir": dir_,
                        "total_angle": total_angle,
                        "spin_start_angle": spin_start_angle,
                        "spin_start_time": spin_start_time,
                        "spin_end_time": spin_end_time,
                    }
                else:
                    # Spin complete - update total_angle with final spin
                    new_total = spin_start_angle + dir_ * 720.0
                    return {
                        "phase": 0, "beat": 0, "plen": rng.randint(20, 40), "dir": dir_,
                        "total_angle": new_total,
                        "spin_start_angle": new_total,
                        "spin_start_time": self.current_time,
                        "spin_end_time": self.current_time,
                    }
            else:
                # Waiting phase
                if beat + 1 < plen:
                    return {
                        "phase": 0, "beat": beat + 1, "plen": plen, "dir": dir_,
                        "total_angle": total_angle,
                        "spin_start_angle": spin_start_angle,
                        "spin_start_time": spin_start_time,
                        "spin_end_time": spin_end_time,
                    }
                else:
                    # Start new spin; duration estimated from beat interval
                    new_dir = 1 if rng.random() < 0.5 else -1
                    new_plen = rng.randint(10, 25)
                    spin_duration = new_plen * beat_interval
                    return {
                        "phase": 1, "beat": 0, "plen": new_plen, "dir": new_dir,
                        "total_angle": total_angle,
                        "spin_start_angle": total_angle,
                        "spin_start_time": self.current_time,
                        "spin_end_time": self.current_time + spin_duration,
                    }
        # Ripple gate: 5% chance, lasts 1-20 beats
        elif name == "ripple_gate":
            if value["rem"] > 0:
                return {"rem": value["rem"] - 1, "cx": value["cx"], "cy": value["cy"]}
            elif rng.random() < 0.05:
                return {"rem": rng.randint(1, 20),
                        "cx": rng.uniform(0.1, 0.9),
                        "cy": rng.uniform(0.1, 0.9)}
            return {"rem": 0, "cx": 0.5, "cy": 0.5}
        # Cycle: track which video pair is active
        elif name == "cycle":
            beat = value["beat"]
            clen = value["clen"]
            cycle = value["cycle"]
            if beat + 1 < clen:
                return {"cycle": cycle, "beat": beat + 1, "clen": clen}
            else:
                # Move to next pair, vary cycle length (40-80 beats)
                # NOTE(review): modulo 4 is hard-coded; presumably should
                # track n_sources - confirm.
                return {"cycle": (cycle + 1) % 4, "beat": 0,
                        "clen": 40 + (self.beat_count * 7) % 41}
        return value

    def get_emit(self, name: str) -> float:
        """Get emitted value for a scan.

        Note: the "cycle" scan returns its raw state dict rather than a
        float; callers index into it directly.
        """
        value = self.scans[name].value
        if name.startswith("inv_") or name.startswith("ascii_"):
            return 1.0 if value > 0 else 0.0
        elif name.startswith("hue_"):
            return value["hue"] if value["rem"] > 0 else 0.0
        elif name.startswith("pair_mix"):
            return value["opacity"]
        elif name.startswith("pair_rot"):
            return value["angle"]
        elif name == "whole_spin":
            # Smooth time-based interpolation during spin
            phase = value.get("phase", 0)
            if phase == 1:
                # Currently spinning - interpolate based on time
                spin_start_time = value.get("spin_start_time", 0.0)
                spin_end_time = value.get("spin_end_time", spin_start_time + 1.0)
                spin_start_angle = value.get("spin_start_angle", 0.0)
                dir_ = value.get("dir", 1)
                duration = spin_end_time - spin_start_time
                if duration > 0:
                    progress = (self.current_time - spin_start_time) / duration
                    progress = max(0.0, min(1.0, progress))  # clamp to 0-1
                else:
                    progress = 1.0
                return spin_start_angle + progress * 720.0 * dir_
            else:
                # Not spinning - return cumulative angle
                return value.get("total_angle", 0.0)
        elif name == "ripple_gate":
            return 1.0 if value["rem"] > 0 else 0.0
        elif name == "cycle":
            return value
        return 0.0
class StreamingRecipeExecutor:
    """
    Executes a recipe in streaming mode.

    Implements:
    - process-pair: two video clips with opposite effects, blended
    - cycle-crossfade: dynamic cycling through video pairs
    - Final effects: whole-spin rotation, ripple

    All sporadic behavior is driven by StreamingScans, which is stepped
    on each detected beat (rising edge).
    """

    def __init__(self, n_sources: int = 4, seed: int = 42):
        self.n_sources = n_sources
        # Beat-driven state machines that drive all sporadic effects
        self.scans = StreamingScans(seed, n_sources=n_sources)
        self.last_beat_detected = False  # for rising-edge beat detection
        self.current_time = 0.0

    def on_frame(self, energy: float, is_beat: bool, t: float = 0.0):
        """Called each frame with current audio analysis.

        NOTE(review): energy is currently unused here; scans only react
        to beats.  Confirm whether energy-based stepping was intended.
        """
        self.current_time = t
        self.scans.current_time = t
        # Update scans on beat (rising edge only)
        if is_beat and not self.last_beat_detected:
            self.scans.on_beat()
        self.last_beat_detected = is_beat

    def get_effect_params(self, source_idx: int, clip: str, energy: float) -> Dict:
        """
        Get effect parameters for a source clip.

        Args:
            source_idx: Which video source (0-3)
            clip: "a" or "b" (each source has two clips)
            energy: Current audio energy (0-1)
        """
        suffix = f"_{source_idx}"
        # Rotation ranges alternate per source; "a" and "b" clips get
        # opposite rotation/zoom so the pair visually diverges.
        if source_idx % 2 == 0:
            rot_range = [0, 45] if clip == "a" else [0, -45]
            zoom_range = [1, 1.5] if clip == "a" else [1, 0.5]
        else:
            rot_range = [0, -45] if clip == "a" else [0, 45]
            zoom_range = [1, 0.5] if clip == "a" else [1, 1.5]
        return {
            "rotate_angle": rot_range[0] + energy * (rot_range[1] - rot_range[0]),
            "zoom_amount": zoom_range[0] + energy * (zoom_range[1] - zoom_range[0]),
            "invert_amount": self.scans.get_emit(f"inv_{clip}{suffix}"),
            "hue_degrees": self.scans.get_emit(f"hue_{clip}{suffix}"),
            "ascii_mix": 0,  # Disabled - too slow without GPU
            "ascii_char_size": 4 + energy * 28,  # 4-32
        }

    def get_pair_params(self, source_idx: int) -> Dict:
        """Get blend and rotation params for a video pair."""
        suffix = f"_{source_idx}"
        return {
            "blend_opacity": self.scans.get_emit(f"pair_mix{suffix}"),
            "pair_rotation": self.scans.get_emit(f"pair_rot{suffix}"),
        }

    def get_cycle_weights(self) -> List[float]:
        """Get blend weights for cycle-crossfade composition.

        The cycle is split into three sub-phases via phase3 = beat * 3:
        hold, crossfade, hold.  The active pair fades 0.9 -> 0.05 while
        its successor fades 0.05 -> 0.9; everyone else sits at 0.05.
        Weights are normalized to sum to 1.
        """
        # NOTE: get_emit("cycle") returns the raw state dict, not a float.
        cycle_state = self.scans.get_emit("cycle")
        active = cycle_state["cycle"]
        beat = cycle_state["beat"]
        clen = cycle_state["clen"]
        n = self.n_sources
        phase3 = beat * 3
        weights = []
        for p in range(n):
            prev = (p + n - 1) % n
            if active == p:
                # Active pair: hold high, then fade down
                if phase3 < clen:
                    w = 0.9
                elif phase3 < clen * 2:
                    w = 0.9 - ((phase3 - clen) / clen) * 0.85
                else:
                    w = 0.05
            elif active == prev:
                # Successor pair (active is p's predecessor): fade up
                if phase3 < clen:
                    w = 0.05
                elif phase3 < clen * 2:
                    w = 0.05 + ((phase3 - clen) / clen) * 0.85
                else:
                    w = 0.9
            else:
                w = 0.05
            weights.append(w)
        # Normalize
        total = sum(weights)
        if total > 0:
            weights = [w / total for w in weights]
        return weights

    def get_cycle_zooms(self) -> List[float]:
        """Get zoom amounts for cycle-crossfade.

        Mirrors get_cycle_weights' phases: the outgoing pair zooms in
        (1.0 -> 2.0) as it fades, the incoming pair starts big (3.0) and
        shrinks to 1.0; inactive pairs sit at 0.1.
        """
        cycle_state = self.scans.get_emit("cycle")
        active = cycle_state["cycle"]
        beat = cycle_state["beat"]
        clen = cycle_state["clen"]
        n = self.n_sources
        phase3 = beat * 3
        zooms = []
        for p in range(n):
            prev = (p + n - 1) % n
            if active == p:
                if phase3 < clen:
                    z = 1.0
                elif phase3 < clen * 2:
                    z = 1.0 + ((phase3 - clen) / clen) * 1.0
                else:
                    z = 0.1
            elif active == prev:
                if phase3 < clen:
                    z = 3.0  # Start big
                elif phase3 < clen * 2:
                    z = 3.0 - ((phase3 - clen) / clen) * 2.0  # Shrink to 1.0
                else:
                    z = 1.0
            else:
                z = 0.1
            zooms.append(z)
        return zooms

    def get_final_effects(self, energy: float) -> Dict:
        """Get final composition effects (whole-spin, ripple)."""
        # ripple_gate emits 1.0 while active, gating the amplitude
        ripple_gate = self.scans.get_emit("ripple_gate")
        ripple_state = self.scans.scans["ripple_gate"].value
        return {
            "whole_spin_angle": self.scans.get_emit("whole_spin"),
            "ripple_amplitude": ripple_gate * (5 + energy * 45),  # 5-50
            "ripple_cx": ripple_state["cx"],
            "ripple_cy": ripple_state["cy"],
        }

678
streaming/sexp_executor.py Normal file
View File

@@ -0,0 +1,678 @@
"""
Streaming S-expression executor.
Executes compiled sexp recipes in real-time by:
- Evaluating scan expressions on each beat
- Resolving bindings to get effect parameter values
- Applying effects frame-by-frame
- Evaluating SLICE_ON Lambda for cycle crossfade
"""
import random
import numpy as np
from typing import Dict, List, Any, Optional
from dataclasses import dataclass, field
from .sexp_interp import SexpInterpreter, eval_slice_on_lambda
@dataclass
class ScanState:
    """Runtime state for a scan."""
    # Id of the SCAN node in the compiled recipe graph.
    node_id: str
    # Optional human-readable scan name (used for lookups by name).
    name: Optional[str]
    # Current accumulator: a scalar, or a dict of named state variables.
    value: Any
    # Per-scan RNG so random expressions replay deterministically per seed.
    rng: random.Random
    # Compiled expression ASTs: initial value, per-beat step, and emit.
    init_expr: dict
    step_expr: dict
    emit_expr: dict
class ExprEvaluator:
    """
    Evaluates compiled expression ASTs.

    Expressions are dicts with:
    - _expr: True (marks as expression)
    - op: operation name
    - args: list of arguments
    - name: for 'var' ops
    - keys: for 'dict' ops
    """

    # Strict binary operators: both operands are evaluated eagerly.
    _BINARY = {
        '<': lambda a, b: a < b,
        '>': lambda a, b: a > b,
        '<=': lambda a, b: a <= b,
        '>=': lambda a, b: a >= b,
        '=': lambda a, b: a == b,
        '!=': lambda a, b: a != b,
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
        '/': lambda a, b: a / b,
        'mod': lambda a, b: a % b,
    }

    def __init__(self, rng: random.Random = None):
        self.rng = rng or random.Random()

    def eval(self, expr: Any, env: Dict[str, Any]) -> Any:
        """Evaluate an expression in the given environment."""
        if not isinstance(expr, dict):
            return expr  # literal value
        if not expr.get('_expr'):
            return expr  # plain data dict, not an expression

        op = expr.get('op')
        args = expr.get('args', [])

        if op == 'var':
            name = expr.get('name')
            if name in env:
                return env[name]
            raise KeyError(f"Unknown variable: {name}")
        if op == 'dict':
            keys = expr.get('keys', [])
            return dict(zip(keys, (self.eval(a, env) for a in args)))
        if op == 'get':
            obj = self.eval(args[0], env)
            key = args[1]
            return obj.get(key) if isinstance(obj, dict) else obj[key]
        if op == 'if':
            # Lazy: only the taken branch is evaluated.
            if self.eval(args[0], env):
                return self.eval(args[1], env)
            if len(args) > 2:
                return self.eval(args[2], env)
            return None

        binop = self._BINARY.get(op)
        if binop is not None:
            return binop(self.eval(args[0], env), self.eval(args[1], env))

        if op == 'rand':
            return self.rng.random()
        if op == 'rand-int':
            return self.rng.randint(self.eval(args[0], env), self.eval(args[1], env))
        if op == 'rand-range':
            return self.rng.uniform(self.eval(args[0], env), self.eval(args[1], env))

        # Logic ops short-circuit via generator expressions.
        if op == 'and':
            return all(self.eval(a, env) for a in args)
        if op == 'or':
            return any(self.eval(a, env) for a in args)
        if op == 'not':
            return not self.eval(args[0], env)

        raise ValueError(f"Unknown operation: {op}")
class SexpStreamingExecutor:
    """
    Executes a compiled sexp recipe in streaming mode.

    Reads scan definitions, effect chains, and bindings from the
    compiled recipe and executes them frame-by-frame.

    Fixes applied vs. the original:
    - The effect-node parameter resolver was named ``get_effect_params`` and
      was silently shadowed by the compositor-interface method of the same
      name defined later in the class; it is renamed
      ``get_effect_node_params`` so both are reachable.
    - Removed a dead ``_trace_effect_chain`` call in ``_find_clip_chains``
      and an unused ``beat_interval`` local in ``on_beat``.
    """

    def __init__(self, compiled_recipe, seed: int = 42):
        """
        Args:
            compiled_recipe: Compiled recipe object exposing a ``nodes`` list
                of node dicts (each with 'id', 'type', 'config', 'inputs').
            seed: Master seed for per-scan RNGs and the sexp interpreter.
        """
        self.recipe = compiled_recipe
        self.master_seed = seed
        # Node lookup by id.
        self.nodes = {n['id']: n for n in compiled_recipe.nodes}
        # Runtime state (must be initialized before _init_scans).
        self.beat_count = 0
        self.current_time = 0.0
        self.last_beat_time = 0.0
        self.last_beat_detected = False
        self.energy = 0.0
        # Scan runtime state and the latest emitted value per scan node.
        self.scans: Dict[str, "ScanState"] = {}
        self.scan_outputs: Dict[str, Any] = {}  # Current emit values by node_id
        self._init_scans()
        # SLICE_ON interpreter state for the cycle-crossfade Lambda.
        self.sexp_interp = SexpInterpreter(random.Random(seed))
        self._slice_on_lambda = None
        self._slice_on_acc = None
        self._slice_on_result = None  # Last evaluation result {layers, compose, acc}
        self._init_slice_on()

    def _init_slice_on(self):
        """Initialize SLICE_ON Lambda for cycle crossfade (first SLICE_ON node wins)."""
        for node in self.recipe.nodes:
            if node.get('type') == 'SLICE_ON':
                config = node.get('config', {})
                self._slice_on_lambda = config.get('fn')
                init = config.get('init', {})
                self._slice_on_acc = {
                    'cycle': init.get('cycle', 0),
                    'beat': init.get('beat', 0),
                    'clen': init.get('clen', 60),
                }
                # Evaluate once so weights/zooms are valid before the first beat.
                self._eval_slice_on()
                break

    def _eval_slice_on(self):
        """Evaluate the SLICE_ON Lambda with current state (best-effort)."""
        if not self._slice_on_lambda:
            return
        n = len(self._get_video_sources())
        videos = list(range(n))  # Placeholder video indices
        try:
            result = eval_slice_on_lambda(
                self._slice_on_lambda,
                self._slice_on_acc,
                self.beat_count,
                0.0,  # start time (not used for weights)
                1.0,  # end time (not used for weights)
                videos,
                self.sexp_interp,
            )
            self._slice_on_result = result
            # Carry the accumulator forward for the next beat.
            if 'acc' in result:
                self._slice_on_acc = result['acc']
        except Exception as e:
            # Deliberate best-effort: a broken Lambda must not kill the stream.
            import sys
            print(f"SLICE_ON eval error: {e}", file=sys.stderr)

    def _init_scans(self):
        """Initialize all scan nodes from the recipe."""
        seed_offset = 0
        for node in self.recipe.nodes:
            if node.get('type') == 'SCAN':
                node_id = node['id']
                config = node.get('config', {})
                # Unique RNG per scan keeps random streams reproducible.
                scan_seed = config.get('seed', self.master_seed + seed_offset)
                rng = random.Random(scan_seed)
                seed_offset += 1
                # Evaluate the initial accumulator value.
                init_expr = config.get('init', 0)
                evaluator = ExprEvaluator(rng)
                init_value = evaluator.eval(init_expr, {})
                self.scans[node_id] = ScanState(
                    node_id=node_id,
                    name=node.get('name'),
                    value=init_value,
                    rng=rng,
                    init_expr=init_expr,
                    step_expr=config.get('step_expr', {}),
                    emit_expr=config.get('emit_expr', {}),
                )
                # Compute the initial emit so outputs exist before any beat.
                self._update_emit(node_id)

    def _update_emit(self, node_id: str):
        """Re-evaluate a scan's emit expression and cache the result."""
        scan = self.scans[node_id]
        evaluator = ExprEvaluator(scan.rng)
        env = self._build_scan_env(scan)
        emit_value = evaluator.eval(scan.emit_expr, env)
        self.scan_outputs[node_id] = emit_value

    def _build_scan_env(self, scan: "ScanState") -> Dict[str, Any]:
        """Build environment for scan expression evaluation.

        Dict-valued state is splatted into the env; scalar state appears
        as 'acc'. Beat count and wall time are always available.
        """
        env = {}
        if isinstance(scan.value, dict):
            env.update(scan.value)
        else:
            env['acc'] = scan.value
        env['beat_count'] = self.beat_count
        env['time'] = self.current_time
        return env

    def on_beat(self):
        """Step every scan and the cycle state; called once per detected beat."""
        self.beat_count += 1
        self.last_beat_time = self.current_time
        # Step each scan: new accumulator, then refreshed emit.
        for node_id, scan in self.scans.items():
            evaluator = ExprEvaluator(scan.rng)
            env = self._build_scan_env(scan)
            new_value = evaluator.eval(scan.step_expr, env)
            scan.value = new_value
            self._update_emit(node_id)
        # Step the cycle state
        self._step_cycle()

    def on_frame(self, energy: float, is_beat: bool, t: float = 0.0):
        """Called each frame with audio analysis."""
        self.current_time = t
        self.energy = energy
        # Rising-edge detection: a sustained beat flag fires on_beat once.
        if is_beat and not self.last_beat_detected:
            self.on_beat()
        self.last_beat_detected = is_beat

    def resolve_binding(self, binding: dict) -> Any:
        """Resolve a binding to its current value.

        A binding is a dict with ``_binding: True``, a ``source`` node id,
        an optional ``feature`` key (default 'values') and an optional
        ``[lo, hi]`` ``range`` mapping. Non-binding values pass through.
        """
        if not isinstance(binding, dict) or not binding.get('_binding'):
            return binding
        source_id = binding.get('source')
        feature = binding.get('feature', 'values')
        range_map = binding.get('range')
        if source_id in self.scan_outputs:
            value = self.scan_outputs[source_id]
        else:
            # Might be an analyzer reference - use energy as fallback
            value = self.energy
        # Extract the requested feature from dict-valued emits.
        if isinstance(value, dict) and feature in value:
            value = value[feature]
        # Linearly map a normalized value into [lo, hi].
        if range_map and isinstance(value, (int, float)):
            lo, hi = range_map
            value = lo + value * (hi - lo)
        return value

    def get_effect_node_params(self, effect_node: dict) -> Dict[str, Any]:
        """Get resolved parameters for an effect node.

        Renamed from ``get_effect_params``: the compositor-interface method
        of that name (below) was shadowing this definition entirely.
        """
        config = effect_node.get('config', {})
        params = {}
        for key, value in config.items():
            # Skip internal bookkeeping fields.
            if key in ('effect', 'effect_path', 'effect_cid', 'effects_registry', 'analysis_refs'):
                continue
            params[key] = self.resolve_binding(value)
        return params

    def get_scan_value(self, name: str) -> Any:
        """Get scan output by name, or None when no scan has that name."""
        for node_id, scan in self.scans.items():
            if scan.name == name:
                return self.scan_outputs.get(node_id)
        return None

    def get_all_scan_values(self) -> Dict[str, Any]:
        """Get all named scan outputs (anonymous scans are omitted)."""
        result = {}
        for node_id, scan in self.scans.items():
            if scan.name:
                result[scan.name] = self.scan_outputs.get(node_id)
        return result

    # === Compositor interface methods ===

    def _get_video_sources(self) -> List[str]:
        """Get list of video source node IDs."""
        sources = []
        for node in self.recipe.nodes:
            if node.get('type') == 'SOURCE':
                sources.append(node['id'])
        # NOTE(review): assumes the final SOURCE is the audio track — the
        # intended check on file extensions was never implemented; confirm.
        return sources[:-1] if len(sources) > 1 else sources

    def _trace_effect_chain(self, start_id: str, stop_at_blend: bool = True) -> List[dict]:
        """Trace the EFFECT chain downstream of a node, in order.

        Follows whichever node consumes the current one as an input, up to
        a fixed depth of 20, optionally stopping at the first 'blend'.
        """
        chain = []
        current_id = start_id
        for _ in range(20):  # Max depth guard against cycles
            next_node = None
            for node in self.recipe.nodes:
                if current_id in node.get('inputs', []):
                    if node.get('type') == 'EFFECT':
                        effect_type = node.get('config', {}).get('effect')
                        chain.append(node)
                        if stop_at_blend and effect_type == 'blend':
                            return chain
                        next_node = node
                        break
                    elif node.get('type') == 'SEGMENT':
                        next_node = node
                        break
            if next_node is None:
                break
            current_id = next_node['id']
        return chain

    def _find_clip_chains(self, source_idx: int) -> tuple:
        """Find effect chains for clip A and B branching from a source's SEGMENT."""
        sources = self._get_video_sources()
        if source_idx >= len(sources):
            return [], []
        source_id = sources[source_idx]
        # Find the SEGMENT node fed by this source.
        segment_id = None
        for node in self.recipe.nodes:
            if node.get('type') == 'SEGMENT' and source_id in node.get('inputs', []):
                segment_id = node['id']
                break
        if not segment_id:
            return [], []
        # Collect the effect branches leaving the segment (clip A and clip B).
        chains = []
        for node in self.recipe.nodes:
            if segment_id in node.get('inputs', []) and node.get('type') == 'EFFECT':
                # Walk this branch forward until its pair-blend node.
                branch_chain = [node]
                current = node['id']
                for _ in range(10):
                    found = False
                    for n in self.recipe.nodes:
                        if current in n.get('inputs', []) and n.get('type') == 'EFFECT':
                            branch_chain.append(n)
                            if n.get('config', {}).get('effect') == 'blend':
                                break
                            current = n['id']
                            found = True
                            break
                    if not found:
                        break
                chains.append(branch_chain)
        # Return the first two branches as clip A and clip B.
        chain_a = chains[0] if len(chains) > 0 else []
        chain_b = chains[1] if len(chains) > 1 else []
        return chain_a, chain_b

    def get_effect_params(self, source_idx: int, clip: str, energy: float) -> Dict:
        """Get effect parameters for a source clip (compositor interface).

        Args:
            source_idx: Which video source.
            clip: 'a' or 'b'.
            energy: Current audio energy (0-1), substituted into bindings
                that reference analyzers rather than scans.
        """
        chain_a, chain_b = self._find_clip_chains(source_idx)
        chain = chain_a if clip == 'a' else chain_b
        # Defaults apply when an effect is absent from the chain.
        params = {
            "rotate_angle": 0,
            "zoom_amount": 1.0,
            "invert_amount": 0,
            "hue_degrees": 0,
            "ascii_mix": 0,
            "ascii_char_size": 8,
        }
        for eff in chain:
            config = eff.get('config', {})
            effect_type = config.get('effect')
            if effect_type == 'rotate':
                angle_binding = config.get('angle')
                if angle_binding:
                    if isinstance(angle_binding, dict) and angle_binding.get('_binding'):
                        # Analyzer-bound: drive with live energy across the range.
                        range_map = angle_binding.get('range')
                        if range_map:
                            lo, hi = range_map
                            params["rotate_angle"] = lo + energy * (hi - lo)
                        else:
                            params["rotate_angle"] = self.resolve_binding(angle_binding)
                    else:
                        params["rotate_angle"] = angle_binding if isinstance(angle_binding, (int, float)) else 0
            elif effect_type == 'zoom':
                amount_binding = config.get('amount')
                if amount_binding:
                    if isinstance(amount_binding, dict) and amount_binding.get('_binding'):
                        range_map = amount_binding.get('range')
                        if range_map:
                            lo, hi = range_map
                            params["zoom_amount"] = lo + energy * (hi - lo)
                        else:
                            params["zoom_amount"] = self.resolve_binding(amount_binding)
                    else:
                        params["zoom_amount"] = amount_binding if isinstance(amount_binding, (int, float)) else 1.0
            elif effect_type == 'invert':
                amount_binding = config.get('amount')
                if amount_binding:
                    val = self.resolve_binding(amount_binding)
                    params["invert_amount"] = val if isinstance(val, (int, float)) else 0
            elif effect_type == 'hue_shift':
                deg_binding = config.get('degrees')
                if deg_binding:
                    val = self.resolve_binding(deg_binding)
                    params["hue_degrees"] = val if isinstance(val, (int, float)) else 0
            elif effect_type == 'ascii_art':
                mix_binding = config.get('mix')
                if mix_binding:
                    val = self.resolve_binding(mix_binding)
                    params["ascii_mix"] = val if isinstance(val, (int, float)) else 0
                size_binding = config.get('char_size')
                if size_binding:
                    if isinstance(size_binding, dict) and size_binding.get('_binding'):
                        range_map = size_binding.get('range')
                        if range_map:
                            lo, hi = range_map
                            params["ascii_char_size"] = lo + energy * (hi - lo)
        return params

    def get_pair_params(self, source_idx: int) -> Dict:
        """Get blend and rotation params for a video pair (compositor interface)."""
        params = {
            "blend_opacity": 0.5,
            "pair_rotation": 0,
        }
        # Find the blend node terminating this source's clip-A chain.
        chain_a, _ = self._find_clip_chains(source_idx)
        blend_node = None
        for eff in reversed(chain_a):
            if eff.get('config', {}).get('effect') == 'blend':
                blend_node = eff
                break
        if blend_node:
            config = blend_node.get('config', {})
            opacity_binding = config.get('opacity')
            if opacity_binding:
                val = self.resolve_binding(opacity_binding)
                if isinstance(val, (int, float)):
                    params["blend_opacity"] = val
            # A rotate consuming the blend output is the pair rotation.
            blend_id = blend_node['id']
            for node in self.recipe.nodes:
                if blend_id in node.get('inputs', []) and node.get('type') == 'EFFECT':
                    if node.get('config', {}).get('effect') == 'rotate':
                        angle_binding = node.get('config', {}).get('angle')
                        if angle_binding:
                            val = self.resolve_binding(angle_binding)
                            if isinstance(val, (int, float)):
                                params["pair_rotation"] = val
                        break
        return params

    def _get_cycle_state(self) -> dict:
        """Get current cycle state, lazily seeded from the SLICE_ON init config."""
        if not hasattr(self, '_cycle_state'):
            for node in self.recipe.nodes:
                if node.get('type') == 'SLICE_ON':
                    init = node.get('config', {}).get('init', {})
                    self._cycle_state = {
                        'cycle': init.get('cycle', 0),
                        'beat': init.get('beat', 0),
                        'clen': init.get('clen', 60),
                    }
                    break
            else:
                self._cycle_state = {'cycle': 0, 'beat': 0, 'clen': 60}
        return self._cycle_state

    def _step_cycle(self):
        """Step the cycle state forward on beat by evaluating the SLICE_ON Lambda."""
        self._eval_slice_on()

    def get_cycle_weights(self) -> List[float]:
        """Get normalized blend weights for cycle-crossfade from the SLICE_ON result."""
        n = len(self._get_video_sources())
        if n == 0:
            return [1.0]
        if self._slice_on_result:
            compose = self._slice_on_result.get('compose', {})
            weights = compose.get('weights', [])
            if weights and len(weights) == n:
                total = sum(weights)
                if total > 0:
                    return [w / total for w in weights]
        # Fallback: equal weights when the Lambda gave nothing usable.
        return [1.0 / n] * n

    def get_cycle_zooms(self) -> List[float]:
        """Get per-layer zoom amounts for cycle-crossfade from the SLICE_ON result."""
        n = len(self._get_video_sources())
        if n == 0:
            return [1.0]
        # Pull the zoom effect's amount out of each layer's effect list.
        if self._slice_on_result:
            layers = self._slice_on_result.get('layers', [])
            if layers and len(layers) == n:
                zooms = []
                for layer in layers:
                    effects = layer.get('effects', [])
                    zoom_amt = 1.0
                    for eff in effects:
                        # The effect name may be a plain string or a Symbol-like object.
                        if eff.get('effect') == 'zoom' or (hasattr(eff.get('effect'), 'name') and eff.get('effect').name == 'zoom'):
                            zoom_amt = eff.get('amount', 1.0)
                            break
                    zooms.append(zoom_amt)
                return zooms
        # Fallback: no zoom.
        return [1.0] * n

    def _get_final_rotate_scan_id(self) -> Optional[str]:
        """Find (and cache) the scan id driving the final rotation after SLICE_ON."""
        if hasattr(self, '_final_rotate_scan_id'):
            return self._final_rotate_scan_id
        # Locate the SLICE_ON node's position in the node list.
        slice_on_idx = None
        for i, node in enumerate(self.recipe.nodes):
            if node.get('type') == 'SLICE_ON':
                slice_on_idx = i
                break
        # The first scan-bound rotate effect after it drives the whole-frame spin.
        if slice_on_idx is not None:
            for node in self.recipe.nodes[slice_on_idx + 1:]:
                if node.get('type') == 'EFFECT':
                    config = node.get('config', {})
                    if config.get('effect') == 'rotate':
                        angle_binding = config.get('angle', {})
                        if isinstance(angle_binding, dict) and angle_binding.get('_binding'):
                            self._final_rotate_scan_id = angle_binding.get('source')
                            return self._final_rotate_scan_id
        self._final_rotate_scan_id = None
        return None

    def get_final_effects(self, energy: float) -> Dict:
        """Get final composition effects (compositor interface)."""
        scan_values = self.get_all_scan_values()
        # Whole spin: read the scan bound to the final rotate effect.
        whole_spin = 0
        final_rotate_scan_id = self._get_final_rotate_scan_id()
        if final_rotate_scan_id and final_rotate_scan_id in self.scan_outputs:
            val = self.scan_outputs[final_rotate_scan_id]
            if isinstance(val, dict) and 'angle' in val:
                whole_spin = val['angle']
            elif isinstance(val, (int, float)):
                whole_spin = val
        # Ripple parameters come from named scans.
        ripple_gate = scan_values.get('ripple-gate', 0)
        ripple_cx = scan_values.get('ripple-cx', 0.5)
        ripple_cy = scan_values.get('ripple-cy', 0.5)
        if isinstance(ripple_gate, dict):
            ripple_gate = ripple_gate.get('gate', 0) if 'gate' in ripple_gate else 1
        return {
            "whole_spin_angle": whole_spin,
            "ripple_amplitude": ripple_gate * (5 + energy * 45),
            "ripple_cx": ripple_cx if isinstance(ripple_cx, (int, float)) else 0.5,
            "ripple_cy": ripple_cy if isinstance(ripple_cy, (int, float)) else 0.5,
        }

376
streaming/sexp_interp.py Normal file
View File

@@ -0,0 +1,376 @@
"""
S-expression interpreter for streaming execution.
Evaluates sexp expressions including:
- let bindings
- lambda definitions and calls
- Arithmetic, comparison, logic operators
- dict/list operations
- Random number generation
"""
import random
from typing import Any, Dict, List, Callable
from dataclasses import dataclass
@dataclass
class Lambda:
    """Runtime lambda value."""
    # Parameter names, in positional order.
    params: List[str]
    # Unevaluated body expression (sexp form or compiled dict).
    body: Any
    # Environment captured at definition time (lexical closure).
    closure: Dict[str, Any]
class Symbol:
    """A bare symbol name captured from a parsed s-expression.

    The interpreter resolves a Symbol by looking its ``name`` up in the
    current environment (then globals).
    """

    def __init__(self, name: str):
        self.name = name

    def __repr__(self):
        return "Symbol({})".format(self.name)
class SexpInterpreter:
    """
    Interprets S-expressions in real-time.
    Handles the full sexp language used in recipes.

    Supports literals, symbol lookup, compiled expression dicts, and raw
    list forms including the special forms let / lambda / if / dict plus
    arithmetic, comparison, logic, and collection operations.
    """
    def __init__(self, rng: random.Random = None):
        # RNG shared by any random operations in compiled expressions.
        self.rng = rng or random.Random()
        # Module-level bindings visible to every evaluation (e.g. 'videos').
        self.globals: Dict[str, Any] = {}

    def eval(self, expr: Any, env: Dict[str, Any] = None) -> Any:
        """Evaluate an expression in the given environment.

        Dispatches on the runtime type of *expr*: literal, Symbol, compiled
        dict, plain data dict, or list/tuple sexp form.
        """
        if env is None:
            env = {}
        # Literals pass through untouched.
        if isinstance(expr, (int, float, str, bool)) or expr is None:
            return expr
        # Symbol lookup: accept our own Symbol or any duck-typed Symbol
        # class coming from the parser module.
        if isinstance(expr, Symbol) or (hasattr(expr, 'name') and hasattr(expr, '__class__') and expr.__class__.__name__ == 'Symbol'):
            name = expr.name if hasattr(expr, 'name') else str(expr)
            if name in env:
                return env[name]
            if name in self.globals:
                return self.globals[name]
            raise NameError(f"Undefined symbol: {name}")
        # Compiled expression dict (from compiler).
        if isinstance(expr, dict):
            if expr.get('_expr'):
                return self._eval_compiled_expr(expr, env)
            # Plain dict - evaluate values that might be expressions.
            result = {}
            for k, v in expr.items():
                # Some keys should keep Symbol values as strings (effect names, modes).
                if k in ('effect', 'mode') and hasattr(v, 'name'):
                    result[k] = v.name
                else:
                    result[k] = self.eval(v, env)
            return result
        # Non-empty list expression (sexp form).
        if isinstance(expr, (list, tuple)) and len(expr) > 0:
            return self._eval_list(expr, env)
        # Empty list evaluates to an empty list.
        if isinstance(expr, (list, tuple)):
            return []
        return expr

    def _eval_compiled_expr(self, expr: dict, env: Dict[str, Any]) -> Any:
        """Evaluate a compiled expression dict ({'_expr': True, 'op': ..., 'args': ...})."""
        op = expr.get('op')
        args = expr.get('args', [])
        if op == 'var':
            # Environment wins over globals.
            name = expr.get('name')
            if name in env:
                return env[name]
            if name in self.globals:
                return self.globals[name]
            raise NameError(f"Undefined: {name}")
        elif op == 'dict':
            # Pair parallel 'keys' with evaluated args.
            keys = expr.get('keys', [])
            values = [self.eval(a, env) for a in args]
            return dict(zip(keys, values))
        elif op == 'get':
            # Key (args[1]) is taken literally, not evaluated.
            obj = self.eval(args[0], env)
            key = args[1]
            return obj.get(key) if isinstance(obj, dict) else obj[key]
        elif op == 'if':
            # Lazy branches: only the taken arm is evaluated.
            cond = self.eval(args[0], env)
            if cond:
                return self.eval(args[1], env)
            elif len(args) > 2:
                return self.eval(args[2], env)
            return None
        # Comparison
        elif op == '<':
            return self.eval(args[0], env) < self.eval(args[1], env)
        elif op == '>':
            return self.eval(args[0], env) > self.eval(args[1], env)
        elif op == '<=':
            return self.eval(args[0], env) <= self.eval(args[1], env)
        elif op == '>=':
            return self.eval(args[0], env) >= self.eval(args[1], env)
        elif op == '=':
            return self.eval(args[0], env) == self.eval(args[1], env)
        elif op == '!=':
            return self.eval(args[0], env) != self.eval(args[1], env)
        # Arithmetic
        elif op == '+':
            return self.eval(args[0], env) + self.eval(args[1], env)
        elif op == '-':
            return self.eval(args[0], env) - self.eval(args[1], env)
        elif op == '*':
            return self.eval(args[0], env) * self.eval(args[1], env)
        elif op == '/':
            return self.eval(args[0], env) / self.eval(args[1], env)
        elif op == 'mod':
            return self.eval(args[0], env) % self.eval(args[1], env)
        # Random (draws from the shared RNG)
        elif op == 'rand':
            return self.rng.random()
        elif op == 'rand-int':
            return self.rng.randint(self.eval(args[0], env), self.eval(args[1], env))
        elif op == 'rand-range':
            return self.rng.uniform(self.eval(args[0], env), self.eval(args[1], env))
        # Logic (short-circuits via generator expressions)
        elif op == 'and':
            return all(self.eval(a, env) for a in args)
        elif op == 'or':
            return any(self.eval(a, env) for a in args)
        elif op == 'not':
            return not self.eval(args[0], env)
        else:
            raise ValueError(f"Unknown op: {op}")

    def _eval_list(self, expr: list, env: Dict[str, Any]) -> Any:
        """Evaluate a list expression (sexp form): special form, operator, or call."""
        if len(expr) == 0:
            return []
        head = expr[0]
        # Resolve the head to an operator/function name when it is symbol-like.
        if isinstance(head, Symbol) or (hasattr(head, 'name') and hasattr(head, '__class__')):
            head_name = head.name if hasattr(head, 'name') else str(head)
        elif isinstance(head, str):
            head_name = head
        else:
            # Not a symbol - check if it's a data list or function call.
            if isinstance(head, dict):
                # List of dicts - evaluate each element as data.
                return [self.eval(item, env) for item in expr]
            # Otherwise evaluate as function call.
            fn = self.eval(head, env)
            args = [self.eval(a, env) for a in expr[1:]]
            return self._call(fn, args, env)
        # Special forms
        if head_name == 'let':
            return self._eval_let(expr, env)
        elif head_name in ('lambda', 'fn'):
            return self._eval_lambda(expr, env)
        elif head_name == 'if':
            return self._eval_if(expr, env)
        elif head_name == 'dict':
            return self._eval_dict(expr, env)
        elif head_name == 'get':
            # String keys fall back to attribute access on non-dicts.
            obj = self.eval(expr[1], env)
            key = self.eval(expr[2], env) if len(expr) > 2 else expr[2]
            if isinstance(key, str):
                return obj.get(key) if isinstance(obj, dict) else getattr(obj, key, None)
            return obj[key]
        elif head_name == 'len':
            return len(self.eval(expr[1], env))
        elif head_name == 'range':
            # (range n) -> 0..n-1; (range a b) -> a..b-1.
            start = self.eval(expr[1], env)
            end = self.eval(expr[2], env) if len(expr) > 2 else start
            if len(expr) == 2:
                return list(range(end))
            return list(range(start, end))
        elif head_name == 'map':
            fn = self.eval(expr[1], env)
            lst = self.eval(expr[2], env)
            return [self._call(fn, [x], env) for x in lst]
        elif head_name == 'mod':
            return self.eval(expr[1], env) % self.eval(expr[2], env)
        # Arithmetic
        elif head_name == '+':
            return self.eval(expr[1], env) + self.eval(expr[2], env)
        elif head_name == '-':
            # Unary minus when only one operand is given.
            if len(expr) == 2:
                return -self.eval(expr[1], env)
            return self.eval(expr[1], env) - self.eval(expr[2], env)
        elif head_name == '*':
            return self.eval(expr[1], env) * self.eval(expr[2], env)
        elif head_name == '/':
            return self.eval(expr[1], env) / self.eval(expr[2], env)
        # Comparison
        elif head_name == '<':
            return self.eval(expr[1], env) < self.eval(expr[2], env)
        elif head_name == '>':
            return self.eval(expr[1], env) > self.eval(expr[2], env)
        elif head_name == '<=':
            return self.eval(expr[1], env) <= self.eval(expr[2], env)
        elif head_name == '>=':
            return self.eval(expr[1], env) >= self.eval(expr[2], env)
        elif head_name == '=':
            return self.eval(expr[1], env) == self.eval(expr[2], env)
        # Logic (variadic, short-circuiting)
        elif head_name == 'and':
            return all(self.eval(a, env) for a in expr[1:])
        elif head_name == 'or':
            return any(self.eval(a, env) for a in expr[1:])
        elif head_name == 'not':
            return not self.eval(expr[1], env)
        # Function call
        else:
            # NOTE(review): `or` falls through to globals when the env value
            # is falsy, not only when it is absent — confirm intended.
            fn = env.get(head_name) or self.globals.get(head_name)
            if fn is None:
                raise NameError(f"Undefined function: {head_name}")
            args = [self.eval(a, env) for a in expr[1:]]
            return self._call(fn, args, env)

    def _eval_let(self, expr: list, env: Dict[str, Any]) -> Any:
        """Evaluate (let [bindings...] body).

        Bindings are name/value pairs evaluated sequentially, so later
        bindings can reference earlier ones.
        """
        bindings = expr[1]
        body = expr[2]
        # Child environment so bindings do not leak to the caller.
        new_env = dict(env)
        i = 0
        while i < len(bindings):
            name = bindings[i]
            if isinstance(name, Symbol) or hasattr(name, 'name'):
                name = name.name if hasattr(name, 'name') else str(name)
            value = self.eval(bindings[i + 1], new_env)
            new_env[name] = value
            i += 2
        return self.eval(body, new_env)

    def _eval_lambda(self, expr: list, env: Dict[str, Any]) -> Lambda:
        """Evaluate (lambda [params] body) into a Lambda closing over *env*."""
        params_expr = expr[1]
        body = expr[2]
        # Normalize parameter tokens to plain names.
        params = []
        for p in params_expr:
            if isinstance(p, Symbol) or hasattr(p, 'name'):
                params.append(p.name if hasattr(p, 'name') else str(p))
            else:
                params.append(str(p))
        return Lambda(params=params, body=body, closure=dict(env))

    def _eval_if(self, expr: list, env: Dict[str, Any]) -> Any:
        """Evaluate (if cond then else); the else arm is optional."""
        cond = self.eval(expr[1], env)
        if cond:
            return self.eval(expr[2], env)
        elif len(expr) > 3:
            return self.eval(expr[3], env)
        return None

    def _eval_dict(self, expr: list, env: Dict[str, Any]) -> dict:
        """Evaluate (dict :key val ...) into a plain dict."""
        result = {}
        i = 1
        while i < len(expr):
            key = expr[i]
            # Handle keyword syntax (:key) and Keyword objects.
            if hasattr(key, 'name'):
                key = key.name
            elif hasattr(key, '__class__') and key.__class__.__name__ == 'Keyword':
                key = str(key).lstrip(':')
            elif isinstance(key, str) and key.startswith(':'):
                key = key[1:]
            value = self.eval(expr[i + 1], env)
            result[key] = value
            i += 2
        return result

    def _call(self, fn: Any, args: List[Any], env: Dict[str, Any]) -> Any:
        """Call a function value: our Lambda, a parser Lambda, or a Python callable."""
        if isinstance(fn, Lambda):
            # Our own Lambda type: call env is closure + bound parameters.
            call_env = dict(fn.closure)
            for param, arg in zip(fn.params, args):
                call_env[param] = arg
            return self.eval(fn.body, call_env)
        elif hasattr(fn, 'params') and hasattr(fn, 'body'):
            # Lambda from parser (artdag.sexp.parser.Lambda): duck-typed,
            # and unlike ours the caller's env is also visible.
            call_env = dict(env)
            if hasattr(fn, 'closure') and fn.closure:
                call_env.update(fn.closure)
            # Normalize parameter tokens to plain names.
            params = []
            for p in fn.params:
                if hasattr(p, 'name'):
                    params.append(p.name)
                else:
                    params.append(str(p))
            for param, arg in zip(params, args):
                call_env[param] = arg
            return self.eval(fn.body, call_env)
        elif callable(fn):
            return fn(*args)
        else:
            raise TypeError(f"Not callable: {type(fn).__name__}")
def eval_slice_on_lambda(lambda_obj, acc: dict, i: int, start: float, end: float,
                         videos: list, interp: "SexpInterpreter" = None) -> dict:
    """
    Evaluate a SLICE_ON lambda function.

    Args:
        lambda_obj: The Lambda object from the compiled recipe
        acc: Current accumulator state
        i: Beat index
        start: Slice start time
        end: Slice end time
        videos: List of video inputs
        interp: Interpreter to use

    Returns:
        Dict with 'layers', 'compose', 'acc' keys
    """
    if interp is None:
        interp = SexpInterpreter()
    # Expose `videos` globally so forms like (len videos) resolve.
    interp.globals['videos'] = videos
    # Seed the call environment with the lambda's captured closure, if any.
    closure = getattr(lambda_obj, 'closure', None)
    env = dict(closure) if closure else {}
    env['videos'] = videos
    return interp._call(lambda_obj, [acc, i, start, end], env)

281
streaming/sources.py Normal file
View File

@@ -0,0 +1,281 @@
"""
Video and image sources with looping support.
"""
import numpy as np
import subprocess
import json
from pathlib import Path
from typing import Optional, Tuple
from abc import ABC, abstractmethod
class Source(ABC):
    """Abstract base class for frame sources.

    A source is addressed by absolute time rather than frame index;
    implementations decide how to map a time to a frame.
    """

    @abstractmethod
    def read_frame(self, t: float) -> np.ndarray:
        """Read frame at time t (with looping if needed)."""
        pass

    @property
    @abstractmethod
    def duration(self) -> float:
        """Source duration in seconds."""
        pass

    @property
    @abstractmethod
    def size(self) -> Tuple[int, int]:
        """Frame size as (width, height)."""
        pass

    @property
    @abstractmethod
    def fps(self) -> float:
        """Frames per second."""
        pass
class VideoSource(Source):
"""
Video file source with automatic looping.
Reads frames on-demand, seeking as needed. When time exceeds
duration, wraps around (loops).
"""
def __init__(self, path: str, target_fps: float = 30):
    """Open a video file for on-demand, looping decode.

    Args:
        path: Path to the video file.
        target_fps: Frame rate the decoder is asked to resample to.

    Raises:
        FileNotFoundError: If the file does not exist.
    """
    self.path = Path(path)
    self.target_fps = target_fps
    # Decode/buffer state has to exist before _probe(), which can fail.
    self._process: Optional[subprocess.Popen] = None
    self._current_start: Optional[float] = None
    self._frame_buffer: Optional[np.ndarray] = None
    self._buffer_time: Optional[float] = None
    # Metadata populated by _probe().
    self._duration = self._size = self._fps = None
    if not self.path.exists():
        raise FileNotFoundError(f"Video not found: {path}")
    self._probe()
def _probe(self):
"""Get video metadata."""
cmd = [
"ffprobe", "-v", "quiet",
"-print_format", "json",
"-show_format", "-show_streams",
str(self.path)
]
result = subprocess.run(cmd, capture_output=True, text=True)
data = json.loads(result.stdout)
# Get duration
self._duration = float(data["format"]["duration"])
# Get video stream info
for stream in data["streams"]:
if stream["codec_type"] == "video":
self._size = (int(stream["width"]), int(stream["height"]))
# Parse fps from r_frame_rate (e.g., "30/1" or "30000/1001")
fps_parts = stream.get("r_frame_rate", "30/1").split("/")
self._fps = float(fps_parts[0]) / float(fps_parts[1])
break
@property
def duration(self) -> float:
return self._duration
@property
def size(self) -> Tuple[int, int]:
return self._size
@property
def fps(self) -> float:
return self._fps
def _start_decode(self, start_time: float):
"""Start ffmpeg decode process from given time."""
if self._process:
try:
self._process.stdout.close()
except:
pass
self._process.terminate()
try:
self._process.wait(timeout=1)
except:
self._process.kill()
self._process.wait()
w, h = self._size
cmd = [
"ffmpeg", "-v", "quiet",
"-ss", str(start_time),
"-i", str(self.path),
"-f", "rawvideo",
"-pix_fmt", "rgb24",
"-r", str(self.target_fps),
"-"
]
self._process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
bufsize=w * h * 3 * 4, # Buffer a few frames
)
self._current_start = start_time
self._buffer_time = start_time
def read_frame(self, t: float) -> np.ndarray:
"""
Read frame at time t.
If t exceeds duration, wraps around (loops).
Seeks if needed, otherwise reads sequentially.
"""
# Wrap time for looping
t_wrapped = t % self._duration
# Check if we need to seek (loop point or large time jump)
need_seek = (
self._process is None or
self._buffer_time is None or
abs(t_wrapped - self._buffer_time) > 1.0 / self.target_fps * 2
)
if need_seek:
self._start_decode(t_wrapped)
# Read frame
w, h = self._size
frame_size = w * h * 3
# Try to read with retries for seek settling
for attempt in range(3):
raw = self._process.stdout.read(frame_size)
if len(raw) == frame_size:
break
# End of stream or seek not ready - restart from beginning
self._start_decode(0)
if len(raw) < frame_size:
# Still no data - return last frame or black
if self._frame_buffer is not None:
return self._frame_buffer.copy()
return np.zeros((h, w, 3), dtype=np.uint8)
frame = np.frombuffer(raw, dtype=np.uint8).reshape((h, w, 3))
self._frame_buffer = frame # Cache for fallback
self._buffer_time = t_wrapped + 1.0 / self.target_fps
return frame
def close(self):
"""Clean up resources."""
if self._process:
self._process.terminate()
self._process.wait()
self._process = None
def __del__(self):
self.close()
def __repr__(self):
return f"VideoSource({self.path.name}, {self._size[0]}x{self._size[1]}, {self._duration:.1f}s)"
class ImageSource(Source):
    """
    Static image source (returns the same frame for any time).

    Useful for backgrounds, overlays, etc.
    """

    def __init__(self, path: str):
        """
        Args:
            path: Path to an image file readable by OpenCV.

        Raises:
            FileNotFoundError: If the file does not exist.
            ValueError: If the file exists but cannot be decoded.
        """
        self.path = Path(path)
        if not self.path.exists():
            raise FileNotFoundError(f"Image not found: {path}")
        # Lazy import: OpenCV is only needed when an image source is used.
        import cv2
        frame = cv2.imread(str(self.path))
        if frame is None:
            # imread signals failure by returning None, not by raising;
            # without this check cvtColor crashes with an opaque error.
            raise ValueError(f"Could not decode image: {path}")
        self._frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        self._size = (self._frame.shape[1], self._frame.shape[0])

    @property
    def duration(self) -> float:
        return float('inf')  # Images last forever

    @property
    def size(self) -> Tuple[int, int]:
        return self._size

    @property
    def fps(self) -> float:
        return 30.0  # Arbitrary nominal rate

    def read_frame(self, t: float) -> np.ndarray:
        """Return a copy of the image; t is ignored."""
        return self._frame.copy()

    def __repr__(self):
        return f"ImageSource({self.path.name}, {self._size[0]}x{self._size[1]})"
class LiveSource(Source):
    """
    Live video capture source (webcam, capture card, etc.).

    The time parameter is ignored - read_frame always returns the latest
    captured frame.
    """

    def __init__(self, device: int = 0, size: Tuple[int, int] = (1280, 720), fps: float = 30):
        """
        Args:
            device: OpenCV capture device index.
            size: Requested (width, height); the driver may override it.
            fps: Requested capture rate; the driver may override it.

        Raises:
            RuntimeError: If the device cannot be opened.
        """
        import cv2
        self._cap = cv2.VideoCapture(device)
        # Fail fast: the old code configured and queried the device first
        # and only checked isOpened() at the end.
        if not self._cap.isOpened():
            raise RuntimeError(f"Could not open video device {device}")
        self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, size[0])
        self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, size[1])
        self._cap.set(cv2.CAP_PROP_FPS, fps)
        # Read back what the driver actually granted.
        self._size = (
            int(self._cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(self._cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        )
        self._fps = self._cap.get(cv2.CAP_PROP_FPS)

    @property
    def duration(self) -> float:
        return float('inf')  # Live - no duration

    @property
    def size(self) -> Tuple[int, int]:
        return self._size

    @property
    def fps(self) -> float:
        return self._fps

    def read_frame(self, t: float) -> np.ndarray:
        """Read latest frame (t is ignored for live sources)."""
        import cv2
        ret, frame = self._cap.read()
        if not ret:
            # Device glitch: return black rather than crash the stream.
            return np.zeros((self._size[1], self._size[0], 3), dtype=np.uint8)
        return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    def close(self):
        """Release the capture device (safe on partially-built instances)."""
        cap = getattr(self, '_cap', None)
        if cap is not None:
            cap.release()
            self._cap = None

    def __del__(self):
        self.close()

    def __repr__(self):
        return f"LiveSource({self._size[0]}x{self._size[1]}, {self._fps}fps)"

1081
streaming/stream_sexp.py Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,859 @@
"""
Fully Generic Streaming S-expression Interpreter.
The interpreter knows NOTHING about video, audio, or any domain.
All domain logic comes from primitives loaded via (require-primitives ...).
Built-in forms:
- Control: if, cond, let, let*, lambda, ->
- Arithmetic: +, -, *, /, mod, map-range
- Comparison: <, >, =, <=, >=, and, or, not
- Data: dict, get, list, nth, len, quote
- Random: rand, rand-int, rand-range
- Scan: bind (access scan state)
Everything else comes from primitives or effects.
Context (ctx) is passed explicitly to frame evaluation:
- ctx.t: current time
- ctx.frame-num: current frame number
- ctx.fps: frames per second
"""
import sys
import time
import json
import hashlib
import math
import numpy as np
from pathlib import Path
from dataclasses import dataclass
from typing import Dict, List, Any, Optional, Tuple
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag"))
from artdag.sexp.parser import parse, parse_all, Symbol, Keyword
@dataclass
class Context:
    """Runtime context passed to frame evaluation."""
    # Current playback time in seconds.
    t: float = 0.0
    # Index of the frame being rendered (0-based).
    frame_num: int = 0
    # Output frame rate in frames per second.
    fps: float = 30.0
class StreamInterpreter:
    """
    Fully generic streaming sexp interpreter.
    No domain-specific knowledge - just evaluates expressions
    and calls primitives.
    """
    def __init__(self, sexp_path: str):
        # Recipe location; relative paths inside the recipe resolve
        # against its directory.
        self.sexp_path = Path(sexp_path)
        self.sexp_dir = self.sexp_path.parent
        text = self.sexp_path.read_text()
        self.ast = parse(text)
        # Stream header config (:fps :seed :width :height ...).
        self.config = self._parse_config()
        # Global environment for def bindings
        self.globals: Dict[str, Any] = {}
        # Scans (name -> {state, init, step, trigger})
        self.scans: Dict[str, dict] = {}
        # Audio playback path (for syncing output)
        self.audio_playback: Optional[str] = None
        # Registries for external definitions
        self.primitives: Dict[str, Any] = {}
        self.effects: Dict[str, dict] = {}
        self.macros: Dict[str, dict] = {}
        # Default search root for (require-primitives ...) libraries.
        self.primitive_lib_dir = self.sexp_dir.parent / "sexp_effects" / "primitive_libs"
        # The (frame ...) expression evaluated once per output frame.
        self.frame_pipeline = None
        # External config files (set before run())
        self.sources_config: Optional[Path] = None
        self.audio_config: Optional[Path] = None
        import random
        # Deterministic per-recipe RNG, seeded from the header config.
        self.rng = random.Random(self.config.get('seed', 42))
    def _load_config_file(self, config_path):
        """Load a config file and process its definitions.

        Recognized top-level forms: (require-primitives "lib"),
        (def name expr) and (audio-playback "path"). Bindings made here
        take precedence over same-named recipe bindings (see _init).
        A missing file is a warning, not an error.
        """
        config_path = Path(config_path)  # Accept str or Path
        if not config_path.exists():
            print(f"Warning: config file not found: {config_path}", file=sys.stderr)
            return
        text = config_path.read_text()
        ast = parse_all(text)
        for form in ast:
            # Only (symbol ...) forms are meaningful; skip anything else.
            if not isinstance(form, list) or not form:
                continue
            if not isinstance(form[0], Symbol):
                continue
            cmd = form[0].name
            if cmd == 'require-primitives':
                lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"')
                self._load_primitives(lib_name)
            elif cmd == 'def':
                name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
                # Evaluated eagerly at load time against current globals.
                value = self._eval(form[2], self.globals)
                self.globals[name] = value
                print(f"Config: {name}", file=sys.stderr)
            elif cmd == 'audio-playback':
                # Path relative to working directory (consistent with other paths)
                path = str(form[1]).strip('"')
                self.audio_playback = str(Path(path).resolve())
                print(f"Audio playback: {self.audio_playback}", file=sys.stderr)
def _parse_config(self) -> dict:
"""Parse config from (stream name :key val ...)."""
config = {'fps': 30, 'seed': 42, 'width': 720, 'height': 720}
if not self.ast or not isinstance(self.ast[0], Symbol):
return config
if self.ast[0].name != 'stream':
return config
i = 2
while i < len(self.ast):
if isinstance(self.ast[i], Keyword):
config[self.ast[i].name] = self.ast[i + 1] if i + 1 < len(self.ast) else None
i += 2
elif isinstance(self.ast[i], list):
break
else:
i += 1
return config
    def _load_primitives(self, lib_name: str):
        """Load primitives from a Python library file.

        Searches several conventional locations, imports the module, and
        registers callables under namespaced, dash-separated names
        ("lib:func-name"). Two discovery mechanisms are supported:
        functions named prim_* and an optional module-level PRIMITIVES
        dict. A missing library is a warning, not an error.
        """
        import importlib.util
        # Candidate locations, in priority order.
        lib_paths = [
            self.primitive_lib_dir / f"{lib_name}.py",
            self.sexp_dir / "primitive_libs" / f"{lib_name}.py",
            self.sexp_dir.parent / "sexp_effects" / "primitive_libs" / f"{lib_name}.py",
        ]
        lib_path = None
        for p in lib_paths:
            if p.exists():
                lib_path = p
                break
        if not lib_path:
            print(f"Warning: primitive library '{lib_name}' not found", file=sys.stderr)
            return
        spec = importlib.util.spec_from_file_location(lib_name, lib_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        count = 0
        # Mechanism 1: any function named prim_<name>.
        for name in dir(module):
            if name.startswith('prim_'):
                func = getattr(module, name)
                prim_name = name[5:]
                dash_name = prim_name.replace('_', '-')
                # Register ONLY with namespace (geometry:ripple-displace)
                self.primitives[f"{lib_name}:{dash_name}"] = func
                count += 1
        # Mechanism 2: explicit PRIMITIVES dict of name -> callable.
        if hasattr(module, 'PRIMITIVES'):
            prims = getattr(module, 'PRIMITIVES')
            if isinstance(prims, dict):
                for name, func in prims.items():
                    # Register ONLY with namespace
                    dash_name = name.replace('_', '-')
                    self.primitives[f"{lib_name}:{dash_name}"] = func
                    count += 1
        print(f"Loaded primitives: {lib_name} ({count} functions)", file=sys.stderr)
    def _load_effect(self, effect_path: Path):
        """Load and register definitions from an effect .sexp file.

        Handles require-primitives, define-effect, defmacro, scan, and
        recursively follows (effect ... :path "...") and
        (include :path "...") forms, with paths resolved relative to the
        file currently being loaded. A missing file is a warning.
        """
        if not effect_path.exists():
            print(f"Warning: effect file not found: {effect_path}", file=sys.stderr)
            return
        text = effect_path.read_text()
        ast = parse_all(text)
        for form in ast:
            if not isinstance(form, list) or not form:
                continue
            if not isinstance(form[0], Symbol):
                continue
            cmd = form[0].name
            if cmd == 'require-primitives':
                lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"')
                self._load_primitives(lib_name)
            elif cmd == 'define-effect':
                # (define-effect name :params ((p :default v ...) ...) body)
                name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
                params = {}
                body = None
                i = 2
                while i < len(form):
                    if isinstance(form[i], Keyword):
                        if form[i].name == 'params' and i + 1 < len(form):
                            # Each param: (pname :default v :min ... :max ...)
                            for pdef in form[i + 1]:
                                if isinstance(pdef, list) and pdef:
                                    pname = pdef[0].name if isinstance(pdef[0], Symbol) else str(pdef[0])
                                    pinfo = {'default': 0}
                                    j = 1
                                    while j < len(pdef):
                                        if isinstance(pdef[j], Keyword) and j + 1 < len(pdef):
                                            pinfo[pdef[j].name] = pdef[j + 1]
                                            j += 2
                                        else:
                                            j += 1
                                    params[pname] = pinfo
                        i += 2
                    else:
                        # First non-keyword form is the effect body.
                        body = form[i]
                        i += 1
                self.effects[name] = {'params': params, 'body': body}
                print(f"Effect: {name}", file=sys.stderr)
            elif cmd == 'defmacro':
                name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
                params = [p.name if isinstance(p, Symbol) else str(p) for p in form[2]]
                body = form[3]
                self.macros[name] = {'params': params, 'body': body}
            elif cmd == 'effect':
                # Handle (effect name :path "...") in included files - recursive
                i = 2
                while i < len(form):
                    if isinstance(form[i], Keyword) and form[i].name == 'path':
                        path = str(form[i + 1]).strip('"')
                        # Resolve relative to the file being loaded
                        full = (effect_path.parent / path).resolve()
                        self._load_effect(full)
                        i += 2
                    else:
                        i += 1
            elif cmd == 'include':
                # Handle (include :path "...") in included files - recursive
                i = 1
                while i < len(form):
                    if isinstance(form[i], Keyword) and form[i].name == 'path':
                        path = str(form[i + 1]).strip('"')
                        full = (effect_path.parent / path).resolve()
                        self._load_effect(full)
                        i += 2
                    else:
                        i += 1
            elif cmd == 'scan':
                # Handle scans from included files
                # (scan name trigger :init expr :step expr)
                name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
                trigger_expr = form[2]
                init_val, step_expr = {}, None
                i = 3
                while i < len(form):
                    if isinstance(form[i], Keyword):
                        if form[i].name == 'init' and i + 1 < len(form):
                            # :init is evaluated once, at load time.
                            init_val = self._eval(form[i + 1], self.globals)
                        elif form[i].name == 'step' and i + 1 < len(form):
                            step_expr = form[i + 1]
                        i += 2
                    else:
                        i += 1
                self.scans[name] = {
                    # Scan state is always a dict; scalars are wrapped as {'acc': v}.
                    'state': dict(init_val) if isinstance(init_val, dict) else {'acc': init_val},
                    'init': init_val,
                    'step': step_expr,
                    'trigger': trigger_expr,
                }
                print(f"Scan: {name}", file=sys.stderr)
    def _init(self):
        """Initialize from sexp - load primitives, effects, defs, scans.

        External config files are loaded FIRST so their (def ...) and
        (audio-playback ...) bindings take precedence over same-named
        forms in the recipe, which are then skipped below.
        """
        # Load external config files first (they can override recipe definitions)
        if self.sources_config:
            self._load_config_file(self.sources_config)
        if self.audio_config:
            self._load_config_file(self.audio_config)
        for form in self.ast:
            if not isinstance(form, list) or not form:
                continue
            if not isinstance(form[0], Symbol):
                continue
            cmd = form[0].name
            if cmd == 'require-primitives':
                lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"')
                self._load_primitives(lib_name)
            elif cmd == 'effect':
                # (effect name :path "...") - load effect file relative to recipe dir.
                name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
                i = 2
                while i < len(form):
                    if isinstance(form[i], Keyword) and form[i].name == 'path':
                        path = str(form[i + 1]).strip('"')
                        full = (self.sexp_dir / path).resolve()
                        self._load_effect(full)
                        i += 2
                    else:
                        i += 1
            elif cmd == 'include':
                # (include :path "...") - same loader as effect files.
                i = 1
                while i < len(form):
                    if isinstance(form[i], Keyword) and form[i].name == 'path':
                        path = str(form[i + 1]).strip('"')
                        full = (self.sexp_dir / path).resolve()
                        self._load_effect(full)
                        i += 2
                    else:
                        i += 1
            elif cmd == 'audio-playback':
                # (audio-playback "path") - set audio file for playback sync
                # Skip if already set by config file
                if self.audio_playback is None:
                    path = str(form[1]).strip('"')
                    self.audio_playback = str((self.sexp_dir / path).resolve())
                    print(f"Audio playback: {self.audio_playback}", file=sys.stderr)
            elif cmd == 'def':
                # (def name expr) - evaluate and store in globals
                # Skip if already defined by config file
                name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
                if name in self.globals:
                    print(f"Def: {name} (from config, skipped)", file=sys.stderr)
                    continue
                value = self._eval(form[2], self.globals)
                self.globals[name] = value
                print(f"Def: {name}", file=sys.stderr)
            elif cmd == 'defmacro':
                name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
                params = [p.name if isinstance(p, Symbol) else str(p) for p in form[2]]
                body = form[3]
                self.macros[name] = {'params': params, 'body': body}
            elif cmd == 'scan':
                # (scan name trigger :init expr :step expr) - see _load_effect.
                name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
                trigger_expr = form[2]
                init_val, step_expr = {}, None
                i = 3
                while i < len(form):
                    if isinstance(form[i], Keyword):
                        if form[i].name == 'init' and i + 1 < len(form):
                            init_val = self._eval(form[i + 1], self.globals)
                        elif form[i].name == 'step' and i + 1 < len(form):
                            step_expr = form[i + 1]
                        i += 2
                    else:
                        i += 1
                self.scans[name] = {
                    'state': dict(init_val) if isinstance(init_val, dict) else {'acc': init_val},
                    'init': init_val,
                    'step': step_expr,
                    'trigger': trigger_expr,
                }
                print(f"Scan: {name}", file=sys.stderr)
            elif cmd == 'frame':
                # The per-frame pipeline; last (frame ...) form wins.
                self.frame_pipeline = form[1] if len(form) > 1 else None
    def _eval(self, expr, env: dict) -> Any:
        """Evaluate an expression in environment ``env``.

        Resolution order for a (symbol ...) form: closures in env/globals,
        built-in special forms, user effects, namespaced primitives,
        macros, then underscore-variant primitives. Unknown forms are
        returned unevaluated. Symbols resolve env -> globals -> scan state.

        NOTE(review): the module docstring lists map-range/rand/rand-int/
        rand-range as built-ins, but no such branches exist here -
        presumably they come from a primitive library; verify.
        """
        # Primitives
        if isinstance(expr, (int, float)):
            return expr
        if isinstance(expr, str):
            return expr
        if isinstance(expr, bool):
            return expr
        if isinstance(expr, Symbol):
            name = expr.name
            # Built-in constants
            if name == 'pi':
                return math.pi
            if name == 'true':
                return True
            if name == 'false':
                return False
            if name == 'nil':
                return None
            # Environment lookup
            if name in env:
                return env[name]
            # Global lookup
            if name in self.globals:
                return self.globals[name]
            # Scan state lookup
            if name in self.scans:
                return self.scans[name]['state']
            raise NameError(f"Undefined variable: {name}")
        if isinstance(expr, Keyword):
            # Bare keywords evaluate to their name string.
            return expr.name
        if not isinstance(expr, list) or not expr:
            return expr
        # Dict literal {:key val ...}
        if isinstance(expr[0], Keyword):
            result = {}
            i = 0
            while i < len(expr):
                if isinstance(expr[i], Keyword):
                    result[expr[i].name] = self._eval(expr[i + 1], env) if i + 1 < len(expr) else None
                    i += 2
                else:
                    i += 1
            return result
        head = expr[0]
        if not isinstance(head, Symbol):
            # Non-symbol head: evaluate elementwise as a plain list.
            return [self._eval(e, env) for e in expr]
        op = head.name
        args = expr[1:]
        # Check for closure call (env shadows globals; missing args bind to None)
        if op in env:
            val = env[op]
            if isinstance(val, dict) and val.get('_type') == 'closure':
                closure = val
                closure_env = dict(closure['env'])
                for i, pname in enumerate(closure['params']):
                    closure_env[pname] = self._eval(args[i], env) if i < len(args) else None
                return self._eval(closure['body'], closure_env)
        if op in self.globals:
            val = self.globals[op]
            if isinstance(val, dict) and val.get('_type') == 'closure':
                closure = val
                closure_env = dict(closure['env'])
                for i, pname in enumerate(closure['params']):
                    closure_env[pname] = self._eval(args[i], env) if i < len(args) else None
                return self._eval(closure['body'], closure_env)
        # Threading macro: (-> x (f a) g) => (g (f x a))
        if op == '->':
            result = self._eval(args[0], env)
            for form in args[1:]:
                if isinstance(form, list) and form:
                    # Insert threaded value as first argument.
                    new_form = [form[0], result] + form[1:]
                    result = self._eval(new_form, env)
                else:
                    result = self._eval([form, result], env)
            return result
        # === Binding ===
        if op == 'bind':
            # (bind scan-name [:key]) - read scan state; 0 if unknown.
            scan_name = args[0].name if isinstance(args[0], Symbol) else str(args[0])
            if scan_name in self.scans:
                state = self.scans[scan_name]['state']
                if len(args) > 1:
                    key = args[1].name if isinstance(args[1], Keyword) else str(args[1])
                    return state.get(key, 0)
                return state
            return 0
        # === Arithmetic ===
        if op == '+':
            return sum(self._eval(a, env) for a in args)
        if op == '-':
            # Unary minus negates; n-ary subtracts the rest from the first.
            vals = [self._eval(a, env) for a in args]
            return vals[0] - sum(vals[1:]) if len(vals) > 1 else -vals[0]
        if op == '*':
            result = 1
            for a in args:
                result *= self._eval(a, env)
            return result
        if op == '/':
            # NOTE: division by zero yields 0 (silent), by design here.
            vals = [self._eval(a, env) for a in args]
            return vals[0] / vals[1] if len(vals) > 1 and vals[1] != 0 else 0
        if op == 'mod':
            # Same silent-zero convention as '/'.
            vals = [self._eval(a, env) for a in args]
            return vals[0] % vals[1] if len(vals) > 1 and vals[1] != 0 else 0
        # === Comparison ===
        if op == '<':
            return self._eval(args[0], env) < self._eval(args[1], env)
        if op == '>':
            return self._eval(args[0], env) > self._eval(args[1], env)
        if op == '=':
            return self._eval(args[0], env) == self._eval(args[1], env)
        if op == '<=':
            return self._eval(args[0], env) <= self._eval(args[1], env)
        if op == '>=':
            return self._eval(args[0], env) >= self._eval(args[1], env)
        if op == 'and':
            # Short-circuit; returns a plain boolean.
            for arg in args:
                if not self._eval(arg, env):
                    return False
            return True
        if op == 'or':
            # Short-circuit; returns the first truthy VALUE (Lisp-style).
            result = False
            for arg in args:
                result = self._eval(arg, env)
                if result:
                    return result
            return result
        if op == 'not':
            return not self._eval(args[0], env)
        # === Logic ===
        if op == 'if':
            cond = self._eval(args[0], env)
            if cond:
                return self._eval(args[1], env)
            return self._eval(args[2], env) if len(args) > 2 else None
        if op == 'cond':
            # Flat (cond pred1 val1 pred2 val2 ...) pairs; None if no match.
            i = 0
            while i < len(args) - 1:
                pred = self._eval(args[i], env)
                if pred:
                    return self._eval(args[i + 1], env)
                i += 2
            return None
        if op == 'lambda':
            # Closures capture a snapshot of the current env.
            params = args[0]
            body = args[1]
            param_names = [p.name if isinstance(p, Symbol) else str(p) for p in params]
            return {'_type': 'closure', 'params': param_names, 'body': body, 'env': dict(env)}
        if op == 'let' or op == 'let*':
            # Both forms bind sequentially (let behaves like let*).
            bindings = args[0]
            body = args[1]
            new_env = dict(env)
            if bindings and isinstance(bindings[0], list):
                # Paired form: ((name expr) (name expr) ...)
                for binding in bindings:
                    if isinstance(binding, list) and len(binding) >= 2:
                        name = binding[0].name if isinstance(binding[0], Symbol) else str(binding[0])
                        val = self._eval(binding[1], new_env)
                        new_env[name] = val
            else:
                # Flat form: (name expr name expr ...)
                i = 0
                while i < len(bindings):
                    name = bindings[i].name if isinstance(bindings[i], Symbol) else str(bindings[i])
                    val = self._eval(bindings[i + 1], new_env)
                    new_env[name] = val
                    i += 2
            return self._eval(body, new_env)
        # === Dict ===
        if op == 'dict':
            result = {}
            i = 0
            while i < len(args):
                if isinstance(args[i], Keyword):
                    key = args[i].name
                    val = self._eval(args[i + 1], env) if i + 1 < len(args) else None
                    result[key] = val
                    i += 2
                else:
                    i += 1
            return result
        if op == 'get':
            # Missing keys and non-dicts yield 0, not an error.
            obj = self._eval(args[0], env)
            key = args[1].name if isinstance(args[1], Keyword) else self._eval(args[1], env)
            if isinstance(obj, dict):
                return obj.get(key, 0)
            return 0
        # === List ===
        if op == 'list':
            return [self._eval(a, env) for a in args]
        if op == 'quote':
            return args[0] if args else None
        if op == 'nth':
            # Out-of-range or non-sequence yields None.
            lst = self._eval(args[0], env)
            idx = int(self._eval(args[1], env))
            if isinstance(lst, (list, tuple)) and 0 <= idx < len(lst):
                return lst[idx]
            return None
        if op == 'len':
            val = self._eval(args[0], env)
            return len(val) if hasattr(val, '__len__') else 0
        if op == 'map':
            # NOTE: argument order is (map seq fn), not (map fn seq).
            seq = self._eval(args[0], env)
            fn = self._eval(args[1], env)
            if not isinstance(seq, (list, tuple)):
                return []
            # Handle closure (lambda from sexp)
            if isinstance(fn, dict) and fn.get('_type') == 'closure':
                results = []
                for item in seq:
                    closure_env = dict(fn['env'])
                    if fn['params']:
                        closure_env[fn['params'][0]] = item
                    results.append(self._eval(fn['body'], closure_env))
                return results
            # Handle Python callable
            if callable(fn):
                return [fn(item) for item in seq]
            return []
        # === Effects ===
        if op in self.effects:
            effect = self.effects[op]
            effect_env = dict(env)
            param_names = list(effect['params'].keys())
            # Seed all params with their declared defaults.
            for pname, pdef in effect['params'].items():
                effect_env[pname] = pdef.get('default', 0)
            # First positional arg is the frame; the rest fill params in order.
            positional_idx = 0
            i = 0
            while i < len(args):
                if isinstance(args[i], Keyword):
                    pname = args[i].name
                    if pname in effect['params'] and i + 1 < len(args):
                        effect_env[pname] = self._eval(args[i + 1], env)
                    i += 2
                else:
                    val = self._eval(args[i], env)
                    if positional_idx == 0:
                        effect_env['frame'] = val
                    elif positional_idx - 1 < len(param_names):
                        effect_env[param_names[positional_idx - 1]] = val
                    positional_idx += 1
                    i += 1
            return self._eval(effect['body'], effect_env)
        # === Primitives ===
        if op in self.primitives:
            prim_func = self.primitives[op]
            evaluated_args = []
            kwargs = {}
            i = 0
            while i < len(args):
                if isinstance(args[i], Keyword):
                    k = args[i].name
                    v = self._eval(args[i + 1], env) if i + 1 < len(args) else None
                    kwargs[k] = v
                    i += 2
                else:
                    evaluated_args.append(self._eval(args[i], env))
                    i += 1
            try:
                if kwargs:
                    return prim_func(*evaluated_args, **kwargs)
                return prim_func(*evaluated_args)
            except Exception as e:
                # Primitives must not kill the stream; log and return None.
                print(f"Primitive {op} error: {e}", file=sys.stderr)
                return None
        # === Macros (function-like: args evaluated before binding) ===
        if op in self.macros:
            macro = self.macros[op]
            macro_env = dict(env)
            for i, pname in enumerate(macro['params']):
                # Evaluate args in calling environment before binding
                macro_env[pname] = self._eval(args[i], env) if i < len(args) else None
            return self._eval(macro['body'], macro_env)
        # Underscore variant lookup (foo-bar -> foo_bar primitive names)
        prim_name = op.replace('-', '_')
        if prim_name in self.primitives:
            prim_func = self.primitives[prim_name]
            evaluated_args = []
            kwargs = {}
            i = 0
            while i < len(args):
                if isinstance(args[i], Keyword):
                    k = args[i].name.replace('-', '_')
                    v = self._eval(args[i + 1], env) if i + 1 < len(args) else None
                    kwargs[k] = v
                    i += 2
                else:
                    evaluated_args.append(self._eval(args[i], env))
                    i += 1
            try:
                if kwargs:
                    return prim_func(*evaluated_args, **kwargs)
                return prim_func(*evaluated_args)
            except Exception as e:
                print(f"Primitive {op} error: {e}", file=sys.stderr)
                return None
        # Unknown - return as-is
        return expr
def _step_scans(self, ctx: Context, env: dict):
"""Step scans based on trigger evaluation."""
for name, scan in self.scans.items():
trigger_expr = scan['trigger']
# Evaluate trigger in context
should_step = self._eval(trigger_expr, env)
if should_step:
state = scan['state']
step_env = dict(state)
step_env.update(env)
new_state = self._eval(scan['step'], step_env)
if isinstance(new_state, dict):
scan['state'] = new_state
else:
scan['state'] = {'acc': new_state}
    def run(self, duration: float = None, output: str = "pipe"):
        """Run the streaming pipeline.

        Args:
            duration: Seconds to render. If None, uses the duration of the
                first global with a .duration attribute (e.g. loaded audio),
                falling back to 60s.
            output: "pipe" (stdout), "preview" (display window), or a file
                path for FileOutput.
        """
        # Import output classes - handle both package and direct execution
        try:
            from .output import PipeOutput, DisplayOutput, FileOutput
        except ImportError:
            from output import PipeOutput, DisplayOutput, FileOutput
        self._init()
        if not self.frame_pipeline:
            print("Error: no (frame ...) pipeline defined", file=sys.stderr)
            return
        w = self.config.get('width', 720)
        h = self.config.get('height', 720)
        fps = self.config.get('fps', 30)
        if duration is None:
            # Try to get duration from audio if available
            for name, val in self.globals.items():
                if hasattr(val, 'duration'):
                    duration = val.duration
                    print(f"Using audio duration: {duration:.1f}s", file=sys.stderr)
                    break
            else:
                # for/else: no global had a duration - use the default.
                duration = 60.0
        n_frames = int(duration * fps)
        frame_time = 1.0 / fps
        print(f"Streaming {n_frames} frames @ {fps}fps", file=sys.stderr)
        # Create context
        ctx = Context(fps=fps)
        # Output (with optional audio sync)
        audio = self.audio_playback
        if output == "pipe":
            out = PipeOutput(size=(w, h), fps=fps, audio_source=audio)
        elif output == "preview":
            out = DisplayOutput(size=(w, h), fps=fps, audio_source=audio)
        else:
            out = FileOutput(output, size=(w, h), fps=fps, audio_source=audio)
        try:
            frame_times = []
            for frame_num in range(n_frames):
                # Stop early if the sink was closed (e.g. preview window).
                if not out.is_open:
                    break
                frame_start = time.time()
                ctx.t = frame_num * frame_time
                ctx.frame_num = frame_num
                # Build frame environment with context
                frame_env = {
                    'ctx': {
                        't': ctx.t,
                        'frame-num': ctx.frame_num,
                        'fps': ctx.fps,
                    },
                    't': ctx.t,  # Also expose t directly for convenience
                    'frame-num': ctx.frame_num,
                }
                # Step scans
                self._step_scans(ctx, frame_env)
                # Evaluate pipeline
                result = self._eval(self.frame_pipeline, frame_env)
                # Only array-like results are written; None/others are skipped.
                if result is not None and hasattr(result, 'shape'):
                    out.write(result, ctx.t)
                frame_elapsed = time.time() - frame_start
                frame_times.append(frame_elapsed)
                # Progress with timing (rolling average over last 30 frames)
                if frame_num % 30 == 0:
                    pct = 100 * frame_num / n_frames
                    avg_ms = 1000 * sum(frame_times[-30:]) / max(1, len(frame_times[-30:]))
                    target_ms = 1000 * frame_time
                    print(f"\r{pct:5.1f}% [{avg_ms:.0f}ms/frame, target {target_ms:.0f}ms]", end="", file=sys.stderr, flush=True)
        finally:
            out.close()
            print("\nDone", file=sys.stderr)
def run_stream(sexp_path: str, duration: float = None, output: str = "pipe", fps: float = None,
               sources_config: str = None, audio_config: str = None):
    """Build a StreamInterpreter for *sexp_path*, apply overrides, and run it."""
    interp = StreamInterpreter(sexp_path)
    if fps:
        interp.config['fps'] = fps
    # Optional external config files, attached before run() so _init sees them.
    for attr, value in (('sources_config', sources_config),
                        ('audio_config', audio_config)):
        if value:
            setattr(interp, attr, Path(value))
    interp.run(duration=duration, output=output)
if __name__ == "__main__":
    # CLI entry point: mirror run_stream()'s keyword arguments.
    import argparse
    parser = argparse.ArgumentParser(description="Run streaming sexp (generic interpreter)")
    parser.add_argument("sexp", help="Path to .sexp file")
    parser.add_argument("-d", "--duration", type=float, default=None)
    parser.add_argument("-o", "--output", default="pipe")
    parser.add_argument("--fps", type=float, default=None)
    parser.add_argument("--sources", dest="sources_config", help="Path to sources config .sexp file")
    parser.add_argument("--audio", dest="audio_config", help="Path to audio config .sexp file")
    args = parser.parse_args()
    run_stream(args.sexp, duration=args.duration, output=args.output, fps=args.fps,
               sources_config=args.sources_config, audio_config=args.audio_config)

View File

@@ -0,0 +1,25 @@
;; Crossfade with Zoom Transition
;;
;; Macro for transitioning between two frames with a zoom effect.
;; Active frame zooms out while next frame zooms in.
;;
;; Required context:
;; - zoom effect must be loaded
;; - blend effect must be loaded
;;
;; Parameters:
;; active-frame: current frame
;; next-frame: frame to transition to
;; fade-amt: transition progress (0 = all active, 1 = all next)
;;
;; Usage:
;; (include :path "../templates/crossfade-zoom.sexp")
;; ...
;; (crossfade-zoom active-frame next-frame 0.5)
(defmacro crossfade-zoom (active-frame next-frame fade-amt)
  (let [active-zoom (+ 1.0 fade-amt)                      ;; outgoing: 1.0 -> 2.0 as fade-amt goes 0 -> 1
        active-zoomed (zoom active-frame :amount active-zoom)
        next-zoom (+ 0.1 (* fade-amt 0.9))                ;; incoming: 0.1 -> 1.0 over the same range
        next-zoomed (zoom next-frame :amount next-zoom)]
    ;; opacity tracks fade-amt, so 0 shows only active, 1 only next
    (blend active-zoomed next-zoomed :opacity fade-amt)))

View File

@@ -0,0 +1,65 @@
;; cycle-crossfade template
;;
;; Generalized cycling zoom-crossfade for any number of video layers.
;; Cycles through videos with smooth zoom-based crossfade transitions.
;;
;; Parameters:
;; beat-data - beat analysis node (drives timing)
;; input-videos - list of video nodes to cycle through
;; init-clen - initial cycle length in beats
;;
;; NOTE: The parameter is named "input-videos" (not "videos") because
;; template substitution replaces param names everywhere in the AST.
;; The planner's _expand_slice_on injects env["videos"] at plan time,
;; so (len videos) inside the lambda references that injected value.
(deftemplate cycle-crossfade
  (beat-data input-videos init-clen)
  (slice-on beat-data
    :videos input-videos
    :init {:cycle 0 :beat 0 :clen init-clen}
    :fn (lambda [acc i start end]
      (let [beat (get acc "beat")
            clen (get acc "clen")
            active (get acc "cycle")
            n (len videos)
            phase3 (* beat 3)          ;; beat counter scaled 3x for the transition phase
            ;; wt: blend weight for layer p. Active fades 1 -> 0 over the
            ;; second clen-window; its predecessor (the incoming layer)
            ;; fades 0 -> 1 in lockstep. All other layers stay at 0.
            wt (lambda [p]
              (let [prev (mod (+ p (- n 1)) n)]
                (if (= active p)
                  (if (< phase3 clen) 1.0
                    (if (< phase3 (* clen 2))
                      (- 1.0 (* (/ (- phase3 clen) clen) 1.0))
                      0.0))
                  (if (= active prev)
                    (if (< phase3 clen) 0.0
                      (if (< phase3 (* clen 2))
                        (* (/ (- phase3 clen) clen) 1.0)
                        1.0))
                    0.0))))
            ;; zm: zoom amount for layer p, mirroring wt's timing.
            zm (lambda [p]
              (let [prev (mod (+ p (- n 1)) n)]
                (if (= active p)
                  ;; Active video: normal -> zoom out during transition -> tiny
                  (if (< phase3 clen) 1.0
                    (if (< phase3 (* clen 2))
                      (+ 1.0 (* (/ (- phase3 clen) clen) 1.0))
                      0.1))
                  (if (= active prev)
                    ;; Incoming video: tiny -> zoom in during transition -> normal
                    (if (< phase3 clen) 0.1
                      (if (< phase3 (* clen 2))
                        (+ 0.1 (* (/ (- phase3 clen) clen) 0.9))
                        1.0))
                    0.1))))
            ;; Advance the beat within the cycle, or rotate to the next
            ;; video with a new pseudo-random cycle length (40-80 beats).
            new-acc (if (< (+ beat 1) clen)
              (dict :cycle active :beat (+ beat 1) :clen clen)
              (dict :cycle (mod (+ active 1) n) :beat 0
                :clen (+ 40 (mod (* i 7) 41))))]
        {:layers (map (lambda [p]
                  {:video p :effects [{:effect zoom :amount (zm p)}]})
                (range 0 n))
         :compose {:effect blend_multi :mode "alpha"
                   :weights (map (lambda [p] (wt p)) (range 0 n))}
         :acc new-acc}))))

112
templates/process-pair.sexp Normal file
View File

@@ -0,0 +1,112 @@
;; process-pair template
;;
;; Reusable video-pair processor: takes a single video source, creates two
;; clips (A and B) with opposite rotations and sporadic effects, blends them,
;; and applies a per-pair slow rotation driven by a beat scan.
;;
;; All sporadic triggers (invert, hue-shift, ascii) and pair-level controls
;; (blend opacity, rotation) are defined internally using seed offsets.
;;
;; Parameters:
;; video - source video node
;; energy - energy analysis node (drives rotation/zoom amounts)
;; beat-data - beat analysis node (drives sporadic triggers)
;; rng - RNG object from (make-rng seed) for auto-derived seeds
;; rot-dir - initial rotation direction: 1 (clockwise) or -1 (anti-clockwise)
;; rot-a/b - rotation ranges for clip A/B (e.g. [0 45])
;; zoom-a/b - zoom ranges for clip A/B (e.g. [1 1.5])
(deftemplate process-pair
  (video energy beat-data rng rot-dir rot-a rot-b zoom-a zoom-b)
  ;; --- Sporadic triggers for clip A ---
  ;; Each scan steps once per beat; inside :step/:emit the accumulator is
  ;; available as `acc` (or as bare key names when the accumulator is a dict).
  ;; Invert: 10% chance per beat, lasts 1-5 beats
  ;; (acc = beats remaining; emits 1 while a burst is active, else 0)
  (def inv-a (scan beat-data :rng rng :init 0
    :step (if (> acc 0) (- acc 1) (if (< (rand) 0.1) (rand-int 1 5) 0))
    :emit (if (> acc 0) 1 0)))
  ;; Hue shift: 10% chance, random hue 30-330 deg, lasts 1-5 beats
  ;; (dict keys :rem/:hue are bound as bare names in :step and :emit)
  (def hue-a (scan beat-data :rng rng :init (dict :rem 0 :hue 0)
    :step (if (> rem 0)
              (dict :rem (- rem 1) :hue hue)
              (if (< (rand) 0.1)
                  (dict :rem (rand-int 1 5) :hue (rand-range 30 330))
                  (dict :rem 0 :hue 0)))
    :emit (if (> rem 0) hue 0)))
  ;; ASCII art: 5% chance, lasts 1-3 beats
  (def ascii-a (scan beat-data :rng rng :init 0
    :step (if (> acc 0) (- acc 1) (if (< (rand) 0.05) (rand-int 1 3) 0))
    :emit (if (> acc 0) 1 0)))
  ;; --- Sporadic triggers for clip B (offset seeds) ---
  ;; Same structure as clip A; the shared rng yields different auto-derived
  ;; seeds per scan, so A and B fire independently.
  (def inv-b (scan beat-data :rng rng :init 0
    :step (if (> acc 0) (- acc 1) (if (< (rand) 0.1) (rand-int 1 5) 0))
    :emit (if (> acc 0) 1 0)))
  (def hue-b (scan beat-data :rng rng :init (dict :rem 0 :hue 0)
    :step (if (> rem 0)
              (dict :rem (- rem 1) :hue hue)
              (if (< (rand) 0.1)
                  (dict :rem (rand-int 1 5) :hue (rand-range 30 330))
                  (dict :rem 0 :hue 0)))
    :emit (if (> rem 0) hue 0)))
  (def ascii-b (scan beat-data :rng rng :init 0
    :step (if (> acc 0) (- acc 1) (if (< (rand) 0.05) (rand-int 1 3) 0))
    :emit (if (> acc 0) 1 0)))
  ;; --- Pair-level controls ---
  ;; Internal A/B blend: randomly show A (0), both (0.5), or B (1), every 1-11 beats
  (def pair-mix (scan beat-data :rng rng
    :init (dict :rem 0 :opacity 0.5)
    :step (if (> rem 0)
              (dict :rem (- rem 1) :opacity opacity)
              ;; (rand-int 0 2) * 0.5 yields 0, 0.5, or 1
              (dict :rem (rand-int 1 11) :opacity (* (rand-int 0 2) 0.5)))
    :emit opacity))
  ;; Per-pair rotation: one full rotation every 20-30 beats, alternating direction
  ;; (angle accumulates dir * 360/clen per beat; at cycle end the direction
  ;; flips and a new cycle length is drawn, holding the angle for that beat)
  (def pair-rot (scan beat-data :rng rng
    :init (dict :beat 0 :clen 25 :dir rot-dir :angle 0)
    :step (if (< (+ beat 1) clen)
              (dict :beat (+ beat 1) :clen clen :dir dir
                    :angle (+ angle (* dir (/ 360 clen))))
              (dict :beat 0 :clen (rand-int 20 30) :dir (* dir -1)
                    :angle angle))
    :emit angle))
  ;; --- Clip A processing ---
  ;; Both clips cut the same full-length segment of the source; they diverge
  ;; only in their effect parameters (rot-a/b, zoom-a/b, separate triggers).
  (def clip-a (-> video (segment :start 0 :duration (bind energy duration))))
  (def rotated-a (-> clip-a
    (effect rotate :angle (bind energy values :range rot-a))
    (effect zoom :amount (bind energy values :range zoom-a))
    (effect invert :amount (bind inv-a values))
    (effect hue_shift :degrees (bind hue-a values))
    ;; ASCII disabled - too slow without GPU
    ;; (effect ascii_art
    ;;   :char_size (bind energy values :range [4 32])
    ;;   :mix (bind ascii-a values))
    ))
  ;; --- Clip B processing ---
  (def clip-b (-> video (segment :start 0 :duration (bind energy duration))))
  (def rotated-b (-> clip-b
    (effect rotate :angle (bind energy values :range rot-b))
    (effect zoom :amount (bind energy values :range zoom-b))
    (effect invert :amount (bind inv-b values))
    (effect hue_shift :degrees (bind hue-b values))
    ;; ASCII disabled - too slow without GPU
    ;; (effect ascii_art
    ;;   :char_size (bind energy values :range [4 32])
    ;;   :mix (bind ascii-b values))
    ))
  ;; --- Blend A+B and apply pair rotation ---
  ;; Last form is the template's result: A alpha-blended over B at the
  ;; pair-mix opacity, then rotated by the accumulated pair-rot angle.
  (-> rotated-a
    (effect blend rotated-b
      :mode "alpha" :opacity (bind pair-mix values) :resize_mode "fit")
    (effect rotate
      :angle (bind pair-rot values))))

View File

@@ -0,0 +1,28 @@
;; Oscillating Spin Scan
;;
;; Accumulates rotation angle on each beat, reversing direction
;; periodically for an oscillating effect.
;;
;; Required context:
;; - music: audio analyzer from (streaming:make-audio-analyzer ...)
;;
;; Provides scan: spin
;; Bind with: (bind spin :angle) ;; cumulative rotation angle
;;
;; Behavior:
;; - Rotates 14.4 degrees per beat (completes 360 in 25 beats)
;; - After 20-30 beats, reverses direction
;; - Creates a swinging/oscillating rotation effect
;;
;; Usage:
;; (include :path "../templates/scan-oscillating-spin.sexp")
;;
;; In frame:
;; (rotate frame :angle (bind spin :angle))
;; State: :angle = cumulative rotation, :dir = +1/-1 spin direction,
;; :left = beats remaining before the direction reverses.
;; While beats remain, advance the angle by dir * 14.4 deg per beat;
;; when the countdown hits 0, hold the angle for that beat, flip dir,
;; and draw a new countdown of 20..30 beats from the current beat count.
(scan spin (streaming:audio-beat music t)
  :init {:angle 0 :dir 1 :left 25}
  :step (if (> left 0)
            (dict :angle (+ angle (* dir 14.4)) :dir dir :left (- left 1))
            (dict :angle angle :dir (* dir -1)
                  :left (+ 20 (mod (streaming:audio-beat-count music t) 11)))))

Some files were not shown because too many files have changed in this diff Show More