"""
|
|
Adapter to run sexp recipes through the streaming compositor.
|
|
|
|
Bridges the gap between:
|
|
- Existing recipe format (sexp files with stages, effects, analysis)
|
|
- Streaming compositor (sources, effect chains, compositor config)
|
|
"""
|
|
|
|
import sys
|
|
from pathlib import Path
|
|
from typing import Dict, List, Any, Optional
|
|
|
|
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag"))
|
|
|
|
from .compositor import StreamingCompositor
|
|
from .sources import VideoSource
|
|
from .audio import FileAudioAnalyzer
|
|
|
|
|
|
class RecipeAdapter:
    """
    Adapts a compiled sexp recipe to run through the streaming compositor.

    Example:
        adapter = RecipeAdapter("effects/quick_test.sexp")
        adapter.run(output="preview", duration=60)
    """

    def __init__(
        self,
        recipe_path: str,
        params: Optional[Dict[str, Any]] = None,
        backend: str = "numpy",
    ):
        """
        Load and prepare a recipe for streaming.

        Args:
            recipe_path: Path to .sexp recipe file.
            params: Parameter overrides passed to the sexp compiler.
            backend: "numpy" or "glsl".
        """
        self.recipe_path = Path(recipe_path)
        # Relative asset paths inside the recipe resolve against its directory.
        self.recipe_dir = self.recipe_path.parent
        self.params = params or {}
        self.backend = backend

        # Compile recipe eagerly so construction fails fast on a bad recipe.
        self._compile()

    def _compile(self) -> None:
        """Compile the recipe and extract sources, effects, and analyzers.

        Populates:
            self.compiled: result of artdag's compile_string (has .nodes,
                .registry, .encoding — assumed CompiledRecipe; confirm upstream).
            self.sources: node id -> Path (resolved under recipe_dir).
            self.effects_registry: effect name -> Path to effect implementation.
            self.analyzers: node id -> {"analyzer": name, "path": analyzer path}.
        """
        from artdag.sexp.compiler import compile_string

        recipe_text = self.recipe_path.read_text()
        self.compiled = compile_string(recipe_text, self.params, recipe_dir=self.recipe_dir)

        # Extract key info
        self.sources = {}  # node id -> Path
        self.effects_registry = {}  # effect name -> Path
        self.analyzers = {}  # node id -> analyzer info dict

        # Walk nodes to find sources and structure.
        # nodes is a list of dicts in CompiledRecipe.
        for node in self.compiled.nodes:
            node_type = node.get("type", "")

            if node_type == "SOURCE":
                config = node.get("config", {})
                path = config.get("path")
                if path:
                    # Paths in the recipe are relative to the recipe directory.
                    self.sources[node["id"]] = self.recipe_dir / path

            elif node_type == "ANALYZE":
                config = node.get("config", {})
                self.analyzers[node["id"]] = {
                    "analyzer": config.get("analyzer"),
                    "path": config.get("analyzer_path"),
                }

        # Get effects registry from compiled recipe.
        # registry has an 'effects' sub-dict mapping name -> metadata.
        effects_dict = self.compiled.registry.get("effects", {})
        for name, info in effects_dict.items():
            if info.get("path"):
                self.effects_registry[name] = Path(info["path"])

    def run_analysis(self) -> Dict[str, Any]:
        """
        Run the analysis phase (energy, beats, etc.).

        Delegates to the planner, which executes analyzers as a side effect
        of plan creation; results are collected via the on_analysis callback
        and merged with the plan's named analysis tracks.

        Returns:
            Dict of analysis track name -> {times, values, duration}
            (shape assumed from callers — confirm against planner output).
        """
        print(f"Running analysis...", file=sys.stderr)

        # Use existing planner's analysis execution.
        from artdag.sexp.planner import create_plan

        analysis_data = {}

        def on_analysis(node_id: str, results: dict):
            # Collect per-node results as they complete; also log progress.
            analysis_data[node_id] = results
            print(f" {node_id[:16]}...: {len(results.get('times', []))} samples", file=sys.stderr)

        # Create plan (runs analysis as side effect).
        plan = create_plan(
            self.compiled,
            inputs={},
            recipe_dir=self.recipe_dir,
            on_analysis=on_analysis,
        )

        # Also store named analysis tracks (may overwrite node-id keys).
        for name, data in plan.analysis.items():
            analysis_data[name] = data

        return analysis_data

    def build_compositor(
        self,
        analysis_data: Optional[Dict[str, Any]] = None,
        fps: Optional[float] = None,
    ) -> StreamingCompositor:
        """
        Build a streaming compositor from the recipe.

        This is a simplified version that handles common patterns.
        Complex recipes may need manual configuration.

        Args:
            analysis_data: Pre-computed analysis data (may be empty).
            fps: Frame-rate override; falls back to the recipe's encoding
                fps, then 30.

        Returns:
            Configured StreamingCompositor.

        Raises:
            ValueError: if the recipe contains no video sources.
        """
        # Extract video and audio sources in SLICE_ON input order.
        video_sources = []
        audio_source = None

        # Find audio source first — the first source with an audio extension
        # wins (dict order is definition order in the compiled recipe).
        for node_id, path in self.sources.items():
            suffix = path.suffix.lower()
            if suffix in ('.mp3', '.wav', '.flac', '.ogg', '.m4a', '.aac'):
                audio_source = str(path)
                break

        # Find SLICE_ON node to get correct video order.
        slice_on_inputs = None
        for node in self.compiled.nodes:
            if node.get('type') == 'SLICE_ON':
                # Use 'videos' config key which has the correct order.
                config = node.get('config', {})
                slice_on_inputs = config.get('videos', [])
                break

        if slice_on_inputs:
            # Trace each SLICE_ON input back to its SOURCE.
            node_lookup = {n['id']: n for n in self.compiled.nodes}

            def trace_to_source(node_id, visited=None):
                """Trace a node back to its SOURCE; return the source Path.

                Depth-first walk over node 'inputs'; `visited` guards
                against cycles in the DAG.
                """
                if visited is None:
                    visited = set()
                if node_id in visited:
                    return None
                visited.add(node_id)

                node = node_lookup.get(node_id)
                if not node:
                    return None
                if node.get('type') == 'SOURCE':
                    return self.sources.get(node_id)
                # Recurse through inputs; first hit wins.
                for inp in node.get('inputs', []):
                    result = trace_to_source(inp, visited)
                    if result:
                        return result
                return None

            # Build video_sources in SLICE_ON input order.
            for inp_id in slice_on_inputs:
                source_path = trace_to_source(inp_id)
                if source_path:
                    suffix = source_path.suffix.lower()
                    if suffix in ('.mp4', '.webm', '.mov', '.avi', '.mkv'):
                        video_sources.append(str(source_path))

        # Fallback to definition order if no SLICE_ON (or tracing found nothing).
        if not video_sources:
            for node_id, path in self.sources.items():
                suffix = path.suffix.lower()
                if suffix in ('.mp4', '.webm', '.mov', '.avi', '.mkv'):
                    video_sources.append(str(path))

        if not video_sources:
            raise ValueError("No video sources found in recipe")

        # Build effect chains - use live audio bindings (matching video_sources count).
        effects_per_source = self._build_streaming_effects(n_sources=len(video_sources))

        # Build compositor config from recipe.
        compositor_config = self._extract_compositor_config(analysis_data)

        return StreamingCompositor(
            sources=video_sources,
            effects_per_source=effects_per_source,
            compositor_config=compositor_config,
            analysis_data=analysis_data or {},
            backend=self.backend,
            recipe_dir=self.recipe_dir,
            fps=fps or self.compiled.encoding.get("fps", 30),
            audio_source=audio_source,
        )

    def _build_streaming_effects(self, n_sources: Optional[int] = None) -> List[List[Dict]]:
        """
        Build effect chains for streaming with live audio bindings.

        Replicates the recipe's effect pipeline:
        - Per source: rotate, zoom, invert, hue_shift, ascii_art
        - All driven by live_energy and live_beat

        Args:
            n_sources: Number of chains to build; defaults to the number of
                video sources discovered in the recipe.

        Returns:
            One effect-chain list per source. Parameter values that are dicts
            with "_binding": True are resolved at render time from the named
            live analysis stream (assumed — confirm against compositor's
            binding resolver).
        """
        if n_sources is None:
            n_sources = len([p for p in self.sources.values()
                             if p.suffix.lower() in ('.mp4', '.webm', '.mov', '.avi', '.mkv')])

        effects_per_source = []

        for i in range(n_sources):
            # Alternate rotation direction per source for visual variety.
            rot_dir = 1 if i % 2 == 0 else -1

            effects = [
                # Rotate - energy drives angle
                {
                    "effect": "rotate",
                    "effect_path": str(self.effects_registry.get("rotate", "")),
                    "angle": {
                        "_binding": True,
                        "source": "live_energy",
                        "feature": "values",
                        "range": [0, 45 * rot_dir],
                    },
                },
                # Zoom - energy drives amount (even sources zoom in, odd zoom out)
                {
                    "effect": "zoom",
                    "effect_path": str(self.effects_registry.get("zoom", "")),
                    "amount": {
                        "_binding": True,
                        "source": "live_energy",
                        "feature": "values",
                        "range": [1.0, 1.5] if i % 2 == 0 else [1.0, 0.7],
                    },
                },
                # Invert - beat triggers
                {
                    "effect": "invert",
                    "effect_path": str(self.effects_registry.get("invert", "")),
                    "amount": {
                        "_binding": True,
                        "source": "live_beat",
                        "feature": "values",
                        "range": [0, 1],
                    },
                },
                # Hue shift - energy drives hue
                {
                    "effect": "hue_shift",
                    "effect_path": str(self.effects_registry.get("hue_shift", "")),
                    "degrees": {
                        "_binding": True,
                        "source": "live_energy",
                        "feature": "values",
                        "range": [0, 180],
                    },
                },
                # ASCII art - energy drives char size, beat triggers mix
                {
                    "effect": "ascii_art",
                    "effect_path": str(self.effects_registry.get("ascii_art", "")),
                    "char_size": {
                        "_binding": True,
                        "source": "live_energy",
                        "feature": "values",
                        "range": [4, 32],
                    },
                    "mix": {
                        "_binding": True,
                        "source": "live_beat",
                        "feature": "values",
                        "range": [0, 1],
                    },
                },
            ]
            effects_per_source.append(effects)

        return effects_per_source

    def _extract_effects(self) -> List[List[Dict]]:
        """Extract effect chains for each source (legacy, pre-computed analysis).

        NOTE(review): this is simplified — it does not trace the DAG, so every
        video source gets the FIRST matching EFFECT node (see the break below).
        """
        # Simplified: find EFFECT nodes and their configs.
        effects_per_source = []

        for node_id, path in self.sources.items():
            if path.suffix.lower() not in ('.mp4', '.webm', '.mov', '.avi', '.mkv'):
                continue

            # Find effects that depend on this source.
            # This is simplified - real implementation would trace the DAG.
            effects = []

            for node in self.compiled.nodes:
                if node.get("type") == "EFFECT":
                    config = node.get("config", {})
                    effect_name = config.get("effect")
                    if effect_name and effect_name in self.effects_registry:
                        effect_config = {
                            "effect": effect_name,
                            "effect_path": str(self.effects_registry[effect_name]),
                        }
                        # Copy only effect params (filter out internal fields).
                        internal_fields = (
                            "effect", "effect_path", "cid", "effect_cid",
                            "effects_registry", "analysis_refs", "inputs",
                        )
                        for k, v in config.items():
                            if k not in internal_fields:
                                effect_config[k] = v
                        effects.append(effect_config)
                        break  # One effect per source for now

            effects_per_source.append(effects)

        return effects_per_source

    def _extract_compositor_config(self, analysis_data: Optional[Dict]) -> Dict:
        """Extract compositor configuration.

        Uses the recipe's blend_multi effect node when present; otherwise
        falls back to an equal-weight alpha blend across video sources.
        `analysis_data` is currently unused here.
        """
        # Look for blend_multi or similar composition nodes.
        for node in self.compiled.nodes:
            if node.get("type") == "EFFECT":
                config = node.get("config", {})
                if config.get("effect") == "blend_multi":
                    return {
                        "mode": config.get("mode", "alpha"),
                        "weights": config.get("weights", []),
                    }

        # Default: equal blend across all video sources.
        n_sources = len([p for p in self.sources.values()
                         if p.suffix.lower() in ('.mp4', '.webm', '.mov', '.avi', '.mkv')])
        return {
            "mode": "alpha",
            "weights": [1.0 / n_sources] * n_sources if n_sources > 0 else [1.0],
        }

    def run(
        self,
        output: str = "preview",
        duration: Optional[float] = None,
        fps: Optional[float] = None,
    ):
        """
        Run the recipe through the streaming compositor.

        Everything streams: video frames read on-demand, audio analyzed in
        real-time. No pre-computation.

        Args:
            output: "preview", filename, or Output object.
            duration: Duration in seconds (default: audio duration; stays
                None in live mode, which the compositor presumably treats
                as run-until-quit — confirm).
            fps: Frame rate (default from recipe, or 30).
        """
        # Build compositor with recipe executor for full pipeline.
        from .recipe_executor import StreamingRecipeExecutor

        compositor = self.build_compositor(analysis_data={}, fps=fps)

        # Use audio duration if not specified.
        if duration is None:
            if compositor._audio_analyzer:
                duration = compositor._audio_analyzer.duration
                print(f"Using audio duration: {duration:.1f}s", file=sys.stderr)
            else:
                # Live mode - run until quit.
                print("Live mode - press 'q' to quit", file=sys.stderr)

        # Create sexp executor that interprets the recipe.
        from .sexp_executor import SexpStreamingExecutor
        executor = SexpStreamingExecutor(self.compiled, seed=42)

        compositor.run(output=output, duration=duration, recipe_executor=executor)
|
|
|
|
|
|
def run_recipe(
    recipe_path: str,
    output: str = "preview",
    duration: float = None,
    params: Dict = None,
    fps: float = None,
):
    """
    Run a recipe through streaming compositor.

    Everything streams in real-time: video frames, audio analysis.
    No pre-computation - starts immediately.

    Example:
        run_recipe("effects/quick_test.sexp", output="preview", duration=30)
        run_recipe("effects/quick_test.sexp", output="preview", fps=5)  # Lower fps for slow systems
    """
    # Thin convenience wrapper: adapt, then stream.
    RecipeAdapter(recipe_path, params=params).run(
        output=output,
        duration=duration,
        fps=fps,
    )
|
|
|
|
|
|
def run_recipe_piped(
    recipe_path: str,
    duration: Optional[float] = None,
    params: Optional[Dict] = None,
    fps: Optional[float] = None,
):
    """
    Run recipe and pipe directly to mpv.

    Args:
        recipe_path: Path to .sexp recipe file.
        duration: Duration in seconds (None = audio duration / live mode).
        params: Parameter overrides passed to the compiler.
        fps: Frame-rate override (default: recipe encoding fps, then 30).
    """
    from .output import PipeOutput

    adapter = RecipeAdapter(recipe_path, params=params)
    compositor = adapter.build_compositor(analysis_data={}, fps=fps)

    # Get frame size from the first video source; fall back to a square
    # default when there are none.
    # NOTE(review): reads private compositor attrs (_size, _audio_source) —
    # consider a public accessor on StreamingCompositor.
    if compositor.sources:
        first_source = compositor.sources[0]
        w, h = first_source._size
    else:
        w, h = 720, 720

    actual_fps = fps or adapter.compiled.encoding.get('fps', 30)

    # Create pipe output (feeds raw frames + audio to mpv).
    pipe_out = PipeOutput(
        size=(w, h),
        fps=actual_fps,
        audio_source=compositor._audio_source
    )

    # Create executor that interprets the compiled recipe per frame.
    from .sexp_executor import SexpStreamingExecutor
    executor = SexpStreamingExecutor(adapter.compiled, seed=42)

    # Run with pipe output.
    compositor.run(output=pipe_out, duration=duration, recipe_executor=executor)
|
|
|
|
|
|
if __name__ == "__main__":
|
|
import argparse
|
|
parser = argparse.ArgumentParser(description="Run sexp recipe with streaming compositor")
|
|
parser.add_argument("recipe", help="Path to .sexp recipe file")
|
|
parser.add_argument("-o", "--output", default="pipe",
|
|
help="Output: 'pipe' (mpv), 'preview', or filename (default: pipe)")
|
|
parser.add_argument("-d", "--duration", type=float, default=None,
|
|
help="Duration in seconds (default: audio duration)")
|
|
parser.add_argument("--fps", type=float, default=None,
|
|
help="Frame rate (default: from recipe)")
|
|
args = parser.parse_args()
|
|
|
|
if args.output == "pipe":
|
|
run_recipe_piped(args.recipe, duration=args.duration, fps=args.fps)
|
|
else:
|
|
run_recipe(args.recipe, output=args.output, duration=args.duration, fps=args.fps)
|