Squashed 'core/' content from commit 4957443

git-subtree-dir: core
git-subtree-split: 4957443184ae0eb6323635a90a19acffb3e01d07
This commit is contained in:
giles
2026-02-24 23:09:39 +00:00
commit cc2dcbddd4
80 changed files with 25711 additions and 0 deletions

View File

@@ -0,0 +1,55 @@
"""
Cacheable effect system.
Effects are single Python files with:
- PEP 723 embedded dependencies
- @-tag metadata in docstrings
- Frame-by-frame or whole-video API
Effects are cached by content hash (SHA3-256) and executed in
sandboxed environments for determinism.
"""
from .meta import EffectMeta, ParamSpec, ExecutionContext
from .loader import load_effect, load_effect_file, LoadedEffect, compute_cid
from .binding import (
AnalysisData,
ResolvedBinding,
resolve_binding,
resolve_all_bindings,
bindings_to_lookup_table,
has_bindings,
extract_binding_sources,
)
from .sandbox import Sandbox, SandboxConfig, SandboxResult, is_bwrap_available, get_venv_path
from .runner import run_effect, run_effect_from_cache, EffectExecutor
__all__ = [
# Meta types
"EffectMeta",
"ParamSpec",
"ExecutionContext",
# Loader
"load_effect",
"load_effect_file",
"LoadedEffect",
"compute_cid",
# Binding
"AnalysisData",
"ResolvedBinding",
"resolve_binding",
"resolve_all_bindings",
"bindings_to_lookup_table",
"has_bindings",
"extract_binding_sources",
# Sandbox
"Sandbox",
"SandboxConfig",
"SandboxResult",
"is_bwrap_available",
"get_venv_path",
# Runner
"run_effect",
"run_effect_from_cache",
"EffectExecutor",
]

311
artdag/effects/binding.py Normal file
View File

@@ -0,0 +1,311 @@
"""
Parameter binding resolution.
Resolves bind expressions to per-frame lookup tables at plan time.
Binding options:
- :range [lo hi] - map 0-1 to output range
- :smooth N - smoothing window in seconds
- :offset N - time offset in seconds
- :on-event V - value on discrete events
- :decay N - exponential decay after event
- :noise N - add deterministic noise (seeded)
- :seed N - explicit RNG seed
"""
import hashlib
import math
import random
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple
@dataclass
class AnalysisData:
    """
    Analysis data for binding resolution.

    Attributes:
        frame_rate: Video frame rate
        total_frames: Total number of frames
        features: Dict mapping feature name to per-frame values
        events: Dict mapping event name to list of frame indices
    """
    frame_rate: float
    total_frames: int
    features: Dict[str, List[float]]  # feature -> [value_per_frame]
    events: Dict[str, List[int]]  # event -> [frame_indices]

    def get_feature(self, name: str, frame: int) -> float:
        """Get the feature value at *frame*.

        Unknown features and empty series yield 0.0; a frame past the
        end of the series clamps to the last recorded value (no
        interpolation is actually performed).
        """
        if name not in self.features:
            return 0.0
        values = self.features[name]
        if not values:
            return 0.0
        if frame >= len(values):
            return values[-1]
        return values[frame]

    def get_events_in_range(
        self, name: str, start_frame: int, end_frame: int
    ) -> List[int]:
        """Return event frames f with start_frame <= f < end_frame
        (half-open range); unknown event names yield an empty list."""
        if name not in self.events:
            return []
        return [f for f in self.events[name] if start_frame <= f < end_frame]
@dataclass
class ResolvedBinding:
    """
    Resolved binding with per-frame values.

    Attributes:
        param_name: Parameter this binding applies to
        values: List of values, one per frame
    """
    param_name: str
    values: List[float]

    def get(self, frame: int) -> float:
        """Value at *frame*; clamps to the last value past the end and
        returns 0.0 when the binding is empty.

        NOTE(review): negative frames index from the end (Python list
        semantics) — presumably callers always pass frame >= 0.
        """
        if frame >= len(self.values):
            return self.values[-1] if self.values else 0.0
        return self.values[frame]
def resolve_binding(
    binding: Dict[str, Any],
    analysis: AnalysisData,
    param_name: str,
    cache_id: Optional[str] = None,
) -> ResolvedBinding:
    """
    Resolve a binding specification to per-frame values.

    Pipeline: base values (event impulses, or a continuous feature
    series padded to ``analysis.total_frames``) -> offset -> smoothing
    -> range mapping -> noise.  Order matters: range mapping assumes
    its input is normalized 0-1, and noise is added after mapping so
    its amplitude is in output units.

    Args:
        binding: Binding spec with key ``feature`` plus optional
            ``offset``, ``smooth``, ``range``, ``noise``, ``seed``,
            ``on_event``, ``decay``.
        analysis: Analysis data with features and events.
        param_name: Name of the parameter being bound.
        cache_id: Optional cache ID; when given and no explicit seed
            is set, a deterministic noise seed is derived from it.

    Returns:
        ResolvedBinding with one value per frame.

    Raises:
        ValueError: If the binding has no ``feature`` key.
    """
    feature = binding.get("feature")
    if not feature:
        raise ValueError(f"Binding for {param_name} missing feature")
    is_event = feature in analysis.events
    if is_event:
        # Event-based binding: impulses (optionally decayed) at event frames.
        on_event = binding.get("on_event", 1.0)
        decay = binding.get("decay", 0.0)
        values = _resolve_event_binding(
            analysis.events.get(feature, []),
            analysis.total_frames,
            analysis.frame_rate,
            on_event,
            decay,
        )
    else:
        # Continuous feature binding.
        feature_values = analysis.features.get(feature, [])
        if not feature_values:
            # No data for this feature: fall back to all zeros.
            values = [0.0] * analysis.total_frames
        else:
            # Pad by repeating the last value so every frame has data.
            values = list(feature_values)
            while len(values) < analysis.total_frames:
                values.append(values[-1])
    # Time offset (seconds -> frames; positive delays the signal).
    offset = binding.get("offset")
    if offset:
        offset_frames = int(offset * analysis.frame_rate)
        values = _apply_offset(values, offset_frames)
    # Moving-average smoothing (window given in seconds).
    smooth = binding.get("smooth")
    if smooth:
        window_frames = int(smooth * analysis.frame_rate)
        values = _apply_smoothing(values, window_frames)
    # Map normalized 0-1 values into [lo, hi].
    range_spec = binding.get("range")
    if range_spec:
        lo, hi = range_spec
        values = _apply_range(values, lo, hi)
    # Deterministic noise, seeded explicitly or derived from cache_id.
    noise = binding.get("noise")
    if noise:
        seed = binding.get("seed")
        if seed is None and cache_id:
            # Derive a stable 32-bit seed from the cache ID.
            seed = int(hashlib.sha256(cache_id.encode()).hexdigest()[:8], 16)
        values = _apply_noise(values, noise, seed or 0)
    return ResolvedBinding(param_name=param_name, values=values)
def _resolve_event_binding(
event_frames: List[int],
total_frames: int,
frame_rate: float,
on_event: float,
decay: float,
) -> List[float]:
"""
Resolve event-based binding with optional decay.
Args:
event_frames: List of frame indices where events occur
total_frames: Total number of frames
frame_rate: Video frame rate
on_event: Value at event
decay: Decay time constant in seconds (0 = instant)
Returns:
List of values per frame
"""
values = [0.0] * total_frames
if not event_frames:
return values
event_set = set(event_frames)
if decay <= 0:
# No decay - just mark event frames
for f in event_frames:
if 0 <= f < total_frames:
values[f] = on_event
else:
# Apply exponential decay
decay_frames = decay * frame_rate
for f in event_frames:
if f < 0 or f >= total_frames:
continue
# Apply decay from this event forward
for i in range(f, total_frames):
elapsed = i - f
decayed = on_event * math.exp(-elapsed / decay_frames)
if decayed < 0.001:
break
values[i] = max(values[i], decayed)
return values
def _apply_offset(values: List[float], offset_frames: int) -> List[float]:
"""Shift values by offset frames (positive = delay)."""
if offset_frames == 0:
return values
n = len(values)
result = [0.0] * n
for i in range(n):
src = i - offset_frames
if 0 <= src < n:
result[i] = values[src]
return result
def _apply_smoothing(values: List[float], window_frames: int) -> List[float]:
"""Apply moving average smoothing."""
if window_frames <= 1:
return values
n = len(values)
result = []
half = window_frames // 2
for i in range(n):
start = max(0, i - half)
end = min(n, i + half + 1)
avg = sum(values[start:end]) / (end - start)
result.append(avg)
return result
def _apply_range(values: List[float], lo: float, hi: float) -> List[float]:
"""Map values from 0-1 to lo-hi range."""
return [lo + v * (hi - lo) for v in values]
def _apply_noise(values: List[float], amount: float, seed: int) -> List[float]:
"""Add deterministic noise to values."""
rng = random.Random(seed)
return [v + rng.uniform(-amount, amount) for v in values]
def resolve_all_bindings(
    config: Dict[str, Any],
    analysis: AnalysisData,
    cache_id: str = None,
) -> Dict[str, ResolvedBinding]:
    """
    Resolve every binding found in a config dict.

    A config value counts as a binding when it is a dict whose
    ``_binding`` key is truthy; all other values are ignored.

    Args:
        config: Node config with potential bindings
        analysis: Analysis data
        cache_id: Cache ID for seeding

    Returns:
        Dict mapping param name to resolved binding
    """
    return {
        key: resolve_binding(value, analysis, key, cache_id)
        for key, value in config.items()
        if isinstance(value, dict) and value.get("_binding")
    }
def bindings_to_lookup_table(
    bindings: Dict[str, ResolvedBinding],
) -> Dict[str, List[float]]:
    """
    Flatten resolved bindings into plain per-frame value lists.

    The result maps param name -> list of floats and is
    JSON-serializable, so it can be embedded in execution plans.
    """
    table = {}
    for name, binding in bindings.items():
        table[name] = binding.values
    return table
def has_bindings(config: Dict[str, Any]) -> bool:
    """Return True if any config value is a binding spec (a dict with
    a truthy ``_binding`` key)."""
    return any(
        isinstance(value, dict) and value.get("_binding")
        for value in config.values()
    )
def extract_binding_sources(config: Dict[str, Any]) -> List[str]:
    """
    Collect the analysis source node IDs referenced by bindings.

    First-appearance order is preserved and duplicates are dropped.
    """
    # Dict keys double as an ordered set here.
    seen = {}
    for value in config.values():
        if not (isinstance(value, dict) and value.get("_binding")):
            continue
        source = value.get("source")
        if source:
            seen.setdefault(source, None)
    return list(seen)

View File

@@ -0,0 +1,347 @@
"""
FFmpeg pipe-based frame processing.
Processes video through Python frame-by-frame effects using FFmpeg pipes:
FFmpeg decode -> Python process_frame -> FFmpeg encode
This avoids writing intermediate frames to disk.
"""
import logging
import subprocess
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
import numpy as np
logger = logging.getLogger(__name__)
@dataclass
class VideoInfo:
    """Video metadata as reported by ffprobe."""
    width: int  # frame width in pixels
    height: int  # frame height in pixels
    frame_rate: float  # frames per second (may be fractional)
    total_frames: int  # frame count; 0 when unknown
    duration: float  # duration in seconds; 0.0 when unknown
    pixel_format: str = "rgb24"  # raw pixel layout used for piping
def probe_video(path: Path) -> VideoInfo:
    """
    Probe *path* with ffprobe and return video stream metadata.

    Args:
        path: Path to video file

    Returns:
        VideoInfo with dimensions, frame rate, etc.

    Raises:
        RuntimeError: If ffprobe fails or prints unparseable output.
    """
    cmd = [
        "ffprobe",
        "-v", "error",
        "-select_streams", "v:0",
        "-show_entries", "stream=width,height,r_frame_rate,nb_frames,duration",
        "-of", "csv=p=0",
        str(path),
    ]
    proc = subprocess.run(cmd, capture_output=True, text=True)
    if proc.returncode != 0:
        raise RuntimeError(f"ffprobe failed: {proc.stderr}")
    fields = proc.stdout.strip().split(",")
    if len(fields) < 4:
        raise RuntimeError(f"Unexpected ffprobe output: {proc.stdout}")
    width, height = int(fields[0]), int(fields[1])
    # r_frame_rate is either a rational like "30000/1001" or a bare number.
    rate_parts = fields[2].split("/")
    if len(rate_parts) == 2:
        frame_rate = float(rate_parts[0]) / float(rate_parts[1])
    else:
        frame_rate = float(rate_parts[0])
    # nb_frames and duration may be "N/A" for some containers.
    total_frames = 0
    duration = 0.0
    try:
        total_frames = int(fields[3])
    except (ValueError, IndexError):
        pass
    try:
        duration = float(fields[4]) if len(fields) > 4 else 0.0
    except (ValueError, IndexError):
        pass
    if total_frames == 0 and duration > 0:
        # Estimate the frame count when the container doesn't store it.
        total_frames = int(duration * frame_rate)
    return VideoInfo(
        width=width,
        height=height,
        frame_rate=frame_rate,
        total_frames=total_frames,
        duration=duration,
    )
# Per-frame processor signature: (frame, params, state) -> (processed_frame, new_state).
FrameProcessor = Callable[[np.ndarray, Dict[str, Any], Any], Tuple[np.ndarray, Any]]
def process_video(
    input_path: Path,
    output_path: Path,
    process_frame: FrameProcessor,
    params: Dict[str, Any],
    bindings: Optional[Dict[str, List[float]]] = None,
    initial_state: Any = None,
    pixel_format: str = "rgb24",
    output_codec: str = "libx264",
    output_options: Optional[List[str]] = None,
) -> Tuple[Path, Any]:
    """
    Process video through a frame-by-frame effect via FFmpeg pipes.

    Two FFmpeg subprocesses are used: one decodes the input to raw
    frames on stdout, the other re-encodes raw frames from stdin.
    Audio is carried over by mapping it from the original input file.

    Args:
        input_path: Input video path
        output_path: Output video path
        process_frame: Function (frame, params, state) -> (frame, state)
        params: Static parameter dict
        bindings: Per-frame parameter lookup tables (param -> values)
        initial_state: Initial state threaded through process_frame
        pixel_format: Raw pixel format for piped frame data
        output_codec: Video codec for output
        output_options: Additional ffmpeg output options

    Returns:
        Tuple of (output_path, final_state)

    Raises:
        ValueError: If process_frame changes the frame shape.
        RuntimeError: If the encoder process exits non-zero.
    """
    bindings = bindings or {}
    output_options = output_options or []
    # Probe input so raw frames can be sized and the encoder configured.
    info = probe_video(input_path)
    logger.info(f"Processing {info.width}x{info.height} @ {info.frame_rate}fps")
    # Bytes per raw frame depend on the pixel format.
    if pixel_format == "rgb24":
        bytes_per_pixel = 3
    elif pixel_format == "rgba":
        bytes_per_pixel = 4
    else:
        bytes_per_pixel = 3  # Default to RGB; other formats unsupported here
    frame_size = info.width * info.height * bytes_per_pixel
    # Decoder: input file -> raw frames on stdout.
    decode_cmd = [
        "ffmpeg",
        "-i", str(input_path),
        "-f", "rawvideo",
        "-pix_fmt", pixel_format,
        "-",
    ]
    # Encoder: raw frames on stdin -> encoded output; the second input
    # re-reads the original file so its audio ("1:a?" = optional) is kept.
    encode_cmd = [
        "ffmpeg",
        "-y",
        "-f", "rawvideo",
        "-pix_fmt", pixel_format,
        "-s", f"{info.width}x{info.height}",
        "-r", str(info.frame_rate),
        "-i", "-",
        "-i", str(input_path),  # For audio
        "-map", "0:v",
        "-map", "1:a?",
        "-c:v", output_codec,
        "-c:a", "aac",
        *output_options,
        str(output_path),
    ]
    logger.debug(f"Decoder: {' '.join(decode_cmd)}")
    logger.debug(f"Encoder: {' '.join(encode_cmd)}")
    decoder = subprocess.Popen(
        decode_cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL,
    )
    # NOTE(review): encoder stderr is a pipe that is only drained after
    # exit; a very chatty encoder could fill the pipe buffer and stall —
    # confirm ffmpeg's stderr volume stays small here.
    encoder = subprocess.Popen(
        encode_cmd,
        stdin=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    state = initial_state
    frame_idx = 0
    try:
        while True:
            # Read one raw frame; a short read means end of stream.
            raw_frame = decoder.stdout.read(frame_size)
            if len(raw_frame) < frame_size:
                break
            # View the bytes as an HxWxC uint8 array.
            frame = np.frombuffer(raw_frame, dtype=np.uint8)
            frame = frame.reshape((info.height, info.width, bytes_per_pixel))
            # Overlay per-frame bound values onto the static params.
            frame_params = dict(params)
            for param_name, values in bindings.items():
                if frame_idx < len(values):
                    frame_params[param_name] = values[frame_idx]
            # Run the effect for this frame, threading state through.
            processed, state = process_frame(frame, frame_params, state)
            # The effect must not change the frame geometry.
            if processed.shape != frame.shape:
                raise ValueError(
                    f"Frame shape mismatch: {processed.shape} vs {frame.shape}"
                )
            processed = processed.astype(np.uint8)
            # Write to encoder
            encoder.stdin.write(processed.tobytes())
            frame_idx += 1
            if frame_idx % 100 == 0:
                logger.debug(f"Processed frame {frame_idx}")
    except Exception as e:
        logger.error(f"Frame processing failed at frame {frame_idx}: {e}")
        raise
    finally:
        # Close pipes in decode -> encode order so the encoder only sees
        # EOF after the last processed frame was written.
        decoder.stdout.close()
        decoder.wait()
        encoder.stdin.close()
        encoder.wait()
    if encoder.returncode != 0:
        stderr = encoder.stderr.read().decode() if encoder.stderr else ""
        raise RuntimeError(f"Encoder failed: {stderr}")
    logger.info(f"Processed {frame_idx} frames")
    return output_path, state
def process_video_batch(
    input_path: Path,
    output_path: Path,
    process_frames: Callable[[List[np.ndarray], Dict[str, Any]], List[np.ndarray]],
    params: Dict[str, Any],
    batch_size: int = 30,
    pixel_format: str = "rgb24",
    output_codec: str = "libx264",
) -> Path:
    """
    Process video in batches for effects that need temporal context.

    Same pipe layout as process_video, but frames are buffered and
    handed to the effect *batch_size* at a time; the final partial
    batch is flushed at end of stream.

    Args:
        input_path: Input video path
        output_path: Output video path
        process_frames: Function (frames_batch, params) -> processed_batch
        params: Parameter dict
        batch_size: Number of frames per batch
        pixel_format: Pixel format
        output_codec: Output codec

    Returns:
        Output path

    Raises:
        RuntimeError: If the encoder process exits non-zero.
    """
    info = probe_video(input_path)
    # Bytes per raw frame depend on the pixel format.
    if pixel_format == "rgb24":
        bytes_per_pixel = 3
    elif pixel_format == "rgba":
        bytes_per_pixel = 4
    else:
        bytes_per_pixel = 3
    frame_size = info.width * info.height * bytes_per_pixel
    # Decoder: input file -> raw frames on stdout.
    decode_cmd = [
        "ffmpeg",
        "-i", str(input_path),
        "-f", "rawvideo",
        "-pix_fmt", pixel_format,
        "-",
    ]
    # Encoder: raw frames on stdin; second input keeps the original audio.
    encode_cmd = [
        "ffmpeg",
        "-y",
        "-f", "rawvideo",
        "-pix_fmt", pixel_format,
        "-s", f"{info.width}x{info.height}",
        "-r", str(info.frame_rate),
        "-i", "-",
        "-i", str(input_path),
        "-map", "0:v",
        "-map", "1:a?",
        "-c:v", output_codec,
        "-c:a", "aac",
        str(output_path),
    ]
    decoder = subprocess.Popen(
        decode_cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL,
    )
    # NOTE(review): encoder stderr is only drained after exit — confirm
    # ffmpeg's stderr volume stays small enough not to fill the pipe.
    encoder = subprocess.Popen(
        encode_cmd,
        stdin=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    batch = []
    total_processed = 0
    try:
        while True:
            raw_frame = decoder.stdout.read(frame_size)
            if len(raw_frame) < frame_size:
                # End of stream: flush the final partial batch.
                if batch:
                    processed = process_frames(batch, params)
                    for frame in processed:
                        encoder.stdin.write(frame.astype(np.uint8).tobytes())
                        total_processed += 1
                break
            frame = np.frombuffer(raw_frame, dtype=np.uint8)
            frame = frame.reshape((info.height, info.width, bytes_per_pixel))
            batch.append(frame)
            # Full batch: process and emit, then start a new one.
            if len(batch) >= batch_size:
                processed = process_frames(batch, params)
                for frame in processed:
                    encoder.stdin.write(frame.astype(np.uint8).tobytes())
                    total_processed += 1
                batch = []
    finally:
        # Close decode side first so the encoder sees EOF last.
        decoder.stdout.close()
        decoder.wait()
        encoder.stdin.close()
        encoder.wait()
    if encoder.returncode != 0:
        stderr = encoder.stderr.read().decode() if encoder.stderr else ""
        raise RuntimeError(f"Encoder failed: {stderr}")
    logger.info(f"Processed {total_processed} frames in batches of {batch_size}")
    return output_path

455
artdag/effects/loader.py Normal file
View File

@@ -0,0 +1,455 @@
"""
Effect file loader.
Parses effect files with:
- PEP 723 inline script metadata for dependencies
- @-tag docstrings for effect metadata
- META object for programmatic access
"""
import ast
import hashlib
import re
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
from .meta import EffectMeta, ParamSpec
@dataclass
class LoadedEffect:
    """
    A loaded effect with all metadata.

    Attributes:
        source: Original source code
        cid: Content ID - SHA3-256 hex digest of the source
        meta: Extracted EffectMeta
        dependencies: List of pip dependencies (from the PEP 723 block)
        requires_python: Python version requirement
        module: Compiled module object, or None until actually loaded
    """
    source: str
    cid: str
    meta: EffectMeta
    dependencies: List[str] = field(default_factory=list)
    requires_python: str = ">=3.10"
    module: Any = None

    def has_frame_api(self) -> bool:
        """True if the effect exposes the frame-by-frame API."""
        return self.meta.api_type == "frame"

    def has_video_api(self) -> bool:
        """True if the effect exposes the whole-video API."""
        return self.meta.api_type == "video"
def compute_cid(source: str) -> str:
    """Content ID of an effect: hex SHA3-256 digest of its UTF-8 source."""
    digest = hashlib.sha3_256(source.encode("utf-8"))
    return digest.hexdigest()
def parse_pep723_metadata(source: str) -> Tuple[List[str], str]:
    """
    Parse PEP 723 inline script metadata from effect source.

    Recognizes a comment block of the form:

        # /// script
        # requires-python = ">=3.10"
        # dependencies = ["numpy", "opencv-python"]
        # ///

    Returns:
        Tuple of (dependencies list, requires_python string); defaults
        to ([], ">=3.10") when no block or no matching entries exist.
    """
    default_python = ">=3.10"
    block_match = re.search(r"# /// script\n(.*?)# ///", source, re.DOTALL)
    if block_match is None:
        return [], default_python
    block = block_match.group(1)
    deps: List[str] = []
    deps_match = re.search(r'# dependencies = \[(.*?)\]', block, re.DOTALL)
    if deps_match:
        # Pull each double-quoted entry out of the list literal.
        deps = re.findall(r'"([^"]+)"', deps_match.group(1))
    python_match = re.search(r'# requires-python = "([^"]+)"', block)
    requires = python_match.group(1) if python_match else default_python
    return deps, requires
def parse_docstring_metadata(docstring: str) -> Dict[str, Any]:
    """
    Parse @-tag metadata from an effect docstring.

    Supports:
        @effect name
        @version 1.0.0
        @author @user@domain
        @temporal false
        @description
        Multi-line description text.
        @param name type
        @range lo hi
        @default value
        Description text.
        @example
        (fx effect :param value)

    Returns:
        Dictionary with keys: name, version, author, temporal,
        description, params (list of dicts), examples (list of str).
        An empty/None docstring yields {}.
    """
    if not docstring:
        return {}
    result = {
        "name": "",
        "version": "1.0.0",
        "author": "",
        "temporal": False,
        "description": "",
        "params": [],
        "examples": [],
    }
    lines = docstring.strip().split("\n")
    i = 0
    current_param = None
    while i < len(lines):
        line = lines[i].strip()
        if line.startswith("@effect "):
            result["name"] = line[8:].strip()
        elif line.startswith("@version "):
            result["version"] = line[9:].strip()
        elif line.startswith("@author "):
            result["author"] = line[8:].strip()
        elif line.startswith("@temporal "):
            val = line[10:].strip().lower()
            result["temporal"] = val in ("true", "yes", "1")
        elif line.startswith("@description"):
            # Collect lines until the next @-tag.
            desc_lines = []
            i += 1
            while i < len(lines):
                next_line = lines[i]
                if next_line.strip().startswith("@"):
                    i -= 1  # Back up so the outer loop processes the tag
                    break
                desc_lines.append(next_line)
                i += 1
            result["description"] = "\n".join(desc_lines).strip()
        elif line.startswith("@param "):
            # Parse parameter: @param name type
            parts = line[7:].split()
            if len(parts) >= 2:
                current_param = {
                    "name": parts[0],
                    "type": parts[1],
                    "range": None,
                    "default": None,
                    "description": "",
                }
                # Collect @range/@default and free-text description lines
                # until the next top-level tag.
                desc_lines = []
                i += 1
                while i < len(lines):
                    next_line = lines[i]
                    stripped = next_line.strip()
                    if stripped.startswith("@range "):
                        range_parts = stripped[7:].split()
                        if len(range_parts) >= 2:
                            try:
                                current_param["range"] = (
                                    float(range_parts[0]),
                                    float(range_parts[1]),
                                )
                            except ValueError:
                                pass  # malformed range: ignore, keep None
                    elif stripped.startswith("@default "):
                        current_param["default"] = stripped[9:].strip()
                    elif stripped.startswith("@"):
                        i -= 1  # Back up: a new tag ends this param block
                        break
                    elif stripped:
                        desc_lines.append(stripped)
                    i += 1
                current_param["description"] = " ".join(desc_lines)
                result["params"].append(current_param)
                current_param = None
        elif line.startswith("@example"):
            # Collect example lines until the next @-tag (including another
            # @example).  Fixes a bug where the break conditions were
            # self-contradictory, so tags following an example were
            # swallowed into the example text.
            example_lines = []
            i += 1
            while i < len(lines):
                next_line = lines[i]
                if next_line.strip().startswith("@"):
                    i -= 1  # Back up so the outer loop processes the tag
                    break
                example_lines.append(next_line)
                i += 1
            example = "\n".join(example_lines).strip()
            if example:
                result["examples"].append(example)
        i += 1
    return result
def extract_meta_from_ast(source: str) -> Optional[Dict[str, Any]]:
    """
    Extract the module-level ``META = EffectMeta(...)`` assignment.

    Returns:
        The call's literal keyword arguments as a dict, or None when
        the source does not parse, has no META assignment, or META is
        not assigned from a call.
    """
    try:
        tree = ast.parse(source)
    except SyntaxError:
        return None
    for node in ast.walk(tree):
        if isinstance(node, ast.Assign):
            for target in node.targets:
                if isinstance(target, ast.Name) and target.id == "META":
                    if isinstance(node.value, ast.Call):
                        return _extract_call_kwargs(node.value)
    return None


def _extract_call_kwargs(call: ast.Call) -> Dict[str, Any]:
    """Extract literal keyword arguments from an AST Call node.

    Keywords whose values are not supported literals, and ``**kwargs``
    splats, are silently skipped.
    """
    result = {}
    for keyword in call.keywords:
        if keyword.arg is None:
            continue  # **kwargs splat - nothing to extract statically
        value = _ast_to_value(keyword.value)
        if value is not None:
            result[keyword.arg] = value
    return result


def _ast_to_value(node: ast.expr) -> Any:
    """Convert a literal AST node to a Python value (None if unsupported).

    Since Python 3.8 all literals parse as ast.Constant, so the old
    ast.Str/ast.Num/ast.NameConstant compat branches were dead code on
    the supported versions (requires-python >= 3.10) and are deprecated
    aliases slated for removal; they have been dropped.
    """
    if isinstance(node, ast.Constant):
        return node.value
    elif isinstance(node, ast.List):
        return [_ast_to_value(elt) for elt in node.elts]
    elif isinstance(node, ast.Tuple):
        return tuple(_ast_to_value(elt) for elt in node.elts)
    elif isinstance(node, ast.Dict):
        return {
            _ast_to_value(k): _ast_to_value(v)
            for k, v in zip(node.keys, node.values)
            if k is not None
        }
    elif isinstance(node, ast.Call):
        # ParamSpec(...) calls appear inside META's params list.
        if isinstance(node.func, ast.Name) and node.func.id == "ParamSpec":
            return _extract_call_kwargs(node)
    return None
def get_module_docstring(source: str) -> str:
    """Return the module-level docstring of *source*, or "" when there
    is none or the source does not parse.

    Uses ast.get_docstring instead of the previous hand-rolled check,
    which relied on the deprecated ast.Str alias and could return a
    non-string for a bare literal first statement.  clean=False keeps
    the raw (un-dedented) text, matching the old behavior.
    """
    try:
        tree = ast.parse(source)
    except SyntaxError:
        return ""
    return ast.get_docstring(tree, clean=False) or ""
def load_effect(source: str) -> LoadedEffect:
    """
    Load an effect from source code.

    Parses:
        1. PEP 723 metadata for dependencies
        2. Module docstring for @-tag metadata
        3. META object for programmatic metadata

    Priority: META object > docstring > defaults.  Note that when a
    META object exists its values win per field; a field missing from
    META falls back to the hard default (via the isinstance sanitizing
    below), not to the docstring value.

    Args:
        source: Effect source code

    Returns:
        LoadedEffect with all metadata

    Raises:
        ValueError: If effect is invalid (has no name)
    """
    cid = compute_cid(source)
    # 1. PEP 723 metadata
    dependencies, requires_python = parse_pep723_metadata(source)
    # 2. Docstring @-tags
    docstring = get_module_docstring(source)
    doc_meta = parse_docstring_metadata(docstring)
    # 3. META = EffectMeta(...) literal, if present
    ast_meta = extract_meta_from_ast(source)
    # Name is the only required field; META wins over docstring.
    name = ""
    if ast_meta and "name" in ast_meta:
        name = ast_meta["name"]
    elif doc_meta.get("name"):
        name = doc_meta["name"]
    if not name:
        raise ValueError("Effect must have a name (@effect or META.name)")
    # When META exists these may be None (key absent); the isinstance
    # checks when building EffectMeta below substitute hard defaults.
    version = ast_meta.get("version") if ast_meta else doc_meta.get("version", "1.0.0")
    temporal = ast_meta.get("temporal") if ast_meta else doc_meta.get("temporal", False)
    author = ast_meta.get("author") if ast_meta else doc_meta.get("author", "")
    description = ast_meta.get("description") if ast_meta else doc_meta.get("description", "")
    examples = ast_meta.get("examples") if ast_meta else doc_meta.get("examples", [])
    # Build params from META's ParamSpec kwarg dicts, else docstring @param tags.
    params = []
    if ast_meta and "params" in ast_meta:
        for p in ast_meta["params"]:
            if isinstance(p, dict):
                type_map = {"float": float, "int": int, "bool": bool, "str": str}
                param_type = type_map.get(p.get("param_type", "float"), float)
                # META may carry an actual type object rather than a name.
                if isinstance(p.get("param_type"), type):
                    param_type = p["param_type"]
                params.append(
                    ParamSpec(
                        name=p.get("name", ""),
                        param_type=param_type,
                        default=p.get("default"),
                        range=p.get("range"),
                        description=p.get("description", ""),
                    )
                )
    elif doc_meta.get("params"):
        for p in doc_meta["params"]:
            type_map = {"float": float, "int": int, "bool": bool, "str": str}
            param_type = type_map.get(p.get("type", "float"), float)
            default = p.get("default")
            if default is not None:
                # Docstring defaults are strings; coerce to the declared
                # type, keeping the string on failure.
                try:
                    default = param_type(default)
                except (ValueError, TypeError):
                    pass
            params.append(
                ParamSpec(
                    name=p["name"],
                    param_type=param_type,
                    default=default,
                    range=p.get("range"),
                    description=p.get("description", ""),
                )
            )
    # Determine API type: first process/process_frame definition found in
    # an AST walk wins; default is "frame".
    api_type = "frame"  # default
    try:
        tree = ast.parse(source)
        for node in ast.walk(tree):
            if isinstance(node, ast.FunctionDef):
                if node.name == "process":
                    api_type = "video"
                    break
                elif node.name == "process_frame":
                    api_type = "frame"
                    break
    except SyntaxError:
        pass
    meta = EffectMeta(
        name=name,
        version=version if isinstance(version, str) else "1.0.0",
        temporal=bool(temporal),
        params=params,
        author=author if isinstance(author, str) else "",
        description=description if isinstance(description, str) else "",
        examples=examples if isinstance(examples, list) else [],
        dependencies=dependencies,
        requires_python=requires_python,
        api_type=api_type,
    )
    return LoadedEffect(
        source=source,
        cid=cid,
        meta=meta,
        dependencies=dependencies,
        requires_python=requires_python,
    )
def load_effect_file(path: Path) -> LoadedEffect:
    """Read an effect from *path* (UTF-8) and load it."""
    return load_effect(path.read_text(encoding="utf-8"))
def compute_deps_hash(dependencies: List[str]) -> str:
    """
    Hash a dependency list, insensitive to order, case, and surrounding
    whitespace.

    Two dependency sets with the same hash can reuse a cached venv.
    """
    normalized = sorted(dep.lower().strip() for dep in dependencies)
    joined = "\n".join(normalized)
    return hashlib.sha3_256(joined.encode("utf-8")).hexdigest()

247
artdag/effects/meta.py Normal file
View File

@@ -0,0 +1,247 @@
"""
Effect metadata types.
Defines the core dataclasses for effect metadata:
- ParamSpec: Parameter specification with type, range, and default
- EffectMeta: Complete effect metadata including params and flags
"""
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Tuple, Type, Union
@dataclass
class ParamSpec:
    """
    Specification for one effect parameter.

    Attributes:
        name: Parameter name (used in recipes as :name)
        param_type: Python type (float, int, bool, str)
        default: Default value if not specified
        range: Optional (min, max) tuple for numeric types
        description: Human-readable description
        choices: Optional list of allowed values (for enums)
    """
    name: str
    param_type: Type
    default: Any = None
    range: Optional[Tuple[float, float]] = None
    description: str = ""
    choices: Optional[List[Any]] = None

    def validate(self, value: Any) -> Any:
        """
        Validate and coerce *value* for this parameter.

        None falls back to the default (or raises when there is none)
        without further checks.  Otherwise the value is coerced to
        param_type, then checked against the numeric range and the
        choices list when those are set.

        Raises:
            ValueError: On missing value, failed coercion,
                out-of-range value, or value not in choices.
        """
        if value is None:
            if self.default is None:
                raise ValueError(f"Parameter '{self.name}' requires a value")
            return self.default
        value = self._coerce(value)
        self._check_range(value)
        self._check_choices(value)
        return value

    def _coerce(self, value: Any) -> Any:
        """Coerce value to param_type; bool accepts truthy strings."""
        try:
            if self.param_type == bool:
                if isinstance(value, str):
                    return value.lower() in ("true", "1", "yes")
                return bool(value)
            if self.param_type == int:
                return int(value)
            if self.param_type == float:
                return float(value)
            if self.param_type == str:
                return str(value)
            return self.param_type(value)
        except (ValueError, TypeError) as e:
            raise ValueError(
                f"Parameter '{self.name}' expects {self.param_type.__name__}, "
                f"got {type(value).__name__}: {e}"
            )

    def _check_range(self, value: Any) -> None:
        """Enforce the (min, max) range for numeric parameter types."""
        if self.range is None or self.param_type not in (int, float):
            return
        min_val, max_val = self.range
        if value < min_val or value > max_val:
            raise ValueError(
                f"Parameter '{self.name}' must be in range "
                f"[{min_val}, {max_val}], got {value}"
            )

    def _check_choices(self, value: Any) -> None:
        """Enforce membership in choices when a choices list is set."""
        if self.choices is not None and value not in self.choices:
            raise ValueError(
                f"Parameter '{self.name}' must be one of {self.choices}, got {value}"
            )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, omitting unset optional fields."""
        d: Dict[str, Any] = {
            "name": self.name,
            "type": self.param_type.__name__,
            "description": self.description,
        }
        if self.default is not None:
            d["default"] = self.default
        if self.range is not None:
            d["range"] = list(self.range)
        if self.choices is not None:
            d["choices"] = self.choices
        return d
@dataclass
class EffectMeta:
    """
    Complete metadata for an effect.

    Attributes:
        name: Effect name (used in recipes)
        version: Semantic version string
        temporal: If True, effect needs complete input (can't be collapsed)
        params: List of parameter specifications
        author: Optional author identifier
        description: Human-readable description
        examples: List of example S-expression usages
        dependencies: List of Python package dependencies
        requires_python: Minimum Python version
        api_type: "frame" for frame-by-frame, "video" for whole-video
    """
    name: str
    version: str = "1.0.0"
    temporal: bool = False
    params: List[ParamSpec] = field(default_factory=list)
    author: str = ""
    description: str = ""
    examples: List[str] = field(default_factory=list)
    dependencies: List[str] = field(default_factory=list)
    requires_python: str = ">=3.10"
    api_type: str = "frame"  # "frame" or "video"

    def get_param(self, name: str) -> Optional[ParamSpec]:
        """Return the spec named *name*, or None if unknown."""
        return next((p for p in self.params if p.name == name), None)

    def validate_params(self, params: Dict[str, Any]) -> Dict[str, Any]:
        """
        Validate every declared parameter against *params*.

        Missing entries are filled from each spec's default.

        Raises:
            ValueError: If any parameter fails validation.
        """
        return {
            spec.name: spec.validate(params.get(spec.name))
            for spec in self.params
        }

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain, JSON-friendly dict."""
        return {
            "name": self.name,
            "version": self.version,
            "temporal": self.temporal,
            "params": [p.to_dict() for p in self.params],
            "author": self.author,
            "description": self.description,
            "examples": self.examples,
            "dependencies": self.dependencies,
            "requires_python": self.requires_python,
            "api_type": self.api_type,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "EffectMeta":
        """Reconstruct an EffectMeta from a dict produced by to_dict."""
        # Map serialized type names back to Python types.
        type_map = {"float": float, "int": int, "bool": bool, "str": str}
        specs = [
            ParamSpec(
                name=p["name"],
                param_type=type_map.get(p.get("type", "float"), float),
                default=p.get("default"),
                range=tuple(p["range"]) if p.get("range") else None,
                description=p.get("description", ""),
                choices=p.get("choices"),
            )
            for p in data.get("params", [])
        ]
        return cls(
            name=data["name"],
            version=data.get("version", "1.0.0"),
            temporal=data.get("temporal", False),
            params=specs,
            author=data.get("author", ""),
            description=data.get("description", ""),
            examples=data.get("examples", []),
            dependencies=data.get("dependencies", []),
            requires_python=data.get("requires_python", ">=3.10"),
            api_type=data.get("api_type", "frame"),
        )
@dataclass
class ExecutionContext:
    """
    Context passed to effect execution.

    Provides controlled access to resources within the sandbox.
    """
    input_paths: List[str]  # input media file paths
    output_path: str  # where the effect writes its result
    params: Dict[str, Any]  # static parameter values
    seed: int  # Deterministic seed for RNG
    frame_rate: float = 30.0
    width: int = 1920
    height: int = 1080
    # Resolved bindings: param name -> per-frame value lookup table.
    bindings: Dict[str, List[float]] = field(default_factory=dict)

    def get_param_at_frame(self, param_name: str, frame: int) -> Any:
        """
        Value of *param_name* at *frame*.

        Bound parameters use their lookup table, clamping to the last
        entry past the end; unbound (or empty-table) parameters fall
        back to the static value.
        """
        if param_name not in self.bindings:
            return self.params.get(param_name)
        table = self.bindings[param_name]
        if frame < len(table):
            return table[frame]
        return table[-1] if table else self.params.get(param_name)

    def get_rng(self) -> "random.Random":
        """Return a fresh RNG seeded with this context's seed."""
        import random

        return random.Random(self.seed)

259
artdag/effects/runner.py Normal file
View File

@@ -0,0 +1,259 @@
"""
Effect runner.
Main entry point for executing cached effects with sandboxing.
"""
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional
from .binding import AnalysisData, bindings_to_lookup_table, resolve_all_bindings
from .loader import load_effect, LoadedEffect
from .meta import ExecutionContext
from .sandbox import Sandbox, SandboxConfig, SandboxResult, get_venv_path
logger = logging.getLogger(__name__)
def run_effect(
    effect_source: str,
    input_paths: List[Path],
    output_path: Path,
    params: Dict[str, Any],
    analysis: Optional[AnalysisData] = None,
    cache_id: Optional[str] = None,
    seed: int = 0,
    trust_level: str = "untrusted",
    timeout: int = 3600,
) -> SandboxResult:
    """
    Run an effect with full sandboxing.

    This is the main entry point for effect execution.

    Args:
        effect_source: Effect source code
        input_paths: List of input file paths
        output_path: Output file path
        params: Effect parameters (may contain bindings)
        analysis: Optional analysis data for binding resolution
        cache_id: Cache ID for deterministic seeding
        seed: RNG seed (overrides cache_id-based seed)
        trust_level: "untrusted" or "trusted"
        timeout: Maximum execution time in seconds

    Returns:
        SandboxResult with success status and output
    """
    # Load and validate effect
    loaded = load_effect(effect_source)
    logger.info(f"Running effect '{loaded.meta.name}' v{loaded.meta.version}")

    # Resolve bindings to per-frame lookup tables when analysis data is available
    bindings = {}
    if analysis:
        resolved = resolve_all_bindings(params, analysis, cache_id)
        bindings = bindings_to_lookup_table(resolved)
        # Drop raw binding dicts from params: their resolved values travel
        # separately in the bindings lookup table.
        params = {
            k: v for k, v in params.items()
            if not (isinstance(v, dict) and v.get("_binding"))
        }

    # Validate/coerce parameters against the effect's declared specs
    validated_params = loaded.meta.validate_params(params)

    # Get or create a cached venv for the effect's embedded dependencies
    venv_path = None
    if loaded.dependencies:
        venv_path = get_venv_path(loaded.dependencies)

    # Configure sandbox
    config = SandboxConfig(
        trust_level=trust_level,
        venv_path=venv_path,
        timeout=timeout,
    )

    # Materialize the effect source to a temp file for the sandbox.
    # Encoding is pinned to utf-8 so non-ASCII source survives regardless
    # of the platform's default encoding.
    import tempfile
    with tempfile.NamedTemporaryFile(
        mode="w",
        suffix=".py",
        delete=False,
        encoding="utf-8",
    ) as f:
        f.write(effect_source)
        effect_path = Path(f.name)

    try:
        with Sandbox(config) as sandbox:
            result = sandbox.run_effect(
                effect_path=effect_path,
                input_paths=input_paths,
                output_path=output_path,
                params=validated_params,
                bindings=bindings,
                seed=seed,
            )
    finally:
        # Always remove the temp effect file, even on sandbox failure.
        effect_path.unlink(missing_ok=True)
    return result
def run_effect_from_cache(
    cache,
    effect_hash: str,
    input_paths: List[Path],
    output_path: Path,
    params: Dict[str, Any],
    analysis: Optional[AnalysisData] = None,
    cache_id: Optional[str] = None,
    seed: int = 0,
    trust_level: str = "untrusted",
    timeout: int = 3600,
) -> SandboxResult:
    """
    Run an effect from cache by content hash.

    Args:
        cache: Cache instance (must provide get_effect(hash))
        effect_hash: Content hash of effect
        input_paths: Input file paths
        output_path: Output file path
        params: Effect parameters
        analysis: Optional analysis data
        cache_id: Cache ID for seeding
        seed: RNG seed
        trust_level: "untrusted" or "trusted"
        timeout: Max execution time

    Returns:
        SandboxResult; a failed result (not an exception) when the hash
        is unknown to the cache.
    """
    effect_source = cache.get_effect(effect_hash)
    if not effect_source:
        # Report a missing effect as a failed result rather than raising,
        # matching the sandbox's error-reporting style.
        return SandboxResult(
            success=False,
            error=f"Effect not found in cache: {effect_hash[:16]}...",
        )
    return run_effect(
        effect_source=effect_source,
        input_paths=input_paths,
        output_path=output_path,
        params=params,
        analysis=analysis,
        cache_id=cache_id,
        seed=seed,
        trust_level=trust_level,
        timeout=timeout,
    )
def check_effect_temporal(cache, effect_hash: str) -> bool:
    """
    Report whether a cached effect is temporal (cannot be collapsed).

    Args:
        cache: Cache instance
        effect_hash: Content hash of effect

    Returns:
        True if the effect's metadata marks it temporal; False otherwise,
        including when the hash is unknown to the cache.
    """
    record = cache.get_effect_metadata(effect_hash)
    if not record:
        return False
    return record.get("meta", {}).get("temporal", False)
def get_effect_api_type(cache, effect_hash: str) -> str:
    """
    Look up the API type of a cached effect.

    Args:
        cache: Cache instance
        effect_hash: Content hash of effect

    Returns:
        "frame" or "video"; defaults to "frame" when the effect is
        unknown or its metadata omits the field.
    """
    record = cache.get_effect_metadata(effect_hash)
    if not record:
        return "frame"
    return record.get("meta", {}).get("api_type", "frame")
class EffectExecutor:
    """
    Executor for cached effects.

    Provides a higher-level interface for effect execution: resolves the
    per-effect trust level from cache metadata, then delegates to
    run_effect_from_cache().
    """

    def __init__(self, cache, trust_level: str = "untrusted"):
        """
        Initialize executor.

        Args:
            cache: Cache instance
            trust_level: Default trust level applied when the effect's
                metadata does not override it
        """
        self.cache = cache
        self.trust_level = trust_level

    def execute(
        self,
        effect_hash: str,
        input_paths: List[Path],
        output_path: Path,
        params: Dict[str, Any],
        analysis: Optional[AnalysisData] = None,
        step_cache_id: Optional[str] = None,
    ) -> SandboxResult:
        """
        Execute an effect.

        Args:
            effect_hash: Content hash of effect
            input_paths: Input file paths
            output_path: Output path
            params: Effect parameters
            analysis: Analysis data for bindings
            step_cache_id: Step cache ID for seeding

        Returns:
            SandboxResult
        """
        # Effect metadata may escalate trust: an L1 owner can mark an
        # effect as trusted, relaxing sandbox isolation for it.
        metadata = self.cache.get_effect_metadata(effect_hash)
        trust_level = self.trust_level
        if metadata and metadata.get("trust_level") == "trusted":
            trust_level = "trusted"
        return run_effect_from_cache(
            cache=self.cache,
            effect_hash=effect_hash,
            input_paths=input_paths,
            output_path=output_path,
            params=params,
            analysis=analysis,
            cache_id=step_cache_id,
            trust_level=trust_level,
        )

    def is_temporal(self, effect_hash: str) -> bool:
        """Check if effect is temporal (delegates to check_effect_temporal)."""
        return check_effect_temporal(self.cache, effect_hash)

    def get_api_type(self, effect_hash: str) -> str:
        """Get effect API type ("frame" or "video")."""
        return get_effect_api_type(self.cache, effect_hash)

431
artdag/effects/sandbox.py Normal file
View File

@@ -0,0 +1,431 @@
"""
Sandbox for effect execution.
Uses bubblewrap (bwrap) for Linux namespace isolation.
Provides controlled access to:
- Input files (read-only)
- Output file (write)
- stderr (logging)
- Seeded RNG
"""
import hashlib
import json
import logging
import os
import shutil
import subprocess
import sys
import tempfile
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional
logger = logging.getLogger(__name__)
@dataclass
class SandboxConfig:
    """
    Sandbox configuration.

    Attributes:
        trust_level: "untrusted" (full isolation) or "trusted" (allows subprocess)
        venv_path: Path to effect's virtual environment
        wheel_cache: Shared wheel cache directory
        timeout: Maximum execution time in seconds
        memory_limit: Memory limit in bytes (0 = unlimited)
        allow_network: Whether to allow network access
    """
    trust_level: str = "untrusted"
    venv_path: Optional[Path] = None
    wheel_cache: Path = field(default_factory=lambda: Path("/var/cache/artdag/wheels"))
    timeout: int = 3600  # 1 hour default
    # NOTE(review): memory_limit is not read by the Sandbox implementation
    # in this file — confirm whether enforcement is pending or elsewhere.
    memory_limit: int = 0
    # NOTE(review): allow_network is likewise never consulted here; the
    # bwrap path always passes --unshare-net — confirm intended.
    allow_network: bool = False
def is_bwrap_available() -> bool:
    """Return True when the bubblewrap (bwrap) binary can be invoked."""
    try:
        proc = subprocess.run(
            ["bwrap", "--version"],
            capture_output=True,
            text=True,
        )
    except FileNotFoundError:
        # bwrap is not installed / not on PATH
        return False
    return proc.returncode == 0
def get_venv_path(dependencies: List[str], cache_dir: Optional[Path] = None) -> Path:
    """
    Get or create a venv for the given dependencies.

    The venv is keyed by a SHA3-256 hash of the normalized (lowercased,
    stripped, sorted) dependency list, so identical dependency sets share
    one cached environment.

    Args:
        dependencies: List of pip package specifiers
        cache_dir: Base directory for venv cache
            (defaults to /var/cache/artdag/venvs)

    Returns:
        Path to venv directory

    Raises:
        subprocess.CalledProcessError: If venv creation or pip install fails.
    """
    cache_dir = cache_dir or Path("/var/cache/artdag/venvs")
    cache_dir.mkdir(parents=True, exist_ok=True)

    # Deterministic cache key from the normalized dependency list
    sorted_deps = sorted(dep.lower().strip() for dep in dependencies)
    deps_str = "\n".join(sorted_deps)
    deps_hash = hashlib.sha3_256(deps_str.encode()).hexdigest()[:16]
    venv_path = cache_dir / deps_hash
    if venv_path.exists():
        logger.debug(f"Reusing venv at {venv_path}")
        return venv_path

    # Create new venv with the interpreter actually running this process:
    # a bare "python" may be absent from PATH (many systems ship only
    # "python3"), while sys.executable is always valid.
    logger.info(f"Creating venv for {len(dependencies)} deps at {venv_path}")
    subprocess.run(
        [sys.executable, "-m", "venv", str(venv_path)],
        check=True,
    )

    # Install dependencies, sharing the wheel cache across venvs
    pip_path = venv_path / "bin" / "pip"
    wheel_cache = Path("/var/cache/artdag/wheels")
    if dependencies:
        cmd = [
            str(pip_path),
            "install",
            "--cache-dir", str(wheel_cache),
            *dependencies,
        ]
        subprocess.run(cmd, check=True)
    return venv_path
@dataclass
class SandboxResult:
    """Result of sandboxed execution."""
    success: bool  # True when the effect exited 0 AND the output file exists
    output_path: Optional[Path] = None  # set on success only
    stderr: str = ""  # captured effect stderr (the effect's logging channel)
    exit_code: int = 0  # process exit code; -1 for timeout or launch failure
    error: Optional[str] = None  # human-readable failure description
class Sandbox:
    """
    Sandboxed effect execution environment.

    Uses bubblewrap for namespace isolation when available,
    falls back to subprocess with restricted permissions.

    Intended to be used as a context manager so temp directories are
    always cleaned up.
    """

    def __init__(self, config: Optional[SandboxConfig] = None):
        # Default to a fully-isolating ("untrusted") config when none given.
        self.config = config or SandboxConfig()
        # Temp dirs created during this sandbox's lifetime; removed by cleanup().
        self._temp_dirs: List[Path] = []

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Cleanup runs even when the effect raised.
        self.cleanup()

    def cleanup(self):
        """Clean up temporary directories."""
        for temp_dir in self._temp_dirs:
            if temp_dir.exists():
                shutil.rmtree(temp_dir, ignore_errors=True)
        self._temp_dirs = []

    def _create_temp_dir(self) -> Path:
        """Create a temporary directory for sandbox use (tracked for cleanup)."""
        temp_dir = Path(tempfile.mkdtemp(prefix="artdag_sandbox_"))
        self._temp_dirs.append(temp_dir)
        return temp_dir

    def run_effect(
        self,
        effect_path: Path,
        input_paths: List[Path],
        output_path: Path,
        params: Dict[str, Any],
        bindings: Optional[Dict[str, List[float]]] = None,
        seed: int = 0,
    ) -> SandboxResult:
        """
        Run an effect in the sandbox.

        Args:
            effect_path: Path to effect.py
            input_paths: List of input file paths
            output_path: Output file path
            params: Effect parameters
            bindings: Per-frame parameter bindings
            seed: RNG seed for determinism

        Returns:
            SandboxResult with success status and output
        """
        bindings = bindings or {}
        # Create work directory (auto-removed by cleanup())
        work_dir = self._create_temp_dir()
        config_path = work_dir / "config.json"
        effect_copy = work_dir / "effect.py"
        # Copy effect to work dir so the sandbox only needs work_dir access
        shutil.copy(effect_path, effect_copy)
        # Write config file — read back by the runner script inside the sandbox
        config_data = {
            "input_paths": [str(p) for p in input_paths],
            "output_path": str(output_path),
            "params": params,
            "bindings": bindings,
            "seed": seed,
        }
        config_path.write_text(json.dumps(config_data))
        # Full bwrap isolation only for untrusted effects; trusted effects
        # (or hosts without bwrap) run as a plain subprocess.
        if is_bwrap_available() and self.config.trust_level == "untrusted":
            return self._run_with_bwrap(
                effect_copy, config_path, input_paths, output_path, work_dir
            )
        else:
            return self._run_subprocess(
                effect_copy, config_path, input_paths, output_path, work_dir
            )

    def _run_with_bwrap(
        self,
        effect_path: Path,
        config_path: Path,
        input_paths: List[Path],
        output_path: Path,
        work_dir: Path,
    ) -> SandboxResult:
        """Run effect with bubblewrap isolation.

        NOTE(review): config.memory_limit and config.allow_network are not
        applied here (network is always unshared) — confirm intended.
        """
        logger.info("Running effect in bwrap sandbox")
        # Build bwrap command
        cmd = [
            "bwrap",
            # New PID namespace
            "--unshare-pid",
            # No network
            "--unshare-net",
            # Read-only root filesystem
            "--ro-bind", "/", "/",
            # Read-write work directory (re-bound writable over the ro root)
            "--bind", str(work_dir), str(work_dir),
            # Read-only input files
        ]
        for input_path in input_paths:
            cmd.extend(["--ro-bind", str(input_path), str(input_path)])
        # Bind output directory as writable
        output_dir = output_path.parent
        output_dir.mkdir(parents=True, exist_ok=True)
        cmd.extend(["--bind", str(output_dir), str(output_dir)])
        # Bind venv if available; its interpreter takes precedence over the
        # system python3
        if self.config.venv_path and self.config.venv_path.exists():
            cmd.extend(["--ro-bind", str(self.config.venv_path), str(self.config.venv_path)])
            python_path = self.config.venv_path / "bin" / "python"
        else:
            python_path = Path("/usr/bin/python3")
        # Add runner script (bootstraps the effect inside the sandbox)
        runner_script = self._get_runner_script()
        runner_path = work_dir / "runner.py"
        runner_path.write_text(runner_script)
        # Run the effect: bwrap args, then the python invocation it wraps
        cmd.extend([
            str(python_path),
            str(runner_path),
            str(effect_path),
            str(config_path),
        ])
        try:
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=self.config.timeout,
            )
            # Success requires BOTH a zero exit and an actual output file.
            if result.returncode == 0 and output_path.exists():
                return SandboxResult(
                    success=True,
                    output_path=output_path,
                    stderr=result.stderr,
                    exit_code=0,
                )
            else:
                return SandboxResult(
                    success=False,
                    stderr=result.stderr,
                    exit_code=result.returncode,
                    error=result.stderr or "Effect execution failed",
                )
        except subprocess.TimeoutExpired:
            return SandboxResult(
                success=False,
                error=f"Effect timed out after {self.config.timeout}s",
                exit_code=-1,
            )
        except Exception as e:
            # Launch failures (e.g. bwrap refusing to start) surface here.
            return SandboxResult(
                success=False,
                error=str(e),
                exit_code=-1,
            )

    def _run_subprocess(
        self,
        effect_path: Path,
        config_path: Path,
        input_paths: List[Path],
        output_path: Path,
        work_dir: Path,
    ) -> SandboxResult:
        """Run effect in subprocess (fallback without bwrap).

        Same invocation as _run_with_bwrap minus the namespace isolation.
        """
        logger.warning("Running effect without sandbox isolation")
        # Create runner script
        runner_script = self._get_runner_script()
        runner_path = work_dir / "runner.py"
        runner_path.write_text(runner_script)
        # Determine Python path — venv interpreter if present, else PATH lookup
        if self.config.venv_path and self.config.venv_path.exists():
            python_path = self.config.venv_path / "bin" / "python"
        else:
            python_path = "python3"
        cmd = [
            str(python_path),
            str(runner_path),
            str(effect_path),
            str(config_path),
        ]
        try:
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=self.config.timeout,
                cwd=str(work_dir),
            )
            # Success requires BOTH a zero exit and an actual output file.
            if result.returncode == 0 and output_path.exists():
                return SandboxResult(
                    success=True,
                    output_path=output_path,
                    stderr=result.stderr,
                    exit_code=0,
                )
            else:
                return SandboxResult(
                    success=False,
                    stderr=result.stderr,
                    exit_code=result.returncode,
                    error=result.stderr or "Effect execution failed",
                )
        except subprocess.TimeoutExpired:
            return SandboxResult(
                success=False,
                error=f"Effect timed out after {self.config.timeout}s",
                exit_code=-1,
            )
        except Exception as e:
            return SandboxResult(
                success=False,
                error=str(e),
                exit_code=-1,
            )

    def _get_runner_script(self) -> str:
        """Get the runner script that executes effects.

        The returned source is written to work_dir/runner.py and executed
        inside the sandbox; it dispatches to the effect's process() (whole
        video) or process_frame() (per frame) API.
        """
        return '''#!/usr/bin/env python3
"""Effect runner script - executed in sandbox."""
import importlib.util
import json
import sys
from pathlib import Path
def load_effect(effect_path):
    """Load effect module from path."""
    spec = importlib.util.spec_from_file_location("effect", effect_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
def main():
    if len(sys.argv) < 3:
        print("Usage: runner.py <effect_path> <config_path>", file=sys.stderr)
        sys.exit(1)
    effect_path = Path(sys.argv[1])
    config_path = Path(sys.argv[2])
    # Load config
    config = json.loads(config_path.read_text())
    input_paths = [Path(p) for p in config["input_paths"]]
    output_path = Path(config["output_path"])
    params = config["params"]
    bindings = config.get("bindings", {})
    seed = config.get("seed", 0)
    # Load effect
    effect = load_effect(effect_path)
    # Check API type
    if hasattr(effect, "process"):
        # Whole-video API
        from artdag.effects.meta import ExecutionContext
        ctx = ExecutionContext(
            input_paths=[str(p) for p in input_paths],
            output_path=str(output_path),
            params=params,
            seed=seed,
            bindings=bindings,
        )
        effect.process(input_paths, output_path, params, ctx)
    elif hasattr(effect, "process_frame"):
        # Frame-by-frame API
        from artdag.effects.frame_processor import process_video
        result_path, _ = process_video(
            input_path=input_paths[0],
            output_path=output_path,
            process_frame=effect.process_frame,
            params=params,
            bindings=bindings,
        )
    else:
        print("Effect must have process() or process_frame()", file=sys.stderr)
        sys.exit(1)
    print(f"Success: {output_path}", file=sys.stderr)
if __name__ == "__main__":
    main()
'''