Add streaming video compositor with sexp interpreter
- New streaming/ module for real-time video processing: - compositor.py: Main streaming compositor with cycle-crossfade - sexp_executor.py: Executes compiled sexp recipes in real-time - sexp_interp.py: Full S-expression interpreter for SLICE_ON Lambda - recipe_adapter.py: Bridges recipes to streaming compositor - sources.py: Video source with ffmpeg streaming - audio.py: Real-time audio analysis (energy, beats) - output.py: Preview (mpv) and file output with audio muxing - New templates/: - cycle-crossfade.sexp: Smooth zoom-based video cycling - process-pair.sexp: Dual-clip processing with effects - Key features: - Videos cycle in input-videos order (not definition order) - Cumulative whole-spin rotation - Zero-weight sources skip processing - Live audio-reactive effects - New effects: blend_multi for weighted layer compositing - Updated primitives and interpreter for streaming compatibility Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
7
.gitignore
vendored
7
.gitignore
vendored
@@ -10,3 +10,10 @@ __pycache__/
|
|||||||
|
|
||||||
# Output files
|
# Output files
|
||||||
*.json
|
*.json
|
||||||
|
|
||||||
|
# Cache directories
|
||||||
|
.cache/
|
||||||
|
.stage_cache/
|
||||||
|
effects/.stage_cache/
|
||||||
|
local_server/.cache/
|
||||||
|
local_server/.data/
|
||||||
|
|||||||
404
cache.py
Normal file
404
cache.py
Normal file
@@ -0,0 +1,404 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Unified content cache for artdag.
|
||||||
|
|
||||||
|
Design:
|
||||||
|
- IPNS (cache_id) = computation hash, known BEFORE execution
|
||||||
|
"What would be the result of running X with inputs Y?"
|
||||||
|
|
||||||
|
- CID = content hash, known AFTER execution
|
||||||
|
"What is this actual content?"
|
||||||
|
|
||||||
|
Structure:
|
||||||
|
.cache/
|
||||||
|
refs/ # IPNS → CID mappings (computation → result)
|
||||||
|
{cache_id} # Text file containing the CID of the result
|
||||||
|
content/ # Content-addressed storage
|
||||||
|
{cid[:2]}/{cid} # Actual content by CID
|
||||||
|
"""
|
||||||
|
|
||||||
|
import hashlib
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional, Dict, Any, Tuple
|
||||||
|
|
||||||
|
# Default cache location - can be overridden via ARTDAG_CACHE env var
|
||||||
|
DEFAULT_CACHE_DIR = Path(__file__).parent / ".cache"
|
||||||
|
|
||||||
|
|
||||||
|
def get_cache_dir() -> Path:
    """Return the root cache directory, creating it on first use.

    Honors the ARTDAG_CACHE environment variable; falls back to
    DEFAULT_CACHE_DIR next to this module.
    """
    root = os.environ.get("ARTDAG_CACHE", DEFAULT_CACHE_DIR)
    cache_root = Path(root)
    cache_root.mkdir(parents=True, exist_ok=True)
    return cache_root
|
||||||
|
|
||||||
|
|
||||||
|
def get_refs_dir() -> Path:
    """Return the refs directory (IPNS -> CID mappings), creating it if needed."""
    path = get_cache_dir() / "refs"
    path.mkdir(parents=True, exist_ok=True)
    return path
|
||||||
|
|
||||||
|
|
||||||
|
def get_content_dir() -> Path:
    """Return the content directory (CID -> content), creating it if needed."""
    path = get_cache_dir() / "content"
    path.mkdir(parents=True, exist_ok=True)
    return path
|
||||||
|
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# CID (Content Hash) Operations
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
def compute_cid(content: bytes) -> str:
    """Return the content ID of *content*: its SHA256 hex digest."""
    digest = hashlib.sha256(content)
    return digest.hexdigest()
|
||||||
|
|
||||||
|
|
||||||
|
def compute_file_cid(file_path: Path) -> str:
    """Compute the content ID (SHA256 hex digest) of a file.

    Streams the file in fixed-size chunks so arbitrarily large files can
    be hashed without loading them fully into memory. The result is
    identical to ``compute_cid(file contents)``.

    Args:
        file_path: Path to the file to hash.

    Returns:
        The SHA256 hex digest of the file's bytes.
    """
    hasher = hashlib.sha256()
    with open(file_path, 'rb') as f:
        # 1 MiB chunks: large enough to amortize syscall overhead,
        # small enough to keep peak memory bounded.
        for chunk in iter(lambda: f.read(1024 * 1024), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
|
||||||
|
|
||||||
|
|
||||||
|
def compute_string_cid(text: str) -> str:
    """Return the content ID for *text*, hashed as its UTF-8 encoding."""
    encoded = text.encode('utf-8')
    return hashlib.sha256(encoded).hexdigest()
|
||||||
|
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# Content Storage (by CID)
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
def _content_path(cid: str) -> Path:
    """Map a CID to its storage path, sharded by the first two hex chars."""
    shard = cid[:2]
    return get_content_dir() / shard / cid
|
||||||
|
|
||||||
|
|
||||||
|
def content_exists_by_cid(cid: str) -> Optional[Path]:
    """Return the path of non-empty stored content for *cid*, else None."""
    candidate = _content_path(cid)
    if not candidate.exists():
        return None
    # Zero-byte files are treated as missing (e.g. interrupted writes).
    return candidate if candidate.stat().st_size > 0 else None
|
||||||
|
|
||||||
|
|
||||||
|
def content_store_by_cid(cid: str, content: bytes) -> Path:
    """Write *content* at its CID-derived path and return that path."""
    target = _content_path(cid)
    target.parent.mkdir(parents=True, exist_ok=True)
    target.write_bytes(content)
    return target
|
||||||
|
|
||||||
|
|
||||||
|
def content_store_file(file_path: Path) -> Tuple[str, Path]:
    """Store a file's bytes by content hash. Returns (cid, path)."""
    data = file_path.read_bytes()
    digest = compute_cid(data)
    stored_at = content_store_by_cid(digest, data)
    return digest, stored_at
|
||||||
|
|
||||||
|
|
||||||
|
def content_store_string(text: str) -> Tuple[str, Path]:
    """Store a string (UTF-8 encoded) by content hash. Returns (cid, path)."""
    data = text.encode('utf-8')
    digest = compute_cid(data)
    stored_at = content_store_by_cid(digest, data)
    return digest, stored_at
|
||||||
|
|
||||||
|
|
||||||
|
def content_get(cid: str) -> Optional[bytes]:
    """Return the stored bytes for *cid*, or None when absent."""
    found = content_exists_by_cid(cid)
    return found.read_bytes() if found else None
|
||||||
|
|
||||||
|
|
||||||
|
def content_get_string(cid: str) -> Optional[str]:
    """Return the stored content for *cid* decoded as UTF-8, or None."""
    raw = content_get(cid)
    return raw.decode('utf-8') if raw else None
|
||||||
|
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# Refs (IPNS → CID mappings)
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
def _ref_path(cache_id: str) -> Path:
    """Map a cache_id to the file holding its CID ref."""
    refs = get_refs_dir()
    return refs / cache_id
|
||||||
|
|
||||||
|
|
||||||
|
def ref_exists(cache_id: str) -> Optional[str]:
    """Return the CID recorded for *cache_id*, or None when no ref exists."""
    ref = _ref_path(cache_id)
    if not ref.exists():
        return None
    # Refs are small text files containing a single CID.
    return ref.read_text().strip()
|
||||||
|
|
||||||
|
|
||||||
|
def ref_set(cache_id: str, cid: str) -> Path:
    """Record the IPNS -> CID mapping for *cache_id*; returns the ref path."""
    ref = _ref_path(cache_id)
    ref.parent.mkdir(parents=True, exist_ok=True)
    ref.write_text(cid)
    return ref
|
||||||
|
|
||||||
|
|
||||||
|
def ref_get_content(cache_id: str) -> Optional[bytes]:
    """Resolve *cache_id* to a CID via its ref, then fetch that content."""
    cid = ref_exists(cache_id)
    return content_get(cid) if cid else None
|
||||||
|
|
||||||
|
|
||||||
|
def ref_get_string(cache_id: str) -> Optional[str]:
    """Resolve *cache_id* and decode the resulting content as UTF-8."""
    data = ref_get_content(cache_id)
    return data.decode('utf-8') if data else None
|
||||||
|
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# High-level Cache Operations
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
def cache_store(cache_id: str, content: bytes) -> Tuple[str, Path]:
    """
    Store *content* and record the computation ref pointing at it.

    This is the core IPNS -> CID indirection: the content is stored by
    its own hash, then the computation hash is mapped to that CID.

    Args:
        cache_id: Computation hash (IPNS address), known before execution.
        content: Result bytes to store.

    Returns:
        (cid, path) tuple for the stored content.
    """
    digest = compute_cid(content)
    stored_at = content_store_by_cid(digest, content)
    ref_set(cache_id, digest)
    return digest, stored_at
|
||||||
|
|
||||||
|
|
||||||
|
def cache_store_file(cache_id: str, file_path: Path) -> Tuple[str, Path]:
    """Store a file's bytes with IPNS -> CID indirection."""
    return cache_store(cache_id, file_path.read_bytes())
|
||||||
|
|
||||||
|
|
||||||
|
def cache_store_string(cache_id: str, text: str) -> Tuple[str, Path]:
    """Store a string (UTF-8 encoded) with IPNS -> CID indirection."""
    encoded = text.encode('utf-8')
    return cache_store(cache_id, encoded)
|
||||||
|
|
||||||
|
|
||||||
|
def cache_store_json(cache_id: str, data: Any) -> Tuple[str, Path]:
    """Serialize *data* as pretty-printed JSON and store it."""
    serialized = json.dumps(data, indent=2)
    return cache_store_string(cache_id, serialized)
|
||||||
|
|
||||||
|
|
||||||
|
def cache_exists(cache_id: str) -> Optional[Path]:
    """Return the content path for a cached computation, or None.

    Both the ref and non-empty content must exist for a hit.
    """
    cid = ref_exists(cache_id)
    return content_exists_by_cid(cid) if cid else None
|
||||||
|
|
||||||
|
|
||||||
|
def cache_get(cache_id: str) -> Optional[bytes]:
    """Fetch cached result bytes for a computation hash.

    Thin alias over ref_get_content() kept for a symmetric cache_* API.
    """
    return ref_get_content(cache_id)
|
||||||
|
|
||||||
|
|
||||||
|
def cache_get_string(cache_id: str) -> Optional[str]:
    """Fetch a cached result as a UTF-8 string.

    Thin alias over ref_get_string() kept for a symmetric cache_* API.
    """
    return ref_get_string(cache_id)
|
||||||
|
|
||||||
|
|
||||||
|
def cache_get_json(cache_id: str) -> Optional[Any]:
    """Fetch cached JSON for a computation hash, parsed into Python objects."""
    text = cache_get_string(cache_id)
    return json.loads(text) if text else None
|
||||||
|
|
||||||
|
|
||||||
|
def cache_get_path(cache_id: str) -> Optional[Path]:
    """Get path to cached content by computation hash.

    The lookup (ref -> CID -> content path) is identical to
    cache_exists(); delegate to it so the resolution logic lives in
    exactly one place instead of being duplicated.
    """
    return cache_exists(cache_id)
|
||||||
|
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# Plan Cache (convenience wrappers)
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
def _stable_hash_params(params: Dict[str, Any]) -> str:
|
||||||
|
"""Compute stable hash of params using JSON + SHA256 (consistent with CID)."""
|
||||||
|
params_str = json.dumps(params, sort_keys=True, default=str)
|
||||||
|
return hashlib.sha256(params_str.encode()).hexdigest()
|
||||||
|
|
||||||
|
|
||||||
|
def plan_cache_id(source_cid: str, params: Optional[Dict[str, Any]] = None) -> str:
    """
    Compute the cache_id (IPNS address) for a plan.

    Based on source CID + params. Name/version are just metadata.

    Args:
        source_cid: Content hash of the plan's source.
        params: Optional parameter dict; hashed stably when non-empty.

    Returns:
        SHA256 hex digest serving as the plan's computation hash.
    """
    key = f"plan:{source_cid}"
    # NOTE: an empty dict is treated the same as None (no params segment).
    if params:
        params_hash = _stable_hash_params(params)
        key = f"{key}:{params_hash}"
    return hashlib.sha256(key.encode()).hexdigest()
|
||||||
|
|
||||||
|
|
||||||
|
def plan_exists(source_cid: str, params: Dict[str, Any] = None) -> Optional[str]:
    """Return the CID of a cached plan for (source, params), or None."""
    return ref_exists(plan_cache_id(source_cid, params))
|
||||||
|
|
||||||
|
|
||||||
|
def plan_store(source_cid: str, params: Dict[str, Any], content: str) -> Tuple[str, str, Path]:
    """
    Store a plan in the cache, keyed by source CID + params.

    Returns:
        (cache_id, cid, path) tuple.
    """
    cache_id = plan_cache_id(source_cid, params)
    cid, stored_at = cache_store_string(cache_id, content)
    return cache_id, cid, stored_at
|
||||||
|
|
||||||
|
|
||||||
|
def plan_load(source_cid: str, params: Dict[str, Any] = None) -> Optional[str]:
    """Load a cached plan's content string, or None when absent."""
    return cache_get_string(plan_cache_id(source_cid, params))
|
||||||
|
|
||||||
|
|
||||||
|
def plan_get_path(source_cid: str, params: Dict[str, Any] = None) -> Optional[Path]:
    """Return the path of a cached plan's content, or None."""
    return cache_get_path(plan_cache_id(source_cid, params))
|
||||||
|
|
||||||
|
|
||||||
|
# =============================================================================
|
||||||
|
# Cache Listing
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
def list_cache(verbose: bool = False) -> Dict[str, Any]:
    """Walk the cache directories and return a structured listing.

    Returns a dict with:
        refs:    list of per-ref info dicts (cache_id, cid, file stats,
                 plus content_size/content_size_str when the target
                 content exists)
        content: list of per-content-file info dicts (cid, file stats)
        summary: totals (total_refs, total_content, total_size,
                 total_size_str)

    NOTE(review): `verbose` is accepted but never used in this function;
    only print_cache_listing() acts on it — confirm intent.
    """
    from datetime import datetime

    # NOTE(review): cache_dir is computed but unused below.
    cache_dir = get_cache_dir()
    refs_dir = get_refs_dir()
    content_dir = get_content_dir()

    def format_size(size):
        # Human-readable size using decimal (SI) units.
        if size >= 1_000_000_000:
            return f"{size / 1_000_000_000:.1f}GB"
        elif size >= 1_000_000:
            return f"{size / 1_000_000:.1f}MB"
        elif size >= 1000:
            return f"{size / 1000:.1f}KB"
        else:
            return f"{size}B"

    def get_file_info(path: Path) -> Dict:
        # Common stat-derived fields shared by refs and content entries.
        stat = path.stat()
        return {
            "path": path,
            "name": path.name,
            "size": stat.st_size,
            "size_str": format_size(stat.st_size),
            "mtime": datetime.fromtimestamp(stat.st_mtime),
        }

    result = {
        "refs": [],
        "content": [],
        # NOTE(review): "total_items" is initialized here but never
        # updated below; callers should use total_refs/total_content.
        "summary": {"total_items": 0, "total_size": 0},
    }

    # Refs: each ref file's name is the cache_id, its text is the CID.
    if refs_dir.exists():
        for f in sorted(refs_dir.iterdir()):
            if f.is_file():
                info = get_file_info(f)
                info["cache_id"] = f.name
                info["cid"] = f.read_text().strip()
                # Try to determine type from content
                cid = info["cid"]
                content_path = content_exists_by_cid(cid)
                if content_path:
                    info["content_size"] = content_path.stat().st_size
                    info["content_size_str"] = format_size(info["content_size"])
                result["refs"].append(info)

    # Content: two-level sharded layout {cid[:2]}/{cid}.
    if content_dir.exists():
        for subdir in sorted(content_dir.iterdir()):
            if subdir.is_dir():
                for f in sorted(subdir.iterdir()):
                    if f.is_file():
                        info = get_file_info(f)
                        info["cid"] = f.name
                        result["content"].append(info)

    # Summary: totals are over content files only (refs are tiny).
    result["summary"]["total_refs"] = len(result["refs"])
    result["summary"]["total_content"] = len(result["content"])
    result["summary"]["total_size"] = sum(i["size"] for i in result["content"])
    result["summary"]["total_size_str"] = format_size(result["summary"]["total_size"])

    return result
|
||||||
|
|
||||||
|
|
||||||
|
def print_cache_listing(verbose: bool = False):
    """Print a human-readable cache listing to stdout.

    Shows up to 20 refs and 20 content entries, followed by totals.
    When *verbose* is true, also prints the clear-cache hint.
    """
    info = list_cache(verbose)
    cache_dir = get_cache_dir()

    print(f"\nCache directory: {cache_dir}\n")

    # Refs summary
    if info["refs"]:
        print(f"=== Refs ({len(info['refs'])}) ===")
        for ref in info["refs"][:20]:  # Show first 20
            # content_size_str is only present when the target content exists.
            content_info = f" → {ref.get('content_size_str', '?')}" if 'content_size_str' in ref else ""
            print(f" {ref['cache_id'][:16]}... → {ref['cid'][:16]}...{content_info}")
        if len(info["refs"]) > 20:
            print(f" ... and {len(info['refs']) - 20} more")
        print()

    # Content by type
    if info["content"]:
        # Group by first 2 chars (subdirectory)
        print(f"=== Content ({len(info['content'])} items, {info['summary']['total_size_str']}) ===")
        for item in info["content"][:20]:
            print(f" {item['cid'][:16]}... {item['size_str']:>8} {item['mtime'].strftime('%Y-%m-%d %H:%M')}")
        if len(info["content"]) > 20:
            print(f" ... and {len(info['content']) - 20} more")
        print()

    print(f"=== Summary ===")
    print(f" Refs: {info['summary']['total_refs']}")
    print(f" Content: {info['summary']['total_content']} ({info['summary']['total_size_str']})")

    if verbose:
        print(f"\nTo clear cache: rm -rf {cache_dir}/*")
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    import sys

    # CLI entry point: print the cache listing, verbosely with -v/--verbose.
    verbose = any(flag in sys.argv for flag in ("-v", "--verbose"))
    print_cache_listing(verbose)
|
||||||
@@ -14,6 +14,8 @@
|
|||||||
:desc "Number of character columns")
|
:desc "Number of character columns")
|
||||||
(rotation_scale :type float :default 60 :range [0 180]
|
(rotation_scale :type float :default 60 :range [0 180]
|
||||||
:desc "Max rotation in degrees")
|
:desc "Max rotation in degrees")
|
||||||
|
(duration :type float :default 10 :range [1 300]
|
||||||
|
:desc "Clip duration in seconds")
|
||||||
)
|
)
|
||||||
|
|
||||||
;; Registry
|
;; Registry
|
||||||
@@ -29,7 +31,7 @@
|
|||||||
;; Stage 1: Analysis
|
;; Stage 1: Analysis
|
||||||
(stage :analyze
|
(stage :analyze
|
||||||
:outputs [energy-data]
|
:outputs [energy-data]
|
||||||
(def audio-clip (-> audio (segment :start 60 :duration 10)))
|
(def audio-clip (-> audio (segment :start 60 :duration duration)))
|
||||||
(def energy-data (-> audio-clip (analyze energy))))
|
(def energy-data (-> audio-clip (analyze energy))))
|
||||||
|
|
||||||
;; Stage 2: Process
|
;; Stage 2: Process
|
||||||
@@ -37,8 +39,8 @@
|
|||||||
:requires [:analyze]
|
:requires [:analyze]
|
||||||
:inputs [energy-data]
|
:inputs [energy-data]
|
||||||
:outputs [result audio-clip]
|
:outputs [result audio-clip]
|
||||||
(def clip (-> video (segment :start 0 :duration 10)))
|
(def clip (-> video (segment :start 0 :duration duration)))
|
||||||
(def audio-clip (-> audio (segment :start 60 :duration 10)))
|
(def audio-clip (-> audio (segment :start 60 :duration duration)))
|
||||||
|
|
||||||
(def result (-> clip
|
(def result (-> clip
|
||||||
(effect ascii_fx_zone
|
(effect ascii_fx_zone
|
||||||
|
|||||||
@@ -19,6 +19,8 @@
|
|||||||
:desc "Blend opacity (0=video-a only, 1=video-b only)")
|
:desc "Blend opacity (0=video-a only, 1=video-b only)")
|
||||||
(blend_mode :type string :default "overlay"
|
(blend_mode :type string :default "overlay"
|
||||||
:desc "Blend mode: alpha, add, multiply, screen, overlay, difference")
|
:desc "Blend mode: alpha, add, multiply, screen, overlay, difference")
|
||||||
|
(duration :type float :default 10 :range [1 300]
|
||||||
|
:desc "Clip duration in seconds")
|
||||||
)
|
)
|
||||||
|
|
||||||
;; Registry - effects and analyzers
|
;; Registry - effects and analyzers
|
||||||
@@ -35,7 +37,7 @@
|
|||||||
;; Stage 1: Analysis
|
;; Stage 1: Analysis
|
||||||
(stage :analyze
|
(stage :analyze
|
||||||
:outputs [energy-data]
|
:outputs [energy-data]
|
||||||
(def audio-clip (-> audio (segment :start 60 :duration 10)))
|
(def audio-clip (-> audio (segment :start 60 :duration duration)))
|
||||||
(def energy-data (-> audio-clip (analyze energy))))
|
(def energy-data (-> audio-clip (analyze energy))))
|
||||||
|
|
||||||
;; Stage 2: Process both videos
|
;; Stage 2: Process both videos
|
||||||
@@ -45,10 +47,10 @@
|
|||||||
:outputs [blended audio-clip]
|
:outputs [blended audio-clip]
|
||||||
|
|
||||||
;; Get audio clip for final mux
|
;; Get audio clip for final mux
|
||||||
(def audio-clip (-> audio (segment :start 60 :duration 10)))
|
(def audio-clip (-> audio (segment :start 60 :duration duration)))
|
||||||
|
|
||||||
;; Process video A with ASCII effect
|
;; Process video A with ASCII effect
|
||||||
(def clip-a (-> video-a (segment :start 0 :duration 10)))
|
(def clip-a (-> video-a (segment :start 0 :duration duration)))
|
||||||
(def ascii-a (-> clip-a
|
(def ascii-a (-> clip-a
|
||||||
(effect ascii_fx_zone
|
(effect ascii_fx_zone
|
||||||
:cols cols
|
:cols cols
|
||||||
@@ -66,7 +68,7 @@
|
|||||||
(- 1 (get zone "row-norm")))))))))))
|
(- 1 (get zone "row-norm")))))))))))
|
||||||
|
|
||||||
;; Process video B with ASCII effect
|
;; Process video B with ASCII effect
|
||||||
(def clip-b (-> video-b (segment :start 0 :duration 10)))
|
(def clip-b (-> video-b (segment :start 0 :duration duration)))
|
||||||
(def ascii-b (-> clip-b
|
(def ascii-b (-> clip-b
|
||||||
(effect ascii_fx_zone
|
(effect ascii_fx_zone
|
||||||
:cols cols
|
:cols cols
|
||||||
|
|||||||
178
effects/quick_test.sexp
Normal file
178
effects/quick_test.sexp
Normal file
@@ -0,0 +1,178 @@
|
|||||||
|
;; Quick Test Recipe
|
||||||
|
;;
|
||||||
|
;; Cycles between three video pairs (monday, duel, ecstacy) with smooth zoom-based crossfade.
|
||||||
|
;; Each pair is two copies of the same source with opposite rotations.
|
||||||
|
;; Each pair rotates in its own direction (per-pair rotation via template).
|
||||||
|
;; Cycle: active pair plays -> crossfade -> new pair plays -> advance and repeat.
|
||||||
|
;; Ripple drops on the final combined output only.
|
||||||
|
|
||||||
|
(recipe "quick_test"
|
||||||
|
:version "1.0"
|
||||||
|
:description "Cycling crossfade between three video pairs"
|
||||||
|
:minimal-primitives true
|
||||||
|
:encoding (:codec "libx264" :crf 23 :preset "ultrafast" :audio-codec "aac" :fps 30)
|
||||||
|
|
||||||
|
:params (
|
||||||
|
(audio_start :type float :default 60 :range [0 300]
|
||||||
|
:desc "Audio start time in seconds")
|
||||||
|
(audio_duration :type float :default nil
|
||||||
|
:desc "Audio duration (nil = full remaining)")
|
||||||
|
(blend_opacity :type float :default 0.5 :range [0 1]
|
||||||
|
:desc "Blend opacity within each pair")
|
||||||
|
(seed :type int :default 42 :desc "Master random seed")
|
||||||
|
)
|
||||||
|
|
||||||
|
;; Registry
|
||||||
|
(effect rotate :path "../sexp_effects/effects/rotate.sexp")
|
||||||
|
(effect zoom :path "../sexp_effects/effects/zoom.sexp")
|
||||||
|
(effect blend :path "../sexp_effects/effects/blend.sexp")
|
||||||
|
(effect invert :path "../sexp_effects/effects/invert.sexp")
|
||||||
|
(effect hue_shift :path "../sexp_effects/effects/hue_shift.sexp")
|
||||||
|
(effect ascii_art :path "../sexp_effects/effects/ascii_art.sexp")
|
||||||
|
(effect ripple :path "../sexp_effects/effects/ripple.sexp")
|
||||||
|
(effect blend_multi :path "../sexp_effects/effects/blend_multi.sexp")
|
||||||
|
(analyzer energy :path "../../artdag-analyzers/energy/analyzer.py")
|
||||||
|
(analyzer beats :path "../../artdag-analyzers/beats/analyzer.py")
|
||||||
|
|
||||||
|
;; Sources
|
||||||
|
|
||||||
|
(def video-1 (source :path "../1.mp4"))
|
||||||
|
(def video-2 (source :path "../2.webm"))
|
||||||
|
(def video-4 (source :path "../4.mp4"))
|
||||||
|
(def video-5 (source :path "../5.mp4"))
|
||||||
|
(def video-a (source :path "../monday.webm"))
|
||||||
|
(def video-b (source :path "../escher.webm"))
|
||||||
|
(def video-c (source :path "../dopple.webm"))
|
||||||
|
(def video-d (source :path "../disruptors.webm"))
|
||||||
|
(def video-e (source :path "../ecstacy.mp4"))
|
||||||
|
(def audio (source :path "../dizzy.mp3"))
|
||||||
|
|
||||||
|
;; Templates: reusable video-pair processor and cycle-crossfade
|
||||||
|
(include :path "../templates/process-pair.sexp")
|
||||||
|
(include :path "../templates/cycle-crossfade.sexp")
|
||||||
|
|
||||||
|
;; Unified RNG: auto-derives unique seeds for all scans
|
||||||
|
(def rng (make-rng seed))
|
||||||
|
|
||||||
|
;; Stage 1: Analysis - energy, beats, and global-level scans
|
||||||
|
(stage :analyze
|
||||||
|
:outputs [energy-data beat-data whole-spin
|
||||||
|
ripple-gate ripple-cx ripple-cy]
|
||||||
|
(def audio-clip (-> audio (segment :start audio_start :duration audio_duration)))
|
||||||
|
(def energy-data (-> audio-clip (analyze energy)))
|
||||||
|
(def beat-data (-> audio-clip (analyze beats)))
|
||||||
|
|
||||||
|
;; --- Whole-video continuous spin: cumulative rotation that reverses direction periodically ---
|
||||||
|
(def whole-spin (scan beat-data :rng rng
|
||||||
|
:init (dict :beat 0 :clen 25 :dir 1 :angle 0)
|
||||||
|
:step (if (< (+ beat 1) clen)
|
||||||
|
(dict :beat (+ beat 1) :clen clen :dir dir
|
||||||
|
:angle (+ angle (* dir (/ 360 clen))))
|
||||||
|
(dict :beat 0 :clen (rand-int 20 30) :dir (* dir -1)
|
||||||
|
:angle angle))
|
||||||
|
:emit angle))
|
||||||
|
|
||||||
|
;; --- Ripple drops on final output ---
|
||||||
|
(def ripple (scan beat-data :rng rng
|
||||||
|
:init (dict :rem 0 :cx 0.5 :cy 0.5)
|
||||||
|
:step (if (> rem 0)
|
||||||
|
(dict :rem (- rem 1) :cx cx :cy cy)
|
||||||
|
(if (< (rand) 0.05)
|
||||||
|
(dict :rem (rand-int 1 20) :cx (rand-range 0.1 0.9) :cy (rand-range 0.1 0.9))
|
||||||
|
(dict :rem 0 :cx 0.5 :cy 0.5)))
|
||||||
|
:emit {:gate (if (> rem 0) 1 0) :cx cx :cy cy})))
|
||||||
|
|
||||||
|
;; Stage 2: Process videos via template
|
||||||
|
;; Per-pair scans (inv/hue/ascii triggers, pair-mix, pair-rot) are now
|
||||||
|
;; defined inside the process-pair template using seed offsets.
|
||||||
|
(stage :process
|
||||||
|
:requires [:analyze]
|
||||||
|
:inputs [energy-data beat-data whole-spin
|
||||||
|
ripple-gate ripple-cx ripple-cy]
|
||||||
|
:outputs [final-video audio-clip]
|
||||||
|
|
||||||
|
;; Re-segment audio for final mux
|
||||||
|
(def audio-clip (-> audio (segment :start audio_start :duration audio_duration)))
|
||||||
|
|
||||||
|
;; --- Process each pair via template ---
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
(def monday-blend (process-pair
|
||||||
|
:video video-a :energy energy-data :beat-data beat-data
|
||||||
|
:rng rng :rot-dir -1
|
||||||
|
:rot-a [0 45] :rot-b [0 -45]
|
||||||
|
:zoom-a [1 1.5] :zoom-b [1 0.5]))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
(def escher-blend (process-pair
|
||||||
|
:video video-b :energy energy-data :beat-data beat-data
|
||||||
|
:rng rng :rot-dir 1
|
||||||
|
:rot-a [0 45] :rot-b [0 -45]
|
||||||
|
:zoom-a [1 1.5] :zoom-b [1 0.5]))
|
||||||
|
|
||||||
|
(def duel-blend (process-pair
|
||||||
|
:video video-d :energy energy-data :beat-data beat-data
|
||||||
|
:rng rng :rot-dir -1
|
||||||
|
:rot-a [0 -45] :rot-b [0 45]
|
||||||
|
:zoom-a [1 0.5] :zoom-b [1 1.5]))
|
||||||
|
|
||||||
|
(def blend-2 (process-pair
|
||||||
|
:video video-2 :energy energy-data :beat-data beat-data
|
||||||
|
:rng rng :rot-dir 1
|
||||||
|
:rot-a [0 45] :rot-b [0 -45]
|
||||||
|
:zoom-a [1 1.5] :zoom-b [1 0.5]))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
(def dopple-blend (process-pair
|
||||||
|
:video video-c :energy energy-data :beat-data beat-data
|
||||||
|
:rng rng :rot-dir -1
|
||||||
|
:rot-a [0 -45] :rot-b [0 45]
|
||||||
|
:zoom-a [1 0.5] :zoom-b [1 1.5]))
|
||||||
|
|
||||||
|
(def blend-4 (process-pair
|
||||||
|
:video video-4 :energy energy-data :beat-data beat-data
|
||||||
|
:rng rng :rot-dir -1
|
||||||
|
:rot-a [0 45] :rot-b [0 -45]
|
||||||
|
:zoom-a [1 1.5] :zoom-b [1 0.5]))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
(def ext-blend (process-pair
|
||||||
|
:video video-e :energy energy-data :beat-data beat-data
|
||||||
|
:rng rng :rot-dir 1
|
||||||
|
:rot-a [0 30] :rot-b [0 -30]
|
||||||
|
:zoom-a [1 1.3] :zoom-b [1 0.7]))
|
||||||
|
|
||||||
|
(def blend-5 (process-pair
|
||||||
|
:video video-5 :energy energy-data :beat-data beat-data
|
||||||
|
:rng rng :rot-dir 1
|
||||||
|
:rot-a [0 45] :rot-b [0 -45]
|
||||||
|
:zoom-a [1 1.5] :zoom-b [1 0.5]))
|
||||||
|
|
||||||
|
;; --- Cycle zoom + crossfade via template ---
|
||||||
|
|
||||||
|
(def combined (cycle-crossfade
|
||||||
|
:beat-data beat-data
|
||||||
|
:input-videos [monday-blend escher-blend blend-2 duel-blend blend-4 ext-blend dopple-blend blend-5]
|
||||||
|
:init-clen 60))
|
||||||
|
|
||||||
|
;; --- Final output: sporadic spin + ripple ---
|
||||||
|
(def final-video (-> combined
|
||||||
|
(effect rotate :angle (bind whole-spin values))
|
||||||
|
(effect ripple
|
||||||
|
:amplitude (* (bind ripple-gate values) (bind energy-data values :range [5 50]))
|
||||||
|
:center_x (bind ripple-cx values)
|
||||||
|
:center_y (bind ripple-cy values)
|
||||||
|
:frequency 8
|
||||||
|
:decay 2
|
||||||
|
:speed 5))))
|
||||||
|
|
||||||
|
;; Stage 3: Output
|
||||||
|
(stage :output
|
||||||
|
:requires [:process]
|
||||||
|
:inputs [final-video audio-clip]
|
||||||
|
(mux final-video audio-clip)))
|
||||||
@@ -1,4 +1,5 @@
|
|||||||
;; ASCII Art effect - converts image to ASCII characters
|
;; ASCII Art effect - converts image to ASCII characters
|
||||||
|
(require-primitives "ascii")
|
||||||
|
|
||||||
(define-effect ascii_art
|
(define-effect ascii_art
|
||||||
:params (
|
:params (
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
;; ASCII Art FX - converts image to ASCII characters with per-character effects
|
;; ASCII Art FX - converts image to ASCII characters with per-character effects
|
||||||
|
(require-primitives "ascii")
|
||||||
|
|
||||||
(define-effect ascii_art_fx
|
(define-effect ascii_art_fx
|
||||||
:params (
|
:params (
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
;; ASCII Zones effect - different character sets for different brightness zones
|
;; ASCII Zones effect - different character sets for different brightness zones
|
||||||
;; Dark areas use simple chars, mid uses standard, bright uses blocks
|
;; Dark areas use simple chars, mid uses standard, bright uses blocks
|
||||||
|
(require-primitives "ascii")
|
||||||
|
|
||||||
(define-effect ascii_zones
|
(define-effect ascii_zones
|
||||||
:params (
|
:params (
|
||||||
|
|||||||
@@ -15,7 +15,7 @@
|
|||||||
(opacity :type float :default 0.5)
|
(opacity :type float :default 0.5)
|
||||||
(resize_mode :type string :default "fit")
|
(resize_mode :type string :default "fit")
|
||||||
(priority :type string :default "width")
|
(priority :type string :default "width")
|
||||||
(pad_color :type list :default [0 0 0])
|
(pad_color :type list :default (quote [0 0 0]))
|
||||||
)
|
)
|
||||||
(let [a frame-a
|
(let [a frame-a
|
||||||
a-w (width a)
|
a-w (width a)
|
||||||
|
|||||||
57
sexp_effects/effects/blend_multi.sexp
Normal file
57
sexp_effects/effects/blend_multi.sexp
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
;; N-way weighted blend effect
|
||||||
|
;;
|
||||||
|
;; Takes N input frames via `inputs` and N per-frame weights.
|
||||||
|
;; Produces a single frame: the normalised weighted composite.
|
||||||
|
;;
|
||||||
|
;; Parameters:
|
||||||
|
;; weights - list of N floats, one per input (resolved per-frame)
|
||||||
|
;; mode - blend mode applied when folding each frame in:
|
||||||
|
;; "alpha" — pure weighted average (default)
|
||||||
|
;; "multiply" — darken by multiplication
|
||||||
|
;; "screen" — lighten (inverse multiply)
|
||||||
|
;; "overlay" — contrast-boosting midtone blend
|
||||||
|
;; "soft-light" — gentle dodge/burn
|
||||||
|
;; "hard-light" — strong dodge/burn
|
||||||
|
;; "color-dodge" — brightens towards white
|
||||||
|
;; "color-burn" — darkens towards black
|
||||||
|
;; "difference" — absolute pixel difference
|
||||||
|
;; "exclusion" — softer difference
|
||||||
|
;; "add" — additive (clamped)
|
||||||
|
;; "subtract" — subtractive (clamped)
|
||||||
|
;; "darken" — per-pixel minimum
|
||||||
|
;; "lighten" — per-pixel maximum
|
||||||
|
;; resize_mode - how to match frame dimensions (fit, crop, stretch)
|
||||||
|
;;
|
||||||
|
;; Uses a left-fold over inputs[1..N-1]. At each step the running
|
||||||
|
;; opacity is: w[i] / (w[0] + w[1] + ... + w[i])
|
||||||
|
;; which produces the correct normalised weighted result.
|
||||||
|
|
||||||
|
(require-primitives "image" "blending")
|
||||||
|
|
||||||
|
(define-effect blend_multi
|
||||||
|
:params (
|
||||||
|
(weights :type list :default (quote []))
|
||||||
|
(mode :type string :default "alpha")
|
||||||
|
(resize_mode :type string :default "fit")
|
||||||
|
)
|
||||||
|
(let [n (len inputs)
|
||||||
|
;; Target dimensions from first frame
|
||||||
|
target-w (width (nth inputs 0))
|
||||||
|
target-h (height (nth inputs 0))
|
||||||
|
;; Fold over indices 1..n-1
|
||||||
|
;; Accumulator is (list blended-frame running-weight-sum)
|
||||||
|
seed (list (nth inputs 0) (nth weights 0))
|
||||||
|
result (reduce (range 1 n) seed
|
||||||
|
(lambda (pair i)
|
||||||
|
(let [acc (nth pair 0)
|
||||||
|
running (nth pair 1)
|
||||||
|
w (nth weights i)
|
||||||
|
new-running (+ running w)
|
||||||
|
opacity (/ w (max new-running 0.001))
|
||||||
|
f (resize (nth inputs i) target-w target-h "linear")
|
||||||
|
;; Apply blend mode then mix with opacity
|
||||||
|
blended (if (= mode "alpha")
|
||||||
|
(blend-images acc f opacity)
|
||||||
|
(blend-images acc (blend-mode acc f mode) opacity))]
|
||||||
|
(list blended new-running))))]
|
||||||
|
(nth result 0)))
|
||||||
@@ -1,8 +1,9 @@
|
|||||||
;; Invert effect - inverts all colors
|
;; Invert effect - inverts all colors
|
||||||
;; Uses vectorized invert-img primitive for fast processing
|
;; Uses vectorized invert-img primitive for fast processing
|
||||||
|
;; amount param: 0 = no invert, 1 = full invert (threshold at 0.5)
|
||||||
|
|
||||||
(require-primitives "color_ops")
|
(require-primitives "color_ops")
|
||||||
|
|
||||||
(define-effect invert
|
(define-effect invert
|
||||||
:params ()
|
:params ((amount :type float :default 1 :range [0 1]))
|
||||||
(invert-img frame))
|
(if (> amount 0.5) (invert-img frame) frame))
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
;; Ripple effect - radial wave distortion from center
|
;; Ripple effect - radial wave distortion from center
|
||||||
|
(require-primitives "geometry" "image" "math")
|
||||||
|
|
||||||
(define-effect ripple
|
(define-effect ripple
|
||||||
:params (
|
:params (
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
;; Zoom effect - zooms in/out from center
|
;; Zoom effect - zooms in/out from center
|
||||||
|
(require-primitives "geometry")
|
||||||
|
|
||||||
(define-effect zoom
|
(define-effect zoom
|
||||||
:params (
|
:params (
|
||||||
|
|||||||
@@ -793,6 +793,35 @@ class Interpreter:
|
|||||||
return list(self.effects.values())[-1]
|
return list(self.effects.values())[-1]
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
def load_effect_from_string(self, sexp_content: str, effect_name: str = None) -> EffectDefinition:
|
||||||
|
"""Load an effect definition from an S-expression string.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
sexp_content: The S-expression content as a string
|
||||||
|
effect_name: Optional name hint (used if effect doesn't define its own name)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
The loaded EffectDefinition
|
||||||
|
"""
|
||||||
|
expr = parse(sexp_content)
|
||||||
|
|
||||||
|
# Handle multiple top-level expressions
|
||||||
|
if isinstance(expr, list) and expr and isinstance(expr[0], list):
|
||||||
|
for e in expr:
|
||||||
|
self.eval(e)
|
||||||
|
else:
|
||||||
|
self.eval(expr)
|
||||||
|
|
||||||
|
# Return the effect if we can find it by name
|
||||||
|
if effect_name and effect_name in self.effects:
|
||||||
|
return self.effects[effect_name]
|
||||||
|
|
||||||
|
# Return the most recently loaded effect
|
||||||
|
if self.effects:
|
||||||
|
return list(self.effects.values())[-1]
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
def run_effect(self, name: str, frame, params: Dict[str, Any],
|
def run_effect(self, name: str, frame, params: Dict[str, Any],
|
||||||
state: Dict[str, Any]) -> tuple:
|
state: Dict[str, Any]) -> tuple:
|
||||||
"""
|
"""
|
||||||
|
|||||||
@@ -51,22 +51,22 @@ def _parse_color(color_str: str) -> tuple:
|
|||||||
|
|
||||||
|
|
||||||
def _cell_sample(frame: np.ndarray, cell_size: int):
|
def _cell_sample(frame: np.ndarray, cell_size: int):
|
||||||
"""Sample frame into cells, returning colors and luminances."""
|
"""Sample frame into cells, returning colors and luminances.
|
||||||
|
|
||||||
|
Uses cv2.resize with INTER_AREA (pixel-area averaging) which is
|
||||||
|
~25x faster than numpy reshape+mean for block downsampling.
|
||||||
|
"""
|
||||||
h, w = frame.shape[:2]
|
h, w = frame.shape[:2]
|
||||||
rows = h // cell_size
|
rows = h // cell_size
|
||||||
cols = w // cell_size
|
cols = w // cell_size
|
||||||
|
|
||||||
colors = np.zeros((rows, cols, 3), dtype=np.uint8)
|
# Crop to exact grid then block-average via cv2 area interpolation.
|
||||||
luminances = np.zeros((rows, cols), dtype=np.float32)
|
cropped = frame[:rows * cell_size, :cols * cell_size]
|
||||||
|
colors = cv2.resize(cropped, (cols, rows), interpolation=cv2.INTER_AREA)
|
||||||
|
|
||||||
for r in range(rows):
|
luminances = ((0.299 * colors[:, :, 0] +
|
||||||
for c in range(cols):
|
0.587 * colors[:, :, 1] +
|
||||||
y1, y2 = r * cell_size, (r + 1) * cell_size
|
0.114 * colors[:, :, 2]) / 255.0).astype(np.float32)
|
||||||
x1, x2 = c * cell_size, (c + 1) * cell_size
|
|
||||||
cell = frame[y1:y2, x1:x2]
|
|
||||||
avg_color = np.mean(cell, axis=(0, 1))
|
|
||||||
colors[r, c] = avg_color.astype(np.uint8)
|
|
||||||
luminances[r, c] = (0.299 * avg_color[0] + 0.587 * avg_color[1] + 0.114 * avg_color[2]) / 255
|
|
||||||
|
|
||||||
return colors, luminances
|
return colors, luminances
|
||||||
|
|
||||||
@@ -303,9 +303,35 @@ def _apply_cell_effect(cell_img, zone, cell_effect, interp, env, extra_params):
|
|||||||
cell_env.set(cell_effect.params[1], zone)
|
cell_env.set(cell_effect.params[1], zone)
|
||||||
|
|
||||||
result = interp.eval(cell_effect.body, cell_env)
|
result = interp.eval(cell_effect.body, cell_env)
|
||||||
|
elif isinstance(cell_effect, list):
|
||||||
|
# Raw S-expression lambda like (lambda [cell zone] body) or (fn [cell zone] body)
|
||||||
|
# Check if it's a lambda expression
|
||||||
|
head = cell_effect[0] if cell_effect else None
|
||||||
|
head_name = head.name if head and hasattr(head, 'name') else str(head) if head else None
|
||||||
|
is_lambda = head_name in ('lambda', 'fn')
|
||||||
|
|
||||||
|
if is_lambda:
|
||||||
|
# (lambda [params...] body)
|
||||||
|
params = cell_effect[1] if len(cell_effect) > 1 else []
|
||||||
|
body = cell_effect[2] if len(cell_effect) > 2 else None
|
||||||
|
|
||||||
|
# Bind lambda parameters
|
||||||
|
if isinstance(params, list) and len(params) >= 1:
|
||||||
|
param_name = params[0].name if hasattr(params[0], 'name') else str(params[0])
|
||||||
|
cell_env.set(param_name, cell_img)
|
||||||
|
if isinstance(params, list) and len(params) >= 2:
|
||||||
|
param_name = params[1].name if hasattr(params[1], 'name') else str(params[1])
|
||||||
|
cell_env.set(param_name, zone)
|
||||||
|
|
||||||
|
result = interp.eval(body, cell_env) if body else cell_img
|
||||||
else:
|
else:
|
||||||
# Fallback: it might be a callable
|
# Some other expression - just evaluate it
|
||||||
|
result = interp.eval(cell_effect, cell_env)
|
||||||
|
elif callable(cell_effect):
|
||||||
|
# It's a callable
|
||||||
result = cell_effect(cell_img, zone)
|
result = cell_effect(cell_img, zone)
|
||||||
|
else:
|
||||||
|
raise ValueError(f"cell_effect must be a Lambda, list, or callable, got {type(cell_effect)}")
|
||||||
|
|
||||||
if isinstance(result, np.ndarray) and result.shape == cell_img.shape:
|
if isinstance(result, np.ndarray) and result.shape == cell_img.shape:
|
||||||
return result
|
return result
|
||||||
@@ -317,6 +343,46 @@ def _apply_cell_effect(cell_img, zone, cell_effect, interp, env, extra_params):
|
|||||||
raise ValueError(f"cell_effect must return an image array, got {type(result)}")
|
raise ValueError(f"cell_effect must return an image array, got {type(result)}")
|
||||||
|
|
||||||
|
|
||||||
|
def _get_legacy_ascii_primitives():
|
||||||
|
"""Import ASCII primitives from legacy primitives module.
|
||||||
|
|
||||||
|
These are loaded lazily to avoid import issues during module loading.
|
||||||
|
By the time a primitive library is loaded, sexp_effects.primitives
|
||||||
|
is already in sys.modules (imported by sexp_effects.__init__).
|
||||||
|
"""
|
||||||
|
from sexp_effects.primitives import (
|
||||||
|
prim_cell_sample,
|
||||||
|
prim_luminance_to_chars,
|
||||||
|
prim_render_char_grid,
|
||||||
|
prim_render_char_grid_fx,
|
||||||
|
prim_alphabet_char,
|
||||||
|
prim_alphabet_length,
|
||||||
|
prim_map_char_grid,
|
||||||
|
prim_map_colors,
|
||||||
|
prim_make_char_grid,
|
||||||
|
prim_set_char,
|
||||||
|
prim_get_char,
|
||||||
|
prim_char_grid_dimensions,
|
||||||
|
cell_sample_extended,
|
||||||
|
)
|
||||||
|
return {
|
||||||
|
'cell-sample': prim_cell_sample,
|
||||||
|
'cell-sample-extended': cell_sample_extended,
|
||||||
|
'luminance-to-chars': prim_luminance_to_chars,
|
||||||
|
'render-char-grid': prim_render_char_grid,
|
||||||
|
'render-char-grid-fx': prim_render_char_grid_fx,
|
||||||
|
'alphabet-char': prim_alphabet_char,
|
||||||
|
'alphabet-length': prim_alphabet_length,
|
||||||
|
'map-char-grid': prim_map_char_grid,
|
||||||
|
'map-colors': prim_map_colors,
|
||||||
|
'make-char-grid': prim_make_char_grid,
|
||||||
|
'set-char': prim_set_char,
|
||||||
|
'get-char': prim_get_char,
|
||||||
|
'char-grid-dimensions': prim_char_grid_dimensions,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
PRIMITIVES = {
|
PRIMITIVES = {
|
||||||
'ascii-fx-zone': prim_ascii_fx_zone,
|
'ascii-fx-zone': prim_ascii_fx_zone,
|
||||||
|
**_get_legacy_ascii_primitives(),
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -39,6 +39,32 @@ def prim_mod(a, b):
|
|||||||
return a % b
|
return a % b
|
||||||
|
|
||||||
|
|
||||||
|
def prim_abs(x):
|
||||||
|
return abs(x)
|
||||||
|
|
||||||
|
|
||||||
|
def prim_min(*args):
|
||||||
|
return min(args)
|
||||||
|
|
||||||
|
|
||||||
|
def prim_max(*args):
|
||||||
|
return max(args)
|
||||||
|
|
||||||
|
|
||||||
|
def prim_round(x):
|
||||||
|
return round(x)
|
||||||
|
|
||||||
|
|
||||||
|
def prim_floor(x):
|
||||||
|
import math
|
||||||
|
return math.floor(x)
|
||||||
|
|
||||||
|
|
||||||
|
def prim_ceil(x):
|
||||||
|
import math
|
||||||
|
return math.ceil(x)
|
||||||
|
|
||||||
|
|
||||||
# Comparison
|
# Comparison
|
||||||
def prim_lt(a, b):
|
def prim_lt(a, b):
|
||||||
return a < b
|
return a < b
|
||||||
@@ -98,6 +124,17 @@ def prim_get(obj, key, default=None):
|
|||||||
return default
|
return default
|
||||||
|
|
||||||
|
|
||||||
|
def prim_nth(seq, i):
|
||||||
|
i = int(i)
|
||||||
|
if 0 <= i < len(seq):
|
||||||
|
return seq[i]
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def prim_first(seq):
|
||||||
|
return seq[0] if seq else None
|
||||||
|
|
||||||
|
|
||||||
def prim_length(seq):
|
def prim_length(seq):
|
||||||
return len(seq)
|
return len(seq)
|
||||||
|
|
||||||
@@ -127,6 +164,31 @@ def prim_is_nil(x):
|
|||||||
return x is None
|
return x is None
|
||||||
|
|
||||||
|
|
||||||
|
# Higher-order / iteration
|
||||||
|
def prim_reduce(seq, init, fn):
|
||||||
|
"""(reduce seq init fn) — fold left: fn(fn(fn(init, s0), s1), s2) ..."""
|
||||||
|
acc = init
|
||||||
|
for item in seq:
|
||||||
|
acc = fn(acc, item)
|
||||||
|
return acc
|
||||||
|
|
||||||
|
|
||||||
|
def prim_map(seq, fn):
|
||||||
|
"""(map seq fn) — apply fn to each element, return new list."""
|
||||||
|
return [fn(item) for item in seq]
|
||||||
|
|
||||||
|
|
||||||
|
def prim_range(*args):
|
||||||
|
"""(range end), (range start end), or (range start end step) — integer range."""
|
||||||
|
if len(args) == 1:
|
||||||
|
return list(range(int(args[0])))
|
||||||
|
elif len(args) == 2:
|
||||||
|
return list(range(int(args[0]), int(args[1])))
|
||||||
|
elif len(args) >= 3:
|
||||||
|
return list(range(int(args[0]), int(args[1]), int(args[2])))
|
||||||
|
return []
|
||||||
|
|
||||||
|
|
||||||
# Core primitives dict
|
# Core primitives dict
|
||||||
PRIMITIVES = {
|
PRIMITIVES = {
|
||||||
# Arithmetic
|
# Arithmetic
|
||||||
@@ -135,6 +197,12 @@ PRIMITIVES = {
|
|||||||
'*': prim_mul,
|
'*': prim_mul,
|
||||||
'/': prim_div,
|
'/': prim_div,
|
||||||
'mod': prim_mod,
|
'mod': prim_mod,
|
||||||
|
'abs': prim_abs,
|
||||||
|
'min': prim_min,
|
||||||
|
'max': prim_max,
|
||||||
|
'round': prim_round,
|
||||||
|
'floor': prim_floor,
|
||||||
|
'ceil': prim_ceil,
|
||||||
|
|
||||||
# Comparison
|
# Comparison
|
||||||
'<': prim_lt,
|
'<': prim_lt,
|
||||||
@@ -151,6 +219,8 @@ PRIMITIVES = {
|
|||||||
|
|
||||||
# Data access
|
# Data access
|
||||||
'get': prim_get,
|
'get': prim_get,
|
||||||
|
'nth': prim_nth,
|
||||||
|
'first': prim_first,
|
||||||
'length': prim_length,
|
'length': prim_length,
|
||||||
'len': prim_length,
|
'len': prim_length,
|
||||||
'list': prim_list,
|
'list': prim_list,
|
||||||
@@ -161,4 +231,10 @@ PRIMITIVES = {
|
|||||||
'list?': prim_is_list,
|
'list?': prim_is_list,
|
||||||
'dict?': prim_is_dict,
|
'dict?': prim_is_dict,
|
||||||
'nil?': prim_is_nil,
|
'nil?': prim_is_nil,
|
||||||
|
|
||||||
|
# Higher-order / iteration
|
||||||
|
'reduce': prim_reduce,
|
||||||
|
'fold': prim_reduce,
|
||||||
|
'map': prim_map,
|
||||||
|
'range': prim_range,
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -100,6 +100,24 @@ def prim_affine(img, src_pts, dst_pts):
|
|||||||
return cv2.warpAffine(img, M, (w, h))
|
return cv2.warpAffine(img, M, (w, h))
|
||||||
|
|
||||||
|
|
||||||
|
def _get_legacy_geometry_primitives():
|
||||||
|
"""Import geometry primitives from legacy primitives module."""
|
||||||
|
from sexp_effects.primitives import (
|
||||||
|
prim_coords_x,
|
||||||
|
prim_coords_y,
|
||||||
|
prim_ripple_displace,
|
||||||
|
prim_fisheye_displace,
|
||||||
|
prim_kaleidoscope_displace,
|
||||||
|
)
|
||||||
|
return {
|
||||||
|
'coords-x': prim_coords_x,
|
||||||
|
'coords-y': prim_coords_y,
|
||||||
|
'ripple-displace': prim_ripple_displace,
|
||||||
|
'fisheye-displace': prim_fisheye_displace,
|
||||||
|
'kaleidoscope-displace': prim_kaleidoscope_displace,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
PRIMITIVES = {
|
PRIMITIVES = {
|
||||||
# Basic transforms
|
# Basic transforms
|
||||||
'translate': prim_translate,
|
'translate': prim_translate,
|
||||||
@@ -119,4 +137,7 @@ PRIMITIVES = {
|
|||||||
# Advanced transforms
|
# Advanced transforms
|
||||||
'perspective': prim_perspective,
|
'perspective': prim_perspective,
|
||||||
'affine': prim_affine,
|
'affine': prim_affine,
|
||||||
|
|
||||||
|
# Displace / coordinate ops (from legacy primitives)
|
||||||
|
**_get_legacy_geometry_primitives(),
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1444,42 +1444,80 @@ CHAR_ALPHABETS = {
|
|||||||
"digits": " 0123456789",
|
"digits": " 0123456789",
|
||||||
}
|
}
|
||||||
|
|
||||||
# Global atlas cache
|
# Global atlas cache: keyed on (frozenset(chars), cell_size) ->
|
||||||
|
# (atlas_array, char_to_idx) where atlas_array is (N, cell_size, cell_size) uint8.
|
||||||
_char_atlas_cache = {}
|
_char_atlas_cache = {}
|
||||||
|
_CHAR_ATLAS_CACHE_MAX = 32
|
||||||
|
|
||||||
|
|
||||||
def _get_char_atlas(alphabet: str, cell_size: int) -> dict:
|
def _get_char_atlas(alphabet: str, cell_size: int) -> dict:
|
||||||
"""Get or create character atlas for alphabet."""
|
"""Get or create character atlas for alphabet (legacy dict version)."""
|
||||||
cache_key = f"{alphabet}_{cell_size}"
|
atlas_arr, char_to_idx = _get_render_atlas(alphabet, cell_size)
|
||||||
if cache_key in _char_atlas_cache:
|
# Build legacy dict from array
|
||||||
return _char_atlas_cache[cache_key]
|
idx_to_char = {v: k for k, v in char_to_idx.items()}
|
||||||
|
return {idx_to_char[i]: atlas_arr[i] for i in range(len(atlas_arr))}
|
||||||
|
|
||||||
|
|
||||||
|
def _get_render_atlas(unique_chars_or_alphabet, cell_size: int):
|
||||||
|
"""Get or build a stacked numpy atlas for vectorised rendering.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
unique_chars_or_alphabet: Either an alphabet name (str looked up in
|
||||||
|
CHAR_ALPHABETS), a literal character string, or a set/frozenset
|
||||||
|
of characters.
|
||||||
|
cell_size: Pixel size of each cell.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
(atlas_array, char_to_idx) where
|
||||||
|
atlas_array: (num_chars, cell_size, cell_size) uint8 masks
|
||||||
|
char_to_idx: dict mapping character -> index in atlas_array
|
||||||
|
"""
|
||||||
|
if isinstance(unique_chars_or_alphabet, (set, frozenset)):
|
||||||
|
chars_tuple = tuple(sorted(unique_chars_or_alphabet))
|
||||||
|
else:
|
||||||
|
resolved = CHAR_ALPHABETS.get(unique_chars_or_alphabet, unique_chars_or_alphabet)
|
||||||
|
chars_tuple = tuple(resolved)
|
||||||
|
|
||||||
|
cache_key = (chars_tuple, cell_size)
|
||||||
|
cached = _char_atlas_cache.get(cache_key)
|
||||||
|
if cached is not None:
|
||||||
|
return cached
|
||||||
|
|
||||||
chars = CHAR_ALPHABETS.get(alphabet, alphabet) # Use as literal if not found
|
|
||||||
font = cv2.FONT_HERSHEY_SIMPLEX
|
font = cv2.FONT_HERSHEY_SIMPLEX
|
||||||
font_scale = cell_size / 20.0
|
font_scale = cell_size / 20.0
|
||||||
thickness = max(1, int(cell_size / 10))
|
thickness = max(1, int(cell_size / 10))
|
||||||
|
|
||||||
atlas = {}
|
n = len(chars_tuple)
|
||||||
for char in chars:
|
atlas = np.zeros((n, cell_size, cell_size), dtype=np.uint8)
|
||||||
char_img = np.zeros((cell_size, cell_size), dtype=np.uint8)
|
char_to_idx = {}
|
||||||
if char != ' ':
|
|
||||||
|
for i, char in enumerate(chars_tuple):
|
||||||
|
char_to_idx[char] = i
|
||||||
|
if char and char != ' ':
|
||||||
try:
|
try:
|
||||||
(text_w, text_h), baseline = cv2.getTextSize(char, font, font_scale, thickness)
|
(text_w, text_h), _ = cv2.getTextSize(char, font, font_scale, thickness)
|
||||||
text_x = max(0, (cell_size - text_w) // 2)
|
text_x = max(0, (cell_size - text_w) // 2)
|
||||||
text_y = (cell_size + text_h) // 2
|
text_y = (cell_size + text_h) // 2
|
||||||
cv2.putText(char_img, char, (text_x, text_y), font, font_scale, 255, thickness, cv2.LINE_AA)
|
cv2.putText(atlas[i], char, (text_x, text_y),
|
||||||
except:
|
font, font_scale, 255, thickness, cv2.LINE_AA)
|
||||||
|
except Exception:
|
||||||
pass
|
pass
|
||||||
atlas[char] = char_img
|
|
||||||
|
|
||||||
_char_atlas_cache[cache_key] = atlas
|
# Evict oldest entry if cache is full
|
||||||
return atlas
|
if len(_char_atlas_cache) >= _CHAR_ATLAS_CACHE_MAX:
|
||||||
|
_char_atlas_cache.pop(next(iter(_char_atlas_cache)))
|
||||||
|
|
||||||
|
_char_atlas_cache[cache_key] = (atlas, char_to_idx)
|
||||||
|
return atlas, char_to_idx
|
||||||
|
|
||||||
|
|
||||||
def prim_cell_sample(img: np.ndarray, cell_size: int) -> Tuple[np.ndarray, np.ndarray]:
|
def prim_cell_sample(img: np.ndarray, cell_size: int) -> Tuple[np.ndarray, np.ndarray]:
|
||||||
"""
|
"""
|
||||||
Sample image into cell grid, returning average colors and luminances.
|
Sample image into cell grid, returning average colors and luminances.
|
||||||
|
|
||||||
|
Uses cv2.resize with INTER_AREA (pixel-area averaging) which is
|
||||||
|
~25x faster than numpy reshape+mean for block downsampling.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
img: source image
|
img: source image
|
||||||
cell_size: size of each cell in pixels
|
cell_size: size of each cell in pixels
|
||||||
@@ -1497,13 +1535,10 @@ def prim_cell_sample(img: np.ndarray, cell_size: int) -> Tuple[np.ndarray, np.nd
|
|||||||
return (np.zeros((1, 1, 3), dtype=np.uint8),
|
return (np.zeros((1, 1, 3), dtype=np.uint8),
|
||||||
np.zeros((1, 1), dtype=np.float32))
|
np.zeros((1, 1), dtype=np.float32))
|
||||||
|
|
||||||
# Crop to grid
|
# Crop to exact grid then block-average via cv2 area interpolation.
|
||||||
grid_h, grid_w = rows * cell_size, cols * cell_size
|
grid_h, grid_w = rows * cell_size, cols * cell_size
|
||||||
cropped = img[:grid_h, :grid_w]
|
cropped = img[:grid_h, :grid_w]
|
||||||
|
colors = cv2.resize(cropped, (cols, rows), interpolation=cv2.INTER_AREA)
|
||||||
# Reshape and average
|
|
||||||
reshaped = cropped.reshape(rows, cell_size, cols, cell_size, 3)
|
|
||||||
colors = reshaped.mean(axis=(1, 3)).astype(np.uint8)
|
|
||||||
|
|
||||||
# Compute luminance
|
# Compute luminance
|
||||||
luminances = (0.299 * colors[:, :, 0] +
|
luminances = (0.299 * colors[:, :, 0] +
|
||||||
@@ -1628,16 +1663,11 @@ def prim_luminance_to_chars(luminances: np.ndarray, alphabet: str, contrast: flo
|
|||||||
indices = ((lum / 255) * (num_chars - 1)).astype(np.int32)
|
indices = ((lum / 255) * (num_chars - 1)).astype(np.int32)
|
||||||
indices = np.clip(indices, 0, num_chars - 1)
|
indices = np.clip(indices, 0, num_chars - 1)
|
||||||
|
|
||||||
# Convert to character array
|
# Vectorised conversion via numpy char array lookup
|
||||||
rows, cols = indices.shape
|
chars_arr = np.array(list(chars))
|
||||||
result = []
|
char_grid = chars_arr[indices.ravel()].reshape(indices.shape)
|
||||||
for r in range(rows):
|
|
||||||
row = []
|
|
||||||
for c in range(cols):
|
|
||||||
row.append(chars[indices[r, c]])
|
|
||||||
result.append(row)
|
|
||||||
|
|
||||||
return result
|
return char_grid.tolist()
|
||||||
|
|
||||||
|
|
||||||
def prim_render_char_grid(img: np.ndarray, chars: List[List[str]], colors: np.ndarray,
|
def prim_render_char_grid(img: np.ndarray, chars: List[List[str]], colors: np.ndarray,
|
||||||
@@ -1647,6 +1677,10 @@ def prim_render_char_grid(img: np.ndarray, chars: List[List[str]], colors: np.nd
|
|||||||
"""
|
"""
|
||||||
Render a grid of characters onto an image.
|
Render a grid of characters onto an image.
|
||||||
|
|
||||||
|
Uses vectorised numpy operations instead of per-cell Python loops:
|
||||||
|
the character atlas is looked up via fancy indexing and the full
|
||||||
|
mask + colour image are assembled in bulk.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
img: source image (for dimensions)
|
img: source image (for dimensions)
|
||||||
chars: 2D list of single characters
|
chars: 2D list of single characters
|
||||||
@@ -1664,12 +1698,11 @@ def prim_render_char_grid(img: np.ndarray, chars: List[List[str]], colors: np.nd
|
|||||||
|
|
||||||
# Parse background_color
|
# Parse background_color
|
||||||
if isinstance(background_color, (list, tuple)):
|
if isinstance(background_color, (list, tuple)):
|
||||||
# Legacy: accept RGB list
|
|
||||||
bg_color = tuple(int(c) for c in background_color[:3])
|
bg_color = tuple(int(c) for c in background_color[:3])
|
||||||
else:
|
else:
|
||||||
bg_color = parse_color(background_color)
|
bg_color = parse_color(background_color)
|
||||||
if bg_color is None:
|
if bg_color is None:
|
||||||
bg_color = (0, 0, 0) # Default to black
|
bg_color = (0, 0, 0)
|
||||||
|
|
||||||
# Handle invert_colors - swap fg and bg
|
# Handle invert_colors - swap fg and bg
|
||||||
if invert_colors and fg_color is not None:
|
if invert_colors and fg_color is not None:
|
||||||
@@ -1686,58 +1719,66 @@ def prim_render_char_grid(img: np.ndarray, chars: List[List[str]], colors: np.nd
|
|||||||
|
|
||||||
bg = list(bg_color)
|
bg = list(bg_color)
|
||||||
|
|
||||||
result = np.full((h, w, 3), bg, dtype=np.uint8)
|
# --- Build atlas & index grid ---
|
||||||
|
|
||||||
# Collect all unique characters to build minimal atlas
|
|
||||||
unique_chars = set()
|
unique_chars = set()
|
||||||
for row in chars:
|
for row in chars:
|
||||||
for ch in row:
|
for ch in row:
|
||||||
unique_chars.add(ch)
|
unique_chars.add(ch)
|
||||||
|
|
||||||
# Build atlas for unique chars
|
atlas, char_to_idx = _get_render_atlas(unique_chars, cell_size)
|
||||||
font = cv2.FONT_HERSHEY_SIMPLEX
|
|
||||||
font_scale = cell_size / 20.0
|
|
||||||
thickness = max(1, int(cell_size / 10))
|
|
||||||
|
|
||||||
atlas = {}
|
# Convert 2D char list to index array using ordinal lookup table
|
||||||
for char in unique_chars:
|
# (avoids per-cell Python dict lookup).
|
||||||
char_img = np.zeros((cell_size, cell_size), dtype=np.uint8)
|
space_idx = char_to_idx.get(' ', 0)
|
||||||
if char and char != ' ':
|
max_ord = max(ord(ch) for ch in char_to_idx) + 1
|
||||||
try:
|
ord_lookup = np.full(max_ord, space_idx, dtype=np.int32)
|
||||||
(text_w, text_h), _ = cv2.getTextSize(char, font, font_scale, thickness)
|
for ch, idx in char_to_idx.items():
|
||||||
text_x = max(0, (cell_size - text_w) // 2)
|
if ch:
|
||||||
text_y = (cell_size + text_h) // 2
|
ord_lookup[ord(ch)] = idx
|
||||||
cv2.putText(char_img, char, (text_x, text_y), font, font_scale, 255, thickness, cv2.LINE_AA)
|
|
||||||
except:
|
|
||||||
pass
|
|
||||||
atlas[char] = char_img
|
|
||||||
|
|
||||||
# Render characters
|
flat = [ch for row in chars for ch in row]
|
||||||
for r in range(rows):
|
ords = np.frombuffer(np.array(flat, dtype='U1'), dtype=np.uint32)
|
||||||
for c in range(cols):
|
char_indices = ord_lookup[ords].reshape(rows, cols)
|
||||||
char = chars[r][c]
|
|
||||||
if not char or char == ' ':
|
|
||||||
continue
|
|
||||||
|
|
||||||
y1, x1 = r * cell_size, c * cell_size
|
# --- Vectorised mask assembly ---
|
||||||
char_mask = atlas.get(char)
|
# atlas[char_indices] -> (rows, cols, cell_size, cell_size)
|
||||||
|
# Transpose to (rows, cell_size, cols, cell_size) then reshape to full image.
|
||||||
|
all_masks = atlas[char_indices]
|
||||||
|
full_mask = all_masks.transpose(0, 2, 1, 3).reshape(h, w)
|
||||||
|
|
||||||
if char_mask is None:
|
# Expand per-cell colours to per-pixel (only when needed).
|
||||||
continue
|
need_color_full = (color_mode in ("color", "invert")
|
||||||
|
or (fg_color is None and color_mode != "mono"))
|
||||||
|
|
||||||
if fg_color is not None:
|
if need_color_full:
|
||||||
# Use fixed color (named color or hex value)
|
color_full = np.repeat(
|
||||||
color = np.array(fg_color, dtype=np.uint8)
|
np.repeat(colors[:rows, :cols], cell_size, axis=0),
|
||||||
|
cell_size, axis=1)
|
||||||
|
|
||||||
|
# --- Vectorised colour composite ---
|
||||||
|
# Use element-wise multiply/np.where instead of boolean-indexed scatter
|
||||||
|
# for much better memory access patterns.
|
||||||
|
mask_u8 = (full_mask > 0).astype(np.uint8)[:, :, np.newaxis]
|
||||||
|
|
||||||
|
if color_mode == "invert":
|
||||||
|
# Background is source colour; characters are black.
|
||||||
|
# result = color_full * (1 - mask)
|
||||||
|
result = color_full * (1 - mask_u8)
|
||||||
|
elif fg_color is not None:
|
||||||
|
# Fixed foreground colour on background.
|
||||||
|
fg = np.array(fg_color, dtype=np.uint8)
|
||||||
|
bg_arr = np.array(bg, dtype=np.uint8)
|
||||||
|
result = np.where(mask_u8, fg, bg_arr).astype(np.uint8)
|
||||||
elif color_mode == "mono":
|
elif color_mode == "mono":
|
||||||
color = np.array([255, 255, 255], dtype=np.uint8)
|
bg_arr = np.array(bg, dtype=np.uint8)
|
||||||
elif color_mode == "invert":
|
result = np.where(mask_u8, np.uint8(255), bg_arr).astype(np.uint8)
|
||||||
result[y1:y1+cell_size, x1:x1+cell_size] = colors[r, c]
|
else:
|
||||||
color = np.array([0, 0, 0], dtype=np.uint8)
|
# "color" mode – each cell uses its source colour on bg.
|
||||||
else: # color
|
if bg == [0, 0, 0]:
|
||||||
color = colors[r, c]
|
result = color_full * mask_u8
|
||||||
|
else:
|
||||||
mask = char_mask > 0
|
bg_arr = np.array(bg, dtype=np.uint8)
|
||||||
result[y1:y1+cell_size, x1:x1+cell_size][mask] = color
|
result = np.where(mask_u8, color_full, bg_arr).astype(np.uint8)
|
||||||
|
|
||||||
# Resize to match original if needed
|
# Resize to match original if needed
|
||||||
orig_h, orig_w = img.shape[:2]
|
orig_h, orig_w = img.shape[:2]
|
||||||
|
|||||||
44
streaming/__init__.py
Normal file
44
streaming/__init__.py
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
"""
|
||||||
|
Streaming video compositor for real-time effect processing.
|
||||||
|
|
||||||
|
This module provides a frame-by-frame streaming architecture that:
|
||||||
|
- Reads from multiple video sources with automatic looping
|
||||||
|
- Applies effects inline (no intermediate files)
|
||||||
|
- Composites layers with time-varying weights
|
||||||
|
- Outputs to display, file, or stream
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
from streaming import StreamingCompositor, VideoSource, AudioAnalyzer
|
||||||
|
|
||||||
|
compositor = StreamingCompositor(
|
||||||
|
sources=["video1.mp4", "video2.mp4"],
|
||||||
|
effects_per_source=[...],
|
||||||
|
compositor_config={...},
|
||||||
|
)
|
||||||
|
|
||||||
|
# With live audio
|
||||||
|
audio = AudioAnalyzer(device=0)
|
||||||
|
compositor.run(output="output.mp4", duration=60, audio=audio)
|
||||||
|
|
||||||
|
# With preview window
|
||||||
|
compositor.run(output="preview", duration=60)
|
||||||
|
|
||||||
|
Backends:
|
||||||
|
- numpy: Works everywhere, ~3-5 fps (default)
|
||||||
|
- glsl: Requires GPU, 30+ fps real-time (future)
|
||||||
|
"""
|
||||||
|
|
||||||
|
from .sources import VideoSource, ImageSource
|
||||||
|
from .compositor import StreamingCompositor
|
||||||
|
from .backends import NumpyBackend, get_backend
|
||||||
|
from .output import DisplayOutput, FileOutput
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
"StreamingCompositor",
|
||||||
|
"VideoSource",
|
||||||
|
"ImageSource",
|
||||||
|
"NumpyBackend",
|
||||||
|
"get_backend",
|
||||||
|
"DisplayOutput",
|
||||||
|
"FileOutput",
|
||||||
|
]
|
||||||
486
streaming/audio.py
Normal file
486
streaming/audio.py
Normal file
@@ -0,0 +1,486 @@
|
|||||||
|
"""
|
||||||
|
Live audio analysis for reactive effects.
|
||||||
|
|
||||||
|
Provides real-time audio features:
|
||||||
|
- Energy (RMS amplitude)
|
||||||
|
- Beat detection
|
||||||
|
- Frequency bands (bass, mid, high)
|
||||||
|
"""
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from typing import Optional
|
||||||
|
import threading
|
||||||
|
import time
|
||||||
|
|
||||||
|
|
||||||
|
class AudioAnalyzer:
    """
    Real-time audio analyzer using sounddevice.

    Captures audio from microphone/line-in and computes
    features in real-time for effect parameter bindings.

    Thread-safety: the sounddevice callback writes the ring buffer on a
    separate thread; all buffer access is guarded by ``self._lock``.

    Example:
        analyzer = AudioAnalyzer(device=0)
        analyzer.start()

        # In compositor loop:
        energy = analyzer.get_energy()
        beat = analyzer.get_beat()

        analyzer.stop()
    """

    def __init__(
        self,
        device: Optional[int] = None,
        sample_rate: int = 44100,
        block_size: int = 1024,
        buffer_seconds: float = 0.5,
    ):
        """
        Initialize audio analyzer.

        Args:
            device: Audio input device index (None = default)
            sample_rate: Audio sample rate
            block_size: Samples per block
            buffer_seconds: Ring buffer duration
        """
        self.sample_rate = sample_rate
        self.block_size = block_size
        self.device = device

        # Ring buffer for recent audio
        buffer_size = int(sample_rate * buffer_seconds)
        self._buffer = np.zeros(buffer_size, dtype=np.float32)
        self._buffer_pos = 0  # next write index into the ring buffer
        self._lock = threading.Lock()

        # Beat detection state
        self._last_energy = 0
        self._energy_history = []  # recent energies for the adaptive threshold
        self._last_beat_time = 0
        self._beat_threshold = 1.5  # Energy ratio for beat detection
        self._min_beat_interval = 0.1  # Min seconds between beats

        # Stream state
        self._stream = None
        self._running = False

    def _audio_callback(self, indata, frames, time_info, status):
        """Called by sounddevice for each audio block; copies samples
        into the ring buffer (first channel only if multi-channel)."""
        with self._lock:
            # Add to ring buffer
            data = indata[:, 0] if len(indata.shape) > 1 else indata
            n = len(data)
            # NOTE(review): assumes n <= len(self._buffer); holds for the
            # default block_size (1024) vs buffer (0.5s * 44100 samples).
            if self._buffer_pos + n <= len(self._buffer):
                self._buffer[self._buffer_pos:self._buffer_pos + n] = data
            else:
                # Wrap around: split the block across the buffer end.
                first = len(self._buffer) - self._buffer_pos
                self._buffer[self._buffer_pos:] = data[:first]
                self._buffer[:n - first] = data[first:]
            self._buffer_pos = (self._buffer_pos + n) % len(self._buffer)

    def start(self) -> None:
        """Start audio capture. Degrades gracefully (no-op with a warning)
        if sounddevice is not installed."""
        try:
            import sounddevice as sd
        except ImportError:
            print("Warning: sounddevice not installed. Audio analysis disabled.")
            print("Install with: pip install sounddevice")
            return

        self._stream = sd.InputStream(
            device=self.device,
            channels=1,
            samplerate=self.sample_rate,
            blocksize=self.block_size,
            callback=self._audio_callback,
        )
        self._stream.start()
        self._running = True

    def stop(self) -> None:
        """Stop audio capture and release the stream."""
        if self._stream:
            self._stream.stop()
            self._stream.close()
            self._stream = None
        self._running = False

    def get_energy(self) -> float:
        """
        Get current audio energy (RMS amplitude).

        Returns:
            Energy value normalized to 0-1 range (approximately)
        """
        with self._lock:
            # Use recent samples
            recent = 2048
            if self._buffer_pos >= recent:
                data = self._buffer[self._buffer_pos - recent:self._buffer_pos]
            else:
                # Window wraps the ring boundary: stitch tail + head.
                data = np.concatenate([
                    self._buffer[-(recent - self._buffer_pos):],
                    self._buffer[:self._buffer_pos]
                ])

        # RMS energy
        rms = np.sqrt(np.mean(data ** 2))

        # Normalize (typical mic input is quite low)
        normalized = min(1.0, rms * 10)

        return normalized

    def get_beat(self) -> bool:
        """
        Detect if current moment is a beat.

        Simple onset detection based on energy spikes.

        Returns:
            True if beat detected, False otherwise
        """
        current_energy = self.get_energy()
        now = time.time()

        # Update energy history (bounded to the last 20 samples)
        self._energy_history.append(current_energy)
        if len(self._energy_history) > 20:
            self._energy_history.pop(0)

        # Need enough history
        if len(self._energy_history) < 5:
            self._last_energy = current_energy
            return False

        # Average recent energy (excluding the just-appended sample)
        avg_energy = np.mean(self._energy_history[:-1])

        # Beat if current energy is significantly above average
        is_beat = (
            current_energy > avg_energy * self._beat_threshold and
            now - self._last_beat_time > self._min_beat_interval and
            current_energy > self._last_energy  # Rising edge
        )

        if is_beat:
            self._last_beat_time = now

        self._last_energy = current_energy
        return is_beat

    def get_spectrum(self, bands: int = 3) -> np.ndarray:
        """
        Get frequency spectrum divided into bands.

        Args:
            bands: Number of frequency bands (default 3: bass, mid, high)

        Returns:
            Array of band energies, normalized to 0-1
        """
        with self._lock:
            # Use recent samples for FFT
            n = 2048
            if self._buffer_pos >= n:
                data = self._buffer[self._buffer_pos - n:self._buffer_pos]
            else:
                # Window wraps the ring boundary: stitch tail + head.
                data = np.concatenate([
                    self._buffer[-(n - self._buffer_pos):],
                    self._buffer[:self._buffer_pos]
                ])

        # FFT (Hann window reduces spectral leakage)
        fft = np.abs(np.fft.rfft(data * np.hanning(len(data))))

        # Divide into bands of equal bin count
        band_size = len(fft) // bands
        result = np.zeros(bands)
        for i in range(bands):
            start = i * band_size
            end = start + band_size
            result[i] = np.mean(fft[start:end])

        # Normalize to the loudest band
        max_val = np.max(result)
        if max_val > 0:
            result = result / max_val

        return result

    @property
    def is_running(self) -> bool:
        # True between start() and stop() (and only if sounddevice loaded).
        return self._running

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, *args):
        self.stop()
|
||||||
|
|
||||||
|
|
||||||
|
class FileAudioAnalyzer:
    """
    Audio analyzer that reads from a file (for testing/development).

    Pre-computes analysis and plays back in sync with video.
    """

    def __init__(self, path: str, analysis_data: Optional[dict] = None):
        """
        Initialize from audio file.

        Args:
            path: Path to audio file
            analysis_data: Pre-computed analysis tracks, e.g.
                {"energy": {"times": [...], "values": [...]},
                 "beats": {"times": [...]}}
        """
        self.path = path
        self.analysis_data = analysis_data or {}
        self._current_time = 0.0  # float for consistency with set_time()

    def set_time(self, t: float):
        """Set current playback time (seconds)."""
        self._current_time = t

    def get_energy(self) -> float:
        """Get energy at current time from pre-computed data."""
        track = self.analysis_data.get("energy", {})
        return self._interpolate(track, self._current_time)

    def get_beat(self) -> bool:
        """Check if current time is within 50ms of a pre-computed beat."""
        track = self.analysis_data.get("beats", {})
        times = track.get("times", [])
        return any(abs(beat_time - self._current_time) < 0.05
                   for beat_time in times)

    def _interpolate(self, track: dict, t: float) -> float:
        """Linearly interpolate the track's value at time t.

        Uses bisect (O(log n)) rather than a linear scan so long tracks
        stay cheap to query every frame. Returns 0.0 for empty tracks and
        clamps to the first/last value outside the track's time range.
        """
        times = track.get("times", [])
        values = track.get("values", [])

        if not times or not values:
            return 0.0

        if t <= times[0]:
            return values[0]
        if t >= times[-1]:
            return values[-1]

        # bisect_right returns the first index with times[i] > t, so the
        # bracketing segment is [i-1, i] and times[i] - times[i-1] > 0.
        i = bisect.bisect_right(times, t)
        alpha = (t - times[i - 1]) / (times[i] - times[i - 1])
        return values[i - 1] * (1 - alpha) + values[i] * alpha

    @property
    def is_running(self) -> bool:
        # File-backed analysis is always available.
        return True
|
||||||
|
|
||||||
|
|
||||||
|
class StreamingAudioAnalyzer:
    """
    Real-time audio analyzer that streams from a file.

    Reads audio in sync with video time and computes features on-the-fly.
    No pre-computation needed - analysis happens as frames are processed.

    Decoding is delegated to external ffprobe/ffmpeg processes; the full
    decoded mono PCM stream is held in memory after first use.
    """

    def __init__(self, path: str, sample_rate: int = 22050, hop_length: int = 512):
        """
        Initialize streaming audio analyzer.

        Args:
            path: Path to audio file
            sample_rate: Sample rate for analysis
            hop_length: Hop length for feature extraction

        Raises:
            KeyError/ValueError/JSONDecodeError if ffprobe fails or the
            file has no duration (ffprobe output is parsed unconditionally).
        """
        import subprocess
        import json

        self.path = path
        self.sample_rate = sample_rate
        self.hop_length = hop_length
        self._current_time = 0.0

        # Get audio duration via ffprobe (JSON output)
        cmd = ["ffprobe", "-v", "quiet", "-print_format", "json",
               "-show_format", str(path)]
        result = subprocess.run(cmd, capture_output=True, text=True)
        info = json.loads(result.stdout)
        self.duration = float(info["format"]["duration"])

        # Audio buffer and state
        self._audio_data = None
        self._energy_history = []  # (time, flux) pairs for adaptive threshold
        self._last_energy = 0
        self._last_beat_time = -1
        # NOTE(review): the two fields below are set but never read;
        # get_beat() uses its own local threshold/min_interval instead.
        self._beat_threshold = 1.5
        self._min_beat_interval = 0.15

        # Load audio lazily
        self._loaded = False

    def _load_audio(self):
        """Load audio data on first use (decode whole file via ffmpeg)."""
        if self._loaded:
            return

        import subprocess

        # Use ffmpeg to decode audio to raw PCM
        cmd = [
            "ffmpeg", "-v", "quiet",
            "-i", str(self.path),
            "-f", "f32le",  # 32-bit float, little-endian
            "-ac", "1",  # mono
            "-ar", str(self.sample_rate),
            "-"
        ]
        result = subprocess.run(cmd, capture_output=True)
        self._audio_data = np.frombuffer(result.stdout, dtype=np.float32)
        self._loaded = True

    def set_time(self, t: float):
        """Set current playback time (seconds)."""
        self._current_time = t

    def get_energy(self) -> float:
        """Compute RMS energy in a small window around the current time.

        Returns 0.0 if audio could not be decoded or the window is empty.
        """
        self._load_audio()

        if self._audio_data is None or len(self._audio_data) == 0:
            return 0.0

        # Get sample index for current time
        sample_idx = int(self._current_time * self.sample_rate)
        window_size = self.hop_length * 2

        start = max(0, sample_idx - window_size // 2)
        end = min(len(self._audio_data), sample_idx + window_size // 2)

        if start >= end:
            return 0.0

        # RMS energy
        chunk = self._audio_data[start:end]
        rms = np.sqrt(np.mean(chunk ** 2))

        # Normalize to 0-1 range (approximate)
        energy = min(1.0, rms * 3.0)

        self._last_energy = energy
        return energy

    def get_beat(self) -> bool:
        """Detect beat using spectral flux (change in frequency content).

        Stateful: updates a 1.5s rolling flux history and the last-beat
        time, so callers are expected to advance time monotonically.
        """
        self._load_audio()

        if self._audio_data is None or len(self._audio_data) == 0:
            return False

        # Get audio chunks for current and previous frame
        sample_idx = int(self._current_time * self.sample_rate)
        chunk_size = self.hop_length * 2

        # Current chunk
        start = max(0, sample_idx - chunk_size // 2)
        end = min(len(self._audio_data), sample_idx + chunk_size // 2)
        if end - start < chunk_size // 2:
            return False
        current_chunk = self._audio_data[start:end]

        # Previous chunk (one hop back)
        prev_start = max(0, start - self.hop_length)
        prev_end = max(0, end - self.hop_length)
        if prev_end <= prev_start:
            return False
        prev_chunk = self._audio_data[prev_start:prev_end]

        # Compute spectra (Hann-windowed magnitude FFTs)
        current_spec = np.abs(np.fft.rfft(current_chunk * np.hanning(len(current_chunk))))
        prev_spec = np.abs(np.fft.rfft(prev_chunk * np.hanning(len(prev_chunk))))

        # Spectral flux: sum of positive differences (onset = new frequencies appearing)
        min_len = min(len(current_spec), len(prev_spec))
        diff = current_spec[:min_len] - prev_spec[:min_len]
        flux = np.sum(np.maximum(0, diff))  # Only count increases

        # Normalize by spectrum size
        flux = flux / (min_len + 1)

        # Update flux history, discarding entries older than 1.5s
        self._energy_history.append((self._current_time, flux))
        while self._energy_history and self._energy_history[0][0] < self._current_time - 1.5:
            self._energy_history.pop(0)

        if len(self._energy_history) < 3:
            return False

        # Adaptive threshold based on recent flux values
        flux_values = [f for t, f in self._energy_history]
        mean_flux = np.mean(flux_values)
        std_flux = np.std(flux_values) + 0.001  # Avoid division by zero

        # Beat if flux is above mean (more sensitive threshold)
        threshold = mean_flux + std_flux * 0.3  # Lower = more sensitive
        min_interval = 0.1  # Allow up to 600 BPM
        time_ok = self._current_time - self._last_beat_time > min_interval

        is_beat = flux > threshold and time_ok

        if is_beat:
            self._last_beat_time = self._current_time

        return is_beat

    def get_spectrum(self, bands: int = 3) -> np.ndarray:
        """Get frequency spectrum at current time, as `bands` values in 0-1.

        Returns all-zeros if audio is unavailable or the window near the
        current time is too short for a full FFT frame.
        """
        self._load_audio()

        if self._audio_data is None or len(self._audio_data) == 0:
            return np.zeros(bands)

        sample_idx = int(self._current_time * self.sample_rate)
        n = 2048

        start = max(0, sample_idx - n // 2)
        end = min(len(self._audio_data), sample_idx + n // 2)

        if end - start < n // 2:
            return np.zeros(bands)

        chunk = self._audio_data[start:end]

        # FFT (Hann-windowed magnitudes)
        fft = np.abs(np.fft.rfft(chunk * np.hanning(len(chunk))))

        # Divide into bands of equal bin count
        band_size = len(fft) // bands
        result = np.zeros(bands)
        for i in range(bands):
            s, e = i * band_size, (i + 1) * band_size
            result[i] = np.mean(fft[s:e])

        # Normalize to the loudest band
        max_val = np.max(result)
        if max_val > 0:
            result = result / max_val

        return result

    @property
    def is_running(self) -> bool:
        # File-backed stream is always considered running.
        return True
|
||||||
308
streaming/backends.py
Normal file
308
streaming/backends.py
Normal file
@@ -0,0 +1,308 @@
|
|||||||
|
"""
|
||||||
|
Effect processing backends.
|
||||||
|
|
||||||
|
Provides abstraction over different rendering backends:
|
||||||
|
- numpy: CPU-based, works everywhere, ~3-5 fps
|
||||||
|
- glsl: GPU-based, requires OpenGL, 30+ fps (future)
|
||||||
|
"""
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from typing import List, Dict, Any, Optional
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
|
||||||
|
class Backend(ABC):
    """Abstract base class for effect processing backends.

    A backend takes decoded frames, applies per-source effect chains, and
    composites the layers into a single output frame.
    """

    @abstractmethod
    def process_frame(
        self,
        frames: List[np.ndarray],
        effects_per_frame: List[List[Dict]],
        compositor_config: Dict,
        t: float,
        analysis_data: Dict,
    ) -> np.ndarray:
        """
        Process multiple input frames through effects and composite.

        Args:
            frames: List of input frames (one per source)
            effects_per_frame: List of effect chains (one per source)
            compositor_config: How to blend the layers
            t: Current time in seconds
            analysis_data: Analysis data for binding resolution

        Returns:
            Composited output frame
        """
        pass

    @abstractmethod
    def load_effect(self, effect_path: Path) -> Any:
        """Load an effect definition so later frames can reference it."""
        pass
|
||||||
|
|
||||||
|
|
||||||
|
class NumpyBackend(Backend):
    """
    CPU-based effect processing using NumPy.

    Uses existing sexp_effects interpreter for effect execution, with a
    fast-path of native numpy/cv2 implementations for common effects.
    Works on any system, but limited to ~3-5 fps for complex effects.
    """

    def __init__(self, recipe_dir: Path = None, minimal_primitives: bool = True):
        # recipe_dir: base directory for resolving effect paths
        # minimal_primitives: forwarded to the sexp interpreter factory
        self.recipe_dir = recipe_dir or Path(".")
        self.minimal_primitives = minimal_primitives
        self._interpreter = None  # lazily created sexp interpreter
        self._loaded_effects = {}  # effect path -> effect name (stem)

    def _get_interpreter(self):
        """Lazy-load the sexp interpreter."""
        if self._interpreter is None:
            from sexp_effects import get_interpreter
            self._interpreter = get_interpreter(minimal_primitives=self.minimal_primitives)
        return self._interpreter

    def load_effect(self, effect_path: Path) -> Any:
        """Load an effect from sexp file; cached per path. Returns the
        effect name (file stem)."""
        effect_key = str(effect_path)
        if effect_key not in self._loaded_effects:
            interp = self._get_interpreter()
            interp.load_effect(str(effect_path))
            self._loaded_effects[effect_key] = effect_path.stem
        return self._loaded_effects[effect_key]

    def _resolve_binding(self, value: Any, t: float, analysis_data: Dict) -> Any:
        """Resolve a parameter binding to its value at time t.

        Non-dict values (plain numbers etc.) pass through unchanged.
        A binding dict is marked by a "_binding" or "_bind" key and names
        an analysis track whose (times, values) are linearly interpolated.
        """
        if not isinstance(value, dict):
            return value

        if "_binding" in value or "_bind" in value:
            source = value.get("source") or value.get("_bind")
            # NOTE(review): 'feature' is read but never used below.
            feature = value.get("feature", "values")
            range_map = value.get("range")

            track = analysis_data.get(source, {})
            times = track.get("times", [])
            values = track.get("values", [])

            if not times or not values:
                return 0.0

            # Find value at time t (linear interpolation, clamped at ends)
            if t <= times[0]:
                val = values[0]
            elif t >= times[-1]:
                val = values[-1]
            else:
                # Linear scan for the bracketing segment
                for i in range(len(times) - 1):
                    if times[i] <= t <= times[i + 1]:
                        alpha = (t - times[i]) / (times[i + 1] - times[i])
                        val = values[i] * (1 - alpha) + values[i + 1] * alpha
                        break
                else:
                    val = values[-1]

            # Apply range mapping: lerp the 0-1 value into [lo, hi]
            if range_map and len(range_map) == 2:
                val = range_map[0] + val * (range_map[1] - range_map[0])

            return val

        return value

    def _apply_effect(
        self,
        frame: np.ndarray,
        effect_name: str,
        params: Dict,
        t: float,
        analysis_data: Dict,
    ) -> np.ndarray:
        """Apply a single effect to a frame.

        Tries a native numpy/cv2 fast path first, then falls back to the
        sexp interpreter; unknown effects pass the frame through.
        """
        # Resolve bindings in params (metadata keys are skipped)
        resolved_params = {"_time": t}
        for key, value in params.items():
            if key in ("effect", "effect_path", "cid", "analysis_refs"):
                continue
            resolved_params[key] = self._resolve_binding(value, t, analysis_data)

        # Try fast native effects first
        result = self._apply_native_effect(frame, effect_name, resolved_params)
        if result is not None:
            return result

        # Fall back to sexp interpreter for complex effects
        interp = self._get_interpreter()
        if effect_name in interp.effects:
            result, _ = interp.run_effect(effect_name, frame, resolved_params, {})
            return result

        # Unknown effect - pass through
        return frame

    def _apply_native_effect(
        self,
        frame: np.ndarray,
        effect_name: str,
        params: Dict,
    ) -> Optional[np.ndarray]:
        """Fast native numpy effects for real-time streaming.

        Returns None when effect_name has no native implementation (the
        caller then falls back to the sexp interpreter). Near-identity
        parameters short-circuit to the unmodified frame.
        """
        import cv2

        if effect_name == "zoom":
            amount = float(params.get("amount", 1.0))
            if abs(amount - 1.0) < 0.01:
                return frame
            h, w = frame.shape[:2]
            # Crop center and resize
            new_w, new_h = int(w / amount), int(h / amount)
            x1, y1 = (w - new_w) // 2, (h - new_h) // 2
            cropped = frame[y1:y1+new_h, x1:x1+new_w]
            return cv2.resize(cropped, (w, h))

        elif effect_name == "rotate":
            angle = float(params.get("angle", 0))
            if abs(angle) < 0.5:
                return frame
            h, w = frame.shape[:2]
            center = (w // 2, h // 2)
            matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
            return cv2.warpAffine(frame, matrix, (w, h))

        elif effect_name == "brightness":
            amount = float(params.get("amount", 1.0))
            return np.clip(frame * amount, 0, 255).astype(np.uint8)

        elif effect_name == "invert":
            amount = float(params.get("amount", 1.0))
            if amount < 0.5:
                return frame
            return 255 - frame

        # Not a native effect
        return None

    def process_frame(
        self,
        frames: List[np.ndarray],
        effects_per_frame: List[List[Dict]],
        compositor_config: Dict,
        t: float,
        analysis_data: Dict,
    ) -> np.ndarray:
        """
        Process frames through effects and composite.

        Empty input yields a black 720p frame; a single processed layer
        is returned directly without compositing.
        """
        if not frames:
            return np.zeros((720, 1280, 3), dtype=np.uint8)

        processed = []

        # Apply effects to each input frame
        # NOTE(review): loop index i is unused.
        for i, (frame, effects) in enumerate(zip(frames, effects_per_frame)):
            result = frame.copy()
            for effect_config in effects:
                effect_name = effect_config.get("effect", "")
                if effect_name:
                    result = self._apply_effect(
                        result, effect_name, effect_config, t, analysis_data
                    )
            processed.append(result)

        # Composite layers
        if len(processed) == 1:
            return processed[0]

        return self._composite(processed, compositor_config, t, analysis_data)

    def _composite(
        self,
        frames: List[np.ndarray],
        config: Dict,
        t: float,
        analysis_data: Dict,
    ) -> np.ndarray:
        """Composite multiple frames into one via a weighted blend.

        Weights may be binding dicts (resolved at time t) and are
        normalized to sum to 1; frames are resized to the first frame's
        dimensions before blending.
        """
        # NOTE(review): 'mode' is read but unused - only the weighted
        # blend path exists today.
        mode = config.get("mode", "alpha")
        weights = config.get("weights", [1.0 / len(frames)] * len(frames))

        # Resolve weight bindings
        resolved_weights = []
        for w in weights:
            resolved_weights.append(self._resolve_binding(w, t, analysis_data))

        # Normalize weights (fall back to equal weights if all zero)
        total = sum(resolved_weights)
        if total > 0:
            resolved_weights = [w / total for w in resolved_weights]
        else:
            resolved_weights = [1.0 / len(frames)] * len(frames)

        # Resize frames to match first frame
        target_h, target_w = frames[0].shape[:2]
        resized = []
        for frame in frames:
            if frame.shape[:2] != (target_h, target_w):
                import cv2
                frame = cv2.resize(frame, (target_w, target_h))
            resized.append(frame.astype(np.float32))

        # Weighted blend in float, clipped back to uint8
        result = np.zeros_like(resized[0])
        for frame, weight in zip(resized, resolved_weights):
            result += frame * weight

        return np.clip(result, 0, 255).astype(np.uint8)
|
||||||
|
|
||||||
|
|
||||||
|
class GLSLBackend(Backend):
    """
    GPU-accelerated effect processing via OpenGL/GLSL shaders.

    Intended to deliver 30+ fps real-time processing on hardware with
    OpenGL 3.3+ support (or the Mesa software renderer). Currently a
    placeholder: construction always fails with NotImplementedError.

    TODO: Implement when ready for GPU acceleration.
    """

    def __init__(self):
        # Fail loudly at construction time until the GPU path exists.
        raise NotImplementedError(
            "GLSL backend not yet implemented. Use NumpyBackend for now."
        )

    def process_frame(
        self,
        frames: List[np.ndarray],
        effects_per_frame: List[List[Dict]],
        compositor_config: Dict,
        t: float,
        analysis_data: Dict,
    ) -> np.ndarray:
        # Stub satisfying the abstract interface; unreachable because
        # __init__ raises before any instance can exist.
        return None

    def load_effect(self, effect_path: Path) -> Any:
        # Stub satisfying the abstract interface; unreachable because
        # __init__ raises before any instance can exist.
        return None
|
||||||
|
|
||||||
|
|
||||||
|
def get_backend(name: str = "numpy", **kwargs) -> Backend:
    """
    Look up and construct a processing backend by name.

    Args:
        name: "numpy" or "glsl"
        **kwargs: Backend-specific options

    Returns:
        Backend instance

    Raises:
        ValueError: if name is not a recognized backend
    """
    if name == "glsl":
        return GLSLBackend(**kwargs)
    if name == "numpy":
        return NumpyBackend(**kwargs)
    raise ValueError(f"Unknown backend: {name}")
|
||||||
595
streaming/compositor.py
Normal file
595
streaming/compositor.py
Normal file
@@ -0,0 +1,595 @@
|
|||||||
|
"""
|
||||||
|
Streaming video compositor.
|
||||||
|
|
||||||
|
Main entry point for the streaming pipeline. Combines:
|
||||||
|
- Multiple video sources (with looping)
|
||||||
|
- Per-source effect chains
|
||||||
|
- Layer compositing
|
||||||
|
- Optional live audio analysis
|
||||||
|
- Output to display/file/stream
|
||||||
|
"""
|
||||||
|
|
||||||
|
import time
|
||||||
|
import sys
|
||||||
|
import numpy as np
|
||||||
|
from typing import List, Dict, Any, Optional, Union
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
from .sources import Source, VideoSource
|
||||||
|
from .backends import Backend, NumpyBackend, get_backend
|
||||||
|
from .output import Output, DisplayOutput, FileOutput, MultiOutput
|
||||||
|
|
||||||
|
|
||||||
|
class StreamingCompositor:
|
||||||
|
"""
|
||||||
|
Real-time streaming video compositor.
|
||||||
|
|
||||||
|
Reads frames from multiple sources, applies effects, composites layers,
|
||||||
|
and outputs the result - all frame-by-frame without intermediate files.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
compositor = StreamingCompositor(
|
||||||
|
sources=["video1.mp4", "video2.mp4"],
|
||||||
|
effects_per_source=[
|
||||||
|
[{"effect": "rotate", "angle": 45}],
|
||||||
|
[{"effect": "zoom", "amount": 1.5}],
|
||||||
|
],
|
||||||
|
compositor_config={"mode": "alpha", "weights": [0.5, 0.5]},
|
||||||
|
)
|
||||||
|
compositor.run(output="preview", duration=60)
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
sources: List[Union[str, Source]],
|
||||||
|
effects_per_source: List[List[Dict]] = None,
|
||||||
|
compositor_config: Dict = None,
|
||||||
|
analysis_data: Dict = None,
|
||||||
|
backend: str = "numpy",
|
||||||
|
recipe_dir: Path = None,
|
||||||
|
fps: float = 30,
|
||||||
|
audio_source: str = None,
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Initialize the streaming compositor.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
sources: List of video paths or Source objects
|
||||||
|
effects_per_source: List of effect chains, one per source
|
||||||
|
compositor_config: How to blend layers (mode, weights)
|
||||||
|
analysis_data: Pre-computed analysis data for bindings
|
||||||
|
backend: "numpy" or "glsl"
|
||||||
|
recipe_dir: Directory for resolving relative effect paths
|
||||||
|
fps: Output frame rate
|
||||||
|
audio_source: Path to audio file for streaming analysis
|
||||||
|
"""
|
||||||
|
self.fps = fps
|
||||||
|
self.recipe_dir = recipe_dir or Path(".")
|
||||||
|
self.analysis_data = analysis_data or {}
|
||||||
|
|
||||||
|
# Initialize streaming audio analyzer if audio source provided
|
||||||
|
self._audio_analyzer = None
|
||||||
|
self._audio_source = audio_source
|
||||||
|
if audio_source:
|
||||||
|
from .audio import StreamingAudioAnalyzer
|
||||||
|
self._audio_analyzer = StreamingAudioAnalyzer(audio_source)
|
||||||
|
print(f"Streaming audio: {audio_source}", file=sys.stderr)
|
||||||
|
|
||||||
|
# Initialize sources
|
||||||
|
self.sources: List[Source] = []
|
||||||
|
for src in sources:
|
||||||
|
if isinstance(src, Source):
|
||||||
|
self.sources.append(src)
|
||||||
|
elif isinstance(src, (str, Path)):
|
||||||
|
self.sources.append(VideoSource(str(src), target_fps=fps))
|
||||||
|
else:
|
||||||
|
raise ValueError(f"Unknown source type: {type(src)}")
|
||||||
|
|
||||||
|
# Effect chains (default: no effects)
|
||||||
|
self.effects_per_source = effects_per_source or [[] for _ in self.sources]
|
||||||
|
if len(self.effects_per_source) != len(self.sources):
|
||||||
|
raise ValueError(
|
||||||
|
f"effects_per_source length ({len(self.effects_per_source)}) "
|
||||||
|
f"must match sources length ({len(self.sources)})"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Compositor config (default: equal blend)
|
||||||
|
self.compositor_config = compositor_config or {
|
||||||
|
"mode": "alpha",
|
||||||
|
"weights": [1.0 / len(self.sources)] * len(self.sources),
|
||||||
|
}
|
||||||
|
|
||||||
|
# Initialize backend
|
||||||
|
self.backend: Backend = get_backend(
|
||||||
|
backend,
|
||||||
|
recipe_dir=self.recipe_dir,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Load effects
|
||||||
|
self._load_effects()
|
||||||
|
|
||||||
|
def _load_effects(self):
|
||||||
|
"""Pre-load all effect definitions."""
|
||||||
|
for effects in self.effects_per_source:
|
||||||
|
for effect_config in effects:
|
||||||
|
effect_path = effect_config.get("effect_path")
|
||||||
|
if effect_path:
|
||||||
|
full_path = self.recipe_dir / effect_path
|
||||||
|
if full_path.exists():
|
||||||
|
self.backend.load_effect(full_path)
|
||||||
|
|
||||||
|
def _create_output(
    self,
    output: Union[str, Output],
    size: tuple,
) -> Output:
    """Resolve an output spec to a concrete Output instance.

    Accepts a ready-made Output object (returned as-is), the strings
    "preview" (mpv window) or "null" (discard), or any other string,
    which is treated as a file path to record into.
    """
    if isinstance(output, Output):
        # Caller supplied a ready-made sink; use it directly.
        return output

    if not isinstance(output, str):
        raise ValueError(f"Unknown output type: {output}")

    if output == "preview":
        return DisplayOutput(
            "Streaming Preview",
            size,
            audio_source=self._audio_source,
            fps=self.fps,
        )

    if output == "null":
        from .output import NullOutput
        return NullOutput()

    # Any other string is a destination file path.
    return FileOutput(output, size, fps=self.fps, audio_source=self._audio_source)
|
||||||
|
|
||||||
|
def run(
    self,
    output: Union[str, Output] = "preview",
    duration: float = None,
    audio_analyzer=None,
    show_fps: bool = True,
    recipe_executor=None,
):
    """
    Run the streaming compositor.

    Reads one frame per source per tick, processes/composites them, and
    writes the result to the chosen output until the duration elapses,
    the output closes (e.g. the preview window is quit), or a per-frame
    error occurs.

    Args:
        output: Output target - "preview", filename, or Output object
        duration: Duration in seconds (None = run until quit)
        audio_analyzer: Optional AudioAnalyzer for live audio reactivity
        show_fps: Show FPS counter in console
        recipe_executor: Optional StreamingRecipeExecutor for full recipe logic
    """
    # Determine output size from first source
    output_size = self.sources[0].size

    # Create output
    out = self._create_output(output, output_size)

    # Determine duration
    if duration is None:
        # Run until stopped (or min source duration if not looping)
        duration = min(s.duration for s in self.sources)
        if duration == float('inf'):
            duration = 3600  # 1 hour max for live sources

    total_frames = int(duration * self.fps)
    frame_time = 1.0 / self.fps  # seconds of media time per frame

    print(f"Streaming: {len(self.sources)} sources -> {output}", file=sys.stderr)
    print(f"Duration: {duration:.1f}s, {total_frames} frames @ {self.fps}fps", file=sys.stderr)
    print(f"Output size: {output_size[0]}x{output_size[1]}", file=sys.stderr)
    print(f"Press 'q' to quit (if preview)", file=sys.stderr)

    # Frame loop.
    # NOTE(review): there is no sleep here - pacing appears to come from
    # the output sink's pipe backpressure; confirm behavior for NullOutput.
    start_time = time.time()
    frame_count = 0
    fps_update_interval = 30  # Update FPS display every N frames
    last_fps_time = start_time
    last_fps_count = 0

    try:
        for frame_num in range(total_frames):
            if not out.is_open:
                print(f"\nOutput closed at frame {frame_num}", file=sys.stderr)
                break

            # Media timestamp for this frame.
            t = frame_num * frame_time

            try:
                # Update analysis data from streaming audio (file-based)
                energy = 0.0
                is_beat = False
                if self._audio_analyzer:
                    self._update_from_audio(self._audio_analyzer, t)
                    energy = self.analysis_data.get("live_energy", {}).get("values", [0])[0]
                    is_beat = self.analysis_data.get("live_beat", {}).get("values", [0])[0] > 0.5
                elif audio_analyzer:
                    # Caller-supplied analyzer is used only when no
                    # built-in one was configured.
                    self._update_from_audio(audio_analyzer, t)
                    energy = self.analysis_data.get("live_energy", {}).get("values", [0])[0]
                    is_beat = self.analysis_data.get("live_beat", {}).get("values", [0])[0] > 0.5

                # Read frames from all sources
                frames = [src.read_frame(t) for src in self.sources]

                # Process through recipe executor if provided
                if recipe_executor:
                    result = self._process_with_executor(
                        frames, recipe_executor, energy, is_beat, t
                    )
                else:
                    # Simple backend processing
                    result = self.backend.process_frame(
                        frames,
                        self.effects_per_source,
                        self.compositor_config,
                        t,
                        self.analysis_data,
                    )

                # Output
                out.write(result, t)
                frame_count += 1

                # FPS display (measured over the last interval, not cumulative)
                if show_fps and frame_count % fps_update_interval == 0:
                    now = time.time()
                    elapsed = now - last_fps_time
                    if elapsed > 0:
                        current_fps = (frame_count - last_fps_count) / elapsed
                        progress = frame_num / total_frames * 100
                        print(
                            f"\r {progress:5.1f}% | {current_fps:5.1f} fps | "
                            f"frame {frame_num}/{total_frames}",
                            end="", file=sys.stderr
                        )
                        last_fps_time = now
                        last_fps_count = frame_count

            except Exception as e:
                # Any per-frame failure aborts the run after logging.
                print(f"\nError at frame {frame_num}, t={t:.1f}s: {e}", file=sys.stderr)
                import traceback
                traceback.print_exc()
                break

    except KeyboardInterrupt:
        print("\nInterrupted", file=sys.stderr)
    finally:
        # Always release the sink and all sources, even on error/interrupt.
        out.close()
        for src in self.sources:
            if hasattr(src, 'close'):
                src.close()

    # Final stats
    elapsed = time.time() - start_time
    avg_fps = frame_count / elapsed if elapsed > 0 else 0
    print(f"\nCompleted: {frame_count} frames in {elapsed:.1f}s ({avg_fps:.1f} fps avg)", file=sys.stderr)
|
||||||
|
|
||||||
|
def _process_with_executor(
    self,
    frames: List[np.ndarray],
    executor,
    energy: float,
    is_beat: bool,
    t: float,
) -> np.ndarray:
    """
    Process frames using the recipe executor for full pipeline.

    Implements:
    1. process-pair: two clips per source with effects, blended
    2. cycle-crossfade: dynamic composition with zoom and weights
    3. Final effects: whole-spin, ripple

    Args:
        frames: One frame per source; mismatched sizes are letterboxed
            to the first source's size.
        executor: Recipe executor driving per-frame weights/zooms/params.
        energy: Current audio energy value (0.0 when no audio).
        is_beat: Whether a beat was detected on this frame.
        t: Frame timestamp in seconds.

    Returns:
        The composited uint8 frame, same size as the first source.
    """
    import cv2

    # Target size from first source
    target_h, target_w = frames[0].shape[:2]

    # Resize all frames to target size (letterbox to preserve aspect ratio)
    resized_frames = []
    for frame in frames:
        fh, fw = frame.shape[:2]
        if (fh, fw) != (target_h, target_w):
            # Calculate scale to fit while preserving aspect ratio
            scale = min(target_w / fw, target_h / fh)
            new_w, new_h = int(fw * scale), int(fh * scale)
            resized = cv2.resize(frame, (new_w, new_h))
            # Center on black canvas
            canvas = np.zeros((target_h, target_w, 3), dtype=np.uint8)
            x_off = (target_w - new_w) // 2
            y_off = (target_h - new_h) // 2
            canvas[y_off:y_off+new_h, x_off:x_off+new_w] = resized
            resized_frames.append(canvas)
        else:
            resized_frames.append(frame)
    frames = resized_frames

    # Update executor state
    executor.on_frame(energy, is_beat, t)

    # Get weights to know which sources are active
    weights = executor.get_cycle_weights()

    # Process each source as a "pair" (clip A and B with different effects)
    processed_pairs = []

    for i, frame in enumerate(frames):
        # Skip sources with zero weight (but still need placeholder)
        if i < len(weights) and weights[i] < 0.001:
            processed_pairs.append(None)
            continue
        # Get effect params for clip A and B
        params_a = executor.get_effect_params(i, "a", energy)
        params_b = executor.get_effect_params(i, "b", energy)
        pair_params = executor.get_pair_params(i)

        # Process clip A
        clip_a = self._apply_clip_effects(frame.copy(), params_a, t)

        # Process clip B
        clip_b = self._apply_clip_effects(frame.copy(), params_b, t)

        # Blend A and B using pair_mix opacity
        opacity = pair_params["blend_opacity"]
        blended = cv2.addWeighted(
            clip_a, 1 - opacity,
            clip_b, opacity,
            0
        )

        # Apply pair rotation around the frame center
        h, w = blended.shape[:2]
        center = (w // 2, h // 2)
        angle = pair_params["pair_rotation"]
        if abs(angle) > 0.5:
            matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
            blended = cv2.warpAffine(blended, matrix, (w, h))

        processed_pairs.append(blended)

    # Cycle-crossfade composition.
    # NOTE(review): weights are re-fetched here in case the per-pair
    # executor calls above advance internal state - confirm whether a
    # single fetch would suffice.
    weights = executor.get_cycle_weights()
    zooms = executor.get_cycle_zooms()

    # Apply zoom per pair and composite as a weighted float accumulation
    h, w = target_h, target_w
    result = np.zeros((h, w, 3), dtype=np.float32)

    for idx, (pair, weight, zoom) in enumerate(zip(processed_pairs, weights, zooms)):
        # Skip zero-weight sources
        if pair is None or weight < 0.001:
            continue

        # Apply zoom
        if zoom > 1.01:
            # Zoom in: crop center and resize up
            new_w, new_h = int(w / zoom), int(h / zoom)
            if new_w > 0 and new_h > 0:
                x1, y1 = (w - new_w) // 2, (h - new_h) // 2
                cropped = pair[y1:y1+new_h, x1:x1+new_w]
                pair = cv2.resize(cropped, (w, h))
        elif zoom < 0.99:
            # Zoom out: shrink video and center on black
            scaled_w, scaled_h = int(w * zoom), int(h * zoom)
            if scaled_w > 0 and scaled_h > 0:
                shrunk = cv2.resize(pair, (scaled_w, scaled_h))
                canvas = np.zeros((h, w, 3), dtype=np.uint8)
                x_off, y_off = (w - scaled_w) // 2, (h - scaled_h) // 2
                canvas[y_off:y_off+scaled_h, x_off:x_off+scaled_w] = shrunk
                pair = canvas.copy()

        # Draw colored border - size indicates zoom level
        border_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0)]
        color = border_colors[idx % 4]
        thickness = max(3, int(10 * weight))  # Thicker border = higher weight
        pair = np.ascontiguousarray(pair)
        pair[:thickness, :] = color
        pair[-thickness:, :] = color
        pair[:, :thickness] = color
        pair[:, -thickness:] = color

        result += pair.astype(np.float32) * weight

    result = np.clip(result, 0, 255).astype(np.uint8)

    # Apply final effects (whole-spin, ripple)
    final_params = executor.get_final_effects(energy)

    # Whole spin
    spin_angle = final_params["whole_spin_angle"]
    if abs(spin_angle) > 0.5:
        center = (w // 2, h // 2)
        matrix = cv2.getRotationMatrix2D(center, spin_angle, 1.0)
        result = cv2.warpAffine(result, matrix, (w, h))

    # Ripple effect
    amp = final_params["ripple_amplitude"]
    if amp > 1:
        result = self._apply_ripple(result, amp,
                                    final_params["ripple_cx"],
                                    final_params["ripple_cy"],
                                    t)

    return result
|
||||||
|
|
||||||
|
def _apply_clip_effects(self, frame: np.ndarray, params: dict, t: float) -> np.ndarray:
    """Apply the per-clip effect stack: rotate, zoom, invert, hue_shift, ascii.

    Effects run in that fixed order; each is gated by a small threshold so
    near-neutral parameter values skip the work entirely.
    """
    import cv2

    height, width = frame.shape[:2]

    # --- rotation about the frame centre ---
    rot = params["rotate_angle"]
    if abs(rot) > 0.5:
        rot_mat = cv2.getRotationMatrix2D((width // 2, height // 2), rot, 1.0)
        frame = cv2.warpAffine(frame, rot_mat, (width, height))

    # --- centre zoom (crop then upscale back to full size) ---
    zoom_amt = params["zoom_amount"]
    if abs(zoom_amt - 1.0) > 0.01:
        crop_w, crop_h = int(width / zoom_amt), int(height / zoom_amt)
        if crop_w > 0 and crop_h > 0:
            left = max(0, (width - crop_w) // 2)
            top = max(0, (height - crop_h) // 2)
            right = min(width, left + crop_w)
            bottom = min(height, top + crop_h)
            if right > left and bottom > top:
                frame = cv2.resize(frame[top:bottom, left:right], (width, height))

    # --- colour inversion ---
    if params["invert_amount"] > 0.5:
        frame = 255 - frame

    # --- hue rotation via HSV (OpenCV hue is 0-179, i.e. degrees / 2) ---
    shift = params["hue_degrees"]
    if abs(shift) > 1:
        hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
        hsv[:, :, 0] = (hsv[:, :, 0].astype(np.int32) + int(shift / 2)) % 180
        frame = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)

    # --- ASCII-art rendering ---
    if params["ascii_mix"] > 0.5:
        frame = self._apply_ascii(frame, max(4, int(params["ascii_char_size"])))

    return frame
|
||||||
|
|
||||||
|
def _apply_ascii(self, frame: np.ndarray, char_size: int) -> np.ndarray:
    """Render the frame as colored ASCII art.

    Divides the frame into char_size x char_size cells; each cell is
    replaced by a character from a density ramp chosen by the cell's
    luminance, drawn in the cell's average color on a black background.

    Args:
        frame: RGB uint8 frame (H, W, 3).
        char_size: Cell/glyph size in pixels.

    Returns:
        RGB frame of the same size as the input (unchanged if the frame
        is smaller than a single cell).
    """
    import cv2
    from PIL import Image, ImageDraw, ImageFont

    h, w = frame.shape[:2]
    chars = " .:-=+*#%@"  # density ramp, dark -> bright

    # Get font; fall back to PIL's builtin bitmap font if the TTF is absent.
    # (was a bare `except:`; ImageFont.truetype raises OSError on a
    # missing or unreadable font file)
    try:
        font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", char_size)
    except OSError:
        font = ImageFont.load_default()

    # Sample cells using area interpolation (fast block average)
    rows = h // char_size
    cols = w // char_size
    if rows < 1 or cols < 1:
        return frame

    # Crop to exact grid and downsample: one pixel per cell = mean color
    cropped = frame[:rows * char_size, :cols * char_size]
    cell_colors = cv2.resize(cropped, (cols, rows), interpolation=cv2.INTER_AREA)

    # Compute luminance (Rec. 601 weights), normalized to [0, 1]
    luminances = (0.299 * cell_colors[:, :, 0] +
                  0.587 * cell_colors[:, :, 1] +
                  0.114 * cell_colors[:, :, 2]) / 255.0

    # Create output image
    out_h = rows * char_size
    out_w = cols * char_size
    output = Image.new('RGB', (out_w, out_h), (0, 0, 0))
    draw = ImageDraw.Draw(output)

    # Draw characters
    for r in range(rows):
        for c in range(cols):
            lum = luminances[r, c]
            color = tuple(cell_colors[r, c])

            # Map luminance to character
            idx = int(lum * (len(chars) - 1))
            char = chars[idx]

            # Draw character at its cell origin
            x = c * char_size
            y = r * char_size
            draw.text((x, y), char, fill=color, font=font)

    # Convert back to numpy and resize to original (grid may not divide
    # the frame evenly)
    result = np.array(output)
    if result.shape[:2] != (h, w):
        result = cv2.resize(result, (w, h), interpolation=cv2.INTER_LINEAR)

    return result
|
||||||
|
|
||||||
|
def _apply_ripple(self, frame: np.ndarray, amplitude: float,
                  cx: float, cy: float, t: float = 0) -> np.ndarray:
    """Apply a radial ripple distortion centered at (cx, cy) (fractions of size)."""
    import cv2

    height, width = frame.shape[:2]
    origin_x, origin_y = cx * width, cy * height
    longest = max(width, height)

    # Pixel coordinate grids
    grid_y, grid_x = np.mgrid[0:height, 0:width].astype(np.float32)

    # Offset and radial distance from the ripple origin
    off_x = grid_x - origin_x
    off_y = grid_y - origin_y
    radius = np.sqrt(off_x * off_x + off_y * off_y)

    # Ripple parameters (matching recipe: frequency=8, decay=2, speed=5)
    freq = 8
    decay = 2
    speed = 5
    phase = t * speed * 2 * np.pi

    # Sinusoidal radial displacement (matching original formula)
    wave = np.sin(2 * np.pi * freq * radius / longest + phase) * amplitude

    # Exponential falloff with distance
    if decay > 0:
        wave = wave * np.exp(-radius * decay / longest)

    # Unit radial direction (zero at the exact origin to avoid 0/0)
    with np.errstate(divide='ignore', invalid='ignore'):
        unit_x = np.where(radius > 0, off_x / radius, 0)
        unit_y = np.where(radius > 0, off_y / radius, 0)

    map_x = (grid_x + wave * unit_x).astype(np.float32)
    map_y = (grid_y + wave * unit_y).astype(np.float32)

    return cv2.remap(frame, map_x, map_y, cv2.INTER_LINEAR,
                     borderMode=cv2.BORDER_REFLECT)
|
||||||
|
|
||||||
|
def _update_from_audio(self, analyzer, t: float):
    """Refresh the live audio tracks in ``analysis_data`` from the analyzer.

    Works with both file-backed (needs ``set_time``) and live analyzers;
    missing methods simply yield neutral values.
    """
    # File-based streaming analyzers expose set_time() so we can seek
    # them to the current frame timestamp first.
    seek = getattr(analyzer, 'set_time', None)
    if seek is not None:
        seek(t)

    level = analyzer.get_energy() if hasattr(analyzer, 'get_energy') else 0
    on_beat = analyzer.get_beat() if hasattr(analyzer, 'get_beat') else False

    # Single-sample tracks; effect bindings can reference these by name.
    self.analysis_data["live_energy"] = {
        "times": [t],
        "values": [level],
        "duration": float('inf'),
    }
    self.analysis_data["live_beat"] = {
        "times": [t],
        "values": [1.0 if on_beat else 0.0],
        "duration": float('inf'),
    }
|
||||||
|
|
||||||
|
|
||||||
|
def quick_preview(
    sources: List[str],
    effects: List[List[Dict]] = None,
    duration: float = 10,
    fps: float = 30,
):
    """
    Quick preview helper - show sources with optional effects.

    Example:
        quick_preview(["video1.mp4", "video2.mp4"], duration=30)
    """
    preview = StreamingCompositor(
        sources=sources,
        effects_per_source=effects,
        fps=fps,
    )
    preview.run(output="preview", duration=duration)
|
||||||
125
streaming/demo.py
Normal file
125
streaming/demo.py
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Demo script for streaming compositor.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
# Preview two videos blended
|
||||||
|
python -m streaming.demo preview video1.mp4 video2.mp4
|
||||||
|
|
||||||
|
# Record output to file
|
||||||
|
python -m streaming.demo record video1.mp4 video2.mp4 -o output.mp4
|
||||||
|
|
||||||
|
# Benchmark (no output)
|
||||||
|
python -m streaming.demo benchmark video1.mp4 --duration 10
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# Add parent to path for imports
|
||||||
|
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||||
|
|
||||||
|
from streaming import StreamingCompositor, VideoSource
|
||||||
|
from streaming.output import NullOutput
|
||||||
|
|
||||||
|
|
||||||
|
def demo_preview(sources: list, duration: float, effects: bool = False):
    """Preview sources, optionally applying a couple of canned effects."""
    chains = None
    if effects:
        # One canned chain per source, up to the two we define here.
        canned = [
            [{"effect": "rotate", "angle": 15}],
            [{"effect": "zoom", "amount": 1.2}],
        ]
        chains = canned[:len(sources)]

    comp = StreamingCompositor(
        sources=sources,
        effects_per_source=chains,
        recipe_dir=Path(__file__).parent.parent,
    )
    comp.run(output="preview", duration=duration)
|
||||||
|
|
||||||
|
|
||||||
|
def demo_record(sources: list, output_path: str, duration: float):
    """Record the blended composite straight to *output_path*."""
    recorder = StreamingCompositor(
        sources=sources,
        recipe_dir=Path(__file__).parent.parent,
    )
    recorder.run(output=output_path, duration=duration)
|
||||||
|
|
||||||
|
|
||||||
|
def demo_benchmark(sources: list, duration: float):
    """Measure raw processing speed by discarding every frame."""
    bench = StreamingCompositor(
        sources=sources,
        recipe_dir=Path(__file__).parent.parent,
    )
    bench.run(output="null", duration=duration)
|
||||||
|
|
||||||
|
|
||||||
|
def demo_audio_reactive(sources: list, duration: float):
    """Preview with live audio reactivity."""
    from streaming.audio import AudioAnalyzer

    def reactive_zoom_chain():
        # Bind each source's zoom amount to the live energy track.
        return [{
            "effect": "zoom",
            "amount": {"_binding": True, "source": "live_energy", "feature": "values", "range": [1.0, 1.5]},
        }]

    effects_config = [reactive_zoom_chain() for _ in sources]

    compositor = StreamingCompositor(
        sources=sources,
        effects_per_source=effects_config,
        recipe_dir=Path(__file__).parent.parent,
    )

    # Run with a live audio analyzer when available; otherwise degrade
    # gracefully to a plain preview.
    try:
        with AudioAnalyzer() as audio:
            print("Audio analyzer started. Make some noise!", file=sys.stderr)
            compositor.run(output="preview", duration=duration, audio_analyzer=audio)
    except Exception as e:
        print(f"Audio not available: {e}", file=sys.stderr)
        print("Running without audio...", file=sys.stderr)
        compositor.run(output="preview", duration=duration)
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Parse CLI arguments, validate inputs, and dispatch to a demo mode."""
    parser = argparse.ArgumentParser(description="Streaming compositor demo")
    parser.add_argument("mode", choices=["preview", "record", "benchmark", "audio"],
                        help="Demo mode")
    parser.add_argument("sources", nargs="+", help="Video source files")
    parser.add_argument("-o", "--output", help="Output file (for record mode)")
    parser.add_argument("-d", "--duration", type=float, default=30,
                        help="Duration in seconds")
    parser.add_argument("--effects", action="store_true",
                        help="Apply simple effects (for preview)")

    args = parser.parse_args()

    # Bail out on the first missing input file.
    for candidate in args.sources:
        if Path(candidate).exists():
            continue
        print(f"Error: Source not found: {candidate}", file=sys.stderr)
        sys.exit(1)

    mode = args.mode
    if mode == "preview":
        demo_preview(args.sources, args.duration, args.effects)
    elif mode == "record":
        if not args.output:
            print("Error: --output required for record mode", file=sys.stderr)
            sys.exit(1)
        demo_record(args.sources, args.output, args.duration)
    elif mode == "benchmark":
        demo_benchmark(args.sources, args.duration)
    elif mode == "audio":
        demo_audio_reactive(args.sources, args.duration)
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
259
streaming/output.py
Normal file
259
streaming/output.py
Normal file
@@ -0,0 +1,259 @@
|
|||||||
|
"""
|
||||||
|
Output targets for streaming compositor.
|
||||||
|
|
||||||
|
Supports:
|
||||||
|
- Display window (preview)
|
||||||
|
- File output (recording)
|
||||||
|
- Stream output (RTMP, etc.) - future
|
||||||
|
"""
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
import subprocess
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from typing import Tuple, Optional
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
|
||||||
|
class Output(ABC):
    """Abstract base class for output targets.

    Concrete sinks in this module (display window, file recorder,
    fan-out, null) implement this interface; the compositor's frame
    loop talks only to it.
    """

    @abstractmethod
    def write(self, frame: np.ndarray, t: float):
        """Write a frame to the output.

        Args:
            frame: Image array for this tick (the pipe-based sinks here
                send it as raw rgb24 bytes).
            t: Frame timestamp in seconds.
        """
        pass

    @abstractmethod
    def close(self):
        """Close the output and clean up resources (idempotent in subclasses)."""
        pass

    @property
    @abstractmethod
    def is_open(self) -> bool:
        """Check if output is still open/valid; the frame loop stops when False."""
        pass
|
||||||
|
|
||||||
|
|
||||||
|
class DisplayOutput(Output):
    """
    Display frames using mpv (handles Wayland properly).

    Useful for live preview. Press 'q' to quit.

    mpv is spawned lazily on the first write() so the window geometry can
    be taken from the actual frame; an optional audio file is played by a
    separate ffplay process alongside the video.
    """

    def __init__(self, title: str = "Streaming Preview", size: Tuple[int, int] = None,
                 audio_source: str = None, fps: float = 30):
        self.title = title
        self.size = size
        self.audio_source = audio_source
        self.fps = fps
        self._is_open = True
        # Child processes are created lazily in _start_mpv().
        self._process = None
        self._audio_process = None

    def _start_mpv(self, frame_size: Tuple[int, int]):
        """Start mpv process for display."""
        import sys
        w, h = frame_size
        # Raw video carries no header, so width/height/format/fps must be
        # declared on the mpv command line before piping frames in.
        cmd = [
            "mpv",
            "--no-cache",
            "--demuxer=rawvideo",
            f"--demuxer-rawvideo-w={w}",
            f"--demuxer-rawvideo-h={h}",
            "--demuxer-rawvideo-mp-format=rgb24",
            f"--demuxer-rawvideo-fps={self.fps}",
            f"--title={self.title}",
            "-",
        ]
        print(f"Starting mpv: {' '.join(cmd)}", file=sys.stderr)
        self._process = subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

        # Start audio playback if we have an audio source
        if self.audio_source:
            audio_cmd = [
                "ffplay", "-nodisp", "-autoexit", "-loglevel", "quiet",
                str(self.audio_source)
            ]
            print(f"Starting audio: {self.audio_source}", file=sys.stderr)
            self._audio_process = subprocess.Popen(
                audio_cmd,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )

    def write(self, frame: np.ndarray, t: float):
        """Display frame."""
        if not self._is_open:
            return

        # Ensure frame is correct format: contiguous uint8 so tobytes()
        # yields exactly the rgb24 layout mpv expects.
        if frame.dtype != np.uint8:
            frame = np.clip(frame, 0, 255).astype(np.uint8)
        if not frame.flags['C_CONTIGUOUS']:
            frame = np.ascontiguousarray(frame)

        # Start mpv on first frame (window sized from the frame itself)
        if self._process is None:
            self._start_mpv((frame.shape[1], frame.shape[0]))

        # Check if mpv is still running (e.g. user pressed 'q')
        if self._process.poll() is not None:
            self._is_open = False
            return

        try:
            self._process.stdin.write(frame.tobytes())
            self._process.stdin.flush()  # Prevent buffering
        except BrokenPipeError:
            # mpv went away mid-write; mark closed so the caller stops.
            self._is_open = False

    def close(self):
        """Close the display and audio."""
        if self._process:
            try:
                self._process.stdin.close()
            except:
                pass
            self._process.terminate()
            self._process.wait()
        if self._audio_process:
            self._audio_process.terminate()
            self._audio_process.wait()
        self._is_open = False

    @property
    def is_open(self) -> bool:
        # Refresh liveness from the child process on each query.
        if self._process and self._process.poll() is not None:
            self._is_open = False
        return self._is_open
|
||||||
|
|
||||||
|
|
||||||
|
class FileOutput(Output):
    """
    Write frames to a video file using ffmpeg.

    Frames are piped to an ffmpeg child process as raw rgb24; when an
    audio_source is given it is muxed in as AAC and the result is trimmed
    to the shorter stream (-shortest).
    """

    def __init__(
        self,
        path: str,
        size: Tuple[int, int],
        fps: float = 30,
        codec: str = "libx264",
        crf: int = 18,
        preset: str = "fast",
        audio_source: str = None,
    ):
        """
        Args:
            path: Destination video file path (overwritten if present).
            size: (width, height) of the output video.
            fps: Frame rate declared on the rawvideo input.
            codec: ffmpeg video codec name.
            crf: Constant rate factor (lower = higher quality).
            preset: Encoder speed/size preset.
            audio_source: Optional audio file to mux into the output.
        """
        self.path = Path(path)
        self.size = size
        self.fps = fps
        self._is_open = True

        # Build ffmpeg command: raw rgb24 frames arrive on stdin.
        cmd = [
            "ffmpeg", "-y",
            "-f", "rawvideo",
            "-vcodec", "rawvideo",
            "-pix_fmt", "rgb24",
            "-s", f"{size[0]}x{size[1]}",
            "-r", str(fps),
            "-i", "-",
        ]

        # Add audio input if provided
        if audio_source:
            cmd.extend(["-i", str(audio_source)])

        cmd.extend([
            "-c:v", codec,
            "-preset", preset,
            "-crf", str(crf),
            "-pix_fmt", "yuv420p",
        ])

        # Add audio codec if we have audio; -shortest stops at the
        # shorter of video/audio.
        if audio_source:
            cmd.extend(["-c:a", "aac", "-b:a", "192k", "-shortest"])

        cmd.append(str(self.path))

        self._process = subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
        )

    def write(self, frame: np.ndarray, t: float):
        """Write frame to video file."""
        if not self._is_open or self._process.poll() is not None:
            self._is_open = False
            return

        # Resize if needed
        if frame.shape[1] != self.size[0] or frame.shape[0] != self.size[1]:
            import cv2
            frame = cv2.resize(frame, self.size)

        # Normalize to contiguous uint8 so the raw bytes match the
        # declared rgb24 pix_fmt (consistent with DisplayOutput.write;
        # a float frame would otherwise corrupt the stream).
        if frame.dtype != np.uint8:
            frame = np.clip(frame, 0, 255).astype(np.uint8)
        if not frame.flags['C_CONTIGUOUS']:
            frame = np.ascontiguousarray(frame)

        try:
            self._process.stdin.write(frame.tobytes())
        except BrokenPipeError:
            self._is_open = False

    def close(self):
        """Flush remaining frames and wait for ffmpeg to finish encoding."""
        if self._process:
            try:
                self._process.stdin.close()
            except (BrokenPipeError, OSError):
                # ffmpeg already exited; nothing left to flush.
                pass
            self._process.wait()
        self._is_open = False

    @property
    def is_open(self) -> bool:
        return self._is_open and self._process.poll() is None
|
||||||
|
|
||||||
|
|
||||||
|
class MultiOutput(Output):
    """
    Fan a frame stream out to several Output targets at once.

    Handy for recording to disk while also showing a live preview.
    """

    def __init__(self, outputs: list):
        self.outputs = outputs

    def write(self, frame: np.ndarray, t: float):
        # Only forward to sinks that are still alive.
        for sink in self.outputs:
            if sink.is_open:
                sink.write(frame, t)

    def close(self):
        for sink in self.outputs:
            sink.close()

    @property
    def is_open(self) -> bool:
        # Open as long as at least one sink still accepts frames.
        return any(sink.is_open for sink in self.outputs)
|
||||||
|
|
||||||
|
|
||||||
|
class NullOutput(Output):
    """
    Sink that throws frames away while counting them (for benchmarking).
    """

    def __init__(self):
        self._is_open = True
        self.frame_count = 0  # number of frames "written" so far

    def write(self, frame: np.ndarray, t: float):
        self.frame_count = self.frame_count + 1

    def close(self):
        self._is_open = False

    @property
    def is_open(self) -> bool:
        return self._is_open
|
||||||
414
streaming/recipe_adapter.py
Normal file
414
streaming/recipe_adapter.py
Normal file
@@ -0,0 +1,414 @@
|
|||||||
|
"""
|
||||||
|
Adapter to run sexp recipes through the streaming compositor.
|
||||||
|
|
||||||
|
Bridges the gap between:
|
||||||
|
- Existing recipe format (sexp files with stages, effects, analysis)
|
||||||
|
- Streaming compositor (sources, effect chains, compositor config)
|
||||||
|
"""
|
||||||
|
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, List, Any, Optional
|
||||||
|
|
||||||
|
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag"))
|
||||||
|
|
||||||
|
from .compositor import StreamingCompositor
|
||||||
|
from .sources import VideoSource
|
||||||
|
from .audio import FileAudioAnalyzer
|
||||||
|
|
||||||
|
|
||||||
|
class RecipeAdapter:
    """
    Adapts a compiled sexp recipe to run through the streaming compositor.

    Example:
        adapter = RecipeAdapter("effects/quick_test.sexp")
        adapter.run(output="preview", duration=60)
    """

    def __init__(
        self,
        recipe_path: str,
        params: Optional[Dict[str, Any]] = None,
        backend: str = "numpy",
    ):
        """
        Load and prepare a recipe for streaming.

        Args:
            recipe_path: Path to .sexp recipe file
            params: Parameter overrides
            backend: "numpy" or "glsl"
        """
        self.recipe_path = Path(recipe_path)
        self.recipe_dir = self.recipe_path.parent
        self.params = params or {}
        self.backend = backend

        # Compile recipe eagerly so construction fails fast on bad recipes.
        self._compile()

    def _compile(self):
        """Compile the recipe and extract structure.

        Populates:
            self.compiled         -- the CompiledRecipe object
            self.sources          -- node id -> absolute source Path
            self.effects_registry -- effect name -> effect implementation Path
            self.analyzers        -- ANALYZE node id -> {analyzer, path}
        """
        # Local import: artdag is only needed when a recipe is actually compiled.
        from artdag.sexp.compiler import compile_string

        recipe_text = self.recipe_path.read_text()
        self.compiled = compile_string(recipe_text, self.params, recipe_dir=self.recipe_dir)

        # Extract key info
        self.sources = {}  # name -> path
        self.effects_registry = {}  # effect_name -> path
        self.analyzers = {}  # name -> analyzer info

        # Walk nodes to find sources and structure
        # nodes is a list in CompiledRecipe
        for node in self.compiled.nodes:
            node_type = node.get("type", "")

            if node_type == "SOURCE":
                config = node.get("config", {})
                path = config.get("path")
                if path:
                    # Source paths are recipe-relative; anchor them to recipe_dir.
                    self.sources[node["id"]] = self.recipe_dir / path

            elif node_type == "ANALYZE":
                config = node.get("config", {})
                self.analyzers[node["id"]] = {
                    "analyzer": config.get("analyzer"),
                    "path": config.get("analyzer_path"),
                }

        # Get effects registry from compiled recipe
        # registry has 'effects' sub-dict
        effects_dict = self.compiled.registry.get("effects", {})
        for name, info in effects_dict.items():
            if info.get("path"):
                self.effects_registry[name] = Path(info["path"])

    def run_analysis(self) -> Dict[str, Any]:
        """
        Run analysis phase (energy, beats, etc.).

        Returns:
            Dict of analysis track name -> {times, values, duration}
        """
        print(f"Running analysis...", file=sys.stderr)

        # Use existing planner's analysis execution
        from artdag.sexp.planner import create_plan

        analysis_data = {}

        def on_analysis(node_id: str, results: dict):
            # Collect each analyzer result keyed by node id as it completes.
            analysis_data[node_id] = results
            print(f" {node_id[:16]}...: {len(results.get('times', []))} samples", file=sys.stderr)

        # Create plan (runs analysis as side effect)
        plan = create_plan(
            self.compiled,
            inputs={},
            recipe_dir=self.recipe_dir,
            on_analysis=on_analysis,
        )

        # Also store named analysis tracks
        # NOTE(review): named tracks may overwrite node-id keys on collision -- confirm intended.
        for name, data in plan.analysis.items():
            analysis_data[name] = data

        return analysis_data

    def build_compositor(
        self,
        analysis_data: Optional[Dict[str, Any]] = None,
        fps: Optional[float] = None,
    ) -> StreamingCompositor:
        """
        Build a streaming compositor from the recipe.

        This is a simplified version that handles common patterns.
        Complex recipes may need manual configuration.

        Args:
            analysis_data: Pre-computed analysis data
            fps: Frame rate override (falls back to recipe encoding, then 30)

        Returns:
            Configured StreamingCompositor
        """
        # Extract video and audio sources in SLICE_ON input order
        video_sources = []
        audio_source = None

        # Find audio source first (only the first audio file found is used).
        for node_id, path in self.sources.items():
            suffix = path.suffix.lower()
            if suffix in ('.mp3', '.wav', '.flac', '.ogg', '.m4a', '.aac'):
                audio_source = str(path)
                break

        # Find SLICE_ON node to get correct video order
        slice_on_inputs = None
        for node in self.compiled.nodes:
            if node.get('type') == 'SLICE_ON':
                # Use 'videos' config key which has the correct order
                config = node.get('config', {})
                slice_on_inputs = config.get('videos', [])
                break

        if slice_on_inputs:
            # Trace each SLICE_ON input back to its SOURCE
            node_lookup = {n['id']: n for n in self.compiled.nodes}

            def trace_to_source(node_id, visited=None):
                """Trace a node back to its SOURCE, return source path."""
                if visited is None:
                    visited = set()
                if node_id in visited:
                    # Cycle guard: stop rather than recurse forever.
                    return None
                visited.add(node_id)

                node = node_lookup.get(node_id)
                if not node:
                    return None
                if node.get('type') == 'SOURCE':
                    return self.sources.get(node_id)
                # Recurse through inputs (depth-first, first SOURCE wins).
                for inp in node.get('inputs', []):
                    result = trace_to_source(inp, visited)
                    if result:
                        return result
                return None

            # Build video_sources in SLICE_ON input order
            for inp_id in slice_on_inputs:
                source_path = trace_to_source(inp_id)
                if source_path:
                    suffix = source_path.suffix.lower()
                    if suffix in ('.mp4', '.webm', '.mov', '.avi', '.mkv'):
                        video_sources.append(str(source_path))

        # Fallback to definition order if no SLICE_ON
        if not video_sources:
            for node_id, path in self.sources.items():
                suffix = path.suffix.lower()
                if suffix in ('.mp4', '.webm', '.mov', '.avi', '.mkv'):
                    video_sources.append(str(path))

        if not video_sources:
            raise ValueError("No video sources found in recipe")

        # Build effect chains - use live audio bindings (matching video_sources count)
        effects_per_source = self._build_streaming_effects(n_sources=len(video_sources))

        # Build compositor config from recipe
        compositor_config = self._extract_compositor_config(analysis_data)

        return StreamingCompositor(
            sources=video_sources,
            effects_per_source=effects_per_source,
            compositor_config=compositor_config,
            analysis_data=analysis_data or {},
            backend=self.backend,
            recipe_dir=self.recipe_dir,
            fps=fps or self.compiled.encoding.get("fps", 30),
            audio_source=audio_source,
        )

    def _build_streaming_effects(self, n_sources: Optional[int] = None) -> List[List[Dict]]:
        """
        Build effect chains for streaming with live audio bindings.

        Replicates the recipe's effect pipeline:
        - Per source: rotate, zoom, invert, hue_shift, ascii_art
        - All driven by live_energy and live_beat

        Args:
            n_sources: Number of video sources to build chains for; when None,
                counts video files among self.sources.

        Returns:
            One effect-config list per source; params with "_binding": True
            are resolved per-frame from the named live analysis track.
        """
        if n_sources is None:
            n_sources = len([p for p in self.sources.values()
                             if p.suffix.lower() in ('.mp4', '.webm', '.mov', '.avi', '.mkv')])

        effects_per_source = []

        for i in range(n_sources):
            # Alternate rotation direction per source
            rot_dir = 1 if i % 2 == 0 else -1

            effects = [
                # Rotate - energy drives angle
                {
                    "effect": "rotate",
                    "effect_path": str(self.effects_registry.get("rotate", "")),
                    "angle": {
                        "_binding": True,
                        "source": "live_energy",
                        "feature": "values",
                        "range": [0, 45 * rot_dir],
                    },
                },
                # Zoom - energy drives amount
                {
                    "effect": "zoom",
                    "effect_path": str(self.effects_registry.get("zoom", "")),
                    "amount": {
                        "_binding": True,
                        "source": "live_energy",
                        "feature": "values",
                        # Even sources zoom in, odd sources zoom out.
                        "range": [1.0, 1.5] if i % 2 == 0 else [1.0, 0.7],
                    },
                },
                # Invert - beat triggers
                {
                    "effect": "invert",
                    "effect_path": str(self.effects_registry.get("invert", "")),
                    "amount": {
                        "_binding": True,
                        "source": "live_beat",
                        "feature": "values",
                        "range": [0, 1],
                    },
                },
                # Hue shift - energy drives hue
                {
                    "effect": "hue_shift",
                    "effect_path": str(self.effects_registry.get("hue_shift", "")),
                    "degrees": {
                        "_binding": True,
                        "source": "live_energy",
                        "feature": "values",
                        "range": [0, 180],
                    },
                },
                # ASCII art - energy drives char size, beat triggers mix
                {
                    "effect": "ascii_art",
                    "effect_path": str(self.effects_registry.get("ascii_art", "")),
                    "char_size": {
                        "_binding": True,
                        "source": "live_energy",
                        "feature": "values",
                        "range": [4, 32],
                    },
                    "mix": {
                        "_binding": True,
                        "source": "live_beat",
                        "feature": "values",
                        "range": [0, 1],
                    },
                },
            ]
            effects_per_source.append(effects)

        return effects_per_source

    def _extract_effects(self) -> List[List[Dict]]:
        """Extract effect chains for each source (legacy, pre-computed analysis)."""
        # Simplified: find EFFECT nodes and their configs
        effects_per_source = []

        for node_id, path in self.sources.items():
            if path.suffix.lower() not in ('.mp4', '.webm', '.mov', '.avi', '.mkv'):
                continue

            # Find effects that depend on this source
            # This is simplified - real implementation would trace the DAG
            effects = []

            for node in self.compiled.nodes:
                if node.get("type") == "EFFECT":
                    config = node.get("config", {})
                    effect_name = config.get("effect")
                    if effect_name and effect_name in self.effects_registry:
                        effect_config = {
                            "effect": effect_name,
                            "effect_path": str(self.effects_registry[effect_name]),
                        }
                        # Copy only effect params (filter out internal fields)
                        internal_fields = (
                            "effect", "effect_path", "cid", "effect_cid",
                            "effects_registry", "analysis_refs", "inputs",
                        )
                        for k, v in config.items():
                            if k not in internal_fields:
                                effect_config[k] = v
                        effects.append(effect_config)
                        break  # One effect per source for now

            effects_per_source.append(effects)

        return effects_per_source

    def _extract_compositor_config(self, analysis_data: Optional[Dict]) -> Dict:
        """Extract compositor configuration.

        Uses the first blend_multi EFFECT node if present; otherwise falls back
        to an equal alpha blend across all video sources.
        """
        # Look for blend_multi or similar composition nodes
        for node in self.compiled.nodes:
            if node.get("type") == "EFFECT":
                config = node.get("config", {})
                if config.get("effect") == "blend_multi":
                    return {
                        "mode": config.get("mode", "alpha"),
                        "weights": config.get("weights", []),
                    }

        # Default: equal blend
        n_sources = len([p for p in self.sources.values()
                         if p.suffix.lower() in ('.mp4', '.webm', '.mov', '.avi', '.mkv')])
        return {
            "mode": "alpha",
            "weights": [1.0 / n_sources] * n_sources if n_sources > 0 else [1.0],
        }

    def run(
        self,
        output: str = "preview",
        duration: Optional[float] = None,
        fps: Optional[float] = None,
    ):
        """
        Run the recipe through streaming compositor.

        Everything streams: video frames read on-demand, audio analyzed in real-time.
        No pre-computation.

        Args:
            output: "preview", filename, or Output object
            duration: Duration in seconds (default: audio duration)
            fps: Frame rate (default from recipe, or 30)
        """
        # Build compositor with recipe executor for full pipeline
        # NOTE(review): StreamingRecipeExecutor import is unused below
        # (SexpStreamingExecutor is used instead) -- confirm whether needed.
        from .recipe_executor import StreamingRecipeExecutor

        compositor = self.build_compositor(analysis_data={}, fps=fps)

        # Use audio duration if not specified
        if duration is None:
            if compositor._audio_analyzer:
                duration = compositor._audio_analyzer.duration
                print(f"Using audio duration: {duration:.1f}s", file=sys.stderr)
            else:
                # Live mode - run until quit
                print("Live mode - press 'q' to quit", file=sys.stderr)

        # Create sexp executor that interprets the recipe
        from .sexp_executor import SexpStreamingExecutor
        executor = SexpStreamingExecutor(self.compiled, seed=42)

        compositor.run(output=output, duration=duration, recipe_executor=executor)
|
def run_recipe(
    recipe_path: str,
    output: str = "preview",
    duration: Optional[float] = None,
    params: Optional[Dict] = None,
    fps: Optional[float] = None,
):
    """
    Run a recipe through streaming compositor.

    Everything streams in real-time: video frames, audio analysis.
    No pre-computation - starts immediately.

    Args:
        recipe_path: Path to the .sexp recipe file.
        output: "preview", a filename, or an Output object.
        duration: Duration in seconds; None means run for the audio duration
            (or live until quit).
        params: Recipe parameter overrides.
        fps: Frame rate override.

    Example:
        run_recipe("effects/quick_test.sexp", output="preview", duration=30)
        run_recipe("effects/quick_test.sexp", output="preview", fps=5)  # Lower fps for slow systems
    """
    adapter = RecipeAdapter(recipe_path, params=params)
    adapter.run(output=output, duration=duration, fps=fps)
415
streaming/recipe_executor.py
Normal file
415
streaming/recipe_executor.py
Normal file
@@ -0,0 +1,415 @@
|
|||||||
|
"""
|
||||||
|
Streaming recipe executor.
|
||||||
|
|
||||||
|
Implements the full recipe logic for real-time streaming:
|
||||||
|
- Scans (state machines that evolve on beats)
|
||||||
|
- Process-pair template (two clips with sporadic effects, blended)
|
||||||
|
- Cycle-crossfade (dynamic composition cycling through video pairs)
|
||||||
|
"""
|
||||||
|
|
||||||
|
import random
|
||||||
|
import numpy as np
|
||||||
|
from typing import Dict, List, Any, Optional
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class ScanState:
    """State for a scan (beat-driven state machine)."""
    # Current scan value: an int counter or a small state dict, per scan kind.
    value: Any = 0
    # Per-scan RNG so each scan's random decisions are independent and reproducible.
    rng: random.Random = field(default_factory=random.Random)


class StreamingScans:
    """
    Real-time scan executor.

    Scans are state machines that evolve on each beat.
    They drive effect parameters like invert triggers, hue shifts, etc.
    """

    def __init__(self, seed: int = 42, n_sources: int = 4):
        """
        Args:
            seed: Master seed; each scan gets seed + its index.
            n_sources: Number of video pairs; per-pair scans are created
                for each, and the 'cycle' scan wraps modulo this count.
        """
        self.master_seed = seed
        self.n_sources = n_sources
        self.scans: Dict[str, ScanState] = {}
        self.beat_count = 0
        self.current_time = 0.0
        self.last_beat_time = 0.0
        self._init_scans()

    def _init_scans(self):
        """Initialize all scans with their own RNG seeds."""
        scan_names = []

        # Per-pair scans (dynamic based on n_sources)
        for i in range(self.n_sources):
            scan_names.extend([
                f"inv_a_{i}", f"inv_b_{i}", f"hue_a_{i}", f"hue_b_{i}",
                f"ascii_a_{i}", f"ascii_b_{i}", f"pair_mix_{i}", f"pair_rot_{i}",
            ])

        # Global scans
        scan_names.extend(["whole_spin", "ripple_gate", "cycle"])

        for i, name in enumerate(scan_names):
            # Seed offset by enumeration index: deterministic per (seed, n_sources).
            rng = random.Random(self.master_seed + i)
            self.scans[name] = ScanState(value=self._init_value(name), rng=rng)

    def _init_value(self, name: str) -> Any:
        """Get initial value for a scan."""
        if name.startswith("inv_") or name.startswith("ascii_"):
            return 0  # Counter for remaining beats
        elif name.startswith("hue_"):
            return {"rem": 0, "hue": 0}
        elif name.startswith("pair_mix"):
            return {"rem": 0, "opacity": 0.5}
        elif name.startswith("pair_rot"):
            # Even pairs rotate clockwise, odd pairs counter-clockwise.
            pair_idx = int(name.split("_")[-1])
            rot_dir = 1 if pair_idx % 2 == 0 else -1
            return {"beat": 0, "clen": 25, "dir": rot_dir, "angle": 0}
        elif name == "whole_spin":
            return {
                "phase": 0,              # 0 = waiting, 1 = spinning
                "beat": 0,               # beats into current phase
                "plen": 20,              # beats in this phase
                "dir": 1,                # spin direction
                "total_angle": 0.0,      # cumulative angle after all spins
                "spin_start_angle": 0.0, # angle when current spin started
                "spin_start_time": 0.0,  # time when current spin started
                "spin_end_time": 0.0,    # estimated time when spin ends
            }
        elif name == "ripple_gate":
            return {"rem": 0, "cx": 0.5, "cy": 0.5}
        elif name == "cycle":
            return {"cycle": 0, "beat": 0, "clen": 60}
        return 0

    def on_beat(self):
        """Update all scans on a beat."""
        self.beat_count += 1
        # Estimate beat interval from last two beats (0.5s default before the
        # second beat, since no interval is measurable yet).
        beat_interval = self.current_time - self.last_beat_time if self.last_beat_time > 0 else 0.5
        self.last_beat_time = self.current_time

        for name, state in self.scans.items():
            state.value = self._step_scan(name, state.value, state.rng, beat_interval)

    def _step_scan(self, name: str, value: Any, rng: random.Random, beat_interval: float = 0.5) -> Any:
        """Step a scan forward by one beat.

        Pure with respect to scan state: consumes `value`, returns the new
        value. Reads self.current_time / self.beat_count / self.n_sources.
        RNG draws happen only on the branches shown; callers rely on the
        exact draw order for reproducibility.
        """

        # Invert scan: 10% chance, lasts 1-5 beats
        if name.startswith("inv_"):
            if value > 0:
                return value - 1
            elif rng.random() < 0.1:
                return rng.randint(1, 5)
            return 0

        # Hue scan: 10% chance, random hue 30-330, lasts 1-5 beats
        elif name.startswith("hue_"):
            if value["rem"] > 0:
                return {"rem": value["rem"] - 1, "hue": value["hue"]}
            elif rng.random() < 0.1:
                return {"rem": rng.randint(1, 5), "hue": rng.uniform(30, 330)}
            return {"rem": 0, "hue": 0}

        # ASCII scan: 5% chance, lasts 1-3 beats
        elif name.startswith("ascii_"):
            if value > 0:
                return value - 1
            elif rng.random() < 0.05:
                return rng.randint(1, 3)
            return 0

        # Pair mix: changes every 1-11 beats
        elif name.startswith("pair_mix"):
            if value["rem"] > 0:
                return {"rem": value["rem"] - 1, "opacity": value["opacity"]}
            return {"rem": rng.randint(1, 11), "opacity": rng.choice([0, 0.5, 1.0])}

        # Pair rotation: full rotation every 20-30 beats
        elif name.startswith("pair_rot"):
            beat = value["beat"]
            clen = value["clen"]
            dir_ = value["dir"]
            angle = value["angle"]

            if beat + 1 < clen:
                # Advance by one step of the current full rotation.
                new_angle = angle + dir_ * (360 / clen)
                return {"beat": beat + 1, "clen": clen, "dir": dir_, "angle": new_angle}
            else:
                # Rotation complete: pick a new cycle length and flip direction.
                return {"beat": 0, "clen": rng.randint(20, 30), "dir": -dir_, "angle": angle}

        # Whole spin: sporadic 720 degree spins (cumulative - stays rotated)
        elif name == "whole_spin":
            phase = value["phase"]
            beat = value["beat"]
            plen = value["plen"]
            dir_ = value["dir"]
            total_angle = value.get("total_angle", 0.0)
            spin_start_angle = value.get("spin_start_angle", 0.0)
            spin_start_time = value.get("spin_start_time", 0.0)
            spin_end_time = value.get("spin_end_time", 0.0)

            if phase == 1:
                # Currently spinning
                if beat + 1 < plen:
                    return {
                        "phase": 1, "beat": beat + 1, "plen": plen, "dir": dir_,
                        "total_angle": total_angle,
                        "spin_start_angle": spin_start_angle,
                        "spin_start_time": spin_start_time,
                        "spin_end_time": spin_end_time,
                    }
                else:
                    # Spin complete - update total_angle with final spin
                    new_total = spin_start_angle + dir_ * 720.0
                    return {
                        "phase": 0, "beat": 0, "plen": rng.randint(20, 40), "dir": dir_,
                        "total_angle": new_total,
                        "spin_start_angle": new_total,
                        "spin_start_time": self.current_time,
                        "spin_end_time": self.current_time,
                    }
            else:
                # Waiting phase
                if beat + 1 < plen:
                    return {
                        "phase": 0, "beat": beat + 1, "plen": plen, "dir": dir_,
                        "total_angle": total_angle,
                        "spin_start_angle": spin_start_angle,
                        "spin_start_time": spin_start_time,
                        "spin_end_time": spin_end_time,
                    }
                else:
                    # Start new spin
                    new_dir = 1 if rng.random() < 0.5 else -1
                    new_plen = rng.randint(10, 25)
                    spin_duration = new_plen * beat_interval
                    return {
                        "phase": 1, "beat": 0, "plen": new_plen, "dir": new_dir,
                        "total_angle": total_angle,
                        "spin_start_angle": total_angle,
                        "spin_start_time": self.current_time,
                        "spin_end_time": self.current_time + spin_duration,
                    }

        # Ripple gate: 5% chance, lasts 1-20 beats
        elif name == "ripple_gate":
            if value["rem"] > 0:
                return {"rem": value["rem"] - 1, "cx": value["cx"], "cy": value["cy"]}
            elif rng.random() < 0.05:
                return {"rem": rng.randint(1, 20),
                        "cx": rng.uniform(0.1, 0.9),
                        "cy": rng.uniform(0.1, 0.9)}
            return {"rem": 0, "cx": 0.5, "cy": 0.5}

        # Cycle: track which video pair is active
        elif name == "cycle":
            beat = value["beat"]
            clen = value["clen"]
            cycle = value["cycle"]

            if beat + 1 < clen:
                return {"cycle": cycle, "beat": beat + 1, "clen": clen}
            else:
                # Move to next pair, vary cycle length.
                # BUGFIX: wrap modulo n_sources (was hard-coded % 4) so the
                # active-pair index stays valid when n_sources != 4.
                return {"cycle": (cycle + 1) % self.n_sources, "beat": 0,
                        "clen": 40 + (self.beat_count * 7) % 41}

        return value

    def get_emit(self, name: str) -> float:
        """Get emitted value for a scan.

        Returns a float for most scans; the 'cycle' scan emits its raw state
        dict (consumed by get_cycle_weights/get_cycle_zooms).
        """
        value = self.scans[name].value

        if name.startswith("inv_") or name.startswith("ascii_"):
            return 1.0 if value > 0 else 0.0

        elif name.startswith("hue_"):
            return value["hue"] if value["rem"] > 0 else 0.0

        elif name.startswith("pair_mix"):
            return value["opacity"]

        elif name.startswith("pair_rot"):
            return value["angle"]

        elif name == "whole_spin":
            # Smooth time-based interpolation during spin
            phase = value.get("phase", 0)
            if phase == 1:
                # Currently spinning - interpolate based on time
                spin_start_time = value.get("spin_start_time", 0.0)
                spin_end_time = value.get("spin_end_time", spin_start_time + 1.0)
                spin_start_angle = value.get("spin_start_angle", 0.0)
                dir_ = value.get("dir", 1)

                duration = spin_end_time - spin_start_time
                if duration > 0:
                    progress = (self.current_time - spin_start_time) / duration
                    progress = max(0.0, min(1.0, progress))  # clamp to 0-1
                else:
                    progress = 1.0

                return spin_start_angle + progress * 720.0 * dir_
            else:
                # Not spinning - return cumulative angle
                return value.get("total_angle", 0.0)

        elif name == "ripple_gate":
            return 1.0 if value["rem"] > 0 else 0.0

        elif name == "cycle":
            return value

        return 0.0
|
||||||
|
class StreamingRecipeExecutor:
    """
    Executes a recipe in streaming mode.

    Implements:
    - process-pair: two video clips with opposite effects, blended
    - cycle-crossfade: dynamic cycling through video pairs
    - Final effects: whole-spin rotation, ripple
    """

    def __init__(self, n_sources: int = 4, seed: int = 42):
        self.n_sources = n_sources
        # Beat-driven state machines backing all effect parameters.
        self.scans = StreamingScans(seed, n_sources=n_sources)
        # Previous frame's beat flag, used for rising-edge detection.
        self.last_beat_detected = False
        self.current_time = 0.0

    def on_frame(self, energy: float, is_beat: bool, t: float = 0.0):
        """Called each frame with current audio analysis.

        Advances scan state on the rising edge of the beat flag only, so a
        beat that spans several frames triggers a single scan step.
        NOTE(review): `energy` is unused here -- it is consumed by the
        get_* methods instead; confirm this is intentional.
        """
        self.current_time = t
        self.scans.current_time = t
        # Update scans on beat
        if is_beat and not self.last_beat_detected:
            self.scans.on_beat()
        self.last_beat_detected = is_beat

    def get_effect_params(self, source_idx: int, clip: str, energy: float) -> Dict:
        """
        Get effect parameters for a source clip.

        Args:
            source_idx: Which video source (0-3)
            clip: "a" or "b" (each source has two clips)
            energy: Current audio energy (0-1)

        Returns:
            Dict of per-effect parameter values for this frame.
        """
        suffix = f"_{source_idx}"

        # Rotation ranges alternate: clips "a" and "b" rotate/zoom in
        # opposite directions, and the pairing flips for odd sources.
        if source_idx % 2 == 0:
            rot_range = [0, 45] if clip == "a" else [0, -45]
            zoom_range = [1, 1.5] if clip == "a" else [1, 0.5]
        else:
            rot_range = [0, -45] if clip == "a" else [0, 45]
            zoom_range = [1, 0.5] if clip == "a" else [1, 1.5]

        return {
            "rotate_angle": rot_range[0] + energy * (rot_range[1] - rot_range[0]),
            "zoom_amount": zoom_range[0] + energy * (zoom_range[1] - zoom_range[0]),
            "invert_amount": self.scans.get_emit(f"inv_{clip}{suffix}"),
            "hue_degrees": self.scans.get_emit(f"hue_{clip}{suffix}"),
            "ascii_mix": 0,  # Disabled - too slow without GPU
            "ascii_char_size": 4 + energy * 28,  # 4-32
        }

    def get_pair_params(self, source_idx: int) -> Dict:
        """Get blend and rotation params for a video pair."""
        suffix = f"_{source_idx}"
        return {
            "blend_opacity": self.scans.get_emit(f"pair_mix{suffix}"),
            "pair_rotation": self.scans.get_emit(f"pair_rot{suffix}"),
        }

    def get_cycle_weights(self) -> List[float]:
        """Get blend weights for cycle-crossfade composition.

        The active pair holds ~0.9 weight for the first third of the cycle,
        crossfades down during the middle third, and the next pair fades up
        symmetrically; all other pairs sit at 0.05. Weights are normalized
        to sum to 1.
        """
        # get_emit("cycle") returns the raw cycle state dict.
        cycle_state = self.scans.get_emit("cycle")
        active = cycle_state["cycle"]
        beat = cycle_state["beat"]
        clen = cycle_state["clen"]
        n = self.n_sources

        # phase3 maps beat position to thirds of the cycle (x3 scaling).
        phase3 = beat * 3
        weights = []

        for p in range(n):
            prev = (p + n - 1) % n

            if active == p:
                if phase3 < clen:
                    w = 0.9
                elif phase3 < clen * 2:
                    w = 0.9 - ((phase3 - clen) / clen) * 0.85
                else:
                    w = 0.05
            elif active == prev:
                if phase3 < clen:
                    w = 0.05
                elif phase3 < clen * 2:
                    w = 0.05 + ((phase3 - clen) / clen) * 0.85
                else:
                    w = 0.9
            else:
                w = 0.05

            weights.append(w)

        # Normalize
        total = sum(weights)
        if total > 0:
            weights = [w / total for w in weights]

        return weights

    def get_cycle_zooms(self) -> List[float]:
        """Get zoom amounts for cycle-crossfade.

        The outgoing pair zooms in (1.0 -> 2.0) as it fades out; the incoming
        pair starts zoomed to 3.0 and shrinks toward 1.0; inactive pairs sit
        at 0.1.
        """
        cycle_state = self.scans.get_emit("cycle")
        active = cycle_state["cycle"]
        beat = cycle_state["beat"]
        clen = cycle_state["clen"]
        n = self.n_sources

        phase3 = beat * 3
        zooms = []

        for p in range(n):
            prev = (p + n - 1) % n

            if active == p:
                if phase3 < clen:
                    z = 1.0
                elif phase3 < clen * 2:
                    z = 1.0 + ((phase3 - clen) / clen) * 1.0
                else:
                    z = 0.1
            elif active == prev:
                if phase3 < clen:
                    z = 3.0  # Start big
                elif phase3 < clen * 2:
                    z = 3.0 - ((phase3 - clen) / clen) * 2.0  # Shrink to 1.0
                else:
                    z = 1.0
            else:
                z = 0.1

            zooms.append(z)

        return zooms

    def get_final_effects(self, energy: float) -> Dict:
        """Get final composition effects (whole-spin, ripple).

        Args:
            energy: Current audio energy (0-1); scales ripple amplitude.
        """
        ripple_gate = self.scans.get_emit("ripple_gate")
        # Raw gate state carries the ripple center coordinates.
        ripple_state = self.scans.scans["ripple_gate"].value

        return {
            "whole_spin_angle": self.scans.get_emit("whole_spin"),
            "ripple_amplitude": ripple_gate * (5 + energy * 45),  # 5-50
            "ripple_cx": ripple_state["cx"],
            "ripple_cy": ripple_state["cy"],
        }
678
streaming/sexp_executor.py
Normal file
678
streaming/sexp_executor.py
Normal file
@@ -0,0 +1,678 @@
|
|||||||
|
"""
|
||||||
|
Streaming S-expression executor.
|
||||||
|
|
||||||
|
Executes compiled sexp recipes in real-time by:
|
||||||
|
- Evaluating scan expressions on each beat
|
||||||
|
- Resolving bindings to get effect parameter values
|
||||||
|
- Applying effects frame-by-frame
|
||||||
|
- Evaluating SLICE_ON Lambda for cycle crossfade
|
||||||
|
"""
|
||||||
|
|
||||||
|
import random
|
||||||
|
import numpy as np
|
||||||
|
from typing import Dict, List, Any, Optional
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
|
||||||
|
from .sexp_interp import SexpInterpreter, eval_slice_on_lambda
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class ScanState:
    """Runtime state for a scan."""
    # Compiled-recipe node id this scan belongs to.
    node_id: str
    # Optional human-readable scan name from the recipe.
    name: Optional[str]
    # Current scan value (shape depends on the scan's init expression).
    value: Any
    # Per-scan RNG for reproducible random decisions.
    rng: random.Random
    # Compiled sexp expression ASTs (dicts) for init/step/emit phases.
    init_expr: dict
    step_expr: dict
    emit_expr: dict
|
|
||||||
|
class ExprEvaluator:
|
||||||
|
"""
|
||||||
|
Evaluates compiled expression ASTs.
|
||||||
|
|
||||||
|
Expressions are dicts with:
|
||||||
|
- _expr: True (marks as expression)
|
||||||
|
- op: operation name
|
||||||
|
- args: list of arguments
|
||||||
|
- name: for 'var' ops
|
||||||
|
- keys: for 'dict' ops
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, rng: random.Random = None):
    """Create an evaluator; uses the given RNG or an unseeded default."""
    self.rng = rng or random.Random()
||||||
|
def eval(self, expr: Any, env: Dict[str, Any]) -> Any:
|
||||||
|
"""Evaluate an expression in the given environment."""
|
||||||
|
# Literal values
|
||||||
|
if not isinstance(expr, dict):
|
||||||
|
return expr
|
||||||
|
|
||||||
|
# Check if it's an expression
|
||||||
|
if not expr.get('_expr'):
|
||||||
|
# It's a plain dict - return as-is
|
||||||
|
return expr
|
||||||
|
|
||||||
|
op = expr.get('op')
|
||||||
|
args = expr.get('args', [])
|
||||||
|
|
||||||
|
# Evaluate based on operation
|
||||||
|
if op == 'var':
|
||||||
|
name = expr.get('name')
|
||||||
|
if name in env:
|
||||||
|
return env[name]
|
||||||
|
raise KeyError(f"Unknown variable: {name}")
|
||||||
|
|
||||||
|
elif op == 'dict':
|
||||||
|
keys = expr.get('keys', [])
|
||||||
|
values = [self.eval(a, env) for a in args]
|
||||||
|
return dict(zip(keys, values))
|
||||||
|
|
||||||
|
elif op == 'get':
|
||||||
|
obj = self.eval(args[0], env)
|
||||||
|
key = args[1]
|
||||||
|
return obj.get(key) if isinstance(obj, dict) else obj[key]
|
||||||
|
|
||||||
|
elif op == 'if':
|
||||||
|
cond = self.eval(args[0], env)
|
||||||
|
if cond:
|
||||||
|
return self.eval(args[1], env)
|
||||||
|
elif len(args) > 2:
|
||||||
|
return self.eval(args[2], env)
|
||||||
|
return None
|
||||||
|
|
||||||
|
# Comparison ops
|
||||||
|
elif op == '<':
|
||||||
|
return self.eval(args[0], env) < self.eval(args[1], env)
|
||||||
|
elif op == '>':
|
||||||
|
return self.eval(args[0], env) > self.eval(args[1], env)
|
||||||
|
elif op == '<=':
|
||||||
|
return self.eval(args[0], env) <= self.eval(args[1], env)
|
||||||
|
elif op == '>=':
|
||||||
|
return self.eval(args[0], env) >= self.eval(args[1], env)
|
||||||
|
elif op == '=':
|
||||||
|
return self.eval(args[0], env) == self.eval(args[1], env)
|
||||||
|
elif op == '!=':
|
||||||
|
return self.eval(args[0], env) != self.eval(args[1], env)
|
||||||
|
|
||||||
|
# Arithmetic ops
|
||||||
|
elif op == '+':
|
||||||
|
return self.eval(args[0], env) + self.eval(args[1], env)
|
||||||
|
elif op == '-':
|
||||||
|
return self.eval(args[0], env) - self.eval(args[1], env)
|
||||||
|
elif op == '*':
|
||||||
|
return self.eval(args[0], env) * self.eval(args[1], env)
|
||||||
|
elif op == '/':
|
||||||
|
return self.eval(args[0], env) / self.eval(args[1], env)
|
||||||
|
elif op == 'mod':
|
||||||
|
return self.eval(args[0], env) % self.eval(args[1], env)
|
||||||
|
|
||||||
|
# Random ops
|
||||||
|
elif op == 'rand':
|
||||||
|
return self.rng.random()
|
||||||
|
elif op == 'rand-int':
|
||||||
|
lo = self.eval(args[0], env)
|
||||||
|
hi = self.eval(args[1], env)
|
||||||
|
return self.rng.randint(lo, hi)
|
||||||
|
elif op == 'rand-range':
|
||||||
|
lo = self.eval(args[0], env)
|
||||||
|
hi = self.eval(args[1], env)
|
||||||
|
return self.rng.uniform(lo, hi)
|
||||||
|
|
||||||
|
# Logic ops
|
||||||
|
elif op == 'and':
|
||||||
|
return all(self.eval(a, env) for a in args)
|
||||||
|
elif op == 'or':
|
||||||
|
return any(self.eval(a, env) for a in args)
|
||||||
|
elif op == 'not':
|
||||||
|
return not self.eval(args[0], env)
|
||||||
|
|
||||||
|
else:
|
||||||
|
raise ValueError(f"Unknown operation: {op}")
|
||||||
|
|
||||||
|
|
||||||
|
class SexpStreamingExecutor:
|
||||||
|
"""
|
||||||
|
Executes a compiled sexp recipe in streaming mode.
|
||||||
|
|
||||||
|
Reads scan definitions, effect chains, and bindings from the
|
||||||
|
compiled recipe and executes them frame-by-frame.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, compiled_recipe, seed: int = 42):
    """Build the streaming executor from a compiled recipe.

    Args:
        compiled_recipe: object exposing `.nodes`, a list of node dicts
            (each with 'id', 'type', 'config', 'inputs', ...).
        seed: master seed for scan RNGs and the SLICE_ON interpreter.
    """
    self.recipe = compiled_recipe
    self.master_seed = seed

    # Build node lookup (id -> node dict) for graph queries.
    self.nodes = {n['id']: n for n in compiled_recipe.nodes}

    # State (must be initialized before _init_scans)
    self.beat_count = 0
    self.current_time = 0.0
    self.last_beat_time = 0.0
    self.last_beat_detected = False  # previous frame's beat flag, for edge detection
    self.energy = 0.0

    # Initialize scans
    self.scans: Dict[str, ScanState] = {}
    self.scan_outputs: Dict[str, Any] = {}  # Current emit values by node_id
    self._init_scans()

    # Initialize SLICE_ON interpreter
    self.sexp_interp = SexpInterpreter(random.Random(seed))
    self._slice_on_lambda = None
    self._slice_on_acc = None
    self._slice_on_result = None  # Last evaluation result {layers, compose, acc}
    self._init_slice_on()
|
||||||
|
|
||||||
|
def _init_slice_on(self):
    """Initialize SLICE_ON Lambda for cycle crossfade.

    Uses the first SLICE_ON node in the recipe: its config 'fn' is the
    Lambda re-evaluated on each beat, and 'init' seeds the accumulator
    (cycle index, beat counter, cycle length).
    """
    for node in self.recipe.nodes:
        if node.get('type') == 'SLICE_ON':
            config = node.get('config', {})
            self._slice_on_lambda = config.get('fn')
            init = config.get('init', {})
            self._slice_on_acc = {
                'cycle': init.get('cycle', 0),
                'beat': init.get('beat', 0),
                'clen': init.get('clen', 60),
            }
            # Evaluate initial state so weights/zooms are available before beat 1.
            self._eval_slice_on()
            break
|
||||||
|
|
||||||
|
def _eval_slice_on(self):
    """Evaluate the SLICE_ON Lambda with current state.

    Stores the result ({layers, compose, acc}) in _slice_on_result and
    threads the returned 'acc' back in as next beat's accumulator.
    Errors are reported to stderr and leave the previous result in place.
    """
    if not self._slice_on_lambda:
        return

    n = len(self._get_video_sources())
    videos = list(range(n))  # Placeholder video indices

    try:
        result = eval_slice_on_lambda(
            self._slice_on_lambda,
            self._slice_on_acc,
            self.beat_count,
            0.0,  # start time (not used for weights)
            1.0,  # end time (not used for weights)
            videos,
            self.sexp_interp,
        )
        self._slice_on_result = result
        # Update accumulator for next beat
        if 'acc' in result:
            self._slice_on_acc = result['acc']
    except Exception as e:
        import sys
        print(f"SLICE_ON eval error: {e}", file=sys.stderr)
|
||||||
|
|
||||||
|
def _init_scans(self):
    """Initialize all scan nodes from the recipe.

    Each SCAN node gets its own RNG (config 'seed' if present, otherwise
    master_seed plus a running offset so scans differ), its init expression
    is evaluated to seed the state, and an initial emit is computed.
    """
    seed_offset = 0
    for node in self.recipe.nodes:
        if node.get('type') == 'SCAN':
            node_id = node['id']
            config = node.get('config', {})

            # Create RNG with unique seed
            scan_seed = config.get('seed', self.master_seed + seed_offset)
            rng = random.Random(scan_seed)
            seed_offset += 1

            # Evaluate initial value (init expression sees an empty env)
            init_expr = config.get('init', 0)
            evaluator = ExprEvaluator(rng)
            init_value = evaluator.eval(init_expr, {})

            self.scans[node_id] = ScanState(
                node_id=node_id,
                name=node.get('name'),
                value=init_value,
                rng=rng,
                init_expr=init_expr,
                step_expr=config.get('step_expr', {}),
                emit_expr=config.get('emit_expr', {}),
            )

            # Compute initial emit
            self._update_emit(node_id)
|
||||||
|
|
||||||
|
def _update_emit(self, node_id: str):
    """Update the emit value for a scan.

    Evaluates the scan's emit expression against its current state
    environment and publishes the result in scan_outputs[node_id].
    """
    scan = self.scans[node_id]
    evaluator = ExprEvaluator(scan.rng)

    # Build environment from current state
    env = self._build_scan_env(scan)

    # Evaluate emit expression
    emit_value = evaluator.eval(scan.emit_expr, env)
    self.scan_outputs[node_id] = emit_value
|
||||||
|
|
||||||
|
def _build_scan_env(self, scan: ScanState) -> Dict[str, Any]:
    """Build the variable environment for scan expression evaluation.

    Dict-valued state is splatted into the env (each state key becomes a
    variable); scalar state is exposed as 'acc'. The executor's beat count
    and current time are always available.
    """
    env = {}

    # Add state variables
    if isinstance(scan.value, dict):
        env.update(scan.value)
    else:
        env['acc'] = scan.value

    # Add beat count
    env['beat_count'] = self.beat_count
    env['time'] = self.current_time

    return env
|
||||||
|
|
||||||
|
def on_beat(self):
    """Advance executor state by one beat.

    Increments the beat counter, records the beat time, steps every scan
    (new value = step_expr evaluated in the current env) and refreshes its
    emitted output, then steps the SLICE_ON cycle state.

    Fix: the previous version computed a `beat_interval` local
    (current_time - last_beat_time) that was never read anywhere; that
    dead computation is removed. last_beat_time is still recorded so an
    interval estimate can be derived if needed later.
    """
    self.beat_count += 1
    self.last_beat_time = self.current_time

    # Step each scan
    for node_id, scan in self.scans.items():
        evaluator = ExprEvaluator(scan.rng)
        env = self._build_scan_env(scan)
        scan.value = evaluator.eval(scan.step_expr, env)

        # Update emit
        self._update_emit(node_id)

    # Step the cycle state
    self._step_cycle()
|
||||||
|
|
||||||
|
def on_frame(self, energy: float, is_beat: bool, t: float = 0.0):
    """Called each frame with audio analysis.

    Args:
        energy: current audio energy level.
        is_beat: whether the analyzer currently reports a beat.
        t: current stream time in seconds.

    on_beat() fires only on the rising edge of is_beat, so a beat held
    across several frames advances the scans exactly once.
    """
    self.current_time = t
    self.energy = energy

    # Update scans on beat (edge detection)
    if is_beat and not self.last_beat_detected:
        self.on_beat()
    self.last_beat_detected = is_beat
|
||||||
|
|
||||||
|
def resolve_binding(self, binding: dict) -> Any:
    """Resolve a binding descriptor to its current value.

    Non-binding values (anything that is not a dict flagged '_binding')
    pass through unchanged. A binding reads the named scan's emitted
    output — falling back to the live audio energy when the source is not
    a known scan — optionally extracts one feature from a dict-valued
    emit, and optionally maps a numeric value onto a [lo, hi] range.
    """
    if not isinstance(binding, dict) or not binding.get('_binding'):
        return binding

    feature = binding.get('feature', 'values')
    span = binding.get('range')

    # Raw value: the scan's emit if known, otherwise current energy.
    resolved = self.scan_outputs.get(binding.get('source'), self.energy)

    # Pull a single feature out of a structured emit value.
    if isinstance(resolved, dict) and feature in resolved:
        resolved = resolved[feature]

    # Map a numeric value (presumably normalized 0..1) onto [lo, hi].
    if span and isinstance(resolved, (int, float)):
        lo, hi = span
        resolved = lo + resolved * (hi - lo)

    return resolved
|
||||||
|
|
||||||
|
def get_effect_params(self, effect_node: dict) -> Dict[str, Any]:
    """Get resolved parameters for an effect node.

    Copies the node's config, skipping bookkeeping fields, and resolves
    any binding descriptors to their current values.

    NOTE(review): a second method named get_effect_params(source_idx,
    clip, energy) is defined later in this class; in Python the later
    definition shadows this one, so this variant is unreachable through
    normal attribute lookup. One of the two should be renamed.
    """
    config = effect_node.get('config', {})
    params = {}

    for key, value in config.items():
        # Skip internal fields
        if key in ('effect', 'effect_path', 'effect_cid', 'effects_registry', 'analysis_refs'):
            continue

        # Resolve bindings
        params[key] = self.resolve_binding(value)

    return params
|
||||||
|
|
||||||
|
def get_scan_value(self, name: str) -> Any:
    """Return the emitted output of the first scan with this name, or None."""
    matching_ids = (nid for nid, state in self.scans.items() if state.name == name)
    for nid in matching_ids:
        return self.scan_outputs.get(nid)
    return None
|
||||||
|
|
||||||
|
def get_all_scan_values(self) -> Dict[str, Any]:
    """Return a mapping of every named scan to its current emitted output."""
    return {
        state.name: self.scan_outputs.get(nid)
        for nid, state in self.scans.items()
        if state.name
    }
|
||||||
|
|
||||||
|
# === Compositor interface methods ===
|
||||||
|
|
||||||
|
def _get_video_sources(self) -> List[str]:
    """Get list of video source node IDs.

    NOTE(review): this is a heuristic — it drops the *last* SOURCE node
    on the assumption it is the audio track; despite the inline comment
    it does not actually inspect file extensions. Confirm against the
    recipe's source ordering convention.
    """
    sources = []
    for node in self.recipe.nodes:
        if node.get('type') == 'SOURCE':
            sources.append(node['id'])
    # Filter to video only (exclude audio - last one is usually audio)
    # Look at file extensions in the paths
    return sources[:-1] if len(sources) > 1 else sources
|
||||||
|
|
||||||
|
def _trace_effect_chain(self, start_id: str, stop_at_blend: bool = True) -> List[dict]:
    """Trace effect chain from a node, returning effects in order.

    Follows consumer edges (nodes whose 'inputs' contain the current id):
    EFFECT nodes are collected; a SEGMENT node is passed through without
    being collected. Traversal stops at a 'blend' effect (when
    stop_at_blend), when no consumer is found, or after 20 hops
    (guard against malformed/cyclic graphs).
    """
    chain = []
    current_id = start_id

    for _ in range(20):  # Max depth
        # Find node that uses current as input
        next_node = None
        for node in self.recipe.nodes:
            if current_id in node.get('inputs', []):
                if node.get('type') == 'EFFECT':
                    effect_type = node.get('config', {}).get('effect')
                    chain.append(node)
                    if stop_at_blend and effect_type == 'blend':
                        return chain
                    next_node = node
                    break
                elif node.get('type') == 'SEGMENT':
                    next_node = node
                    break

        if next_node is None:
            break
        current_id = next_node['id']

    return chain
|
||||||
|
|
||||||
|
def _find_clip_chains(self, source_idx: int) -> tuple:
    """Find effect chains for clip A and B from a source.

    Walks the recipe graph SOURCE -> SEGMENT -> two parallel EFFECT
    branches, following each branch until its terminating 'blend' effect
    (or up to 10 hops as a guard against malformed graphs).

    Returns:
        (chain_a, chain_b): lists of EFFECT node dicts; both empty when
        the source index is out of range or no SEGMENT consumes the
        source. Missing branches come back as empty lists.

    Fix: removed a dead `chain = self._trace_effect_chain(segment_id)`
    call whose result was never used (the branch walk below is what
    actually builds the chains).
    """
    sources = self._get_video_sources()
    if source_idx >= len(sources):
        return [], []

    source_id = sources[source_idx]

    # Find SEGMENT node fed by this source.
    segment_id = None
    for node in self.recipe.nodes:
        if node.get('type') == 'SEGMENT' and source_id in node.get('inputs', []):
            segment_id = node['id']
            break

    if not segment_id:
        return [], []

    # Find the two effect chains from segment (clip A and clip B).
    chains = []
    for node in self.recipe.nodes:
        if segment_id in node.get('inputs', []) and node.get('type') == 'EFFECT':
            # Follow this specific branch until its 'blend' terminator.
            branch_chain = [node]
            current = node['id']
            for _ in range(10):
                found = False
                for n in self.recipe.nodes:
                    if current in n.get('inputs', []) and n.get('type') == 'EFFECT':
                        branch_chain.append(n)
                        if n.get('config', {}).get('effect') == 'blend':
                            break
                        current = n['id']
                        found = True
                        break
                if not found:
                    break
            chains.append(branch_chain)

    # Return first two chains as A and B
    chain_a = chains[0] if len(chains) > 0 else []
    chain_b = chains[1] if len(chains) > 1 else []
    return chain_a, chain_b
|
||||||
|
|
||||||
|
def get_effect_params(self, source_idx: int, clip: str, energy: float) -> Dict:
    """Get effect parameters for a source clip (compositor interface).

    Walks the effect chain for clip 'a' or 'b' of the given source and
    folds each recognized effect's config into a flat parameter dict,
    starting from neutral defaults. Analyzer-bound rotate/zoom/ascii
    char_size values are driven directly by `energy` mapped onto the
    binding's range; other bindings go through resolve_binding().

    NOTE(review): this definition shadows the earlier
    get_effect_params(self, effect_node) in this class — the earlier
    variant is unreachable; one of the two should be renamed.
    """
    # Get the correct chain for this clip
    chain_a, chain_b = self._find_clip_chains(source_idx)
    chain = chain_a if clip == 'a' else chain_b

    # Default params (neutral: no rotation, no zoom, no color change)
    params = {
        "rotate_angle": 0,
        "zoom_amount": 1.0,
        "invert_amount": 0,
        "hue_degrees": 0,
        "ascii_mix": 0,
        "ascii_char_size": 8,
    }

    # Resolve from effects in chain
    for eff in chain:
        config = eff.get('config', {})
        effect_type = config.get('effect')

        if effect_type == 'rotate':
            angle_binding = config.get('angle')
            if angle_binding:
                if isinstance(angle_binding, dict) and angle_binding.get('_binding'):
                    # Bound to analyzer - use energy with range
                    range_map = angle_binding.get('range')
                    if range_map:
                        lo, hi = range_map
                        params["rotate_angle"] = lo + energy * (hi - lo)
                    else:
                        params["rotate_angle"] = self.resolve_binding(angle_binding)
                else:
                    params["rotate_angle"] = angle_binding if isinstance(angle_binding, (int, float)) else 0

        elif effect_type == 'zoom':
            amount_binding = config.get('amount')
            if amount_binding:
                if isinstance(amount_binding, dict) and amount_binding.get('_binding'):
                    range_map = amount_binding.get('range')
                    if range_map:
                        lo, hi = range_map
                        params["zoom_amount"] = lo + energy * (hi - lo)
                    else:
                        params["zoom_amount"] = self.resolve_binding(amount_binding)
                else:
                    params["zoom_amount"] = amount_binding if isinstance(amount_binding, (int, float)) else 1.0

        elif effect_type == 'invert':
            amount_binding = config.get('amount')
            if amount_binding:
                val = self.resolve_binding(amount_binding)
                params["invert_amount"] = val if isinstance(val, (int, float)) else 0

        elif effect_type == 'hue_shift':
            deg_binding = config.get('degrees')
            if deg_binding:
                val = self.resolve_binding(deg_binding)
                params["hue_degrees"] = val if isinstance(val, (int, float)) else 0

        elif effect_type == 'ascii_art':
            mix_binding = config.get('mix')
            if mix_binding:
                val = self.resolve_binding(mix_binding)
                params["ascii_mix"] = val if isinstance(val, (int, float)) else 0
            size_binding = config.get('char_size')
            if size_binding:
                if isinstance(size_binding, dict) and size_binding.get('_binding'):
                    range_map = size_binding.get('range')
                    if range_map:
                        lo, hi = range_map
                        params["ascii_char_size"] = lo + energy * (hi - lo)

    return params
|
||||||
|
|
||||||
|
def get_pair_params(self, source_idx: int) -> Dict:
    """Get blend and rotation params for a video pair (compositor interface).

    Locates the 'blend' effect terminating this source's clip-A chain and
    resolves its opacity; then looks for a 'rotate' effect consuming the
    blend output (the pair rotation). Defaults: opacity 0.5, rotation 0.
    """
    params = {
        "blend_opacity": 0.5,
        "pair_rotation": 0,
    }

    # Find the blend node for this source
    chain_a, _ = self._find_clip_chains(source_idx)

    # The last effect in chain_a should be the blend
    blend_node = None
    for eff in reversed(chain_a):
        if eff.get('config', {}).get('effect') == 'blend':
            blend_node = eff
            break

    if blend_node:
        config = blend_node.get('config', {})
        opacity_binding = config.get('opacity')
        if opacity_binding:
            val = self.resolve_binding(opacity_binding)
            if isinstance(val, (int, float)):
                params["blend_opacity"] = val

        # Find rotate after blend (pair rotation)
        blend_id = blend_node['id']
        for node in self.recipe.nodes:
            if blend_id in node.get('inputs', []) and node.get('type') == 'EFFECT':
                if node.get('config', {}).get('effect') == 'rotate':
                    angle_binding = node.get('config', {}).get('angle')
                    if angle_binding:
                        val = self.resolve_binding(angle_binding)
                        if isinstance(val, (int, float)):
                            params["pair_rotation"] = val
                    # Stop after the first rotate consuming the blend.
                    break

    return params
|
||||||
|
|
||||||
|
def _get_cycle_state(self) -> dict:
    """Get current cycle state from SLICE_ON or internal tracking.

    Lazily initialized on first call from the first SLICE_ON node's
    'init' config; the for/else falls back to defaults when no SLICE_ON
    node exists. NOTE(review): this dict is separate from _slice_on_acc,
    which is what _eval_slice_on actually threads between beats — confirm
    whether both are still needed.
    """
    if not hasattr(self, '_cycle_state'):
        # Initialize from SLICE_ON node
        for node in self.recipe.nodes:
            if node.get('type') == 'SLICE_ON':
                init = node.get('config', {}).get('init', {})
                self._cycle_state = {
                    'cycle': init.get('cycle', 0),
                    'beat': init.get('beat', 0),
                    'clen': init.get('clen', 60),
                }
                break
        else:
            self._cycle_state = {'cycle': 0, 'beat': 0, 'clen': 60}

    return self._cycle_state
|
||||||
|
|
||||||
|
def _step_cycle(self):
    """Step the cycle state forward on beat by evaluating SLICE_ON Lambda.

    Thin wrapper kept as the on_beat hook; the actual work (and the
    accumulator update) happens in _eval_slice_on().
    """
    # Use interpreter to evaluate the Lambda
    self._eval_slice_on()
|
||||||
|
|
||||||
|
def get_cycle_weights(self) -> List[float]:
    """Blend weights for cycle-crossfade, taken from the SLICE_ON result.

    Weights from the interpreted 'compose' section are normalized to sum
    to 1. Falls back to equal weights when there is no result, the count
    does not match the number of video sources, or the weights sum to
    zero. With no video sources at all, returns a single full weight.
    """
    source_count = len(self._get_video_sources())
    if source_count == 0:
        return [1.0]

    if self._slice_on_result:
        raw = self._slice_on_result.get('compose', {}).get('weights', [])
        if raw and len(raw) == source_count:
            total = sum(raw)
            if total > 0:
                return [w / total for w in raw]

    # Fallback: equal weights
    return [1.0 / source_count] * source_count
|
||||||
|
|
||||||
|
def get_cycle_zooms(self) -> List[float]:
    """Get zoom amounts for cycle-crossfade from SLICE_ON result.

    Extracts each layer's first 'zoom' effect amount from the interpreted
    result (defaulting to 1.0 per layer); falls back to all-1.0 when the
    result is missing or the layer count does not match the sources.
    """
    n = len(self._get_video_sources())
    if n == 0:
        return [1.0]

    # Get zooms from interpreted result (layers -> effects -> zoom amount)
    if self._slice_on_result:
        layers = self._slice_on_result.get('layers', [])
        if layers and len(layers) == n:
            zooms = []
            for layer in layers:
                effects = layer.get('effects', [])
                zoom_amt = 1.0
                for eff in effects:
                    # The effect tag may be the string 'zoom' or a symbol-like
                    # object carrying .name == 'zoom' (interpreter output).
                    if eff.get('effect') == 'zoom' or (hasattr(eff.get('effect'), 'name') and eff.get('effect').name == 'zoom'):
                        zoom_amt = eff.get('amount', 1.0)
                        break
                zooms.append(zoom_amt)
            return zooms

    # Fallback
    return [1.0] * n
|
||||||
|
|
||||||
|
def _get_final_rotate_scan_id(self) -> str:
    """Find the scan ID that drives the final rotation (after SLICE_ON).

    Scans nodes positioned after the SLICE_ON node for the first 'rotate'
    EFFECT whose angle is a binding, and returns that binding's source id.
    The answer (including None) is cached via the hasattr guard, so this
    graph search runs at most once.
    """
    if hasattr(self, '_final_rotate_scan_id'):
        return self._final_rotate_scan_id

    # Find SLICE_ON node index
    slice_on_idx = None
    for i, node in enumerate(self.recipe.nodes):
        if node.get('type') == 'SLICE_ON':
            slice_on_idx = i
            break

    # Find rotate effect after SLICE_ON
    if slice_on_idx is not None:
        for node in self.recipe.nodes[slice_on_idx + 1:]:
            if node.get('type') == 'EFFECT':
                config = node.get('config', {})
                if config.get('effect') == 'rotate':
                    angle_binding = config.get('angle', {})
                    if isinstance(angle_binding, dict) and angle_binding.get('_binding'):
                        self._final_rotate_scan_id = angle_binding.get('source')
                        return self._final_rotate_scan_id

    self._final_rotate_scan_id = None
    return None
|
||||||
|
|
||||||
|
def get_final_effects(self, energy: float) -> Dict:
    """Get final composition effects (compositor interface).

    Returns the whole-frame spin angle (read from the scan bound to the
    post-SLICE_ON rotate effect) and ripple parameters derived from the
    named scans 'ripple-gate'/'ripple-cx'/'ripple-cy'. Ripple amplitude
    is gated and scaled by audio energy into roughly the 5-50 range.
    """
    # Get named scans
    scan_values = self.get_all_scan_values()

    # Whole spin - get from the specific scan bound to final rotate effect
    whole_spin = 0
    final_rotate_scan_id = self._get_final_rotate_scan_id()
    if final_rotate_scan_id and final_rotate_scan_id in self.scan_outputs:
        val = self.scan_outputs[final_rotate_scan_id]
        if isinstance(val, dict) and 'angle' in val:
            whole_spin = val['angle']
        elif isinstance(val, (int, float)):
            whole_spin = val

    # Ripple
    ripple_gate = scan_values.get('ripple-gate', 0)
    ripple_cx = scan_values.get('ripple-cx', 0.5)
    ripple_cy = scan_values.get('ripple-cy', 0.5)

    # A dict-valued gate without a 'gate' key is treated as fully open (1).
    if isinstance(ripple_gate, dict):
        ripple_gate = ripple_gate.get('gate', 0) if 'gate' in ripple_gate else 1

    return {
        "whole_spin_angle": whole_spin,
        "ripple_amplitude": ripple_gate * (5 + energy * 45),
        "ripple_cx": ripple_cx if isinstance(ripple_cx, (int, float)) else 0.5,
        "ripple_cy": ripple_cy if isinstance(ripple_cy, (int, float)) else 0.5,
    }
|
||||||
376
streaming/sexp_interp.py
Normal file
376
streaming/sexp_interp.py
Normal file
@@ -0,0 +1,376 @@
|
|||||||
|
"""
|
||||||
|
S-expression interpreter for streaming execution.
|
||||||
|
|
||||||
|
Evaluates sexp expressions including:
|
||||||
|
- let bindings
|
||||||
|
- lambda definitions and calls
|
||||||
|
- Arithmetic, comparison, logic operators
|
||||||
|
- dict/list operations
|
||||||
|
- Random number generation
|
||||||
|
"""
|
||||||
|
|
||||||
|
import random
|
||||||
|
from typing import Any, Dict, List, Callable
|
||||||
|
from dataclasses import dataclass
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class Lambda:
    """Runtime lambda value produced by evaluating a (lambda ...) form."""
    params: List[str]        # formal parameter names
    body: Any                # unevaluated body expression
    closure: Dict[str, Any]  # captured environment (closure) — see _eval_lambda
|
||||||
|
|
||||||
|
|
||||||
|
class Symbol:
    """A bare identifier appearing in an S-expression.

    Carries only its name; the interpreter resolves it against the local
    environment or the interpreter globals at evaluation time.
    """

    def __init__(self, name: str):
        self.name = name

    def __repr__(self):
        return "Symbol(%s)" % self.name
|
||||||
|
|
||||||
|
|
||||||
|
class SexpInterpreter:
|
||||||
|
"""
|
||||||
|
Interprets S-expressions in real-time.
|
||||||
|
|
||||||
|
Handles the full sexp language used in recipes.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, rng: random.Random = None):
    """Create an interpreter.

    Args:
        rng: optional RNG driving rand/rand-int/rand-range; a fresh
            unseeded Random is created when omitted.
    """
    self.rng = rng or random.Random()
    # Names visible to every eval() call in addition to the local env.
    self.globals: Dict[str, Any] = {}
|
||||||
|
|
||||||
|
def eval(self, expr: Any, env: Dict[str, Any] = None) -> Any:
    """Evaluate an expression in the given environment.

    Dispatch order: literals pass through; symbols resolve against env
    then globals; dicts are either compiled expressions ('_expr') or
    plain data whose values are recursively evaluated; non-empty
    lists/tuples are sexp forms handled by _eval_list.
    """
    if env is None:
        env = {}

    # Literals
    if isinstance(expr, (int, float, str, bool)) or expr is None:
        return expr

    # Symbol lookup — also accepts any foreign object whose class is
    # literally named 'Symbol' (duck-typed, e.g. a parser's own Symbol).
    if isinstance(expr, Symbol) or (hasattr(expr, 'name') and hasattr(expr, '__class__') and expr.__class__.__name__ == 'Symbol'):
        name = expr.name if hasattr(expr, 'name') else str(expr)
        if name in env:
            return env[name]
        if name in self.globals:
            return self.globals[name]
        raise NameError(f"Undefined symbol: {name}")

    # Compiled expression dict (from compiler)
    if isinstance(expr, dict):
        if expr.get('_expr'):
            return self._eval_compiled_expr(expr, env)
        # Plain dict - evaluate values that might be expressions
        result = {}
        for k, v in expr.items():
            # Some keys should keep Symbol values as strings (effect names, modes)
            if k in ('effect', 'mode') and hasattr(v, 'name'):
                result[k] = v.name
            else:
                result[k] = self.eval(v, env)
        return result

    # List expression (sexp)
    if isinstance(expr, (list, tuple)) and len(expr) > 0:
        return self._eval_list(expr, env)

    # Empty list
    if isinstance(expr, (list, tuple)):
        return []

    # Anything else (already-evaluated runtime values) passes through.
    return expr
|
||||||
|
|
||||||
|
def _eval_compiled_expr(self, expr: dict, env: Dict[str, Any]) -> Any:
    """Evaluate a compiled expression dict.

    Dispatches on expr['op']: variable lookup (env first, then globals),
    dict/get construction, lazy 'if', strict binary arithmetic and
    comparison, RNG draws, and short-circuiting logic ops.

    Raises:
        NameError: for an unresolvable 'var'.
        ValueError: for an unrecognized op.
    """
    op = expr.get('op')
    args = expr.get('args', [])

    if op == 'var':
        ident = expr.get('name')
        if ident in env:
            return env[ident]
        if ident in self.globals:
            return self.globals[ident]
        raise NameError(f"Undefined: {ident}")

    if op == 'dict':
        return dict(zip(expr.get('keys', []), (self.eval(a, env) for a in args)))

    if op == 'get':
        target = self.eval(args[0], env)
        field = args[1]
        return target.get(field) if isinstance(target, dict) else target[field]

    if op == 'if':
        # Lazy: only the taken branch is evaluated; missing else -> None.
        if self.eval(args[0], env):
            return self.eval(args[1], env)
        return self.eval(args[2], env) if len(args) > 2 else None

    if op in ('<', '>', '<=', '>=', '=', '!=', '+', '-', '*', '/', 'mod'):
        # Strict binary operators: evaluate both operands, left first.
        lhs = self.eval(args[0], env)
        rhs = self.eval(args[1], env)
        if op == '<':
            return lhs < rhs
        if op == '>':
            return lhs > rhs
        if op == '<=':
            return lhs <= rhs
        if op == '>=':
            return lhs >= rhs
        if op == '=':
            return lhs == rhs
        if op == '!=':
            return lhs != rhs
        if op == '+':
            return lhs + rhs
        if op == '-':
            return lhs - rhs
        if op == '*':
            return lhs * rhs
        if op == '/':
            return lhs / rhs
        return lhs % rhs  # 'mod'

    if op == 'rand':
        return self.rng.random()
    if op == 'rand-int':
        return self.rng.randint(self.eval(args[0], env), self.eval(args[1], env))
    if op == 'rand-range':
        return self.rng.uniform(self.eval(args[0], env), self.eval(args[1], env))

    if op == 'and':
        return all(self.eval(a, env) for a in args)
    if op == 'or':
        return any(self.eval(a, env) for a in args)
    if op == 'not':
        return not self.eval(args[0], env)

    raise ValueError(f"Unknown op: {op}")
|
||||||
|
|
||||||
|
def _eval_list(self, expr: list, env: Dict[str, Any]) -> Any:
    """Evaluate a list expression (sexp form).

    The head determines the form: special forms (let/lambda/if/dict/...),
    built-in operators, or a named/function-valued call. A list whose
    head is a dict is treated as plain data and evaluated element-wise.
    """
    if len(expr) == 0:
        return []

    head = expr[0]

    # Get head name.
    # NOTE(review): hasattr(head, '__class__') is always true, so any
    # object carrying a .name attribute is treated as a symbol here —
    # confirm that is intended.
    if isinstance(head, Symbol) or (hasattr(head, 'name') and hasattr(head, '__class__')):
        head_name = head.name if hasattr(head, 'name') else str(head)
    elif isinstance(head, str):
        head_name = head
    else:
        # Not a symbol - check if it's a data list or function call
        if isinstance(head, dict):
            # List of dicts - evaluate each element as data
            return [self.eval(item, env) for item in expr]
        # Otherwise evaluate as function call
        fn = self.eval(head, env)
        args = [self.eval(a, env) for a in expr[1:]]
        return self._call(fn, args, env)

    # Special forms
    if head_name == 'let':
        return self._eval_let(expr, env)
    elif head_name in ('lambda', 'fn'):
        return self._eval_lambda(expr, env)
    elif head_name == 'if':
        return self._eval_if(expr, env)
    elif head_name == 'dict':
        return self._eval_dict(expr, env)
    elif head_name == 'get':
        obj = self.eval(expr[1], env)
        key = self.eval(expr[2], env) if len(expr) > 2 else expr[2]
        if isinstance(key, str):
            return obj.get(key) if isinstance(obj, dict) else getattr(obj, key, None)
        return obj[key]
    elif head_name == 'len':
        return len(self.eval(expr[1], env))
    elif head_name == 'range':
        # (range n) -> 0..n-1; (range a b) -> a..b-1
        start = self.eval(expr[1], env)
        end = self.eval(expr[2], env) if len(expr) > 2 else start
        if len(expr) == 2:
            return list(range(end))
        return list(range(start, end))
    elif head_name == 'map':
        fn = self.eval(expr[1], env)
        lst = self.eval(expr[2], env)
        return [self._call(fn, [x], env) for x in lst]
    elif head_name == 'mod':
        return self.eval(expr[1], env) % self.eval(expr[2], env)

    # Arithmetic
    elif head_name == '+':
        return self.eval(expr[1], env) + self.eval(expr[2], env)
    elif head_name == '-':
        # Unary minus when only one operand is given.
        if len(expr) == 2:
            return -self.eval(expr[1], env)
        return self.eval(expr[1], env) - self.eval(expr[2], env)
    elif head_name == '*':
        return self.eval(expr[1], env) * self.eval(expr[2], env)
    elif head_name == '/':
        return self.eval(expr[1], env) / self.eval(expr[2], env)

    # Comparison
    elif head_name == '<':
        return self.eval(expr[1], env) < self.eval(expr[2], env)
    elif head_name == '>':
        return self.eval(expr[1], env) > self.eval(expr[2], env)
    elif head_name == '<=':
        return self.eval(expr[1], env) <= self.eval(expr[2], env)
    elif head_name == '>=':
        return self.eval(expr[1], env) >= self.eval(expr[2], env)
    elif head_name == '=':
        return self.eval(expr[1], env) == self.eval(expr[2], env)

    # Logic (variadic; all/any short-circuit)
    elif head_name == 'and':
        return all(self.eval(a, env) for a in expr[1:])
    elif head_name == 'or':
        return any(self.eval(a, env) for a in expr[1:])
    elif head_name == 'not':
        return not self.eval(expr[1], env)

    # Function call
    else:
        # NOTE(review): `or`-chained lookup skips a falsy env binding and
        # falls through to globals — harmless for callables, but a falsy
        # value bound under this name would be shadowed.
        fn = env.get(head_name) or self.globals.get(head_name)
        if fn is None:
            raise NameError(f"Undefined function: {head_name}")
        args = [self.eval(a, env) for a in expr[1:]]
        return self._call(fn, args, env)
|
||||||
|
|
||||||
|
def _eval_let(self, expr: list, env: Dict[str, Any]) -> Any:
|
||||||
|
"""Evaluate (let [bindings...] body)."""
|
||||||
|
bindings = expr[1]
|
||||||
|
body = expr[2]
|
||||||
|
|
||||||
|
# Create new environment with bindings
|
||||||
|
new_env = dict(env)
|
||||||
|
|
||||||
|
# Process bindings in pairs
|
||||||
|
i = 0
|
||||||
|
while i < len(bindings):
|
||||||
|
name = bindings[i]
|
||||||
|
if isinstance(name, Symbol) or hasattr(name, 'name'):
|
||||||
|
name = name.name if hasattr(name, 'name') else str(name)
|
||||||
|
value = self.eval(bindings[i + 1], new_env)
|
||||||
|
new_env[name] = value
|
||||||
|
i += 2
|
||||||
|
|
||||||
|
return self.eval(body, new_env)
|
||||||
|
|
||||||
|
def _eval_lambda(self, expr: list, env: Dict[str, Any]) -> Lambda:
|
||||||
|
"""Evaluate (lambda [params] body)."""
|
||||||
|
params_expr = expr[1]
|
||||||
|
body = expr[2]
|
||||||
|
|
||||||
|
# Extract parameter names
|
||||||
|
params = []
|
||||||
|
for p in params_expr:
|
||||||
|
if isinstance(p, Symbol) or hasattr(p, 'name'):
|
||||||
|
params.append(p.name if hasattr(p, 'name') else str(p))
|
||||||
|
else:
|
||||||
|
params.append(str(p))
|
||||||
|
|
||||||
|
return Lambda(params=params, body=body, closure=dict(env))
|
||||||
|
|
||||||
|
def _eval_if(self, expr: list, env: Dict[str, Any]) -> Any:
|
||||||
|
"""Evaluate (if cond then else)."""
|
||||||
|
cond = self.eval(expr[1], env)
|
||||||
|
if cond:
|
||||||
|
return self.eval(expr[2], env)
|
||||||
|
elif len(expr) > 3:
|
||||||
|
return self.eval(expr[3], env)
|
||||||
|
return None
|
||||||
|
|
||||||
|
def _eval_dict(self, expr: list, env: Dict[str, Any]) -> dict:
|
||||||
|
"""Evaluate (dict :key val ...)."""
|
||||||
|
result = {}
|
||||||
|
i = 1
|
||||||
|
while i < len(expr):
|
||||||
|
key = expr[i]
|
||||||
|
# Handle keyword syntax (:key) and Keyword objects
|
||||||
|
if hasattr(key, 'name'):
|
||||||
|
key = key.name
|
||||||
|
elif hasattr(key, '__class__') and key.__class__.__name__ == 'Keyword':
|
||||||
|
key = str(key).lstrip(':')
|
||||||
|
elif isinstance(key, str) and key.startswith(':'):
|
||||||
|
key = key[1:]
|
||||||
|
value = self.eval(expr[i + 1], env)
|
||||||
|
result[key] = value
|
||||||
|
i += 2
|
||||||
|
return result
|
||||||
|
|
||||||
|
    def _call(self, fn: Any, args: List[Any], env: Dict[str, Any]) -> Any:
        """Call a function with arguments.

        Three callable kinds are supported, checked in this order:
        1. This module's own Lambda (lexically scoped via its closure),
        2. duck-typed Lambda-likes with .params/.body (parser Lambdas),
        3. plain Python callables.

        NOTE(review): zip() silently drops extra args and leaves missing
        params unbound — arity mismatches are not reported; confirm this
        leniency is intended.
        """
        if isinstance(fn, Lambda):
            # Our own Lambda type: the call environment is the captured
            # closure only (caller's env is NOT visible inside the body).
            call_env = dict(fn.closure)
            for param, arg in zip(fn.params, args):
                call_env[param] = arg
            return self.eval(fn.body, call_env)
        elif hasattr(fn, 'params') and hasattr(fn, 'body'):
            # Lambda from parser (artdag.sexp.parser.Lambda). Scoping here
            # differs from the branch above: the caller's env is the base,
            # with the closure (if any) layered on top of it.
            call_env = dict(env)
            if hasattr(fn, 'closure') and fn.closure:
                call_env.update(fn.closure)
            # Get param names (parser params may be Symbols or strings)
            params = []
            for p in fn.params:
                if hasattr(p, 'name'):
                    params.append(p.name)
                else:
                    params.append(str(p))
            for param, arg in zip(params, args):
                call_env[param] = arg
            return self.eval(fn.body, call_env)
        elif callable(fn):
            # Native Python function/builtin: call directly.
            return fn(*args)
        else:
            raise TypeError(f"Not callable: {type(fn).__name__}")
|
||||||
|
|
||||||
|
|
||||||
|
def eval_slice_on_lambda(lambda_obj, acc: dict, i: int, start: float, end: float,
                         videos: list, interp: SexpInterpreter = None) -> dict:
    """
    Evaluate a SLICE_ON lambda function.

    Args:
        lambda_obj: The Lambda object from the compiled recipe
        acc: Current accumulator state
        i: Beat index
        start: Slice start time
        end: Slice end time
        videos: List of video inputs
        interp: Interpreter to use (a fresh SexpInterpreter if None)

    Returns:
        Dict with 'layers', 'compose', 'acc' keys
    """
    interpreter = interp if interp is not None else SexpInterpreter()

    # Expose the video list globally so (len videos) resolves inside the lambda.
    interpreter.globals['videos'] = videos

    # Seed the call environment from the lambda's closure (if any) and make
    # 'videos' visible there as well.
    closure = getattr(lambda_obj, 'closure', None)
    env = dict(closure) if closure else {}
    env['videos'] = videos

    # Apply the lambda to (acc, i, start, end).
    return interpreter._call(lambda_obj, [acc, i, start, end], env)
|
||||||
281
streaming/sources.py
Normal file
281
streaming/sources.py
Normal file
@@ -0,0 +1,281 @@
|
|||||||
|
"""
|
||||||
|
Video and image sources with looping support.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
import subprocess
|
||||||
|
import json
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional, Tuple
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
|
||||||
|
|
||||||
|
class Source(ABC):
    """Abstract interface for anything that can produce frames over time."""

    @abstractmethod
    def read_frame(self, t: float) -> np.ndarray:
        """Return the frame for time t, looping if the source supports it."""
        ...

    @property
    @abstractmethod
    def duration(self) -> float:
        """Length of the source in seconds."""
        ...

    @property
    @abstractmethod
    def size(self) -> Tuple[int, int]:
        """Frame dimensions as (width, height)."""
        ...

    @property
    @abstractmethod
    def fps(self) -> float:
        """Frame rate in frames per second."""
        ...
|
||||||
|
|
||||||
|
|
||||||
|
class VideoSource(Source):
    """
    Video file source with automatic looping.

    Reads frames on-demand from an ffmpeg subprocess, seeking as needed.
    When time exceeds duration, wraps around (loops).
    """

    def __init__(self, path: str, target_fps: float = 30):
        """
        Args:
            path: Path to the video file.
            target_fps: Frame rate the decode process resamples to.

        Raises:
            FileNotFoundError: If the video file does not exist.
            RuntimeError: If ffprobe fails or the file has no video stream.
        """
        self.path = Path(path)
        self.target_fps = target_fps

        # Initialize decode state first (before _probe which could fail)
        # so close()/__del__ stay safe even when __init__ raises.
        self._process: Optional[subprocess.Popen] = None
        self._current_start: Optional[float] = None
        self._frame_buffer: Optional[np.ndarray] = None
        self._buffer_time: Optional[float] = None

        self._duration = None
        self._size = None
        self._fps = None

        if not self.path.exists():
            raise FileNotFoundError(f"Video not found: {path}")

        self._probe()

    def _probe(self):
        """Read duration, size and fps from the file via ffprobe.

        Raises:
            RuntimeError: If ffprobe exits non-zero or no video stream exists.
        """
        cmd = [
            "ffprobe", "-v", "quiet",
            "-print_format", "json",
            "-show_format", "-show_streams",
            str(self.path)
        ]
        result = subprocess.run(cmd, capture_output=True, text=True)
        # Fix: previously a failed ffprobe run fell through to an opaque
        # JSONDecodeError on the empty stdout.
        if result.returncode != 0:
            raise RuntimeError(
                f"ffprobe failed for {self.path}: {result.stderr.strip()}"
            )
        data = json.loads(result.stdout)

        # Get duration
        self._duration = float(data["format"]["duration"])

        # Get video stream info
        for stream in data["streams"]:
            if stream["codec_type"] == "video":
                self._size = (int(stream["width"]), int(stream["height"]))
                # Parse fps from r_frame_rate (e.g., "30/1" or "30000/1001")
                fps_parts = stream.get("r_frame_rate", "30/1").split("/")
                self._fps = float(fps_parts[0]) / float(fps_parts[1])
                break

        # Fix: without a video stream, _size stayed None and _start_decode
        # would later crash unpacking it. Fail early and clearly instead.
        if self._size is None:
            raise RuntimeError(f"No video stream found in {self.path}")

    @property
    def duration(self) -> float:
        return self._duration

    @property
    def size(self) -> Tuple[int, int]:
        return self._size

    @property
    def fps(self) -> float:
        return self._fps

    def _start_decode(self, start_time: float):
        """(Re)start the ffmpeg decode process from the given time."""
        if self._process:
            # Tear down any previous decoder before starting a new one.
            try:
                self._process.stdout.close()
            except Exception:  # fix: was a bare except
                pass
            self._process.terminate()
            try:
                self._process.wait(timeout=1)
            except Exception:  # fix: was a bare except
                self._process.kill()
                self._process.wait()

        w, h = self._size
        cmd = [
            "ffmpeg", "-v", "quiet",
            "-ss", str(start_time),
            "-i", str(self.path),
            "-f", "rawvideo",
            "-pix_fmt", "rgb24",
            "-r", str(self.target_fps),
            "-"
        ]
        self._process = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
            bufsize=w * h * 3 * 4,  # Buffer a few frames
        )
        self._current_start = start_time
        self._buffer_time = start_time

    def read_frame(self, t: float) -> np.ndarray:
        """
        Read frame at time t.

        If t exceeds duration, wraps around (loops). Seeks if t is not
        contiguous with the last read; otherwise reads sequentially.
        Returns the last good frame (or black) if the decoder yields no data.
        """
        # Wrap time for looping
        t_wrapped = t % self._duration

        # Check if we need to seek (loop point or large time jump):
        # more than ~2 frame intervals away from the sequential position.
        need_seek = (
            self._process is None or
            self._buffer_time is None or
            abs(t_wrapped - self._buffer_time) > 1.0 / self.target_fps * 2
        )

        if need_seek:
            self._start_decode(t_wrapped)

        # Read frame
        w, h = self._size
        frame_size = w * h * 3

        # Try to read with retries for seek settling
        raw = b""
        for attempt in range(3):
            raw = self._process.stdout.read(frame_size)
            if raw is not None and len(raw) == frame_size:
                break
            # End of stream or seek not ready - restart from beginning
            self._start_decode(0)

        if raw is None or len(raw) < frame_size:
            # Still no data - return last frame or black
            if self._frame_buffer is not None:
                return self._frame_buffer.copy()
            return np.zeros((h, w, 3), dtype=np.uint8)

        frame = np.frombuffer(raw, dtype=np.uint8).reshape((h, w, 3))
        self._frame_buffer = frame  # Cache for fallback
        self._buffer_time = t_wrapped + 1.0 / self.target_fps

        return frame

    def close(self):
        """Clean up the decode subprocess (safe to call more than once)."""
        if self._process:
            try:
                self._process.stdout.close()
            except Exception:
                pass
            self._process.terminate()
            self._process.wait()
            self._process = None

    def __del__(self):
        # Fix: guard against __init__ having raised before attributes existed
        # (e.g. Path() failure), which would make close() itself raise.
        if getattr(self, '_process', None) is not None:
            self.close()

    def __repr__(self):
        return f"VideoSource({self.path.name}, {self._size[0]}x{self._size[1]}, {self._duration:.1f}s)"
|
||||||
|
|
||||||
|
|
||||||
|
class ImageSource(Source):
    """
    Static image source (returns same frame for any time).

    Useful for backgrounds, overlays, etc.
    """

    def __init__(self, path: str):
        """
        Args:
            path: Path to the image file.

        Raises:
            FileNotFoundError: If the file does not exist.
            ValueError: If the file exists but cannot be decoded as an image.
        """
        self.path = Path(path)
        if not self.path.exists():
            raise FileNotFoundError(f"Image not found: {path}")

        # Load image (local import keeps cv2 an on-demand dependency)
        import cv2
        frame = cv2.imread(str(self.path))
        # Fix: cv2.imread signals failure by returning None (no exception),
        # which previously crashed cvtColor with a cryptic error.
        if frame is None:
            raise ValueError(f"Could not decode image: {path}")
        self._frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        self._size = (self._frame.shape[1], self._frame.shape[0])

    @property
    def duration(self) -> float:
        return float('inf')  # Images last forever

    @property
    def size(self) -> Tuple[int, int]:
        return self._size

    @property
    def fps(self) -> float:
        return 30.0  # Arbitrary

    def read_frame(self, t: float) -> np.ndarray:
        # Copy so callers can mutate the frame without corrupting the cache.
        return self._frame.copy()

    def __repr__(self):
        return f"ImageSource({self.path.name}, {self._size[0]}x{self._size[1]})"
|
||||||
|
|
||||||
|
|
||||||
|
class LiveSource(Source):
    """
    Live video capture source (webcam, capture card, etc.).

    Time parameter is ignored - always returns latest frame.
    """

    def __init__(self, device: int = 0, size: Tuple[int, int] = (1280, 720), fps: float = 30):
        """
        Args:
            device: OpenCV capture device index.
            size: Requested (width, height); the driver may choose otherwise.
            fps: Requested frame rate; the driver may choose otherwise.

        Raises:
            RuntimeError: If the capture device cannot be opened.
        """
        import cv2
        self._cap = cv2.VideoCapture(device)
        # Fix: check BEFORE configuring. Previously the isOpened() check ran
        # only after setting and reading properties on a possibly-dead
        # capture, so _size/_fps were filled with garbage before the raise.
        if not self._cap.isOpened():
            raise RuntimeError(f"Could not open video device {device}")

        self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, size[0])
        self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, size[1])
        self._cap.set(cv2.CAP_PROP_FPS, fps)

        # Get actual settings (drivers often ignore requested values)
        self._size = (
            int(self._cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(self._cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        )
        self._fps = self._cap.get(cv2.CAP_PROP_FPS)

    @property
    def duration(self) -> float:
        return float('inf')  # Live - no duration

    @property
    def size(self) -> Tuple[int, int]:
        return self._size

    @property
    def fps(self) -> float:
        return self._fps

    def read_frame(self, t: float) -> np.ndarray:
        """Read latest frame (t is ignored for live sources); black on failure."""
        import cv2
        ret, frame = self._cap.read()
        if not ret:
            return np.zeros((self._size[1], self._size[0], 3), dtype=np.uint8)
        return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    def close(self):
        """Release the capture device (safe even if __init__ failed early)."""
        # Fix: guard so __del__ doesn't raise AttributeError when the cv2
        # import or VideoCapture construction failed before _cap was set.
        cap = getattr(self, '_cap', None)
        if cap is not None:
            cap.release()

    def __del__(self):
        self.close()

    def __repr__(self):
        return f"LiveSource({self._size[0]}x{self._size[1]}, {self._fps}fps)"
|
||||||
65
templates/cycle-crossfade.sexp
Normal file
65
templates/cycle-crossfade.sexp
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
;; cycle-crossfade template
;;
;; Generalized cycling zoom-crossfade for any number of video layers.
;; Cycles through videos with smooth zoom-based crossfade transitions.
;;
;; Parameters:
;;   beat-data    - beat analysis node (drives timing)
;;   input-videos - list of video nodes to cycle through
;;   init-clen    - initial cycle length in beats
;;
;; NOTE: The parameter is named "input-videos" (not "videos") because
;; template substitution replaces param names everywhere in the AST.
;; The planner's _expand_slice_on injects env["videos"] at plan time,
;; so (len videos) inside the lambda references that injected value.

(deftemplate cycle-crossfade
  (beat-data input-videos init-clen)

  ;; Accumulator: cycle = index of the active layer, beat = beats elapsed
  ;; in the current cycle, clen = current cycle length in beats.
  (slice-on beat-data
    :videos input-videos
    :init {:cycle 0 :beat 0 :clen init-clen}
    :fn (lambda [acc i start end]
      (let [beat (get acc "beat")
            clen (get acc "clen")
            active (get acc "cycle")
            n (len videos)
            ;; phase3 runs 3x faster than the beat counter; the envelopes
            ;; below hold for phase3 < clen, transition while
            ;; clen <= phase3 < 2*clen, and clamp afterwards.
            phase3 (* beat 3)
            ;; Blend weight for layer p: active layer fades 1.0 -> 0.0 over
            ;; the transition window; the previous layer (the incoming one
            ;; in cycle order) fades 0.0 -> 1.0; all others stay at 0.0.
            wt (lambda [p]
                 (let [prev (mod (+ p (- n 1)) n)]
                   (if (= active p)
                       (if (< phase3 clen) 1.0
                           (if (< phase3 (* clen 2))
                               (- 1.0 (* (/ (- phase3 clen) clen) 1.0))
                               0.0))
                       (if (= active prev)
                           (if (< phase3 clen) 0.0
                               (if (< phase3 (* clen 2))
                                   (* (/ (- phase3 clen) clen) 1.0)
                                   1.0))
                           0.0))))
            ;; Zoom amount for layer p, synchronized with wt.
            zm (lambda [p]
                 (let [prev (mod (+ p (- n 1)) n)]
                   (if (= active p)
                       ;; Active video: normal -> zoom out during transition -> tiny
                       (if (< phase3 clen) 1.0
                           (if (< phase3 (* clen 2))
                               (+ 1.0 (* (/ (- phase3 clen) clen) 1.0))
                               0.1))
                       (if (= active prev)
                           ;; Incoming video: tiny -> zoom in during transition -> normal
                           (if (< phase3 clen) 0.1
                               (if (< phase3 (* clen 2))
                                   (+ 0.1 (* (/ (- phase3 clen) clen) 0.9))
                                   1.0))
                           0.1))))
            ;; Advance the beat counter; when a cycle completes, move to the
            ;; next layer and pick a new length of 40..80 beats derived
            ;; deterministically from the beat index i.
            new-acc (if (< (+ beat 1) clen)
                        (dict :cycle active :beat (+ beat 1) :clen clen)
                        (dict :cycle (mod (+ active 1) n) :beat 0
                              :clen (+ 40 (mod (* i 7) 41))))]
        ;; Emit one zoomed layer per video plus the per-layer blend weights.
        {:layers (map (lambda [p]
                        {:video p :effects [{:effect zoom :amount (zm p)}]})
                      (range 0 n))
         :compose {:effect blend_multi :mode "alpha"
                   :weights (map (lambda [p] (wt p)) (range 0 n))}
         :acc new-acc}))))
|
||||||
112
templates/process-pair.sexp
Normal file
112
templates/process-pair.sexp
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
;; process-pair template
;;
;; Reusable video-pair processor: takes a single video source, creates two
;; clips (A and B) with opposite rotations and sporadic effects, blends them,
;; and applies a per-pair slow rotation driven by a beat scan.
;;
;; All sporadic triggers (invert, hue-shift, ascii) and pair-level controls
;; (blend opacity, rotation) are defined internally using seed offsets.
;;
;; Parameters:
;;   video     - source video node
;;   energy    - energy analysis node (drives rotation/zoom amounts)
;;   beat-data - beat analysis node (drives sporadic triggers)
;;   rng       - RNG object from (make-rng seed) for auto-derived seeds
;;   rot-dir   - initial rotation direction: 1 (clockwise) or -1 (anti-clockwise)
;;   rot-a/b   - rotation ranges for clip A/B (e.g. [0 45])
;;   zoom-a/b  - zoom ranges for clip A/B (e.g. [1 1.5])

(deftemplate process-pair
  (video energy beat-data rng rot-dir rot-a rot-b zoom-a zoom-b)

  ;; --- Sporadic triggers for clip A ---
  ;; Each scan keeps a "beats remaining" countdown in its accumulator:
  ;; while acc > 0 the effect stays on; otherwise a random roll may arm it.

  ;; Invert: 10% chance per beat, lasts 1-5 beats
  (def inv-a (scan beat-data :rng rng :init 0
    :step (if (> acc 0) (- acc 1) (if (< (rand) 0.1) (rand-int 1 5) 0))
    :emit (if (> acc 0) 1 0)))

  ;; Hue shift: 10% chance, random hue 30-330 deg, lasts 1-5 beats
  (def hue-a (scan beat-data :rng rng :init (dict :rem 0 :hue 0)
    :step (if (> rem 0)
              (dict :rem (- rem 1) :hue hue)
              (if (< (rand) 0.1)
                  (dict :rem (rand-int 1 5) :hue (rand-range 30 330))
                  (dict :rem 0 :hue 0)))
    :emit (if (> rem 0) hue 0)))

  ;; ASCII art: 5% chance, lasts 1-3 beats
  (def ascii-a (scan beat-data :rng rng :init 0
    :step (if (> acc 0) (- acc 1) (if (< (rand) 0.05) (rand-int 1 3) 0))
    :emit (if (> acc 0) 1 0)))

  ;; --- Sporadic triggers for clip B (offset seeds) ---
  ;; NOTE(review): the B scans pass the same rng object as the A scans;
  ;; "offset seeds" presumably means sequential draws from that shared
  ;; stream rather than distinct seeds — confirm against scan's rng handling.

  (def inv-b (scan beat-data :rng rng :init 0
    :step (if (> acc 0) (- acc 1) (if (< (rand) 0.1) (rand-int 1 5) 0))
    :emit (if (> acc 0) 1 0)))

  (def hue-b (scan beat-data :rng rng :init (dict :rem 0 :hue 0)
    :step (if (> rem 0)
              (dict :rem (- rem 1) :hue hue)
              (if (< (rand) 0.1)
                  (dict :rem (rand-int 1 5) :hue (rand-range 30 330))
                  (dict :rem 0 :hue 0)))
    :emit (if (> rem 0) hue 0)))

  (def ascii-b (scan beat-data :rng rng :init 0
    :step (if (> acc 0) (- acc 1) (if (< (rand) 0.05) (rand-int 1 3) 0))
    :emit (if (> acc 0) 1 0)))

  ;; --- Pair-level controls ---

  ;; Internal A/B blend: randomly show A (0), both (0.5), or B (1), every 1-11 beats
  (def pair-mix (scan beat-data :rng rng
    :init (dict :rem 0 :opacity 0.5)
    :step (if (> rem 0)
              (dict :rem (- rem 1) :opacity opacity)
              (dict :rem (rand-int 1 11) :opacity (* (rand-int 0 2) 0.5)))
    :emit opacity))

  ;; Per-pair rotation: one full rotation every 20-30 beats, alternating direction.
  ;; The angle accumulates (+ 360/clen per beat) rather than resetting, giving
  ;; cumulative whole-spin rotation across cycles.
  (def pair-rot (scan beat-data :rng rng
    :init (dict :beat 0 :clen 25 :dir rot-dir :angle 0)
    :step (if (< (+ beat 1) clen)
              (dict :beat (+ beat 1) :clen clen :dir dir
                    :angle (+ angle (* dir (/ 360 clen))))
              (dict :beat 0 :clen (rand-int 20 30) :dir (* dir -1)
                    :angle angle))
    :emit angle))

  ;; --- Clip A processing ---
  (def clip-a (-> video (segment :start 0 :duration (bind energy duration))))
  (def rotated-a (-> clip-a
    (effect rotate :angle (bind energy values :range rot-a))
    (effect zoom :amount (bind energy values :range zoom-a))
    (effect invert :amount (bind inv-a values))
    (effect hue_shift :degrees (bind hue-a values))
    ;; ASCII disabled - too slow without GPU
    ;; (effect ascii_art
    ;;   :char_size (bind energy values :range [4 32])
    ;;   :mix (bind ascii-a values))
    ))

  ;; --- Clip B processing ---
  (def clip-b (-> video (segment :start 0 :duration (bind energy duration))))
  (def rotated-b (-> clip-b
    (effect rotate :angle (bind energy values :range rot-b))
    (effect zoom :amount (bind energy values :range zoom-b))
    (effect invert :amount (bind inv-b values))
    (effect hue_shift :degrees (bind hue-b values))
    ;; ASCII disabled - too slow without GPU
    ;; (effect ascii_art
    ;;   :char_size (bind energy values :range [4 32])
    ;;   :mix (bind ascii-b values))
    ))

  ;; --- Blend A+B and apply pair rotation ---
  (-> rotated-a
    (effect blend rotated-b
      :mode "alpha" :opacity (bind pair-mix values) :resize_mode "fit")
    (effect rotate
      :angle (bind pair-rot values))))
|
||||||
Reference in New Issue
Block a user