Replace batch DAG system with streaming architecture

- Remove legacy_tasks.py, hybrid_state.py, render.py
- Remove old task modules (analyze, execute, execute_sexp, orchestrate)
- Add streaming interpreter from test repo
- Add sexp_effects with primitives and video effects
- Add streaming Celery task with CID-based asset resolution
- Support both CID and friendly name references for assets
- Add .dockerignore to prevent local clones from conflicting

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
giles
2026-02-02 19:10:11 +00:00
parent 270eeb3fcf
commit bb458aa924
107 changed files with 15830 additions and 3211 deletions

22
.dockerignore Normal file
View File

@@ -0,0 +1,22 @@
# Don't copy local clones - Dockerfile will clone fresh
artdag-effects/
# Python cache
__pycache__/
*.py[cod]
*.egg-info/
.pytest_cache/
# Virtual environments
.venv/
venv/
# Local env
.env
# Git
.git/
# IDE
.vscode/
.idea/

View File

@@ -0,0 +1,62 @@
{% extends "base.html" %}
{% block title %}Run Artifacts{% endblock %}
{% block content %}
<div class="mb-6">
<a href="/runs/{{ run_id }}/detail" class="inline-flex items-center text-blue-400 hover:text-blue-300">
<svg class="w-4 h-4 mr-1" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M15 19l-7-7 7-7"/>
</svg>
Back to Run
</a>
</div>
<h1 class="text-2xl font-bold text-white mb-6">Run Artifacts</h1>
{% if artifacts %}
<div class="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-4">
{% for artifact in artifacts %}
<div class="bg-gray-800 rounded-lg p-4">
<div class="flex items-center justify-between mb-3">
<span class="px-2 py-1 text-xs rounded
{% if artifact.role == 'input' %}bg-blue-600
{% elif artifact.role == 'output' %}bg-green-600
{% else %}bg-purple-600{% endif %}">
{{ artifact.role }}
</span>
<span class="text-sm text-gray-400">{{ artifact.step_name }}</span>
</div>
<div class="mb-3">
<p class="text-xs text-gray-500 mb-1">Content Hash</p>
<p class="font-mono text-xs text-gray-300 truncate">{{ artifact.hash }}</p>
</div>
<div class="flex items-center justify-between text-sm">
<span class="text-gray-400">
{% if artifact.media_type == 'video' %}Video
{% elif artifact.media_type == 'image' %}Image
{% elif artifact.media_type == 'audio' %}Audio
{% else %}File{% endif %}
</span>
<span class="text-gray-500">{{ (artifact.size_bytes / 1024)|round(1) }} KB</span>
</div>
<div class="mt-3 flex gap-2">
<a href="/cache/{{ artifact.hash }}" class="flex-1 px-3 py-1 bg-gray-700 hover:bg-gray-600 text-center text-sm rounded transition-colors">
View
</a>
<a href="/cache/{{ artifact.hash }}/raw" class="flex-1 px-3 py-1 bg-blue-600 hover:bg-blue-700 text-center text-sm rounded transition-colors">
Download
</a>
</div>
</div>
{% endfor %}
</div>
{% else %}
<div class="bg-gray-800 rounded-lg p-6 text-center">
<p class="text-gray-400">No artifacts found for this run.</p>
</div>
{% endif %}
{% endblock %}

View File

@@ -0,0 +1,99 @@
{% extends "base.html" %}
{% block title %}Run Plan - {{ run_id[:16] }}{% endblock %}
{% block head %}
<script src="https://unpkg.com/cytoscape@3.25.0/dist/cytoscape.min.js"></script>
{% endblock %}
{% block content %}
<div class="mb-6">
<a href="/runs/{{ run_id }}/detail" class="inline-flex items-center text-blue-400 hover:text-blue-300">
<svg class="w-4 h-4 mr-1" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M15 19l-7-7 7-7"/>
</svg>
Back to Run
</a>
</div>
<h1 class="text-2xl font-bold text-white mb-6">Execution Plan</h1>
{% if plan %}
<div class="grid grid-cols-1 lg:grid-cols-2 gap-6">
<!-- DAG Visualization -->
<div class="bg-gray-800 rounded-lg p-4">
<h2 class="text-lg font-semibold text-white mb-4">DAG Visualization</h2>
<div id="dag-container" class="w-full h-96 bg-gray-900 rounded"></div>
</div>
<!-- Steps List -->
<div class="bg-gray-800 rounded-lg p-4">
<h2 class="text-lg font-semibold text-white mb-4">Steps ({{ plan.steps|length if plan.steps else 0 }})</h2>
<div class="space-y-3 max-h-96 overflow-y-auto">
{% for step in plan.get('steps', []) %}
<div class="bg-gray-900 rounded-lg p-3">
<div class="flex items-center justify-between mb-2">
<span class="font-medium text-white">{{ step.name or step.id or 'Step ' ~ loop.index }}</span>
<span class="px-2 py-0.5 text-xs rounded {% if step.status == 'completed' %}bg-green-600{% elif step.cached %}bg-blue-600{% else %}bg-gray-600{% endif %}">
{{ step.status or ('cached' if step.cached else 'pending') }}
</span>
</div>
{% if step.cache_id %}
<div class="text-xs text-gray-400 font-mono truncate">
{{ step.cache_id[:24] }}...
</div>
{% endif %}
</div>
{% else %}
<p class="text-gray-500">No steps defined</p>
{% endfor %}
</div>
</div>
</div>
<script>
document.addEventListener('DOMContentLoaded', function() {
const elements = {{ dag_elements | tojson | safe }};
if (elements.length > 0) {
cytoscape({
container: document.getElementById('dag-container'),
elements: elements,
style: [
{
selector: 'node',
style: {
'background-color': 'data(color)',
'label': 'data(label)',
'color': '#fff',
'text-valign': 'bottom',
'text-margin-y': 5,
'font-size': '10px'
}
},
{
selector: 'edge',
style: {
'width': 2,
'line-color': '#6b7280',
'target-arrow-color': '#6b7280',
'target-arrow-shape': 'triangle',
'curve-style': 'bezier'
}
}
],
layout: {
name: 'breadthfirst',
directed: true,
padding: 20
}
});
}
});
</script>
{% else %}
<div class="bg-gray-800 rounded-lg p-6 text-center">
<p class="text-gray-400">No execution plan available for this run.</p>
</div>
{% endif %}
{% endblock %}

View File

@@ -1,8 +1,8 @@
""" """
Art DAG Celery Application Art DAG Celery Application
Distributed rendering for the Art DAG system. Streaming video rendering for the Art DAG system.
Uses the foundational artdag language from GitHub. Uses S-expression recipes with frame-by-frame processing.
""" """
import os import os
@@ -14,7 +14,7 @@ app = Celery(
'art_celery', 'art_celery',
broker=REDIS_URL, broker=REDIS_URL,
backend=REDIS_URL, backend=REDIS_URL,
include=['legacy_tasks', 'tasks', 'tasks.analyze', 'tasks.execute', 'tasks.orchestrate', 'tasks.execute_sexp'] include=['tasks', 'tasks.streaming']
) )
app.conf.update( app.conf.update(

17
configs/audio-dizzy.sexp Normal file
View File

@@ -0,0 +1,17 @@
;; Audio Configuration - dizzy.mp3
;;
;; Defines audio analyzer and playback for a recipe.
;; Pass to recipe with: --audio configs/audio-dizzy.sexp
;;
;; Provides:
;; - music: audio analyzer for beat/energy detection
;; - audio-playback: path for synchronized playback
(require-primitives "streaming")
;; Audio analyzer (provides beat detection and energy levels)
;; Paths relative to working directory (project root)
(def music (streaming:make-audio-analyzer "dizzy.mp3"))
;; Audio playback path (for sync with video output)
(audio-playback "dizzy.mp3")

View File

@@ -0,0 +1,17 @@
;; Audio Configuration - woods_half/halleluwah.webm
;;
;; Defines audio analyzer and playback for a recipe.
;; Pass to recipe with: --audio configs/audio-dizzy.sexp
;;
;; Provides:
;; - music: audio analyzer for beat/energy detection
;; - audio-playback: path for synchronized playback
(require-primitives "streaming")
;; Audio analyzer (provides beat detection and energy levels)
;; Paths relative to working directory (project root)
(def music (streaming:make-audio-analyzer "woods_half/halleluwah.webm"))
;; Audio playback path (for sync with video output)
(audio-playback "woods_half/halleluwah.webm")

View File

@@ -0,0 +1,38 @@
;; Default Sources Configuration
;;
;; Defines video sources and per-pair effect configurations.
;; Pass to recipe with: --sources configs/sources-default.sexp
;;
;; Required by recipes using process-pair macro:
;; - sources: array of video sources
;; - pair-configs: array of effect configurations per source
(require-primitives "streaming")
;; Video sources array
;; Paths relative to working directory (project root)
(def sources [
(streaming:make-video-source "monday.webm" 30)
(streaming:make-video-source "escher.webm" 30)
(streaming:make-video-source "2.webm" 30)
(streaming:make-video-source "disruptors.webm" 30)
(streaming:make-video-source "4.mp4" 30)
(streaming:make-video-source "ecstacy.mp4" 30)
(streaming:make-video-source "dopple.webm" 30)
(streaming:make-video-source "5.mp4" 30)
])
;; Per-pair effect config: rotation direction, rotation ranges, zoom ranges
;; :dir = rotation direction (1 or -1)
;; :rot-a, :rot-b = max rotation angles for clip A and B
;; :zoom-a, :zoom-b = max zoom amounts for clip A and B
(def pair-configs [
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 0: monday
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 1: escher
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 2: vid2
{:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 3: disruptors (reversed)
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 4: vid4
{:dir 1 :rot-a 30 :rot-b -30 :zoom-a 1.3 :zoom-b 0.7} ;; 5: ecstacy (smaller)
{:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 6: dopple (reversed)
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 7: vid5
])

View File

@@ -0,0 +1,19 @@
;; Half-resolution Woods Sources (960x540)
;;
;; Pass to recipe with: --sources configs/sources-woods-half.sexp
(require-primitives "streaming")
(def sources [
(streaming:make-video-source "woods_half/1.webm" 30)
(streaming:make-video-source "woods_half/2.webm" 30)
(streaming:make-video-source "woods_half/3.webm" 30)
(streaming:make-video-source "woods_half/4.webm" 30)
])
(def pair-configs [
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5}
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5}
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5}
{:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5}
])

View File

@@ -0,0 +1,39 @@
;; Woods Sources Configuration
;;
;; Defines video sources and per-pair effect configurations.
;; Pass to recipe with: --sources <this config file>
;;
;; Required by recipes using process-pair macro:
;; - sources: array of video sources
;; - pair-configs: array of effect configurations per source
(require-primitives "streaming")
;; Video sources array
;; Paths relative to working directory (project root)
(def sources [
(streaming:make-video-source "woods/1.webm" 10)
(streaming:make-video-source "woods/2.webm" 10)
(streaming:make-video-source "woods/3.webm" 10)
(streaming:make-video-source "woods/4.webm" 10)
(streaming:make-video-source "woods/5.webm" 10)
(streaming:make-video-source "woods/6.webm" 10)
(streaming:make-video-source "woods/7.webm" 10)
(streaming:make-video-source "woods/8.webm" 10)
])
;; Per-pair effect config: rotation direction, rotation ranges, zoom ranges
;; :dir = rotation direction (1 or -1)
;; :rot-a, :rot-b = max rotation angles for clip A and B
;; :zoom-a, :zoom-b = max zoom amounts for clip A and B
(def pair-configs [
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 0: woods/1
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 1: woods/2
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 2: woods/3
{:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 3: woods/4 (reversed)
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 4: woods/5
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 5: woods/6
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 6: woods/7
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 7: woods/8
])

View File

@@ -0,0 +1,150 @@
;; Quick Test - Fully Explicit Streaming Version
;;
;; The interpreter is completely generic - knows nothing about video/audio.
;; All domain logic is explicit via primitives.
;;
;; Run with built-in sources/audio:
;; python3 -m streaming.stream_sexp_generic effects/quick_test_explicit.sexp --fps 30
;;
;; Run with external config files:
;; python3 -m streaming.stream_sexp_generic effects/quick_test_explicit.sexp \
;; --sources configs/sources-default.sexp \
;; --audio configs/audio-dizzy.sexp \
;; --fps 30
(stream "quick_test_explicit"
:fps 30
:width 1920
:height 1080
:seed 42
;; Load standard primitives and effects
(include :path "../templates/standard-primitives.sexp")
(include :path "../templates/standard-effects.sexp")
;; Load reusable templates
(include :path "../templates/stream-process-pair.sexp")
(include :path "../templates/crossfade-zoom.sexp")
;; === SOURCES AS ARRAY ===
(def sources [
(streaming:make-video-source "monday.webm" 30)
(streaming:make-video-source "escher.webm" 30)
(streaming:make-video-source "2.webm" 30)
(streaming:make-video-source "disruptors.webm" 30)
(streaming:make-video-source "4.mp4" 30)
(streaming:make-video-source "ecstacy.mp4" 30)
(streaming:make-video-source "dopple.webm" 30)
(streaming:make-video-source "5.mp4" 30)
])
;; Per-pair config: [rot-dir, rot-a-max, rot-b-max, zoom-a-max, zoom-b-max]
;; Pairs 3,6: reversed (negative rot-a, positive rot-b, shrink zoom-a, grow zoom-b)
;; Pair 5: smaller ranges
(def pair-configs [
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 0: monday
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 1: escher
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 2: vid2
{:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 3: disruptors (reversed)
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 4: vid4
{:dir 1 :rot-a 30 :rot-b -30 :zoom-a 1.3 :zoom-b 0.7} ;; 5: ecstacy (smaller)
{:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 6: dopple (reversed)
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 7: vid5
])
;; Audio analyzer
(def music (streaming:make-audio-analyzer "dizzy.mp3"))
;; Audio playback
(audio-playback "../dizzy.mp3")
;; === GLOBAL SCANS ===
;; Cycle state: which source is active (recipe-specific)
;; clen = beats per source (8-24 beats = ~4-12 seconds)
(scan cycle (streaming:audio-beat music t)
:init {:active 0 :beat 0 :clen 16}
:step (if (< (+ beat 1) clen)
(dict :active active :beat (+ beat 1) :clen clen)
(dict :active (mod (+ active 1) (len sources)) :beat 0
:clen (+ 8 (mod (* (streaming:audio-beat-count music t) 7) 17)))))
;; Reusable scans from templates (require 'music' to be defined)
(include :path "../templates/scan-oscillating-spin.sexp")
(include :path "../templates/scan-ripple-drops.sexp")
;; === PER-PAIR STATE (dynamically sized based on sources) ===
;; Each pair has: inv-a, inv-b, hue-a, hue-b, mix, rot-angle
(scan pairs (streaming:audio-beat music t)
:init {:states (map (core:range (len sources)) (lambda (_)
{:inv-a 0 :inv-b 0 :hue-a 0 :hue-b 0 :hue-a-val 0 :hue-b-val 0 :mix 0.5 :mix-rem 5 :angle 0 :rot-beat 0 :rot-clen 25}))}
:step (dict :states (map states (lambda (p)
(let [;; Invert toggles (10% chance, lasts 1-4 beats)
new-inv-a (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- (get p :inv-a) 1)))
new-inv-b (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- (get p :inv-b) 1)))
;; Hue shifts (10% chance, lasts 1-4 beats) - use countdown like invert
old-hue-a (get p :hue-a)
old-hue-b (get p :hue-b)
new-hue-a (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- old-hue-a 1)))
new-hue-b (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- old-hue-b 1)))
;; Pick random hue value when triggering (stored separately)
new-hue-a-val (if (> new-hue-a old-hue-a) (+ 30 (* (core:rand) 300)) (get p :hue-a-val))
new-hue-b-val (if (> new-hue-b old-hue-b) (+ 30 (* (core:rand) 300)) (get p :hue-b-val))
;; Mix (holds for 1-10 beats, then picks 0, 0.5, or 1)
mix-rem (get p :mix-rem)
old-mix (get p :mix)
new-mix-rem (if (> mix-rem 0) (- mix-rem 1) (+ 1 (core:rand-int 1 10)))
new-mix (if (> mix-rem 0) old-mix (* (core:rand-int 0 2) 0.5))
;; Rotation (accumulates, reverses direction when cycle completes)
rot-beat (get p :rot-beat)
rot-clen (get p :rot-clen)
old-angle (get p :angle)
;; Note: dir comes from pair-configs, but we store rotation state here
new-rot-beat (if (< (+ rot-beat 1) rot-clen) (+ rot-beat 1) 0)
new-rot-clen (if (< (+ rot-beat 1) rot-clen) rot-clen (+ 20 (core:rand-int 0 10)))
new-angle (+ old-angle (/ 360 rot-clen))]
(dict :inv-a new-inv-a :inv-b new-inv-b
:hue-a new-hue-a :hue-b new-hue-b
:hue-a-val new-hue-a-val :hue-b-val new-hue-b-val
:mix new-mix :mix-rem new-mix-rem
:angle new-angle :rot-beat new-rot-beat :rot-clen new-rot-clen))))))
;; === FRAME PIPELINE ===
(frame
(let [now t
e (streaming:audio-energy music now)
;; Get cycle state
active (bind cycle :active)
beat-pos (bind cycle :beat)
clen (bind cycle :clen)
;; Transition logic: last third of cycle crossfades to next
phase3 (* beat-pos 3)
fading (and (>= phase3 (* clen 2)) (< phase3 (* clen 3)))
fade-amt (if fading (/ (- phase3 (* clen 2)) clen) 0)
next-idx (mod (+ active 1) (len sources))
;; Get pair states array (required by process-pair macro)
pair-states (bind pairs :states)
;; Process active pair using macro from template
active-frame (process-pair active)
;; Crossfade with zoom during transition (using macro)
result (if fading
(crossfade-zoom active-frame (process-pair next-idx) fade-amt)
active-frame)
;; Final: global spin + ripple
spun (rotate result :angle (bind spin :angle))
rip-gate (bind ripple-state :gate)
rip-amp (* rip-gate (core:map-range e 0 1 5 50))]
(ripple spun
:amplitude rip-amp
:center_x (bind ripple-state :cx)
:center_y (bind ripple-state :cy)
:frequency 8
:decay 2
:speed 5))))

View File

@@ -1,294 +0,0 @@
"""
Hybrid State Manager: Local Redis + IPNS Sync.
Provides fast local operations with eventual consistency across L1 nodes.
- Local Redis: Fast reads/writes (microseconds)
- IPNS Sync: Background sync with other nodes (every N seconds)
- Duplicate work: Accepted, idempotent (same inputs → same CID)
Usage:
from hybrid_state import get_state_manager
state = get_state_manager()
# Fast local lookup
cid = state.get_cached_cid(cache_id)
# Fast local write (synced in background)
state.set_cached_cid(cache_id, output_cid)
"""
import json
import logging
import os
import threading
import time
from typing import Dict, Optional
import redis
logger = logging.getLogger(__name__)

# Configuration (all overridable via environment variables)
REDIS_URL = os.environ.get("REDIS_URL", "redis://localhost:6379/5")
CLUSTER_KEY = os.environ.get("ARTDAG_CLUSTER_KEY", "default")
IPNS_SYNC_INTERVAL = int(os.environ.get("ARTDAG_IPNS_SYNC_INTERVAL", "30"))
IPNS_ENABLED = os.environ.get("ARTDAG_IPNS_SYNC", "").lower() in ("true", "1", "yes")

# Redis keys
CACHE_KEY = "artdag:cid_cache"          # hash: cache_id -> output CID
ANALYSIS_KEY = "artdag:analysis_cache"  # hash: input_hash:features -> analysis CID
PLAN_KEY = "artdag:plan_cache"          # hash: plan_id -> plan CID
RUN_KEY = "artdag:run_cache"            # hash: run_id -> output CID
CLAIM_KEY_PREFIX = "artdag:claim:"      # string: cache_id -> worker (with TTL)

# IPNS names (relative to cluster key)
IPNS_CACHE_NAME = "cache"
IPNS_ANALYSIS_NAME = "analysis"
IPNS_PLAN_NAME = "plans"


class HybridStateManager:
    """
    Local Redis + async IPNS sync for distributed L1 coordination.

    Fast path (local Redis):
        - get_cached_cid / set_cached_cid
        - try_claim / release_claim

    Slow path (background IPNS sync):
        - Periodically syncs local state with global IPNS state
        - Merges remote state into local (pulls new entries)
        - Publishes local state to IPNS (pushes updates)
    """

    def __init__(
        self,
        redis_url: str = REDIS_URL,
        cluster_key: str = CLUSTER_KEY,
        sync_interval: int = IPNS_SYNC_INTERVAL,
        ipns_enabled: bool = IPNS_ENABLED,
    ):
        self.cluster_key = cluster_key
        self.sync_interval = sync_interval
        self.ipns_enabled = ipns_enabled
        # Connect to Redis (decode_responses so all values are str, not bytes).
        self._redis = redis.from_url(redis_url, decode_responses=True)
        # IPNS client module; imported lazily on first use (see `ipfs` property).
        self._ipfs = None
        # Background sync thread state.
        self._sync_thread = None
        self._stop_sync = threading.Event()
        # Start background sync if enabled.
        if self.ipns_enabled:
            self._start_background_sync()

    @property
    def ipfs(self):
        """Lazily imported IPFS client module, or None when unavailable.

        On the first access a failed import is remembered as ``False`` so the
        import is not retried on every call.
        """
        if self._ipfs is None:
            try:
                import ipfs_client
                self._ipfs = ipfs_client
            except ImportError:
                logger.warning("ipfs_client not available, IPNS sync disabled")
                self._ipfs = False  # sentinel: tried and failed
        return self._ipfs or None

    # ========== CID Cache ==========

    def get_cached_cid(self, cache_id: str) -> Optional[str]:
        """Get output CID for a cache_id. Fast local lookup."""
        return self._redis.hget(CACHE_KEY, cache_id)

    def set_cached_cid(self, cache_id: str, cid: str) -> None:
        """Set output CID for a cache_id. Fast local write."""
        self._redis.hset(CACHE_KEY, cache_id, cid)

    def get_all_cached_cids(self) -> Dict[str, str]:
        """Get all cached CIDs."""
        return self._redis.hgetall(CACHE_KEY)

    # ========== Analysis Cache ==========

    @staticmethod
    def _analysis_key(input_hash: str, features: list) -> str:
        """Build the analysis hash key; features are sorted so order is irrelevant."""
        return f"{input_hash}:{','.join(sorted(features))}"

    def get_analysis_cid(self, input_hash: str, features: list) -> Optional[str]:
        """Get analysis CID for input + features."""
        return self._redis.hget(ANALYSIS_KEY, self._analysis_key(input_hash, features))

    def set_analysis_cid(self, input_hash: str, features: list, cid: str) -> None:
        """Set analysis CID for input + features."""
        self._redis.hset(ANALYSIS_KEY, self._analysis_key(input_hash, features), cid)

    def get_all_analysis_cids(self) -> Dict[str, str]:
        """Get all analysis CIDs."""
        return self._redis.hgetall(ANALYSIS_KEY)

    # ========== Plan Cache ==========

    def get_plan_cid(self, plan_id: str) -> Optional[str]:
        """Get plan CID for a plan_id."""
        return self._redis.hget(PLAN_KEY, plan_id)

    def set_plan_cid(self, plan_id: str, cid: str) -> None:
        """Set plan CID for a plan_id."""
        self._redis.hset(PLAN_KEY, plan_id, cid)

    def get_all_plan_cids(self) -> Dict[str, str]:
        """Get all plan CIDs."""
        return self._redis.hgetall(PLAN_KEY)

    # ========== Run Cache ==========

    def get_run_cid(self, run_id: str) -> Optional[str]:
        """Get output CID for a run_id."""
        return self._redis.hget(RUN_KEY, run_id)

    def set_run_cid(self, run_id: str, cid: str) -> None:
        """Set output CID for a run_id."""
        self._redis.hset(RUN_KEY, run_id, cid)

    # ========== Claiming ==========

    def try_claim(self, cache_id: str, worker_id: str, ttl: int = 300) -> bool:
        """
        Try to claim a cache_id for execution.

        Returns True if claimed, False if already claimed by another worker.
        Uses Redis SET NX for an atomic claim.  ``set(nx=True)`` returns
        ``None`` (not ``False``) when the key exists, so normalise with bool().
        """
        key = f"{CLAIM_KEY_PREFIX}{cache_id}"
        return bool(self._redis.set(key, worker_id, nx=True, ex=ttl))

    def release_claim(self, cache_id: str) -> None:
        """Release a claim."""
        self._redis.delete(f"{CLAIM_KEY_PREFIX}{cache_id}")

    def get_claim(self, cache_id: str) -> Optional[str]:
        """Get current claim holder for a cache_id."""
        return self._redis.get(f"{CLAIM_KEY_PREFIX}{cache_id}")

    # ========== IPNS Sync ==========

    def _start_background_sync(self):
        """Start the daemon thread that periodically syncs with IPNS."""
        if self._sync_thread is not None:
            return  # already running

        def sync_loop():
            logger.info(f"IPNS sync started (interval={self.sync_interval}s)")
            # Event.wait doubles as both the sleep and the stop signal.
            while not self._stop_sync.wait(timeout=self.sync_interval):
                try:
                    self._sync_with_ipns()
                except Exception as e:
                    logger.warning(f"IPNS sync failed: {e}")

        self._sync_thread = threading.Thread(target=sync_loop, daemon=True)
        self._sync_thread.start()

    def stop_sync(self):
        """Stop background sync thread."""
        self._stop_sync.set()
        if self._sync_thread:
            self._sync_thread.join(timeout=5)

    def _sync_with_ipns(self):
        """Sync local state with IPNS global state (all cache types)."""
        if not self.ipfs:
            return
        logger.debug("Starting IPNS sync...")
        self._sync_hash(CACHE_KEY, IPNS_CACHE_NAME)
        self._sync_hash(ANALYSIS_KEY, IPNS_ANALYSIS_NAME)
        self._sync_hash(PLAN_KEY, IPNS_PLAN_NAME)
        logger.debug("IPNS sync complete")

    def _sync_hash(self, redis_key: str, ipns_name: str):
        """Two-way merge of one Redis hash with its IPNS counterpart."""
        ipns_full_name = f"{self.cluster_key}/{ipns_name}"

        # Pull: resolve IPNS -> get global state snapshot.
        global_state = {}
        try:
            global_cid = self.ipfs.name_resolve(ipns_full_name)
            if global_cid:
                global_bytes = self.ipfs.get_bytes(global_cid)
                if global_bytes:
                    global_state = json.loads(global_bytes.decode('utf-8'))
                    logger.debug(f"Pulled {len(global_state)} entries from {ipns_name}")
        except Exception as e:
            logger.debug(f"Could not resolve {ipns_full_name}: {e}")

        # Merge global into local; hsetnx adds only entries we don't have.
        if global_state:
            pipe = self._redis.pipeline()
            for key, value in global_state.items():
                pipe.hsetnx(redis_key, key, value)
            added = sum(1 for r in pipe.execute() if r)
            if added:
                logger.info(f"Merged {added} new entries from IPNS/{ipns_name}")

        # Push: publish merged state only when we contribute new entries
        # (name_publish can be slow, so avoid redundant publishes).
        local_state = self._redis.hgetall(redis_key)
        if local_state:
            merged = {**global_state, **local_state}
            if len(merged) > len(global_state):
                try:
                    new_cid = self.ipfs.add_json(merged)
                    if new_cid:
                        self.ipfs.name_publish(ipns_full_name, new_cid)
                        logger.info(f"Published {len(merged)} entries to IPNS/{ipns_name}")
                except Exception as e:
                    logger.warning(f"Failed to publish to {ipns_full_name}: {e}")

    def force_sync(self):
        """Force an immediate IPNS sync (blocking)."""
        self._sync_with_ipns()

    # ========== Stats ==========

    def get_stats(self) -> Dict:
        """Return cache sizes plus sync configuration (cluster key truncated)."""
        key = self.cluster_key
        return {
            "cached_cids": self._redis.hlen(CACHE_KEY),
            "analysis_cids": self._redis.hlen(ANALYSIS_KEY),
            "plan_cids": self._redis.hlen(PLAN_KEY),
            "run_cids": self._redis.hlen(RUN_KEY),
            "ipns_enabled": self.ipns_enabled,
            "cluster_key": key[:16] + "..." if len(key) > 16 else key,
        }
# Process-wide singleton instance
_state_manager: Optional[HybridStateManager] = None


def get_state_manager() -> HybridStateManager:
    """Return the singleton state manager, creating it on first call."""
    global _state_manager
    if _state_manager is None:
        _state_manager = HybridStateManager()
    return _state_manager


def reset_state_manager():
    """Tear down the singleton (for testing)."""
    global _state_manager
    if _state_manager:
        _state_manager.stop_sync()
    _state_manager = None

File diff suppressed because it is too large Load Diff

View File

@@ -1,65 +0,0 @@
#!/usr/bin/env python3
"""
CLI to submit render tasks to Art DAG Celery.
Usage:
python render.py dog cat # Render cat through dog effect
python render.py identity cat # Render cat through identity effect
python render.py <effect> <input> # General form
"""
import argparse
import json
import sys
from legacy_tasks import render_effect
# Known asset hashes (friendly name -> content hash)
ASSETS = {
    "cat": "33268b6e167deaf018cc538de12dbe562612b33e89a749391cef855b320a269b",
}


def main():
    """Parse CLI arguments and submit a render task to the Celery queue."""
    parser = argparse.ArgumentParser(description="Submit render task to Art DAG Celery")
    parser.add_argument("effect", help="Effect to apply (e.g., dog, identity)")
    parser.add_argument("input", help="Input asset name or hash")
    parser.add_argument("--output", "-o", help="Output name (default: <effect>-from-<input>)")
    parser.add_argument("--sync", "-s", action="store_true", help="Wait for result")
    args = parser.parse_args()

    # Friendly names resolve via ASSETS; anything else must already be a
    # 64-character content hash.
    input_hash = ASSETS.get(args.input, args.input)
    if len(input_hash) != 64:
        print(f"Error: Unknown asset '{args.input}' and not a valid hash")
        sys.exit(1)

    output_name = args.output or f"{args.effect}-from-{args.input}-celery"

    print(f"Submitting render task:")
    print(f" Effect: {args.effect}")
    print(f" Input: {args.input} ({input_hash[:16]}...)")
    print(f" Output: {output_name}")

    task = render_effect.delay(input_hash, args.effect, output_name)
    print(f" Task ID: {task.id}")

    if not args.sync:
        # Fire-and-forget: tell the user how to check on the task later.
        print("\nTask submitted. Check status with:")
        print(f" celery -A celery_app inspect query_task {task.id}")
        return

    print("\nWaiting for result...")
    try:
        result = task.get(timeout=300)
        print("\nRender complete!")
        print(json.dumps(result, indent=2))
    except Exception as e:
        print(f"\nRender failed: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()

32
sexp_effects/__init__.py Normal file
View File

@@ -0,0 +1,32 @@
"""
S-Expression Effects System
Safe, shareable effects defined in S-expressions.
"""
from .parser import parse, parse_file, Symbol, Keyword
from .interpreter import (
Interpreter,
get_interpreter,
load_effect,
load_effects_dir,
run_effect,
list_effects,
make_process_frame,
)
from .primitives import PRIMITIVES
__all__ = [
'parse',
'parse_file',
'Symbol',
'Keyword',
'Interpreter',
'get_interpreter',
'load_effect',
'load_effects_dir',
'run_effect',
'list_effects',
'make_process_frame',
'PRIMITIVES',
]

View File

@@ -0,0 +1,17 @@
;; ASCII Art effect - converts image to ASCII characters
(require-primitives "ascii")
(define-effect ascii_art
:params (
(char_size :type int :default 8 :range [4 32])
(alphabet :type string :default "standard")
(color_mode :type string :default "color" :desc "Color mode: color, mono, invert, or any color name/hex")
(background_color :type string :default "black" :desc "background color name/hex")
(invert_colors :type int :default 0 :desc "swap foreground and background colors")
(contrast :type float :default 1.5 :range [1 3])
)
(let* ((sample (cell-sample frame char_size))
(colors (nth sample 0))
(luminances (nth sample 1))
(chars (luminance-to-chars luminances alphabet contrast)))
(render-char-grid frame chars colors char_size color_mode background_color invert_colors)))

View File

@@ -0,0 +1,52 @@
;; ASCII Art FX - converts image to ASCII characters with per-character effects
(require-primitives "ascii")
(define-effect ascii_art_fx
:params (
;; Basic parameters
(char_size :type int :default 8 :range [4 32]
:desc "Size of each character cell in pixels")
(alphabet :type string :default "standard"
:desc "Character set to use")
(color_mode :type string :default "color"
:choices [color mono invert]
:desc "Color mode: color, mono, invert, or any color name/hex")
(background_color :type string :default "black"
:desc "Background color name or hex value")
(invert_colors :type int :default 0 :range [0 1]
:desc "Swap foreground and background colors (0/1)")
(contrast :type float :default 1.5 :range [1 3]
:desc "Character selection contrast")
;; Per-character effects
(char_jitter :type float :default 0 :range [0 20]
:desc "Position jitter amount in pixels")
(char_scale :type float :default 1.0 :range [0.5 2.0]
:desc "Character scale factor")
(char_rotation :type float :default 0 :range [0 180]
:desc "Rotation amount in degrees")
(char_hue_shift :type float :default 0 :range [0 360]
:desc "Hue shift in degrees")
;; Modulation sources
(jitter_source :type string :default "none"
:choices [none luminance inv_luminance saturation position_x position_y position_diag random center_dist]
:desc "What drives jitter modulation")
(scale_source :type string :default "none"
:choices [none luminance inv_luminance saturation position_x position_y position_diag random center_dist]
:desc "What drives scale modulation")
(rotation_source :type string :default "none"
:choices [none luminance inv_luminance saturation position_x position_y position_diag random center_dist]
:desc "What drives rotation modulation")
(hue_source :type string :default "none"
:choices [none luminance inv_luminance saturation position_x position_y position_diag random center_dist]
:desc "What drives hue shift modulation")
)
(let* ((sample (cell-sample frame char_size))
(colors (nth sample 0))
(luminances (nth sample 1))
(chars (luminance-to-chars luminances alphabet contrast)))
(render-char-grid-fx frame chars colors luminances char_size
color_mode background_color invert_colors
char_jitter char_scale char_rotation char_hue_shift
jitter_source scale_source rotation_source hue_source)))

View File

@@ -0,0 +1,102 @@
;; Composable ASCII Art with Per-Zone Expression-Driven Effects
;; Requires ascii primitive library for the ascii-fx-zone primitive
(require-primitives "ascii")
;; Two modes of operation:
;;
;; 1. EXPRESSION MODE: Use zone-* variables in expression parameters
;; Zone variables available:
;; zone-row, zone-col: Grid position (integers)
;; zone-row-norm, zone-col-norm: Normalized position (0-1)
;; zone-lum: Cell luminance (0-1)
;; zone-sat: Cell saturation (0-1)
;; zone-hue: Cell hue (0-360)
;; zone-r, zone-g, zone-b: RGB components (0-1)
;;
;; Example:
;; (ascii-fx-zone frame
;; :cols 80
;; :char_hue (* zone-lum 180)
;; :char_rotation (* zone-col-norm 30))
;;
;; 2. CELL EFFECT MODE: Pass a lambda to apply arbitrary effects per-cell
;; The lambda receives (cell-image zone-dict) and returns modified cell.
;; Zone dict contains: row, col, row-norm, col-norm, lum, sat, hue, r, g, b,
;; char, color, cell_size, plus any bound analysis values.
;;
;; Any loaded sexp effect can be called on cells - each cell is just a small frame:
;; (blur cell radius) - Gaussian blur
;; (rotate cell angle) - Rotate by angle degrees
;; (brightness cell factor) - Adjust brightness
;; (contrast cell factor) - Adjust contrast
;; (saturation cell factor) - Adjust saturation
;; (hue_shift cell degrees) - Shift hue
;; (rgb_split cell offset_x offset_y) - RGB channel split
;; (invert cell) - Invert colors
;; (pixelate cell block_size) - Pixelate
;; (wave cell amplitude freq) - Wave distortion
;; ... and any other loaded effect
;;
;; Example:
;; (ascii-fx-zone frame
;; :cols 60
;; :cell_effect (lambda [cell zone]
;; (blur (rotate cell (* (get zone "energy") 45))
;; (if (> (get zone "lum") 0.5) 3 0))))
(define-effect ascii_fx_zone
  :params (
    (cols :type int :default 80 :range [20 200]
      :desc "Number of character columns")
    ;; nil default means "not set": cell size is derived from cols instead.
    (char_size :type int :default nil :range [4 32]
      :desc "Character cell size in pixels (overrides cols if set)")
    (alphabet :type string :default "standard"
      :desc "Character set: standard, blocks, simple, digits, or custom string")
    (color_mode :type string :default "color"
      :desc "Color mode: color, mono, invert, or any color name/hex")
    (background :type string :default "black"
      :desc "Background color name or hex value")
    (contrast :type float :default 1.5 :range [0.5 3.0]
      :desc "Contrast for character selection")
    ;; The char_* params are expressions, not plain numbers: the
    ;; ascii-fx-zone special form re-evaluates them per cell with the
    ;; zone-* variables in scope (see the file header for the list).
    (char_hue :type any :default nil
      :desc "Hue shift expression (evaluated per-zone with zone-* vars)")
    (char_saturation :type any :default nil
      :desc "Saturation multiplier expression (1.0 = unchanged)")
    (char_brightness :type any :default nil
      :desc "Brightness multiplier expression (1.0 = unchanged)")
    (char_scale :type any :default nil
      :desc "Character scale expression (1.0 = normal size)")
    (char_rotation :type any :default nil
      :desc "Character rotation expression (degrees)")
    (char_jitter :type any :default nil
      :desc "Position jitter expression (pixels)")
    (cell_effect :type any :default nil
      :desc "Lambda (cell zone) -> cell for arbitrary per-cell effects")
    ;; Convenience params for staged recipes (avoids compile-time expression issues)
    (energy :type float :default nil
      :desc "Energy multiplier (0-1) from audio analysis bind")
    (rotation_scale :type float :default 0
      :desc "Max rotation at top-right when energy=1 (degrees)")
  )
  ;; The ascii-fx-zone special form handles expression params
  ;; If energy + rotation_scale provided, it builds: energy * scale * position_factor
  ;; where position_factor = 0 at bottom-left, 3 at top-right
  ;; If cell_effect provided, each character is rendered to a cell image,
  ;; passed to the lambda, and the result composited back
  (ascii-fx-zone frame
    :cols cols
    :char_size char_size
    :alphabet alphabet
    :color_mode color_mode
    :background background
    :contrast contrast
    :char_hue char_hue
    :char_saturation char_saturation
    :char_brightness char_brightness
    :char_scale char_scale
    :char_rotation char_rotation
    :char_jitter char_jitter
    :cell_effect cell_effect
    :energy energy
    :rotation_scale rotation_scale))

View File

@@ -0,0 +1,30 @@
;; ASCII Zones effect - different character sets for different brightness zones
;; Dark areas use simple chars, mid uses standard, bright uses blocks
(require-primitives "ascii")
(define-effect ascii_zones
  :params (
    (char_size :type int :default 8 :range [4 32])
    ;; dark_threshold and bright_threshold split the 0-255 luminance range
    ;; into three bands: dark / mid / bright.
    (dark_threshold :type int :default 80 :range [0 128])
    (bright_threshold :type int :default 180 :range [128 255])
    (color_mode :type string :default "color")
  )
  (let* ((sample (cell-sample frame char_size))
         (colors (nth sample 0))
         (luminances (nth sample 1))
         ;; Start with simple chars as base
         (base-chars (luminance-to-chars luminances "simple" 1.2))
         ;; Map each cell to appropriate alphabet based on brightness zone
         (zoned-chars (map-char-grid base-chars luminances
                        (lambda (r c ch lum)
                          (cond
                            ;; Bright zones: use block characters
                            ;; (index scales with how far above the threshold lum is)
                            ((> lum bright_threshold)
                             (alphabet-char "blocks" (floor (/ (- lum bright_threshold) 15))))
                            ;; Dark zones: use simple sparse chars
                            ((< lum dark_threshold)
                             (alphabet-char " .-" (floor (/ lum 30))))
                            ;; Mid zones: use standard ASCII
                            (else
                             (alphabet-char "standard" (floor (/ lum 4)))))))))
    ;; (list 0 0 0) = black background
    (render-char-grid frame zoned-chars colors char_size color_mode (list 0 0 0))))

View File

@@ -0,0 +1,31 @@
;; Blend effect - combines two video frames
;; Streaming-compatible: frame is background, overlay is second frame
;; Usage: (blend background overlay :opacity 0.5 :mode "alpha")
;;
;; Params:
;; mode - blend mode (add, multiply, screen, overlay, difference, lighten, darken, alpha)
;; opacity - blend amount (0-1)
(require-primitives "image" "blending" "core")
(define-effect blend
  :params (
    (overlay :type frame :default nil)
    (mode :type string :default "alpha")
    (opacity :type float :default 0.5)
  )
  ;; No overlay wired in: pass the background frame through untouched.
  (if (core:is-nil overlay)
      frame
      (let [a frame
            b overlay
            a-h (image:height a)
            a-w (image:width a)
            b-h (image:height b)
            b-w (image:width b)
            ;; Resize b to match a if needed
            b-sized (if (and (= a-w b-w) (= a-h b-h))
                        b
                        (image:resize b a-w a-h "linear"))]
        ;; Non-alpha modes: compute the blend-mode result first, then mix it
        ;; back with the original by opacity so opacity still controls strength.
        (if (= mode "alpha")
            (blending:blend-images a b-sized opacity)
            (blending:blend-images a (blending:blend-mode a b-sized mode) opacity)))))

View File

@@ -0,0 +1,58 @@
;; N-way weighted blend effect
;; Streaming-compatible: pass inputs as a list of frames
;; Usage: (blend_multi :inputs [(read a) (read b) (read c)] :weights [0.3 0.4 0.3])
;;
;; Parameters:
;; inputs - list of N frames to blend
;; weights - list of N floats, one per input (resolved per-frame)
;; mode - blend mode applied when folding each frame in:
;; "alpha" — pure weighted average (default)
;; "multiply" — darken by multiplication
;; "screen" — lighten (inverse multiply)
;; "overlay" — contrast-boosting midtone blend
;; "soft-light" — gentle dodge/burn
;; "hard-light" — strong dodge/burn
;; "color-dodge" — brightens towards white
;; "color-burn" — darkens towards black
;; "difference" — absolute pixel difference
;; "exclusion" — softer difference
;; "add" — additive (clamped)
;; "subtract" — subtractive (clamped)
;; "darken" — per-pixel minimum
;; "lighten" — per-pixel maximum
;; resize_mode - how to match frame dimensions (fit, crop, stretch)
;;
;; Uses a left-fold over inputs[1..N-1]. At each step the running
;; opacity is: w[i] / (w[0] + w[1] + ... + w[i])
;; which produces the correct normalised weighted result.
(require-primitives "image" "blending")
(define-effect blend_multi
  :params (
    (inputs :type list :default [])
    (weights :type list :default [])
    (mode :type string :default "alpha")
    (resize_mode :type string :default "fit")
  )
  ;; NOTE(review): empty inputs would make (nth inputs 0) fail, and
  ;; resize_mode is declared but the body always uses "linear" resize —
  ;; confirm intended behavior.
  (let [n (len inputs)
        ;; Target dimensions from first frame
        target-w (image:width (nth inputs 0))
        target-h (image:height (nth inputs 0))
        ;; Fold over indices 1..n-1
        ;; Accumulator is (list blended-frame running-weight-sum)
        seed (list (nth inputs 0) (nth weights 0))
        result (reduce (range 1 n) seed
                 (lambda (pair i)
                   (let [acc (nth pair 0)
                         running (nth pair 1)
                         w (nth weights i)
                         new-running (+ running w)
                         ;; max guards against a zero running weight sum
                         opacity (/ w (max new-running 0.001))
                         f (image:resize (nth inputs i) target-w target-h "linear")
                         ;; Apply blend mode then mix with opacity
                         blended (if (= mode "alpha")
                                     (blending:blend-images acc f opacity)
                                     (blending:blend-images acc (blending:blend-mode acc f mode) opacity))]
                     (list blended new-running))))]
    (nth result 0)))

View File

@@ -0,0 +1,16 @@
;; Bloom effect - glow on bright areas
(require-primitives "image" "blending")
(define-effect bloom
  :params (
    (intensity :type float :default 0.5 :range [0 2])
    (threshold :type int :default 200 :range [0 255])
    (radius :type int :default 15 :range [1 50])
  )
  ;; Bright-pass: keep only pixels above threshold, blur them, then add the
  ;; blurred highlights back onto the frame for the glow.
  ;; NOTE(review): intensity is declared but never used in the body —
  ;; presumably it should scale the glow; confirm intended behavior.
  (let* ((bright (map-pixels frame
                   (lambda (x y c)
                     (if (> (luminance c) threshold)
                         c
                         (rgb 0 0 0)))))
         (blurred (image:blur bright radius)))
    (blending:blend-mode frame blurred "add")))

View File

@@ -0,0 +1,8 @@
;; Blur effect - gaussian blur
(require-primitives "image")
(define-effect blur
  :params (
    (radius :type int :default 5 :range [1 50])
  )
  ;; Clamp to at least 1 so a zero/negative radius never reaches the primitive.
  (image:blur frame (max 1 radius)))

View File

@@ -0,0 +1,9 @@
;; Brightness effect - adjusts overall brightness
;; Uses vectorized adjust primitive for fast processing
(require-primitives "color_ops")
(define-effect brightness
  :params (
    ;; amount is an additive offset per channel (presumably — verify against
    ;; the color_ops:adjust-brightness primitive); 0 leaves the frame unchanged.
    (amount :type int :default 0 :range [-255 255])
  )
  (color_ops:adjust-brightness frame amount))

View File

@@ -0,0 +1,13 @@
;; Color adjustment effect - replaces TRANSFORM node
(require-primitives "color_ops")
(define-effect color-adjust
  :params (
    (brightness :type int :default 0 :range [-255 255] :desc "Brightness adjustment")
    (contrast :type float :default 1 :range [0 3] :desc "Contrast multiplier")
    (saturation :type float :default 1 :range [0 2] :desc "Saturation multiplier")
  )
  ;; Pipeline order is fixed: brightness, then contrast, then saturation.
  (-> frame
      (color_ops:adjust-brightness brightness)
      (color_ops:adjust-contrast contrast)
      (color_ops:adjust-saturation saturation)))

View File

@@ -0,0 +1,13 @@
;; Color Cycle effect - animated hue rotation
(require-primitives "color_ops")
(define-effect color_cycle
  :params (
    (speed :type int :default 1 :range [0 10])
  )
  ;; speed=1 rotates the hue wheel once per second. NOTE(review): this uses
  ;; `t` for stream time while other effects here use `_time` — confirm which
  ;; variable the interpreter binds.
  (let ((shift (* t speed 360)))
    (map-pixels frame
      (lambda (x y c)
        (let* ((hsv (rgb->hsv c))
               ;; mod keeps the shifted hue inside [0, 360)
               (new-h (mod (+ (first hsv) shift) 360)))
          (hsv->rgb (list new-h (nth hsv 1) (nth hsv 2))))))))

View File

@@ -0,0 +1,9 @@
;; Contrast effect - adjusts image contrast
;; Uses vectorized adjust primitive for fast processing
(require-primitives "color_ops")
(define-effect contrast
  :params (
    ;; Contrast is a multiplier, so it must be a float: the old :type int
    ;; contradicted the fractional [0.5 3] range and the float `contrast`
    ;; param declared by the color-adjust effect.
    (amount :type float :default 1.0 :range [0.5 3])
  )
  (color_ops:adjust-contrast frame amount))

View File

@@ -0,0 +1,30 @@
;; CRT effect - old monitor simulation
(require-primitives "image")
(define-effect crt
  :params (
    (line_spacing :type int :default 2 :range [1 10])
    (line_opacity :type float :default 0.3 :range [0 1])
    (vignette_amount :type float :default 0.2)
  )
  (let* ((w (image:width frame))
         (h (image:height frame))
         (cx (/ w 2))
         (cy (/ h 2))
         ;; max-dist = distance from center to a corner; used to normalise
         ;; the vignette falloff to [0, 1].
         (max-dist (sqrt (+ (* cx cx) (* cy cy)))))
    (map-pixels frame
      (lambda (x y c)
        (let* (;; Scanline darkening (every line_spacing-th row is dimmed)
               (scanline-factor (if (= 0 (mod y line_spacing))
                                    (- 1 line_opacity)
                                    1))
               ;; Vignette
               (dx (- x cx))
               (dy (- y cy))
               (dist (sqrt (+ (* dx dx) (* dy dy))))
               (vignette-factor (- 1 (* (/ dist max-dist) vignette_amount)))
               ;; Combined
               (factor (* scanline-factor vignette-factor)))
          (rgb (* (red c) factor)
               (* (green c) factor)
               (* (blue c) factor)))))))

View File

@@ -0,0 +1,14 @@
;; Datamosh effect - glitch block corruption
(define-effect datamosh
  :params (
    (block_size :type int :default 32 :range [8 128])
    (corruption :type float :default 0.3 :range [0 1])
    (max_offset :type int :default 50 :range [0 200])
    (color_corrupt :type bool :default true)
  )
  ;; Get previous frame from state, or use current frame if none
  (let ((prev (state-get "prev_frame" frame)))
    (begin
      ;; Store a copy so later mutation of the output can't corrupt the
      ;; state buffer.
      (state-set "prev_frame" (copy frame))
      ;; NOTE(review): the inner `datamosh` call presumably resolves to the
      ;; primitive of the same name rather than this effect — confirm the
      ;; interpreter's lookup order prevents infinite recursion.
      (datamosh frame prev block_size corruption max_offset color_corrupt))))

View File

@@ -0,0 +1,19 @@
;; Echo effect - motion trails using frame buffer
(require-primitives "blending")
(define-effect echo
  :params (
    (num_echoes :type int :default 4 :range [1 20])
    (decay :type float :default 0.5 :range [0 1])
  )
  ;; Keep a rolling buffer of the most recent num_echoes+1 frames in effect
  ;; state, newest first.
  (let* ((buffer (state-get 'buffer (list)))
         (new-buffer (take (cons frame buffer) (+ num_echoes 1))))
    (begin
      (state-set 'buffer new-buffer)
      ;; Blend frames with decay
      (if (< (length new-buffer) 2)
          frame
          ;; Simple blend of first two frames for now
          ;; Full version would fold over all frames
          ;; (removed an unused (result (copy frame)) binding here)
          (blending:blend-images frame (nth new-buffer 1) (* decay 0.5))))))

View File

@@ -0,0 +1,9 @@
;; Edge detection effect - highlights edges
(require-primitives "image")
(define-effect edge_detect
  :params (
    ;; low/high are dual hysteresis thresholds (Canny-style, presumably —
    ;; verify against the image:edge-detect primitive).
    (low :type int :default 50 :range [10 100])
    (high :type int :default 150 :range [50 300])
  )
  (image:edge-detect frame low high))

View File

@@ -0,0 +1,13 @@
;; Emboss effect - creates raised/3D appearance
(require-primitives "blending")
(define-effect emboss
  :params (
    ;; strength scales the convolution kernel and can be fractional, so it
    ;; must be a float (the old :type int contradicted the [0.5 3] range).
    (strength :type float :default 1.0 :range [0.5 3])
    (blend :type float :default 0.3 :range [0 1])
  )
  ;; Directional 3x3 kernel: negative weights above-left, positive
  ;; below-right, producing the raised relief look.
  (let* ((kernel (list (list (- strength) (- strength) 0)
                       (list (- strength) 1 strength)
                       (list 0 strength strength)))
         (embossed (convolve frame kernel)))
    ;; Mix some of the original colour back in to keep it recognisable.
    (blending:blend-images embossed frame blend)))

View File

@@ -0,0 +1,19 @@
;; Film Grain effect - adds film grain texture
(require-primitives "core")
(define-effect film_grain
  :params (
    (intensity :type float :default 0.2 :range [0 1])
    (colored :type bool :default false)
  )
  ;; Gaussian noise with sigma = intensity * 50; colored adds independent
  ;; noise per channel, otherwise one sample is shared (monochrome grain).
  (let ((grain-amount (* intensity 50)))
    (map-pixels frame
      (lambda (x y c)
        (if colored
            (rgb (clamp (+ (red c) (gaussian 0 grain-amount)) 0 255)
                 (clamp (+ (green c) (gaussian 0 grain-amount)) 0 255)
                 (clamp (+ (blue c) (gaussian 0 grain-amount)) 0 255))
            (let ((n (gaussian 0 grain-amount)))
              (rgb (clamp (+ (red c) n) 0 255)
                   (clamp (+ (green c) n) 0 255)
                   (clamp (+ (blue c) n) 0 255))))))))

View File

@@ -0,0 +1,16 @@
;; Fisheye effect - barrel/pincushion lens distortion
(require-primitives "geometry" "image")
(define-effect fisheye
  :params (
    ;; Positive strength presumably gives barrel distortion and negative
    ;; pincushion — confirm against geometry:fisheye-coords.
    (strength :type float :default 0.3 :range [-1 1])
    (center_x :type float :default 0.5 :range [0 1])
    (center_y :type float :default 0.5 :range [0 1])
    (zoom_correct :type bool :default true)
  )
  ;; Center is given in normalised [0,1] coords and scaled to pixels here.
  (let* ((w (image:width frame))
         (h (image:height frame))
         (cx (* w center_x))
         (cy (* h center_y))
         (coords (geometry:fisheye-coords w h strength cx cy zoom_correct)))
    (geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords))))

View File

@@ -0,0 +1,16 @@
;; Flip effect - flips image horizontally or vertically
(require-primitives "geometry")
(define-effect flip
  :params (
    (horizontal :type bool :default true)
    (vertical :type bool :default false)
  )
  ;; Applies each enabled flip in sequence; both true is equivalent to a
  ;; 180-degree rotation. The nil branches are no-ops.
  (let ((result frame))
    (if horizontal
        (set! result (geometry:flip-img result "horizontal"))
        nil)
    (if vertical
        (set! result (geometry:flip-img result "vertical"))
        nil)
    result))

View File

@@ -0,0 +1,7 @@
;; Grayscale effect - converts to grayscale
;; Uses vectorized mix-gray primitive for fast processing
(require-primitives "image")
(define-effect grayscale
  ;; No parameters: a pure delegation to the vectorized primitive.
  :params ()
  (image:grayscale frame))

View File

@@ -0,0 +1,12 @@
;; Hue shift effect - rotates hue values
;; Uses vectorized shift-hsv primitive for fast processing
(require-primitives "color_ops")
(define-effect hue_shift
  :params (
    (degrees :type int :default 0 :range [0 360])
    (speed :type int :default 0 :desc "rotation per second")
  )
  ;; Total shift = static offset + animated component (t is stream time).
  ;; The trailing 1 1 leave saturation and value untouched.
  (let ((shift (+ degrees (* speed t))))
    (color_ops:shift-hsv frame shift 1 1)))

View File

@@ -0,0 +1,9 @@
;; Invert effect - inverts all colors
;; Uses vectorized invert-img primitive for fast processing
;; amount param: 0 = no invert, 1 = full invert (threshold at 0.5)
(require-primitives "color_ops")
(define-effect invert
  :params ((amount :type float :default 1 :range [0 1]))
  ;; amount acts as an on/off switch thresholded at 0.5 — there is no
  ;; partial inversion blend.
  (if (> amount 0.5) (color_ops:invert-img frame) frame))

View File

@@ -0,0 +1,20 @@
;; Kaleidoscope effect - mandala-like symmetry patterns
(require-primitives "geometry" "image")
(define-effect kaleidoscope
  :params (
    (segments :type int :default 6 :range [3 16])
    (rotation :type int :default 0 :range [0 360])
    (rotation_speed :type int :default 0 :range [-180 180])
    (center_x :type float :default 0.5 :range [0 1])
    (center_y :type float :default 0.5 :range [0 1])
    ;; zoom is a fractional multiplier, so it must be a float (the old
    ;; :type int contradicted the [0.5 3] range).
    (zoom :type float :default 1.0 :range [0.5 3])
  )
  (let* ((w (image:width frame))
         (h (image:height frame))
         (cx (* w center_x))
         (cy (* h center_y))
         ;; Total rotation including time-based animation
         ;; (_time may be unbound on the first frame, hence the (or ... 0))
         (total_rot (+ rotation (* rotation_speed (or _time 0))))
         (coords (geometry:kaleidoscope-coords w h segments total_rot cx cy zoom)))
    (geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords))))

View File

@@ -0,0 +1,36 @@
;; Layer effect - composite overlay over background at position
;; Streaming-compatible: frame is background, overlay is foreground
;; Usage: (layer background overlay :x 10 :y 20 :opacity 0.8)
;;
;; Params:
;; overlay - frame to composite on top
;; x, y - position to place overlay
;; opacity - blend amount (0-1)
;; mode - blend mode (alpha, multiply, screen, etc.)
(require-primitives "image" "blending" "core")
(define-effect layer
  :params (
    (overlay :type frame :default nil)
    (x :type int :default 0)
    (y :type int :default 0)
    (opacity :type float :default 1.0)
    (mode :type string :default "alpha")
  )
  ;; No overlay wired in: pass the background through untouched.
  (if (core:is-nil overlay)
      frame
      (let [bg (copy frame)
            fg overlay
            fg-w (image:width fg)
            fg-h (image:height fg)]
        (if (= opacity 1.0)
            ;; Simple paste (fully opaque, no blending cost)
            (paste bg fg x y)
            ;; Blend with opacity: crop the destination region out of the
            ;; background, blend the overlay into it, then paste it back.
            ;; NOTE(review): assumes the overlay fits entirely inside the
            ;; background at (x, y) — confirm crop/paste behaviour at edges.
            (let [blended (if (= mode "alpha")
                              (blending:blend-images (image:crop bg x y fg-w fg-h) fg opacity)
                              (blending:blend-images (image:crop bg x y fg-w fg-h)
                                                     (blending:blend-mode (image:crop bg x y fg-w fg-h) fg mode)
                                                     opacity))]
              (paste bg blended x y))))))

View File

@@ -0,0 +1,33 @@
;; Mirror effect - mirrors half of image
(require-primitives "geometry" "image")
(define-effect mirror
  :params (
    ;; mode: left_right, right_left, top_bottom, or bottom_top; the first
    ;; word names the half that is kept and mirrored onto the other.
    (mode :type string :default "left_right")
  )
  (let* ((w (image:width frame))
         (h (image:height frame))
         (hw (floor (/ w 2)))
         (hh (floor (/ h 2))))
    (cond
      ((= mode "left_right")
       (let ((left (image:crop frame 0 0 hw h))
             (result (copy frame)))
         (paste result (geometry:flip-img left "horizontal") hw 0)))
      ((= mode "right_left")
       (let ((right (image:crop frame hw 0 hw h))
             (result (copy frame)))
         (paste result (geometry:flip-img right "horizontal") 0 0)))
      ((= mode "top_bottom")
       (let ((top (image:crop frame 0 0 w hh))
             (result (copy frame)))
         (paste result (geometry:flip-img top "vertical") 0 hh)))
      ((= mode "bottom_top")
       (let ((bottom (image:crop frame 0 hh w hh))
             (result (copy frame)))
         (paste result (geometry:flip-img bottom "vertical") 0 0)))
      ;; Unknown mode: pass the frame through unchanged.
      (else frame))))

View File

@@ -0,0 +1,23 @@
;; Neon Glow effect - glowing edge effect
(require-primitives "image" "blending")
(define-effect neon_glow
  :params (
    (edge_low :type int :default 50 :range [10 200])
    (edge_high :type int :default 150 :range [50 300])
    (glow_radius :type int :default 15 :range [1 50])
    ;; glow_intensity is a fractional multiplier, so it must be a float
    ;; (the old :type int contradicted the [0.5 5] range).
    (glow_intensity :type float :default 2.0 :range [0.5 5])
    (background :type float :default 0.3 :range [0 1])
  )
  ;; Edge-detect, blur the edges into a glow, brighten the glow, then
  ;; screen-blend it over a darkened copy of the original.
  (let* ((edge-img (image:edge-detect frame edge_low edge_high))
         (glow (image:blur edge-img glow_radius))
         ;; Intensify the glow (clamped per channel)
         (bright-glow (map-pixels glow
                        (lambda (x y c)
                          (rgb (clamp (* (red c) glow_intensity) 0 255)
                               (clamp (* (green c) glow_intensity) 0 255)
                               (clamp (* (blue c) glow_intensity) 0 255))))))
    ;; background controls how much of the darkened original shows through.
    (blending:blend-mode (blending:blend-images frame (make-image (image:width frame) (image:height frame) (list 0 0 0))
                                                (- 1 background))
                         bright-glow
                         "screen")))

View File

@@ -0,0 +1,8 @@
;; Noise effect - adds random noise
;; Uses vectorized add-noise primitive for fast processing
(define-effect noise
  :params (
    ;; amount is the noise strength passed straight to the vectorized
    ;; add-noise primitive; 0 leaves the frame unchanged.
    (amount :type int :default 20 :range [0 100])
  )
  (add-noise frame amount))

View File

@@ -0,0 +1,24 @@
;; Outline effect - shows only edges
(require-primitives "image")
(define-effect outline
  :params (
    (thickness :type int :default 2 :range [1 10])
    (threshold :type int :default 100 :range [20 300])
    ;; Fixed malformed param list: `color` previously closed :params early
    ;; and `fill_mode` sat outside it with no :type/:default markers.
    (color :type list :default (list 0 0 0))
    (fill_mode :type string :default "original")
  )
  ;; Edge-detect (low threshold = threshold/2), thicken with dilation, then
  ;; stamp `color` wherever the dilated edge mask is bright.
  (let* ((edge-img (image:edge-detect frame (/ threshold 2) threshold))
         (dilated (if (> thickness 1)
                      (dilate edge-img thickness)
                      edge-img))
         ;; fill_mode selects the canvas behind the outline.
         (base (cond
                 ((= fill_mode "original") (copy frame))
                 ((= fill_mode "white") (make-image (image:width frame) (image:height frame) (list 255 255 255)))
                 (else (make-image (image:width frame) (image:height frame) (list 0 0 0))))))
    (map-pixels base
      (lambda (x y c)
        (let ((edge-val (luminance (pixel dilated x y))))
          (if (> edge-val 128)
              color
              c))))))

View File

@@ -0,0 +1,13 @@
;; Pixelate effect - creates blocky pixels
(require-primitives "image")
(define-effect pixelate
  :params (
    (block_size :type int :default 8 :range [2 64])
  )
  ;; Downscale with area averaging, then upscale with nearest-neighbour so
  ;; each averaged pixel becomes a hard-edged block.
  (let* ((w (image:width frame))
         (h (image:height frame))
         (small-w (max 1 (floor (/ w block_size))))
         (small-h (max 1 (floor (/ h block_size))))
         (small (image:resize frame small-w small-h "area")))
    (image:resize small w h "nearest")))

View File

@@ -0,0 +1,11 @@
;; Pixelsort effect - glitch art pixel sorting
(define-effect pixelsort
  :params (
    (sort_by :type string :default "lightness")
    ;; Only pixels whose sort key falls between the thresholds are sorted.
    (threshold_low :type int :default 50 :range [0 255])
    (threshold_high :type int :default 200 :range [0 255])
    (angle :type int :default 0 :range [0 180])
    (reverse :type bool :default false)
  )
  ;; NOTE(review): the inner `pixelsort` call presumably resolves to the
  ;; primitive of the same name, not this effect — confirm lookup order.
  (pixelsort frame sort_by threshold_low threshold_high angle reverse))

View File

@@ -0,0 +1,8 @@
;; Posterize effect - reduces color levels
(require-primitives "color_ops")
(define-effect posterize
  :params (
    ;; levels = number of quantisation steps per channel.
    (levels :type int :default 8 :range [2 32])
  )
  (color_ops:posterize frame levels))

View File

@@ -0,0 +1,11 @@
;; Resize effect - replaces RESIZE node
;; Note: uses target-w/target-h to avoid conflict with width/height primitives
(require-primitives "image")
(define-effect resize-frame
  :params (
    ;; Named target-w/target-h (not width/height) to avoid shadowing the
    ;; width/height primitives — see file header note.
    (target-w :type int :default 640 :desc "Target width in pixels")
    (target-h :type int :default 480 :desc "Target height in pixels")
    (mode :type string :default "linear" :choices [linear nearest area] :desc "Interpolation mode")
  )
  (image:resize frame target-w target-h mode))

View File

@@ -0,0 +1,13 @@
;; RGB Split effect - chromatic aberration
(define-effect rgb_split
  :params (
    (offset_x :type int :default 10 :range [-50 50])
    (offset_y :type int :default 0 :range [-50 50])
  )
  ;; Red is shifted by (+offset_x, +offset_y), blue by the opposite vector,
  ;; green stays put — classic chromatic-aberration fringe.
  ;; merge-channels r r r builds a single-channel image translate can accept.
  (let* ((r (channel frame 0))
         (g (channel frame 1))
         (b (channel frame 2))
         (r-shifted (translate (merge-channels r r r) offset_x offset_y))
         (b-shifted (translate (merge-channels b b b) (- offset_x) (- offset_y))))
    (merge-channels (channel r-shifted 0) g (channel b-shifted 0))))

View File

@@ -0,0 +1,19 @@
;; Ripple effect - radial wave distortion from center
(require-primitives "geometry" "image" "math")
(define-effect ripple
  :params (
    (frequency :type int :default 5 :range [1 20])
    (amplitude :type int :default 10 :range [0 50])
    (center_x :type float :default 0.5 :range [0 1])
    (center_y :type float :default 0.5 :range [0 1])
    (decay :type int :default 1 :range [0 5])
    (speed :type int :default 1 :range [0 10])
  )
  (let* ((w (image:width frame))
         (h (image:height frame))
         (cx (* w center_x))
         (cy (* h center_y))
         ;; Animation phase in radians; (or t 0) guards an unbound time var.
         (phase (* (or t 0) speed 2 pi))
         (coords (geometry:ripple-displace w h frequency amplitude cx cy decay phase)))
    (geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords))))

View File

@@ -0,0 +1,11 @@
;; Rotate effect - rotates image
(require-primitives "geometry")
(define-effect rotate
  :params (
    (angle :type int :default 0 :range [-360 360])
    (speed :type int :default 0 :desc "rotation per second")
  )
  ;; Total angle = static angle + animated component (t is stream time).
  (let ((total-angle (+ angle (* speed t))))
    (geometry:rotate-img frame total-angle)))

View File

@@ -0,0 +1,9 @@
;; Saturation effect - adjusts color saturation
;; Uses vectorized shift-hsv primitive for fast processing
(require-primitives "color_ops")
(define-effect saturation
  :params (
    ;; Saturation is a multiplier (0.5 = half saturation), so it must be a
    ;; float — the color-adjust effect already declares the same parameter
    ;; as :type float; :type int limited users to 0/1/2/3.
    (amount :type float :default 1.0 :range [0 3])
  )
  (color_ops:adjust-saturation frame amount))

View File

@@ -0,0 +1,15 @@
;; Scanlines effect - VHS-style horizontal line shifting
(require-primitives "core")
(define-effect scanlines
  :params (
    (amplitude :type int :default 10 :range [0 100])
    (frequency :type int :default 10 :range [1 100])
    ;; randomness interpolates each row's shift between a smooth sine wave
    ;; (0) and pure random jitter (1).
    (randomness :type float :default 0.5 :range [0 1])
  )
  (map-rows frame
    (lambda (y row)
      ;; 6.28 ~= 2*pi; (max 1 frequency) guards division by zero.
      (let* ((sine-shift (* amplitude (sin (/ (* y 6.28) (max 1 frequency)))))
             (rand-shift (core:rand-range (- amplitude) amplitude))
             (shift (floor (lerp sine-shift rand-shift randomness))))
        (roll row shift 0)))))

View File

@@ -0,0 +1,7 @@
;; Sepia effect - applies sepia tone
;; Classic warm vintage look
(require-primitives "color_ops")
(define-effect sepia
  ;; No parameters: a pure delegation to the vectorized sepia primitive.
  :params ()
  (color_ops:sepia frame))

View File

@@ -0,0 +1,8 @@
;; Sharpen effect - sharpens edges
(require-primitives "image")
(define-effect sharpen
  :params (
    ;; amount is the sharpening strength; 0 leaves the frame unchanged.
    (amount :type int :default 1 :range [0 5])
  )
  (image:sharpen frame amount))

View File

@@ -0,0 +1,16 @@
;; Strobe effect - holds frames for choppy look
(require-primitives "core")
(define-effect strobe
  :params (
    ;; Effective output frame rate: each captured frame is held for
    ;; 1/frame_rate seconds regardless of the stream's real rate.
    (frame_rate :type int :default 12 :range [1 60])
  )
  (let* ((held (state-get 'held nil))
         (held-until (state-get 'held-until 0))
         (frame-duration (/ 1 frame_rate)))
    ;; Capture a new frame when nothing is held yet or the hold expired;
    ;; otherwise keep returning the held copy (t is stream time).
    (if (or (core:is-nil held) (>= t held-until))
        (begin
          (state-set 'held (copy frame))
          (state-set 'held-until (+ t frame-duration))
          frame)
        held)))

View File

@@ -0,0 +1,17 @@
;; Swirl effect - spiral vortex distortion
(require-primitives "geometry" "image")
(define-effect swirl
  :params (
    ;; Sign of strength sets the swirl direction; 0 is a no-op.
    (strength :type int :default 1 :range [-10 10])
    ;; radius is a fraction of the frame size (presumably — verify against
    ;; geometry:swirl-coords).
    (radius :type float :default 0.5 :range [0.1 2])
    (center_x :type float :default 0.5 :range [0 1])
    (center_y :type float :default 0.5 :range [0 1])
    (falloff :type string :default "quadratic")
  )
  (let* ((w (image:width frame))
         (h (image:height frame))
         (cx (* w center_x))
         (cy (* h center_y))
         (coords (geometry:swirl-coords w h strength radius cx cy falloff)))
    (geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords))))

View File

@@ -0,0 +1,9 @@
;; Threshold effect - converts to black and white
(require-primitives "color_ops")
(define-effect threshold
  :params (
    ;; Pixels at/above level go white, below go black (or swapped when
    ;; invert is true) — per the color_ops:threshold primitive.
    (level :type int :default 128 :range [0 255])
    (invert :type bool :default false)
  )
  (color_ops:threshold frame level invert))

View File

@@ -0,0 +1,29 @@
;; Tile Grid effect - tiles image in grid
(require-primitives "geometry" "image")
(define-effect tile_grid
  :params (
    (rows :type int :default 2 :range [1 10])
    (cols :type int :default 2 :range [1 10])
    (gap :type int :default 0 :range [0 50])
  )
  ;; Tile size accounts for the gaps between tiles so the grid fits exactly.
  (let* ((w (image:width frame))
         (h (image:height frame))
         (tile-w (floor (/ (- w (* gap (- cols 1))) cols)))
         (tile-h (floor (/ (- h (* gap (- rows 1))) rows)))
         (tile (image:resize frame tile-w tile-h "area"))
         (result (make-image w h (list 0 0 0))))
    (begin
      ;; Manually place tiles using nested iteration
      ;; This is a simplified version - full version would loop
      ;; NOTE(review): only a 2x2 grid is actually filled; rows/cols > 2
      ;; leave the extra cells black.
      (paste result tile 0 0)
      (if (> cols 1)
          (paste result tile (+ tile-w gap) 0)
          nil)
      (if (> rows 1)
          (paste result tile 0 (+ tile-h gap))
          nil)
      (if (and (> cols 1) (> rows 1))
          (paste result tile (+ tile-w gap) (+ tile-h gap))
          nil)
      result)))

View File

@@ -0,0 +1,20 @@
;; Trails effect - persistent motion trails
(require-primitives "image" "blending" "core")
(define-effect trails
  :params (
    (persistence :type float :default 0.8 :range [0 0.99])
  )
  ;; First frame: seed the accumulation buffer with a copy. Afterwards fade
  ;; the buffer towards black by (1 - persistence) and fold the current
  ;; frame in with a lighten blend so bright moving areas leave trails.
  (let ((buffer (state-get 'buffer nil)))
    ;; Use core:is-nil for the nil test, consistent with the other effects
    ;; (was (= buffer nil)); also drops a redundant `current` alias binding.
    (if (core:is-nil buffer)
        (begin
          (state-set 'buffer (copy frame))
          frame)
        (let* ((faded (blending:blend-images buffer
                        (make-image (image:width frame) (image:height frame) (list 0 0 0))
                        (- 1 persistence)))
               (result (blending:blend-mode faded frame "lighten")))
          (begin
            (state-set 'buffer result)
            result)))))

View File

@@ -0,0 +1,23 @@
;; Vignette effect - darkens corners
(require-primitives "image")
(define-effect vignette
  :params (
    (strength :type float :default 0.5 :range [0 1])
    ;; radius is a fractional multiplier of the corner distance, so it must
    ;; be a float (the old :type int contradicted the [0.5 2] range).
    (radius :type float :default 1.0 :range [0.5 2])
  )
  ;; Darken each pixel in proportion to its distance from the centre,
  ;; normalised by the (radius-scaled) corner distance.
  (let* ((w (image:width frame))
         (h (image:height frame))
         (cx (/ w 2))
         (cy (/ h 2))
         (max-dist (* (sqrt (+ (* cx cx) (* cy cy))) radius)))
    (map-pixels frame
      (lambda (x y c)
        (let* ((dx (- x cx))
               (dy (- y cy))
               (dist (sqrt (+ (* dx dx) (* dy dy))))
               (factor (- 1 (* (/ dist max-dist) strength)))
               ;; Clamp so pixels beyond max-dist never go negative.
               (factor (clamp factor 0 1)))
          (rgb (* (red c) factor)
               (* (green c) factor)
               (* (blue c) factor)))))))

View File

@@ -0,0 +1,22 @@
;; Wave effect - sine wave displacement distortion
(require-primitives "geometry" "image")
(define-effect wave
  :params (
    (amplitude :type int :default 10 :range [0 100])
    (wavelength :type int :default 50 :range [10 500])
    (speed :type int :default 1 :range [0 10])
    ;; direction: "horizontal" displaces along x, "vertical" along y,
    ;; anything else displaces both axes.
    (direction :type string :default "horizontal")
  )
  (let* ((w (image:width frame))
         (h (image:height frame))
         ;; Use _time for animation phase
         (phase (* (or _time 0) speed 2 pi))
         ;; Calculate frequency: waves per dimension
         (freq (/ (if (= direction "vertical") w h) wavelength))
         (axis (cond
                 ((= direction "horizontal") "x")
                 ((= direction "vertical") "y")
                 (else "both")))
         (coords (geometry:wave-coords w h axis freq amplitude phase)))
    (geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords))))

View File

@@ -0,0 +1,8 @@
;; Zoom effect - zooms in/out from center
(require-primitives "geometry")
(define-effect zoom
  :params (
    ;; amount is a fractional scale factor (<1 zooms out, >1 zooms in), so
    ;; it must be a float — the old :type int contradicted the [0.1 5] range.
    (amount :type float :default 1.0 :range [0.1 5])
  )
  ;; Uniform scale about the centre via the geometry primitive.
  (geometry:scale-img frame amount amount))

1016
sexp_effects/interpreter.py Normal file

File diff suppressed because it is too large Load Diff

168
sexp_effects/parser.py Normal file
View File

@@ -0,0 +1,168 @@
"""
S-Expression Parser
Parses S-expressions into Python data structures:
- Lists become Python lists
- Symbols become Symbol objects
- Numbers become int/float
- Strings become str
- Keywords (:foo) become Keyword objects
"""
import re
from dataclasses import dataclass
from typing import Any, List, Union
@dataclass(frozen=True)
class Symbol:
    """A symbol (identifier) in the S-expression."""
    # frozen=True makes instances immutable and gives value-based __eq__ and
    # __hash__, so symbols compare by name and can be used as dict keys.
    name: str
    def __repr__(self):
        # Bare name (no quotes) so parsed trees print like source sexps.
        return self.name
@dataclass(frozen=True)
class Keyword:
    """A keyword like :foo in the S-expression."""
    # Distinct from Symbol so :key argument markers can be told apart from
    # ordinary identifiers; also frozen, so hashable and value-comparable.
    name: str
    def __repr__(self):
        # Re-add the leading colon stripped during tokenization.
        return f":{self.name}"
# Token patterns, tried in order. Python alternation is first-match (not
# longest-match), so more specific patterns must precede SYMBOL.
TOKEN_PATTERNS = [
    (r'\s+', None),       # Whitespace (skip)
    (r';[^\n]*', None),   # Comments (skip)
    (r'\(', 'LPAREN'),
    (r'\)', 'RPAREN'),
    (r'\[', 'LBRACKET'),
    (r'\]', 'RBRACKET'),
    (r"'", 'QUOTE'),
    # The inner group must be NON-capturing: tokenize() relies on
    # Match.lastgroup, and a capturing inner group that matches last makes
    # lastgroup None, silently dropping every STRING token.
    (r'"(?:[^"\\]|\\.)*"', 'STRING'),
    (r':[a-zA-Z_][a-zA-Z0-9_\-]*', 'KEYWORD'),
    (r'-?[0-9]+\.[0-9]+', 'FLOAT'),
    (r'-?[0-9]+', 'INT'),
    # The lookahead stops symbols such as `trueish` or `false-positive`
    # from being split into a BOOL token plus a SYMBOL remainder.
    (r'(?:#t|#f|true|false)(?![a-zA-Z0-9_+\-*/<>=!?])', 'BOOL'),
    (r'[a-zA-Z_+\-*/<>=!?][a-zA-Z0-9_+\-*/<>=!?]*', 'SYMBOL'),
]
TOKEN_REGEX = '|'.join(f'(?P<{name}>{pattern})' if name else f'(?:{pattern})'
                       for pattern, name in TOKEN_PATTERNS)
def tokenize(source: str) -> List[tuple]:
    """Tokenize S-expression source into (kind, text) pairs.

    Whitespace and ;-comments are dropped (their group name is None).
    Note: characters matching no pattern are silently skipped by finditer.
    """
    tokens = []
    for match in re.finditer(TOKEN_REGEX, source):
        kind = match.lastgroup
        value = match.group()
        if kind:
            tokens.append((kind, value))
    return tokens
def parse(source: str) -> Any:
    """Parse S-expression source into Python data structures.

    Returns a single parsed expression, or a list of expressions when the
    source contains more than one top-level form.

    Raises:
        SyntaxError: on truncated input or unbalanced delimiters.
    """
    tokens = tokenize(source)
    pos = [0]  # Use list for mutability in nested function

    def unescape(raw: str) -> str:
        # Single-pass unescape. The previous chained str.replace calls let
        # the second backslash of an escaped backslash pair combine with a
        # following 'n' (so source \\n wrongly stayed as backslash+backslash+n
        # instead of backslash+n). Known escapes are \n, \" and \\; any other
        # backslash sequence is preserved verbatim, as before.
        def _sub(m):
            ch = m.group(1)
            if ch == 'n':
                return '\n'
            if ch in ('"', '\\'):
                return ch
            return '\\' + ch
        return re.sub(r'\\(.)', _sub, raw)

    def parse_expr():
        if pos[0] >= len(tokens):
            raise SyntaxError("Unexpected end of input")
        kind, value = tokens[pos[0]]
        if kind == 'LPAREN':
            pos[0] += 1
            items = []
            while pos[0] < len(tokens) and tokens[pos[0]][0] != 'RPAREN':
                items.append(parse_expr())
            if pos[0] >= len(tokens):
                raise SyntaxError("Missing closing parenthesis")
            pos[0] += 1  # Skip RPAREN
            return items
        if kind == 'LBRACKET':
            # Brackets parse to plain lists, same as parens (vector syntax).
            pos[0] += 1
            items = []
            while pos[0] < len(tokens) and tokens[pos[0]][0] != 'RBRACKET':
                items.append(parse_expr())
            if pos[0] >= len(tokens):
                raise SyntaxError("Missing closing bracket")
            pos[0] += 1  # Skip RBRACKET
            return items
        elif kind == 'RPAREN':
            raise SyntaxError("Unexpected closing parenthesis")
        elif kind == 'QUOTE':
            pos[0] += 1
            return [Symbol('quote'), parse_expr()]
        elif kind == 'STRING':
            pos[0] += 1
            # Strip the surrounding quotes, then process escape sequences.
            return unescape(value[1:-1])
        elif kind == 'INT':
            pos[0] += 1
            return int(value)
        elif kind == 'FLOAT':
            pos[0] += 1
            return float(value)
        elif kind == 'BOOL':
            pos[0] += 1
            return value in ('#t', 'true')
        elif kind == 'KEYWORD':
            pos[0] += 1
            return Keyword(value[1:])  # Remove leading :
        elif kind == 'SYMBOL':
            pos[0] += 1
            return Symbol(value)
        else:
            raise SyntaxError(f"Unknown token: {kind} {value}")
    result = parse_expr()
    # Check for multiple top-level expressions
    if pos[0] < len(tokens):
        # Allow multiple top-level expressions, return as list
        results = [result]
        while pos[0] < len(tokens):
            results.append(parse_expr())
        return results
    return result
def parse_file(path: str) -> Any:
    """Read the file at *path* and parse its contents as S-expressions."""
    with open(path, 'r') as handle:
        source = handle.read()
    return parse(source)
# Convenience for pretty-printing
def to_sexp(obj: Any) -> str:
    """Convert Python object back to S-expression string.

    Strings are escaped so output round-trips through parse(): '"' -> \\"
    and newline -> \\n (the exact inverse of the STRING token unescaping).
    Tuples serialize like lists. bool is checked before int/float because
    bool is a subclass of int.
    """
    if isinstance(obj, (list, tuple)):
        return '(' + ' '.join(to_sexp(x) for x in obj) + ')'
    elif isinstance(obj, Symbol):
        return obj.name
    elif isinstance(obj, Keyword):
        return f':{obj.name}'
    elif isinstance(obj, str):
        escaped = obj.replace('"', '\\"').replace('\n', '\\n')
        return f'"{escaped}"'
    elif isinstance(obj, bool):
        return '#t' if obj else '#f'
    elif isinstance(obj, (int, float)):
        return str(obj)
    else:
        return repr(obj)

View File

@@ -0,0 +1,102 @@
"""
Primitive Libraries System
Provides modular loading of primitives. Core primitives are always available,
additional primitive libraries can be loaded on-demand with scoped availability.
Usage in sexp:
;; Load at recipe level - available throughout
(primitives math :path "primitive_libs/math.py")
;; Or use with-primitives for scoped access
(with-primitives "image"
(blur frame 3)) ;; blur only available inside
;; Nested scopes work
(with-primitives "math"
(with-primitives "color"
(hue-shift frame (* (sin t) 30))))
Library file format (primitive_libs/math.py):
import math
def prim_sin(x): return math.sin(x)
def prim_cos(x): return math.cos(x)
PRIMITIVES = {
'sin': prim_sin,
'cos': prim_cos,
}
"""
import importlib.util
from pathlib import Path
from typing import Dict, Callable, Any, Optional
# Cache of loaded primitive libraries.
# Keyed by the explicit path when one was given, else by library name
# (see load_primitive_library's cache_key).
_library_cache: Dict[str, Dict[str, Any]] = {}
# Core primitives - always available, cannot be overridden.
# NOTE(review): nothing in this module enforces the no-override rule;
# presumably the interpreter consults CORE_PRIMITIVES first — confirm.
CORE_PRIMITIVES: Dict[str, Any] = {}
def register_core_primitive(name: str, fn: Callable):
    """Register a core primitive that's always available."""
    CORE_PRIMITIVES[name] = fn
def load_primitive_library(name: str, path: Optional[str] = None) -> Dict[str, Any]:
    """
    Load a primitive library by name or path.

    Args:
        name: Library name (e.g., "math", "image", "color")
        path: Optional explicit path to library file

    Returns:
        Dict of primitive name -> function

    Raises:
        ValueError: if the library file is missing, cannot be loaded as a
            module, or does not define a PRIMITIVES dict.
    """
    # Check cache first (keyed by path when explicit, else by name)
    cache_key = path or name
    if cache_key in _library_cache:
        return _library_cache[cache_key]
    # Find library file. The original performed the existence check twice
    # (once inside the else branch, once after); consolidated to one check
    # per case, preserving each case's original error message.
    if path:
        lib_path = Path(path)
        if not lib_path.exists():
            raise ValueError(f"Primitive library file not found: {lib_path}")
    else:
        # Look in standard locations (next to this module)
        lib_dir = Path(__file__).parent
        lib_path = lib_dir / f"{name}.py"
        if not lib_path.exists():
            raise ValueError(f"Primitive library '{name}' not found at {lib_path}")
    # Load the module
    spec = importlib.util.spec_from_file_location(f"prim_lib_{name}", lib_path)
    if spec is None or spec.loader is None:
        # Defensive: spec_from_file_location may return None for bad paths
        raise ValueError(f"Cannot load primitive library from {lib_path}")
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    # Get PRIMITIVES dict from module
    if not hasattr(module, 'PRIMITIVES'):
        raise ValueError(f"Primitive library '{name}' missing PRIMITIVES dict")
    primitives = module.PRIMITIVES
    # Cache and return
    _library_cache[cache_key] = primitives
    return primitives
def get_library_names() -> list:
    """Names of primitive library modules sitting next to this file."""
    here = Path(__file__).parent
    names = []
    for candidate in here.glob("*.py"):
        if candidate.stem != "__init__":
            names.append(candidate.stem)
    return names
def clear_cache():
    """Clear the library cache (useful for testing).

    After this, load_primitive_library re-reads library files from disk.
    CORE_PRIMITIVES is intentionally untouched.
    """
    _library_cache.clear()

View File

@@ -0,0 +1,196 @@
"""
Array Primitives Library
Vectorized operations on numpy arrays for coordinate transformations.
"""
import numpy as np
# Arithmetic
# Thin wrappers over numpy ufuncs; arguments may be arrays or scalars and
# follow numpy broadcasting rules.
def prim_arr_add(a, b):
    return np.add(a, b)
def prim_arr_sub(a, b):
    return np.subtract(a, b)
def prim_arr_mul(a, b):
    return np.multiply(a, b)
def prim_arr_div(a, b):
    # True division; division by zero follows numpy semantics (inf/nan).
    return np.divide(a, b)
def prim_arr_mod(a, b):
    return np.mod(a, b)
def prim_arr_neg(a):
    return np.negative(a)
# Math functions
# Elementwise numpy wrappers; sqrt and log clamp their inputs so results
# are always real/finite.
def prim_arr_sin(a):
    return np.sin(a)
def prim_arr_cos(a):
    return np.cos(a)
def prim_arr_tan(a):
    return np.tan(a)
def prim_arr_sqrt(a):
    # Negative inputs are clamped to 0 first, so the result is always real.
    return np.sqrt(np.maximum(a, 0))
def prim_arr_pow(a, b):
    return np.power(a, b)
def prim_arr_abs(a):
    return np.abs(a)
def prim_arr_exp(a):
    return np.exp(a)
def prim_arr_log(a):
    # Inputs floored at 1e-10 to avoid log(0) / log(negative).
    return np.log(np.maximum(a, 1e-10))
def prim_arr_atan2(y, x):
    return np.arctan2(y, x)
# Comparison / selection
# Elementwise min/max/clip/select and rounding wrappers.
def prim_arr_min(a, b):
    return np.minimum(a, b)
def prim_arr_max(a, b):
    return np.maximum(a, b)
def prim_arr_clip(a, lo, hi):
    return np.clip(a, lo, hi)
def prim_arr_where(cond, a, b):
    # Elementwise select: a where cond is truthy, else b.
    return np.where(cond, a, b)
def prim_arr_floor(a):
    return np.floor(a)
def prim_arr_ceil(a):
    return np.ceil(a)
def prim_arr_round(a):
    return np.round(a)
# Interpolation
def prim_arr_lerp(a, b, t):
    # Linear interpolation; t outside [0, 1] extrapolates.
    return a + (b - a) * t
def prim_arr_smoothstep(edge0, edge1, x):
    # Hermite smoothstep: clamps x into [edge0, edge1] then eases with
    # 3t^2 - 2t^3 (zero slope at both edges).
    t = prim_arr_clip((x - edge0) / (edge1 - edge0), 0.0, 1.0)
    return t * t * (3 - 2 * t)
# Creation
# Array constructors; all fixed-value constructors use float32 to keep
# coordinate maps compact.
def prim_arr_zeros(shape):
    return np.zeros(shape, dtype=np.float32)
def prim_arr_ones(shape):
    return np.ones(shape, dtype=np.float32)
def prim_arr_full(shape, value):
    return np.full(shape, value, dtype=np.float32)
def prim_arr_arange(start, stop, step=1):
    return np.arange(start, stop, step, dtype=np.float32)
def prim_arr_linspace(start, stop, num):
    return np.linspace(start, stop, num, dtype=np.float32)
def prim_arr_meshgrid(x, y):
    # Returns [X, Y] coordinate grids (numpy's default 'xy' indexing).
    return np.meshgrid(x, y)
# Coordinate transforms
def prim_polar_from_center(map_x, map_y, cx, cy):
    """Convert Cartesian to polar coordinates centered at (cx, cy)."""
    dx = map_x - cx
    dy = map_y - cy
    return np.sqrt(dx * dx + dy * dy), np.arctan2(dy, dx)
def prim_cart_from_polar(r, theta, cx, cy):
    """Convert polar to Cartesian, adding center offset."""
    return (r * np.cos(theta) + cx, r * np.sin(theta) + cy)
# Exported primitive table: sexp-visible name -> implementation.
PRIMITIVES = {
    # Arithmetic
    'arr+': prim_arr_add,
    'arr-': prim_arr_sub,
    'arr*': prim_arr_mul,
    'arr/': prim_arr_div,
    'arr-mod': prim_arr_mod,
    'arr-neg': prim_arr_neg,
    # Math
    'arr-sin': prim_arr_sin,
    'arr-cos': prim_arr_cos,
    'arr-tan': prim_arr_tan,
    'arr-sqrt': prim_arr_sqrt,
    'arr-pow': prim_arr_pow,
    'arr-abs': prim_arr_abs,
    'arr-exp': prim_arr_exp,
    'arr-log': prim_arr_log,
    'arr-atan2': prim_arr_atan2,
    # Selection
    'arr-min': prim_arr_min,
    'arr-max': prim_arr_max,
    'arr-clip': prim_arr_clip,
    'arr-where': prim_arr_where,
    'arr-floor': prim_arr_floor,
    'arr-ceil': prim_arr_ceil,
    'arr-round': prim_arr_round,
    # Interpolation
    'arr-lerp': prim_arr_lerp,
    'arr-smoothstep': prim_arr_smoothstep,
    # Creation
    'arr-zeros': prim_arr_zeros,
    'arr-ones': prim_arr_ones,
    'arr-full': prim_arr_full,
    'arr-arange': prim_arr_arange,
    'arr-linspace': prim_arr_linspace,
    'arr-meshgrid': prim_arr_meshgrid,
    # Coordinates
    'polar-from-center': prim_polar_from_center,
    'cart-from-polar': prim_cart_from_polar,
}

View File

@@ -0,0 +1,388 @@
"""
ASCII Art Primitives Library
ASCII art rendering with per-zone expression evaluation and cell effects.
"""
import numpy as np
import cv2
from PIL import Image, ImageDraw, ImageFont
from typing import Any, Dict, List, Optional, Callable
import colorsys
# Character sets
# Character sets
# Built-in glyph palettes, ordered from low to high luminance (index 0 is
# the darkest/emptiest glyph). Any alphabet string not named here is used
# verbatim as a custom palette (see _luminance_to_char).
CHAR_SETS = {
    "standard": " .:-=+*#%@",
    "blocks": " ░▒▓█",
    "simple": " .:oO@",
    "digits": "0123456789",
    "binary": "01",
    "ascii": " `.-':_,^=;><+!rc*/z?sLTv)J7(|Fi{C}fI31tlu[neoZ5Yxjya]2ESwqkP6h9d4VpOGbUAKXHm8RD#$Bg0MNWQ%&@",
}
# Default font
_default_font = None  # legacy slot kept for compatibility; cache below is used
_font_cache = {}  # size -> loaded font; truetype loading hits the disk

def _get_font(size: int):
    """Get monospace font at given size, loading it at most once per size.

    The original declared ``global _default_font`` but never assigned it, so
    the font was re-loaded from disk for every rendered cell; it also used a
    bare ``except``. A per-size cache fixes both.
    """
    font = _font_cache.get(size)
    if font is None:
        try:
            font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", size)
        except Exception:
            # Fallback bitmap font when DejaVu is unavailable.
            font = ImageFont.load_default()
        _font_cache[size] = font
    return font
def _parse_color(color_str: str) -> tuple:
"""Parse color string to RGB tuple."""
if color_str.startswith('#'):
hex_color = color_str[1:]
if len(hex_color) == 3:
hex_color = ''.join(c*2 for c in hex_color)
return tuple(int(hex_color[i:i+2], 16) for i in (0, 2, 4))
colors = {
'black': (0, 0, 0), 'white': (255, 255, 255),
'red': (255, 0, 0), 'green': (0, 255, 0), 'blue': (0, 0, 255),
'yellow': (255, 255, 0), 'cyan': (0, 255, 255), 'magenta': (255, 0, 255),
'gray': (128, 128, 128), 'grey': (128, 128, 128),
}
return colors.get(color_str.lower(), (0, 0, 0))
def _cell_sample(frame: np.ndarray, cell_size: int):
    """Sample frame into cells, returning colors and luminances.

    Uses cv2.resize with INTER_AREA (pixel-area averaging) which is
    ~25x faster than numpy reshape+mean for block downsampling.

    Returns:
        (colors, luminances): colors is the (rows, cols, 3) per-cell mean
        image; luminances is (rows, cols) float32 in [0, 1].
    """
    h, w = frame.shape[:2]
    rows = h // cell_size
    cols = w // cell_size
    # Crop to exact grid then block-average via cv2 area interpolation.
    cropped = frame[:rows * cell_size, :cols * cell_size]
    colors = cv2.resize(cropped, (cols, rows), interpolation=cv2.INTER_AREA)
    # Rec.601 luma weights; assumes channel order is RGB — TODO confirm callers.
    luminances = ((0.299 * colors[:, :, 0] +
                   0.587 * colors[:, :, 1] +
                   0.114 * colors[:, :, 2]) / 255.0).astype(np.float32)
    return colors, luminances
def _luminance_to_char(lum: float, alphabet: str, contrast: float) -> str:
    """Map a luminance in [0, 1] to a glyph from the given alphabet.

    `alphabet` is either a CHAR_SETS key or a literal palette string.
    Contrast is applied around the 0.5 midpoint before indexing.
    """
    palette = CHAR_SETS.get(alphabet, alphabet)
    adjusted = (lum - 0.5) * contrast + 0.5
    adjusted = max(0, min(1, adjusted))
    return palette[int(adjusted * (len(palette) - 1))]
def _render_char_cell(char: str, cell_size: int, color: tuple, bg_color: tuple) -> np.ndarray:
    """Render a single character to a cell image.

    Returns an RGB uint8 array of shape (cell_size, cell_size, 3).
    """
    img = Image.new('RGB', (cell_size, cell_size), bg_color)
    draw = ImageDraw.Draw(img)
    font = _get_font(cell_size)
    # Center the character
    bbox = draw.textbbox((0, 0), char, font=font)
    text_w = bbox[2] - bbox[0]
    text_h = bbox[3] - bbox[1]
    x = (cell_size - text_w) // 2
    # Subtract the glyph's top bearing (bbox[1]) so it is vertically centered.
    y = (cell_size - text_h) // 2 - bbox[1]
    draw.text((x, y), char, fill=color, font=font)
    return np.array(img)
def prim_ascii_fx_zone(
    frame: np.ndarray,
    cols: int = 80,
    char_size: int = None,
    alphabet: str = "standard",
    color_mode: str = "color",
    background: str = "black",
    contrast: float = 1.5,
    char_hue = None,
    char_saturation = None,
    char_brightness = None,
    char_scale = None,
    char_rotation = None,
    char_jitter = None,
    cell_effect = None,
    energy: float = None,
    rotation_scale: float = 0,
    _interp = None,
    _env = None,
    **extra_params
) -> np.ndarray:
    """
    Render frame as ASCII art with per-zone effects.
    Args:
        frame: Input image
        cols: Number of character columns
        char_size: Cell size in pixels (overrides cols if set)
        alphabet: Character set name or custom string
        color_mode: "color", "mono", "invert", or color name
        background: Background color name or hex
        contrast: Contrast for character selection
        char_hue/saturation/brightness/scale/rotation/jitter: Per-zone expressions
        cell_effect: Lambda (cell, zone) -> cell for per-cell effects
        energy: Energy value from audio analysis
        rotation_scale: Max rotation degrees
        _interp: Interpreter (auto-injected)
        _env: Environment (auto-injected)
        **extra_params: Additional params passed to zone dict

    NOTE(review): char_hue / char_saturation / char_brightness / char_scale /
    char_rotation / char_jitter are accepted but never referenced in this
    body — presumably reserved or consumed by a cell_effect; confirm.
    """
    h, w = frame.shape[:2]
    # Calculate cell size
    if char_size is None or char_size == 0:
        cell_size = max(4, w // cols)
    else:
        cell_size = max(4, int(char_size))
    # Sample cells
    colors, luminances = _cell_sample(frame, cell_size)
    rows, cols_actual = luminances.shape
    # Parse background color
    bg_color = _parse_color(background)
    # Create output image
    out_h = rows * cell_size
    out_w = cols_actual * cell_size
    output = np.full((out_h, out_w, 3), bg_color, dtype=np.uint8)
    # Check if we have cell_effect
    has_cell_effect = cell_effect is not None
    # Process each cell
    for r in range(rows):
        for c in range(cols_actual):
            lum = luminances[r, c]
            cell_color = tuple(colors[r, c])
            # Build zone context: per-cell facts exposed to cell_effect code.
            zone = {
                'row': r,
                'col': c,
                'row-norm': r / max(1, rows - 1),
                'col-norm': c / max(1, cols_actual - 1),
                'lum': float(lum),
                'r': cell_color[0] / 255,
                'g': cell_color[1] / 255,
                'b': cell_color[2] / 255,
                'cell_size': cell_size,
            }
            # Add HSV
            r_f, g_f, b_f = cell_color[0]/255, cell_color[1]/255, cell_color[2]/255
            hsv = colorsys.rgb_to_hsv(r_f, g_f, b_f)
            zone['hue'] = hsv[0] * 360
            zone['sat'] = hsv[1]
            # Add energy and rotation_scale
            if energy is not None:
                zone['energy'] = energy
            zone['rotation_scale'] = rotation_scale
            # Add extra params (only simple scalar values are forwarded)
            for k, v in extra_params.items():
                if isinstance(v, (int, float, str, bool)) or v is None:
                    zone[k] = v
            # Get character
            char = _luminance_to_char(lum, alphabet, contrast)
            zone['char'] = char
            # Determine cell color based on mode
            if color_mode == "mono":
                render_color = (255, 255, 255)
            elif color_mode == "invert":
                render_color = tuple(255 - c for c in cell_color)
            elif color_mode == "color":
                render_color = cell_color
            else:
                # Any other value is treated as a fixed color name/hex.
                render_color = _parse_color(color_mode)
            zone['color'] = render_color
            # Render character to cell
            cell_img = _render_char_cell(char, cell_size, render_color, bg_color)
            # Apply cell_effect if provided
            if has_cell_effect and _interp is not None:
                cell_img = _apply_cell_effect(cell_img, zone, cell_effect, _interp, _env, extra_params)
            # Paste cell to output
            y1, y2 = r * cell_size, (r + 1) * cell_size
            x1, x2 = c * cell_size, (c + 1) * cell_size
            output[y1:y2, x1:x2] = cell_img
    # Resize to match input dimensions (grid may not divide w/h exactly)
    if output.shape[:2] != frame.shape[:2]:
        output = cv2.resize(output, (w, h), interpolation=cv2.INTER_LINEAR)
    return output
def _apply_cell_effect(cell_img, zone, cell_effect, interp, env, extra_params):
    """Apply cell_effect lambda to a cell image.

    cell_effect is a Lambda object with params and body.
    We create a child environment with zone variables and cell,
    then evaluate the lambda body.

    NOTE(review): the `extra_params` argument is accepted but never used in
    this body — confirm whether it can be dropped from the call site.
    """
    # Get Environment class from the interpreter's module
    # (assumes type(env) is constructible as Environment(parent) and exposes
    # .set(name, value) — TODO confirm against the interpreter module)
    Environment = type(env)
    # Create child environment with zone variables
    cell_env = Environment(env)
    # Bind zone variables
    for k, v in zone.items():
        cell_env.set(k, v)
    # Also bind with zone- prefix for consistency
    cell_env.set('zone-row', zone.get('row', 0))
    cell_env.set('zone-col', zone.get('col', 0))
    cell_env.set('zone-row-norm', zone.get('row-norm', 0))
    cell_env.set('zone-col-norm', zone.get('col-norm', 0))
    cell_env.set('zone-lum', zone.get('lum', 0))
    cell_env.set('zone-sat', zone.get('sat', 0))
    cell_env.set('zone-hue', zone.get('hue', 0))
    cell_env.set('zone-r', zone.get('r', 0))
    cell_env.set('zone-g', zone.get('g', 0))
    cell_env.set('zone-b', zone.get('b', 0))
    # Inject loaded effects as callable functions
    if hasattr(interp, 'effects'):
        for effect_name in interp.effects:
            # Factory function (not a bare closure) so each effect binds its
            # own name — avoids the classic late-binding loop-variable bug.
            def make_effect_fn(name):
                def effect_fn(frame, *args):
                    # Map positional args onto each effect's named parameters.
                    params = {}
                    if name == 'blur' and len(args) >= 1:
                        params['radius'] = args[0]
                    elif name == 'rotate' and len(args) >= 1:
                        params['angle'] = args[0]
                    elif name == 'brightness' and len(args) >= 1:
                        params['amount'] = args[0]
                    elif name == 'contrast' and len(args) >= 1:
                        params['amount'] = args[0]
                    elif name == 'saturation' and len(args) >= 1:
                        params['amount'] = args[0]
                    elif name == 'hue_shift' and len(args) >= 1:
                        params['degrees'] = args[0]
                    elif name == 'rgb_split' and len(args) >= 2:
                        params['offset_x'] = args[0]
                        params['offset_y'] = args[1]
                    elif name == 'pixelate' and len(args) >= 1:
                        params['size'] = args[0]
                    elif name == 'invert':
                        pass
                    result, _ = interp.run_effect(name, frame, params, {})
                    return result
                return effect_fn
            cell_env.set(effect_name, make_effect_fn(effect_name))
    # Bind cell image and zone dict
    cell_env.set('cell', cell_img)
    cell_env.set('zone', zone)
    # Evaluate the cell_effect lambda
    # Lambda has params and body - we need to bind the params then evaluate
    if hasattr(cell_effect, 'params') and hasattr(cell_effect, 'body'):
        # Bind lambda parameters: (lambda [cell zone] body)
        if len(cell_effect.params) >= 1:
            cell_env.set(cell_effect.params[0], cell_img)
        if len(cell_effect.params) >= 2:
            cell_env.set(cell_effect.params[1], zone)
        result = interp.eval(cell_effect.body, cell_env)
    elif isinstance(cell_effect, list):
        # Raw S-expression lambda like (lambda [cell zone] body) or (fn [cell zone] body)
        # Check if it's a lambda expression
        head = cell_effect[0] if cell_effect else None
        head_name = head.name if head and hasattr(head, 'name') else str(head) if head else None
        is_lambda = head_name in ('lambda', 'fn')
        if is_lambda:
            # (lambda [params...] body)
            params = cell_effect[1] if len(cell_effect) > 1 else []
            body = cell_effect[2] if len(cell_effect) > 2 else None
            # Bind lambda parameters
            if isinstance(params, list) and len(params) >= 1:
                param_name = params[0].name if hasattr(params[0], 'name') else str(params[0])
                cell_env.set(param_name, cell_img)
            if isinstance(params, list) and len(params) >= 2:
                param_name = params[1].name if hasattr(params[1], 'name') else str(params[1])
                cell_env.set(param_name, zone)
            result = interp.eval(body, cell_env) if body else cell_img
        else:
            # Some other expression - just evaluate it
            result = interp.eval(cell_effect, cell_env)
    elif callable(cell_effect):
        # It's a callable
        result = cell_effect(cell_img, zone)
    else:
        raise ValueError(f"cell_effect must be a Lambda, list, or callable, got {type(cell_effect)}")
    # The effect must yield an image; mismatched sizes are resized back to
    # the cell's dimensions rather than rejected.
    if isinstance(result, np.ndarray) and result.shape == cell_img.shape:
        return result
    elif isinstance(result, np.ndarray):
        # Shape mismatch - resize to fit
        result = cv2.resize(result, (cell_img.shape[1], cell_img.shape[0]))
        return result
    raise ValueError(f"cell_effect must return an image array, got {type(result)}")
def _get_legacy_ascii_primitives():
    """Import ASCII primitives from legacy primitives module.

    These are loaded lazily to avoid import issues during module loading.
    By the time a primitive library is loaded, sexp_effects.primitives
    is already in sys.modules (imported by sexp_effects.__init__).

    Returns:
        Dict mapping sexp-visible names to the legacy implementations.
    """
    # Deferred import: executes only when this library is actually loaded.
    from sexp_effects.primitives import (
        prim_cell_sample,
        prim_luminance_to_chars,
        prim_render_char_grid,
        prim_render_char_grid_fx,
        prim_alphabet_char,
        prim_alphabet_length,
        prim_map_char_grid,
        prim_map_colors,
        prim_make_char_grid,
        prim_set_char,
        prim_get_char,
        prim_char_grid_dimensions,
        cell_sample_extended,
    )
    return {
        'cell-sample': prim_cell_sample,
        'cell-sample-extended': cell_sample_extended,
        'luminance-to-chars': prim_luminance_to_chars,
        'render-char-grid': prim_render_char_grid,
        'render-char-grid-fx': prim_render_char_grid_fx,
        'alphabet-char': prim_alphabet_char,
        'alphabet-length': prim_alphabet_length,
        'map-char-grid': prim_map_char_grid,
        'map-colors': prim_map_colors,
        'make-char-grid': prim_make_char_grid,
        'set-char': prim_set_char,
        'get-char': prim_get_char,
        'char-grid-dimensions': prim_char_grid_dimensions,
    }
# Exported table: the zone renderer plus the legacy grid primitives, merged
# once at library-load time (the legacy import above resolves lazily).
PRIMITIVES = {
    'ascii-fx-zone': prim_ascii_fx_zone,
    **_get_legacy_ascii_primitives(),
}

View File

@@ -0,0 +1,116 @@
"""
Blending Primitives Library
Image blending and compositing operations.
"""
import numpy as np
def prim_blend_images(a, b, alpha):
    """Linear crossfade between two images: a * (1-alpha) + b * alpha.

    alpha is clamped to [0, 1]; result is uint8.
    """
    t = max(0.0, min(1.0, alpha))
    mixed = a.astype(float) * (1 - t) + b.astype(float) * t
    return mixed.astype(np.uint8)
def prim_blend_mode(a, b, mode):
    """Blend using Photoshop-style blend modes.

    a is the base layer, b the blend layer; both are normalized to floats
    in [0, 1] for the math and the result is returned as uint8. Unknown
    modes fall back to "normal" (returns b).
    """
    a = a.astype(float) / 255
    b = b.astype(float) / 255
    if mode == "multiply":
        result = a * b
    elif mode == "screen":
        result = 1 - (1 - a) * (1 - b)
    elif mode == "overlay":
        # Branch on the base layer's brightness.
        mask = a < 0.5
        result = np.where(mask, 2 * a * b, 1 - 2 * (1 - a) * (1 - b))
    elif mode == "soft-light":
        mask = b < 0.5
        result = np.where(mask,
                          a - (1 - 2 * b) * a * (1 - a),
                          a + (2 * b - 1) * (np.sqrt(a) - a))
    elif mode == "hard-light":
        # Like overlay, but branching on the blend layer.
        mask = b < 0.5
        result = np.where(mask, 2 * a * b, 1 - 2 * (1 - a) * (1 - b))
    elif mode == "color-dodge":
        # +0.001 guards the division when b == 1.
        result = np.clip(a / (1 - b + 0.001), 0, 1)
    elif mode == "color-burn":
        result = 1 - np.clip((1 - a) / (b + 0.001), 0, 1)
    elif mode == "difference":
        result = np.abs(a - b)
    elif mode == "exclusion":
        result = a + b - 2 * a * b
    elif mode == "add":
        result = np.clip(a + b, 0, 1)
    elif mode == "subtract":
        result = np.clip(a - b, 0, 1)
    elif mode == "darken":
        result = np.minimum(a, b)
    elif mode == "lighten":
        result = np.maximum(a, b)
    else:
        # Default to normal (just return b)
        result = b
    return (result * 255).astype(np.uint8)
def prim_mask(img, mask_img):
    """Apply grayscale mask to image (white=opaque, black=transparent).

    Accepts a 2D mask or a 3-channel image (first channel is used).
    """
    channel = mask_img[:, :, 0] if mask_img.ndim == 3 else mask_img
    weights = (channel.astype(float) / 255)[:, :, np.newaxis]
    return (img.astype(float) * weights).astype(np.uint8)
def prim_alpha_composite(base, overlay, alpha_channel):
    """Composite overlay onto base using alpha channel (255 = fully overlay).

    alpha_channel may be 2D or 3-channel (first channel is used).
    """
    channel = alpha_channel[:, :, 0] if alpha_channel.ndim == 3 else alpha_channel
    weights = (channel.astype(float) / 255)[:, :, np.newaxis]
    blended = base.astype(float) * (1 - weights) + overlay.astype(float) * weights
    return blended.astype(np.uint8)
def prim_overlay(base, overlay, x, y, alpha=1.0):
    """Overlay image at position (x, y) with optional alpha.

    (x, y) may place the overlay partially or fully off-canvas; source and
    destination rectangles are clipped accordingly and fully-off overlays
    are a no-op. NOTE(review): alpha is not clamped to [0, 1] here —
    confirm callers always pass a sane value.
    """
    result = base.copy()
    x, y = int(x), int(y)
    oh, ow = overlay.shape[:2]
    bh, bw = base.shape[:2]
    # Clip to bounds
    sx1 = max(0, -x)       # source left edge when x < 0
    sy1 = max(0, -y)       # source top edge when y < 0
    dx1 = max(0, x)        # destination top-left, clamped to canvas
    dy1 = max(0, y)
    sx2 = min(ow, bw - x)  # source right edge limited by canvas width
    sy2 = min(oh, bh - y)  # source bottom edge limited by canvas height
    if sx2 > sx1 and sy2 > sy1:
        src = overlay[sy1:sy2, sx1:sx2]
        dst = result[dy1:dy1+(sy2-sy1), dx1:dx1+(sx2-sx1)]
        blended = (dst.astype(float) * (1 - alpha) + src.astype(float) * alpha)
        result[dy1:dy1+(sy2-sy1), dx1:dx1+(sx2-sx1)] = blended.astype(np.uint8)
    return result
# Exported primitive table: sexp-visible name -> implementation.
PRIMITIVES = {
    # Basic blending
    'blend-images': prim_blend_images,
    'blend-mode': prim_blend_mode,
    # Masking
    'mask': prim_mask,
    'alpha-composite': prim_alpha_composite,
    # Overlay
    'overlay': prim_overlay,
}

View File

@@ -0,0 +1,137 @@
"""
Color Primitives Library
Color manipulation: RGB, HSV, blending, luminance.
"""
import numpy as np
import colorsys
def prim_rgb(r, g, b):
    """Create RGB color as [r, g, b], each channel clamped to an int in 0-255."""
    def clamp(value):
        return int(max(0, min(255, value)))
    return [clamp(r), clamp(g), clamp(b)]
def prim_red(c):
    """Red channel of an [r, g, b] color."""
    return c[0]
def prim_green(c):
    """Green channel of an [r, g, b] color."""
    return c[1]
def prim_blue(c):
    """Blue channel of an [r, g, b] color."""
    return c[2]
def prim_luminance(c):
    """Perceived luminance (0-1) using standard weights."""
    return (0.299 * c[0] + 0.587 * c[1] + 0.114 * c[2]) / 255
def prim_rgb_to_hsv(c):
    """Convert RGB [0-255] to HSV [h:0-360, s:0-1, v:0-1]."""
    r, g, b = c[0] / 255, c[1] / 255, c[2] / 255
    h, s, v = colorsys.rgb_to_hsv(r, g, b)
    return [h * 360, s, v]
def prim_hsv_to_rgb(hsv):
    """Convert HSV [h:0-360, s:0-1, v:0-1] to RGB [0-255]."""
    h, s, v = hsv[0] / 360, hsv[1], hsv[2]
    r, g, b = colorsys.hsv_to_rgb(h, s, v)
    return [int(r * 255), int(g * 255), int(b * 255)]
def prim_rgb_to_hsl(c):
    """Convert RGB [0-255] to HSL [h:0-360, s:0-1, l:0-1]."""
    r, g, b = c[0] / 255, c[1] / 255, c[2] / 255
    # colorsys uses HLS ordering (h, l, s); reordered to [h, s, l] here.
    h, l, s = colorsys.rgb_to_hls(r, g, b)
    return [h * 360, s, l]
def prim_hsl_to_rgb(hsl):
    """Convert HSL [h:0-360, s:0-1, l:0-1] to RGB [0-255]."""
    h, s, l = hsl[0] / 360, hsl[1], hsl[2]
    # Note colorsys argument order: hls_to_rgb(h, l, s).
    r, g, b = colorsys.hls_to_rgb(h, l, s)
    return [int(r * 255), int(g * 255), int(b * 255)]
def prim_blend_color(c1, c2, alpha):
    """Blend two colors: c1 * (1-alpha) + c2 * alpha, per channel."""
    blended = []
    for left, right in zip(c1[:3], c2[:3]):
        blended.append(int(left * (1 - alpha) + right * alpha))
    return blended
def prim_average_color(img):
    """Get average color of an image."""
    mean = np.mean(img, axis=(0, 1))
    return [int(mean[0]), int(mean[1]), int(mean[2])]
def prim_dominant_color(img, k=1):
    """Get dominant color using k-means (simplified: just average for now)."""
    # TODO: implement real k-means; currently identical to average-color
    # and the k parameter is ignored.
    return prim_average_color(img)
def prim_invert_color(c):
    """Invert a color channel-wise (255 - value)."""
    return [255 - channel for channel in c[:3]]
def prim_grayscale_color(c):
    """Convert color to grayscale using Rec.601 luma weights."""
    level = int(0.299 * c[0] + 0.587 * c[1] + 0.114 * c[2])
    return [level, level, level]
def prim_saturate(c, amount):
    """Adjust saturation of color. amount=0 is grayscale, 1 is unchanged, >1 is more saturated."""
    hsv = prim_rgb_to_hsv(c)
    hsv[1] = max(0, min(1, hsv[1] * amount))  # clamp S into [0, 1]
    return prim_hsv_to_rgb(hsv)
def prim_brighten(c, amount):
    """Adjust brightness. amount=0 is black, 1 is unchanged, >1 is brighter."""
    # Per-channel scaling with clamping (not HSV value scaling).
    return [int(max(0, min(255, c[i] * amount))) for i in range(3)]
def prim_shift_hue(c, degrees):
    """Shift hue by degrees."""
    hsv = prim_rgb_to_hsv(c)
    hsv[0] = (hsv[0] + degrees) % 360  # wrap around the color wheel
    return prim_hsv_to_rgb(hsv)
# Exported primitive table: sexp-visible name -> implementation.
PRIMITIVES = {
    # Construction
    'rgb': prim_rgb,
    # Component access
    'red': prim_red,
    'green': prim_green,
    'blue': prim_blue,
    'luminance': prim_luminance,
    # Color space conversion
    'rgb->hsv': prim_rgb_to_hsv,
    'hsv->rgb': prim_hsv_to_rgb,
    'rgb->hsl': prim_rgb_to_hsl,
    'hsl->rgb': prim_hsl_to_rgb,
    # Blending
    'blend-color': prim_blend_color,
    # Analysis
    'average-color': prim_average_color,
    'dominant-color': prim_dominant_color,
    # Manipulation
    'invert-color': prim_invert_color,
    'grayscale-color': prim_grayscale_color,
    'saturate': prim_saturate,
    'brighten': prim_brighten,
    'shift-hue': prim_shift_hue,
}

View File

@@ -0,0 +1,90 @@
"""
Color Operations Primitives Library
Vectorized color adjustments: brightness, contrast, saturation, invert, HSV.
These operate on entire images for fast processing.
"""
import numpy as np
import cv2
def prim_adjust(img, brightness=0, contrast=1):
    """Adjust brightness and contrast. Brightness: -255 to 255, Contrast: 0 to 3+.

    Contrast scales around the 128 midpoint; brightness is an additive
    offset applied afterwards. Output is clipped uint8.
    """
    scaled = (img.astype(np.float32) - 128) * contrast
    return np.clip(scaled + 128 + brightness, 0, 255).astype(np.uint8)
def prim_mix_gray(img, amount):
    """Mix image with its grayscale version. 0=original, 1=grayscale."""
    lum = 0.299 * img[:, :, 0] + 0.587 * img[:, :, 1] + 0.114 * img[:, :, 2]
    gray_rgb = np.stack((lum, lum, lum), axis=-1)
    mixed = img.astype(np.float32) * (1 - amount) + gray_rgb * amount
    return np.clip(mixed, 0, 255).astype(np.uint8)
def prim_invert_img(img):
    """Invert all pixel values."""
    return (255 - img).astype(np.uint8)
def prim_shift_hsv(img, h=0, s=1, v=1):
    """Shift HSV: h=degrees offset, s/v=multipliers."""
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.float32)
    # OpenCV stores 8-bit hue as 0-179 (degrees / 2), hence h / 2 and mod 180.
    hsv[:, :, 0] = (hsv[:, :, 0] + h / 2) % 180
    hsv[:, :, 1] = np.clip(hsv[:, :, 1] * s, 0, 255)
    hsv[:, :, 2] = np.clip(hsv[:, :, 2] * v, 0, 255)
    return cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2RGB)
def prim_add_noise(img, amount):
    """Add gaussian noise (stddev = amount) to image; output clipped uint8."""
    noisy = img.astype(np.float32) + np.random.normal(0, amount, img.shape)
    return np.clip(noisy, 0, 255).astype(np.uint8)
def prim_quantize(img, levels):
    """Reduce to N color levels per channel (minimum 2).

    Each channel is snapped to the middle of its quantization bucket.
    """
    buckets = max(2, int(levels))
    step = 256 / buckets
    snapped = (img // step) * step + step // 2
    return np.clip(snapped, 0, 255).astype(np.uint8)
def prim_sepia(img, intensity=1.0):
    """Apply sepia tone effect (standard sepia matrix), blended by intensity."""
    tone = np.array([
        [0.393, 0.769, 0.189],
        [0.349, 0.686, 0.168],
        [0.272, 0.534, 0.131],
    ])
    toned = img @ tone.T
    blended = img.astype(np.float32) * (1 - intensity) + toned * intensity
    return np.clip(blended, 0, 255).astype(np.uint8)
def prim_grayscale(img):
    """Convert to grayscale using Rec.601 luma weights (still RGB output)."""
    lum = 0.299 * img[:, :, 0] + 0.587 * img[:, :, 1] + 0.114 * img[:, :, 2]
    return np.stack((lum, lum, lum), axis=-1).astype(np.uint8)
# Exported primitive table: sexp-visible name -> implementation.
PRIMITIVES = {
    # Brightness/Contrast
    'adjust': prim_adjust,
    # Saturation
    'mix-gray': prim_mix_gray,
    'grayscale': prim_grayscale,
    # HSV manipulation
    'shift-hsv': prim_shift_hsv,
    # Inversion
    'invert-img': prim_invert_img,
    # Effects
    'add-noise': prim_add_noise,
    'quantize': prim_quantize,
    'sepia': prim_sepia,
}

View File

@@ -0,0 +1,271 @@
"""
Core Primitives - Always available, minimal essential set.
These are the primitives that form the foundation of the language.
They cannot be overridden by libraries.
"""
# Arithmetic
def prim_add(*args):
    """Variadic addition; () -> 0. Works on any type supporting +."""
    if not args:
        return 0
    total, *rest = args
    for value in rest:
        total = total + value
    return total
def prim_sub(a, b=None):
    """Two-argument subtraction, or unary negation when b is omitted."""
    return -a if b is None else a - b
def prim_mul(*args):
    """Variadic multiplication; () -> 1."""
    if not args:
        return 1
    product, *rest = args
    for value in rest:
        product = product * value
    return product
def prim_div(a, b):
    """True division."""
    return a / b
def prim_mod(a, b):
    """Modulo (Python semantics: result has the sign of b)."""
    return a % b
def prim_abs(x):
    """Absolute value."""
    return abs(x)
def prim_min(*args):
    """Smallest argument."""
    return min(args)
def prim_max(*args):
    """Largest argument."""
    return max(args)
def prim_round(x):
    """Round to nearest (Python banker's rounding on ties)."""
    return round(x)
def prim_floor(x):
    """Largest integer <= x."""
    import math
    return math.floor(x)
def prim_ceil(x):
    """Smallest integer >= x."""
    import math
    return math.ceil(x)
# Comparison
def prim_lt(a, b):
    """a < b."""
    return a < b
def prim_gt(a, b):
    """a > b."""
    return a > b
def prim_le(a, b):
    """a <= b."""
    return a <= b
def prim_ge(a, b):
    """a >= b."""
    return a >= b
def prim_eq(a, b):
    """Equality; when either side is a float, compare with 1e-9 tolerance."""
    if isinstance(a, float) or isinstance(b, float):
        return abs(a - b) < 1e-9
    return a == b
def prim_ne(a, b):
    """Negation of prim_eq (inherits the float tolerance)."""
    return not prim_eq(a, b)
# Logic
def prim_not(x):
    """Logical negation (Python truthiness)."""
    return not x
def prim_and(*args):
    """True when every argument is truthy; () -> True."""
    return all(args)
def prim_or(*args):
    """True when any argument is truthy; () -> False."""
    return any(args)
# Basic data access
def prim_get(obj, key, default=None):
    """Get value from a dict (by key) or list/tuple (by int(key) index).

    Returns default on missing keys, out-of-range indexes, unparseable
    indexes, or unsupported containers.
    """
    if isinstance(obj, dict):
        return obj.get(key, default)
    if isinstance(obj, (list, tuple)):
        try:
            return obj[int(key)]
        except (IndexError, ValueError):
            return default
    return default
def prim_nth(seq, i):
    """Element at index i, or None when out of range (negatives excluded)."""
    idx = int(i)
    return seq[idx] if 0 <= idx < len(seq) else None
def prim_first(seq):
    """First element, or None for an empty sequence."""
    if not seq:
        return None
    return seq[0]
def prim_length(seq):
    """Number of elements."""
    return len(seq)
def prim_list(*args):
    """Build a list from the arguments."""
    return list(args)
# Type checking
def prim_is_number(x):
    # NOTE(review): bool is a subclass of int, so (number? #t) is true here —
    # confirm that is the intended language semantics.
    return isinstance(x, (int, float))
def prim_is_string(x):
    return isinstance(x, str)
def prim_is_list(x):
    # Tuples also count as lists for the sexp language.
    return isinstance(x, (list, tuple))
def prim_is_dict(x):
    return isinstance(x, dict)
def prim_is_nil(x):
    return x is None
# Higher-order / iteration
def prim_reduce(seq, init, fn):
    """(reduce seq init fn) — fold left: fn(fn(fn(init, s0), s1), s2) ..."""
    accumulator = init
    for element in seq:
        accumulator = fn(accumulator, element)
    return accumulator
def prim_map(seq, fn):
    """(map seq fn) — apply fn to each element, return new list."""
    return list(map(fn, seq))
def prim_range(*args):
    """(range end), (range start end), or (range start end step) — integer range."""
    if len(args) == 1:
        return list(range(int(args[0])))
    if len(args) == 2:
        return list(range(int(args[0]), int(args[1])))
    if len(args) >= 3:
        # Extra arguments beyond the first three are ignored.
        return list(range(int(args[0]), int(args[1]), int(args[2])))
    return []
# Random
import random
# Module-level RNG shared by all random primitives (unseeded, so results
# are nondeterministic across runs).
_rng = random.Random()
def prim_rand():
    """Return random float in [0, 1)."""
    return _rng.random()
def prim_rand_int(lo, hi):
    """Return random integer in [lo, hi] (inclusive at both ends)."""
    return _rng.randint(int(lo), int(hi))
def prim_rand_range(lo, hi):
    """Return random float in [lo, hi)."""
    return lo + _rng.random() * (hi - lo)
def prim_map_range(val, from_lo, from_hi, to_lo, to_hi):
    """Map value from one range to another (no clamping; extrapolates).

    A degenerate source range (from_lo == from_hi) maps everything to to_lo.
    """
    span = from_hi - from_lo
    if span == 0:
        return to_lo
    fraction = (val - from_lo) / span
    return to_lo + fraction * (to_hi - to_lo)
# Core primitives dict
# Always-available primitive table: sexp-visible name -> implementation.
# Several entries are aliases ('len'/'length', 'fold'/'reduce',
# 'nil?'/'is-nil') pointing at the same function.
PRIMITIVES = {
    # Arithmetic
    '+': prim_add,
    '-': prim_sub,
    '*': prim_mul,
    '/': prim_div,
    'mod': prim_mod,
    'abs': prim_abs,
    'min': prim_min,
    'max': prim_max,
    'round': prim_round,
    'floor': prim_floor,
    'ceil': prim_ceil,
    # Comparison
    '<': prim_lt,
    '>': prim_gt,
    '<=': prim_le,
    '>=': prim_ge,
    '=': prim_eq,
    '!=': prim_ne,
    # Logic
    'not': prim_not,
    'and': prim_and,
    'or': prim_or,
    # Data access
    'get': prim_get,
    'nth': prim_nth,
    'first': prim_first,
    'length': prim_length,
    'len': prim_length,
    'list': prim_list,
    # Type predicates
    'number?': prim_is_number,
    'string?': prim_is_string,
    'list?': prim_is_list,
    'dict?': prim_is_dict,
    'nil?': prim_is_nil,
    'is-nil': prim_is_nil,
    # Higher-order / iteration
    'reduce': prim_reduce,
    'fold': prim_reduce,
    'map': prim_map,
    'range': prim_range,
    # Random
    'rand': prim_rand,
    'rand-int': prim_rand_int,
    'rand-range': prim_rand_range,
    'map-range': prim_map_range,
}

View File

@@ -0,0 +1,136 @@
"""
Drawing Primitives Library
Draw shapes, text, and characters on images.
"""
import numpy as np
import cv2
from PIL import Image, ImageDraw, ImageFont
# Default font (will be loaded lazily)
_default_font = None  # legacy slot kept for compatibility; cache below is used
_font_cache = {}  # size -> loaded font

def _get_default_font(size=16):
    """Get default font at *size*, loading it at most once per size.

    The previous single-slot cache compared ``_default_font.size != size``,
    which raises AttributeError once the ImageFont.load_default() fallback
    (a bitmap font that may lack a .size attribute) has been cached; it
    also used a bare ``except``. A per-size dict avoids both problems.
    """
    font = _font_cache.get(size)
    if font is None:
        try:
            font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", size)
        except Exception:
            font = ImageFont.load_default()
        _font_cache[size] = font
    return font
def prim_draw_char(img, char, x, y, font_size=16, color=None):
    """Draw a single character at (x, y).

    Returns a new array; the input image is not modified. Each call
    round-trips through a PIL image, so this is relatively expensive.
    """
    if color is None:
        color = [255, 255, 255]
    pil_img = Image.fromarray(img)
    draw = ImageDraw.Draw(pil_img)
    font = _get_default_font(font_size)
    draw.text((x, y), char, fill=tuple(color), font=font)
    return np.array(pil_img)
def prim_draw_text(img, text, x, y, font_size=16, color=None):
    """Draw text string at (x, y).

    Same PIL round-trip as prim_draw_char; returns a new array.
    """
    if color is None:
        color = [255, 255, 255]
    pil_img = Image.fromarray(img)
    draw = ImageDraw.Draw(pil_img)
    font = _get_default_font(font_size)
    draw.text((x, y), text, fill=tuple(color), font=font)
    return np.array(pil_img)
def prim_fill_rect(img, x, y, w, h, color=None):
    """Fill a rectangle with color (default white); returns a new image."""
    fill = [255, 255, 255] if color is None else color
    out = img.copy()
    x0, y0 = int(x), int(y)
    rw, rh = int(w), int(h)
    out[y0:y0 + rh, x0:x0 + rw] = fill
    return out
def prim_draw_rect(img, x, y, w, h, color=None, thickness=1):
    """Draw a rectangle outline; returns a new image."""
    stroke = [255, 255, 255] if color is None else color
    out = img.copy()
    top_left = (int(x), int(y))
    bottom_right = (int(x + w), int(y + h))
    cv2.rectangle(out, top_left, bottom_right, tuple(stroke), thickness)
    return out
def prim_draw_line(img, x1, y1, x2, y2, color=None, thickness=1):
    """Draw a line from (x1, y1) to (x2, y2); returns a new image."""
    stroke = [255, 255, 255] if color is None else color
    out = img.copy()
    start = (int(x1), int(y1))
    end = (int(x2), int(y2))
    cv2.line(out, start, end, tuple(stroke), thickness)
    return out
def prim_draw_circle(img, cx, cy, radius, color=None, thickness=1, fill=False):
    """Draw a circle, optionally filled; returns a new image."""
    stroke = [255, 255, 255] if color is None else color
    out = img.copy()
    # OpenCV treats thickness -1 as "filled".
    cv2.circle(out, (int(cx), int(cy)), int(radius), tuple(stroke),
               -1 if fill else thickness)
    return out
def prim_draw_ellipse(img, cx, cy, rx, ry, angle=0, color=None, thickness=1, fill=False):
    """Draw a full (0-360 degree) ellipse, optionally filled; returns a new image."""
    stroke = [255, 255, 255] if color is None else color
    out = img.copy()
    center = (int(cx), int(cy))
    axes = (int(rx), int(ry))
    cv2.ellipse(out, center, axes, angle, 0, 360, tuple(stroke),
                -1 if fill else thickness)
    return out
def prim_draw_polygon(img, points, color=None, thickness=1, fill=False):
    """Draw a closed polygon from a list of [x, y] points; returns a new image."""
    stroke = [255, 255, 255] if color is None else color
    out = img.copy()
    # OpenCV expects an int32 array shaped (N, 1, 2).
    vertices = np.array(points, dtype=np.int32).reshape((-1, 1, 2))
    if fill:
        cv2.fillPoly(out, [vertices], tuple(stroke))
    else:
        cv2.polylines(out, [vertices], True, tuple(stroke), thickness)
    return out
# Registry of drawing primitives, keyed by their s-expression symbol names.
PRIMITIVES = {
    # Text
    'draw-char': prim_draw_char,
    'draw-text': prim_draw_text,
    # Rectangles
    'fill-rect': prim_fill_rect,
    'draw-rect': prim_draw_rect,
    # Lines and shapes
    'draw-line': prim_draw_line,
    'draw-circle': prim_draw_circle,
    'draw-ellipse': prim_draw_ellipse,
    'draw-polygon': prim_draw_polygon,
}

View File

@@ -0,0 +1,119 @@
"""
Filters Primitives Library
Image filters: blur, sharpen, edges, convolution.
"""
import numpy as np
import cv2
def prim_blur(img, radius):
    """Gaussian blur with given radius."""
    r = max(1, int(radius))
    # GaussianBlur requires an odd kernel size.
    k = 2 * r + 1
    return cv2.GaussianBlur(img, (k, k), 0)
def prim_box_blur(img, radius):
    """Box (mean) blur with given radius."""
    r = max(1, int(radius))
    k = 2 * r + 1
    return cv2.blur(img, (k, k))
def prim_median_blur(img, radius):
    """Median blur (good for noise removal)."""
    r = max(1, int(radius))
    # medianBlur requires an odd aperture size.
    return cv2.medianBlur(img, 2 * r + 1)
def prim_bilateral(img, d=9, sigma_color=75, sigma_space=75):
    """Bilateral filter: blurs flat regions while preserving edges."""
    smoothed = cv2.bilateralFilter(img, d, sigma_color, sigma_space)
    return smoothed
def prim_sharpen(img, amount=1.0):
    """Sharpen image via unsharp masking."""
    softened = cv2.GaussianBlur(img, (0, 0), 3)
    # Boost the original and subtract the blurred copy.
    return cv2.addWeighted(img, 1.0 + amount, softened, -amount, 0)
def prim_edges(img, low=50, high=150):
    """Canny edge detection, returned as an RGB image."""
    mono = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    detected = cv2.Canny(mono, low, high)
    return cv2.cvtColor(detected, cv2.COLOR_GRAY2RGB)
def prim_sobel(img, ksize=3):
    """Sobel gradient-magnitude edge detection, returned as RGB."""
    mono = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    gx = cv2.Sobel(mono, cv2.CV_64F, 1, 0, ksize=ksize)
    gy = cv2.Sobel(mono, cv2.CV_64F, 0, 1, ksize=ksize)
    magnitude = np.sqrt(gx ** 2 + gy ** 2)
    magnitude = np.clip(magnitude, 0, 255).astype(np.uint8)
    return cv2.cvtColor(magnitude, cv2.COLOR_GRAY2RGB)
def prim_laplacian(img, ksize=3):
    """Laplacian edge detection, returned as RGB."""
    mono = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    response = np.abs(cv2.Laplacian(mono, cv2.CV_64F, ksize=ksize))
    response = np.clip(response, 0, 255).astype(np.uint8)
    return cv2.cvtColor(response, cv2.COLOR_GRAY2RGB)
def prim_emboss(img):
    """Emboss effect.

    Bug fix: ``filter2D`` returns uint8 here, so the old ``result + 128``
    wrapped around (e.g. 200 + 128 -> 72) before ``np.clip`` ever ran.
    The brightness offset is now applied in int16 and only then clipped
    and converted back to uint8, so it saturates instead of wrapping.
    """
    kernel = np.array([[-2, -1, 0],
                       [-1, 1, 1],
                       [ 0, 1, 2]])
    result = cv2.filter2D(img, -1, kernel)
    shifted = result.astype(np.int16) + 128
    return np.clip(shifted, 0, 255).astype(np.uint8)
def prim_dilate(img, size=1):
    """Morphological dilation with a square structuring element."""
    side = size * 2 + 1
    element = np.ones((side, side), np.uint8)
    return cv2.dilate(img, element)
def prim_erode(img, size=1):
    """Morphological erosion with a square structuring element."""
    side = size * 2 + 1
    element = np.ones((side, side), np.uint8)
    return cv2.erode(img, element)
def prim_convolve(img, kernel):
    """Apply a custom convolution kernel (list of lists of weights)."""
    weights = np.array(kernel, dtype=np.float32)
    return cv2.filter2D(img, -1, weights)
# Registry of filter primitives, keyed by their s-expression symbol names.
PRIMITIVES = {
    # Blur
    'blur': prim_blur,
    'box-blur': prim_box_blur,
    'median-blur': prim_median_blur,
    'bilateral': prim_bilateral,
    # Sharpen
    'sharpen': prim_sharpen,
    # Edges
    'edges': prim_edges,
    'sobel': prim_sobel,
    'laplacian': prim_laplacian,
    # Effects
    'emboss': prim_emboss,
    # Morphology
    'dilate': prim_dilate,
    'erode': prim_erode,
    # Custom
    'convolve': prim_convolve,
}

View File

@@ -0,0 +1,143 @@
"""
Geometry Primitives Library
Geometric transforms: rotate, scale, flip, translate, remap.
"""
import numpy as np
import cv2
def prim_translate(img, dx, dy):
    """Translate image by (dx, dy) pixels; vacated area is filled black."""
    rows, cols = img.shape[:2]
    shift = np.float32([[1, 0, dx], [0, 1, dy]])
    return cv2.warpAffine(img, shift, (cols, rows))
def prim_rotate(img, angle, cx=None, cy=None):
    """Rotate image by angle degrees around (cx, cy), default image center."""
    rows, cols = img.shape[:2]
    center_x = cols / 2 if cx is None else cx
    center_y = rows / 2 if cy is None else cy
    rotation = cv2.getRotationMatrix2D((center_x, center_y), angle, 1.0)
    return cv2.warpAffine(img, rotation, (cols, rows))
def prim_scale(img, sx, sy, cx=None, cy=None):
    """Scale image by (sx, sy) about (cx, cy), default image center."""
    rows, cols = img.shape[:2]
    px = cols / 2 if cx is None else cx
    py = rows / 2 if cy is None else cy
    # Affine matrix for scaling about (px, py): the translation terms move
    # the pivot to the origin, scale, then move it back.
    transform = np.float32([
        [sx, 0, px * (1 - sx)],
        [0, sy, py * (1 - sy)]
    ])
    return cv2.warpAffine(img, transform, (cols, rows))
def prim_flip_h(img):
    """Flip image horizontally (mirror around the vertical axis)."""
    return cv2.flip(img, 1)
def prim_flip_v(img):
    """Flip image vertically (mirror around the horizontal axis)."""
    return cv2.flip(img, 0)
def prim_flip(img, direction="horizontal"):
    """Flip image: 'horizontal'/'h', 'vertical'/'v', or 'both'/'hv'/'vh'.

    Unrecognized directions return the image unchanged.
    """
    if direction in ("horizontal", "h"):
        return prim_flip_h(img)
    if direction in ("vertical", "v"):
        return prim_flip_v(img)
    if direction in ("both", "hv", "vh"):
        # Flip code -1 flips around both axes at once.
        return cv2.flip(img, -1)
    return img
def prim_transpose(img):
    """Transpose image: swap the x and y axes, keeping channels last."""
    return np.swapaxes(img, 0, 1)
def prim_remap(img, map_x, map_y):
    """Remap pixels using per-pixel coordinate maps (bilinear sampling)."""
    mx = map_x.astype(np.float32)
    my = map_y.astype(np.float32)
    return cv2.remap(img, mx, my, cv2.INTER_LINEAR)
def prim_make_coords(w, h):
    """Create (map_x, map_y) float32 coordinate grids of shape (h, w)."""
    map_x, map_y = np.meshgrid(
        np.arange(w, dtype=np.float32),
        np.arange(h, dtype=np.float32),
    )
    return (map_x, map_y)
def prim_perspective(img, src_pts, dst_pts):
    """Apply a perspective warp mapping 4 src points to 4 dst points."""
    matrix = cv2.getPerspectiveTransform(np.float32(src_pts), np.float32(dst_pts))
    rows, cols = img.shape[:2]
    return cv2.warpPerspective(img, matrix, (cols, rows))
def prim_affine(img, src_pts, dst_pts):
    """Apply an affine warp defined by 3 src/dst point pairs."""
    matrix = cv2.getAffineTransform(np.float32(src_pts), np.float32(dst_pts))
    rows, cols = img.shape[:2]
    return cv2.warpAffine(img, matrix, (cols, rows))
def _get_legacy_geometry_primitives():
    """Import displacement/coordinate primitives from the legacy module.

    The import is done inside the function rather than at module top —
    presumably to avoid a circular import with sexp_effects.primitives;
    confirm before hoisting it.  Called once at module import time when
    building PRIMITIVES below.
    """
    from sexp_effects.primitives import (
        prim_coords_x,
        prim_coords_y,
        prim_ripple_displace,
        prim_fisheye_displace,
        prim_kaleidoscope_displace,
    )
    return {
        'coords-x': prim_coords_x,
        'coords-y': prim_coords_y,
        'ripple-displace': prim_ripple_displace,
        'fisheye-displace': prim_fisheye_displace,
        'kaleidoscope-displace': prim_kaleidoscope_displace,
    }
# Registry of geometry primitives, keyed by their s-expression symbol names.
# NOTE: the ** merge below runs _get_legacy_geometry_primitives() — and thus
# the sexp_effects.primitives import — at module import time.
PRIMITIVES = {
    # Basic transforms
    'translate': prim_translate,
    'rotate-img': prim_rotate,
    'scale-img': prim_scale,
    # Flips
    'flip-h': prim_flip_h,
    'flip-v': prim_flip_v,
    'flip': prim_flip,
    'transpose': prim_transpose,
    # Remapping
    'remap': prim_remap,
    'make-coords': prim_make_coords,
    # Advanced transforms
    'perspective': prim_perspective,
    'affine': prim_affine,
    # Displace / coordinate ops (from legacy primitives)
    **_get_legacy_geometry_primitives(),
}

View File

@@ -0,0 +1,144 @@
"""
Image Primitives Library
Basic image operations: dimensions, pixels, resize, crop, paste.
"""
import numpy as np
import cv2
def prim_width(img):
    """Return image width in pixels."""
    _, cols = img.shape[:2]
    return cols
def prim_height(img):
    """Return image height in pixels."""
    rows, _ = img.shape[:2]
    return rows
def prim_make_image(w, h, color=None):
    """Create a new h x w RGB uint8 image filled with color (default black)."""
    fill = [0, 0, 0] if color is None else color
    return np.full((h, w, 3), fill, dtype=np.uint8)
def prim_copy(img):
    """Return an independent copy of the image buffer."""
    return np.array(img, copy=True)
def prim_pixel(img, x, y):
    """Get pixel color at (x, y) as [r, g, b]; out-of-bounds returns black."""
    h, w = img.shape[:2]
    out_of_bounds = not (0 <= x < w) or not (0 <= y < h)
    if out_of_bounds:
        return [0, 0, 0]
    return list(img[int(y), int(x)])
def prim_set_pixel(img, x, y, color):
    """Return a copy of img with pixel (x, y) set to color.

    Out-of-bounds coordinates are ignored (the copy is returned unchanged).
    """
    out = img.copy()
    h, w = out.shape[:2]
    if (0 <= x < w) and (0 <= y < h):
        out[int(y), int(x)] = color
    return out
def prim_sample(img, x, y):
    """Bilinear sample at float coordinates; returns [r, g, b] as floats."""
    h, w = img.shape[:2]
    # Clamp just inside the image so the +1 neighbor indices stay valid.
    x = max(0, min(w - 1.001, x))
    y = max(0, min(h - 1.001, y))
    xa, ya = int(x), int(y)
    xb, yb = min(xa + 1, w - 1), min(ya + 1, h - 1)
    wx, wy = x - xa, y - ya
    # Interpolate horizontally on both rows, then vertically between them.
    row_top = img[ya, xa].astype(float) * (1 - wx) + img[ya, xb].astype(float) * wx
    row_bot = img[yb, xa].astype(float) * (1 - wx) + img[yb, xb].astype(float) * wx
    return list(row_top * (1 - wy) + row_bot * wy)
def prim_channel(img, c):
    """Extract a single channel (0=R, 1=G, 2=B) as a 2-D view."""
    return img[..., c]
def prim_merge_channels(r, g, b):
    """Merge three 2-D single-channel arrays into an RGB uint8 image."""
    return np.dstack((r, g, b)).astype(np.uint8)
def prim_resize(img, w, h, mode="linear"):
    """Resize image to w x h using the named interpolation mode.

    Recognized modes: "nearest", "cubic", "area"; anything else (including
    the default "linear") uses bilinear interpolation.
    """
    mode_table = {
        "nearest": cv2.INTER_NEAREST,
        "cubic": cv2.INTER_CUBIC,
        "area": cv2.INTER_AREA,
    }
    interp = mode_table.get(mode, cv2.INTER_LINEAR)
    return cv2.resize(img, (int(w), int(h)), interpolation=interp)
def prim_crop(img, x, y, w, h):
    """Crop a rectangle from the image, clamped to the image bounds."""
    x, y, w, h = int(x), int(y), int(w), int(h)
    ih, iw = img.shape[:2]
    # Keep the origin inside the image, then shrink w/h to fit.
    x = min(max(x, 0), iw - 1)
    y = min(max(y, 0), ih - 1)
    w = min(w, iw - x)
    h = min(h, ih - y)
    return img[y:y + h, x:x + w].copy()
def prim_paste(dst, src, x, y):
    """Paste src onto a copy of dst at (x, y); out-of-bounds parts are clipped."""
    out = dst.copy()
    x, y = int(x), int(y)
    src_h, src_w = src.shape[:2]
    dst_h, dst_w = dst.shape[:2]
    # Source-side clip handles negative offsets; destination-side clip
    # handles pastes that run off the right/bottom edges.
    src_x0, src_y0 = max(0, -x), max(0, -y)
    dst_x0, dst_y0 = max(0, x), max(0, y)
    src_x1 = min(src_w, dst_w - x)
    src_y1 = min(src_h, dst_h - y)
    if src_x1 > src_x0 and src_y1 > src_y0:
        rows = src_y1 - src_y0
        cols = src_x1 - src_x0
        out[dst_y0:dst_y0 + rows, dst_x0:dst_x0 + cols] = src[src_y0:src_y1, src_x0:src_x1]
    return out
# Registry of basic image primitives, keyed by their s-expression symbol names.
PRIMITIVES = {
    # Dimensions
    'width': prim_width,
    'height': prim_height,
    # Creation
    'make-image': prim_make_image,
    'copy': prim_copy,
    # Pixel access
    'pixel': prim_pixel,
    'set-pixel': prim_set_pixel,
    'sample': prim_sample,
    # Channels
    'channel': prim_channel,
    'merge-channels': prim_merge_channels,
    # Geometry
    'resize': prim_resize,
    'crop': prim_crop,
    'paste': prim_paste,
}

View File

@@ -0,0 +1,164 @@
"""
Math Primitives Library
Trigonometry, rounding, clamping, random numbers, etc.
"""
import math
import random as rand_module
def prim_sin(x):
    """Sine of x (radians)."""
    return math.sin(x)
def prim_cos(x):
    """Cosine of x (radians)."""
    return math.cos(x)
def prim_tan(x):
    """Tangent of x (radians)."""
    return math.tan(x)
def prim_asin(x):
    """Arcsine in radians; raises ValueError for x outside [-1, 1]."""
    return math.asin(x)
def prim_acos(x):
    """Arccosine in radians; raises ValueError for x outside [-1, 1]."""
    return math.acos(x)
def prim_atan(x):
    """Arctangent in radians."""
    return math.atan(x)
def prim_atan2(y, x):
    """Two-argument arctangent of y/x, in radians, correct in all quadrants."""
    return math.atan2(y, x)
def prim_sqrt(x):
    """Square root; raises ValueError for negative x."""
    return math.sqrt(x)
def prim_pow(x, y):
    """x raised to the power y (always a float result)."""
    return math.pow(x, y)
def prim_exp(x):
    """e raised to the power x."""
    return math.exp(x)
def prim_log(x, base=None):
    """Natural logarithm of x, or log in the given base when supplied."""
    if base is None:
        return math.log(x)
    return math.log(x, base)
def prim_abs(x):
    """Absolute value of x."""
    return abs(x)
def prim_floor(x):
    """Largest integer <= x."""
    return math.floor(x)
def prim_ceil(x):
    """Smallest integer >= x."""
    return math.ceil(x)
def prim_round(x):
    """Round to nearest integer (Python banker's rounding on .5 ties)."""
    return round(x)
def prim_min(*args):
    """Minimum of the arguments, or of a single iterable argument."""
    single_iterable = len(args) == 1 and hasattr(args[0], '__iter__')
    values = args[0] if single_iterable else args
    return min(values)
def prim_max(*args):
    """Maximum of the arguments, or of a single iterable argument."""
    single_iterable = len(args) == 1 and hasattr(args[0], '__iter__')
    values = args[0] if single_iterable else args
    return max(values)
def prim_clamp(x, lo, hi):
    """Clamp x into [lo, hi] (upper bound applied first, matching max(lo, min(hi, x)))."""
    bounded = x if x < hi else hi
    return bounded if bounded > lo else lo
def prim_lerp(a, b, t):
    """Linear interpolation: a + (b - a) * t"""
    span = b - a
    return a + span * t
def prim_smoothstep(edge0, edge1, x):
    """Hermite smooth interpolation of x between edge0 and edge1 (result 0..1).

    Bug fix: when edge0 == edge1 the GLSL formula divides by zero; instead
    of raising ZeroDivisionError we degrade to a hard step at the edge.
    """
    if edge0 == edge1:
        return 0.0 if x < edge0 else 1.0
    t = (x - edge0) / (edge1 - edge0)
    t = max(0.0, min(1.0, t))
    return t * t * (3 - 2 * t)
def prim_random(lo=0.0, hi=1.0):
    """Uniform random float in [lo, hi]."""
    return rand_module.uniform(lo, hi)
def prim_randint(lo, hi):
    """Uniform random integer in [lo, hi], both endpoints inclusive."""
    return rand_module.randint(lo, hi)
def prim_gaussian(mean=0.0, std=1.0):
    """Normally distributed random float with the given mean and std dev."""
    return rand_module.gauss(mean, std)
def prim_sign(x):
    """Sign of x: 1 for positive, -1 for negative, 0 otherwise."""
    return (x > 0) - (x < 0)
def prim_fract(x):
    """Fractional part of x, always in [0, 1) — including for negatives."""
    return x % 1
# Registry of math primitives, keyed by their s-expression symbol names.
# Note: 'pi', 'tau' and 'e' map to constant values, not callables.
PRIMITIVES = {
    # Trigonometry
    'sin': prim_sin,
    'cos': prim_cos,
    'tan': prim_tan,
    'asin': prim_asin,
    'acos': prim_acos,
    'atan': prim_atan,
    'atan2': prim_atan2,
    # Powers and roots
    'sqrt': prim_sqrt,
    'pow': prim_pow,
    'exp': prim_exp,
    'log': prim_log,
    # Rounding
    'abs': prim_abs,
    'floor': prim_floor,
    'ceil': prim_ceil,
    'round': prim_round,
    'sign': prim_sign,
    'fract': prim_fract,
    # Min/max/clamp
    'min': prim_min,
    'max': prim_max,
    'clamp': prim_clamp,
    'lerp': prim_lerp,
    'smoothstep': prim_smoothstep,
    # Random
    'random': prim_random,
    'randint': prim_randint,
    'gaussian': prim_gaussian,
    # Constants
    'pi': math.pi,
    'tau': math.tau,
    'e': math.e,
}

View File

@@ -0,0 +1,304 @@
"""
Streaming primitives for video/audio processing.
These primitives handle video source reading and audio analysis,
keeping the interpreter completely generic.
"""
import numpy as np
import subprocess
import json
from pathlib import Path
class VideoSource:
    """Video source with persistent streaming pipe for fast sequential reads.

    Holds one long-lived ffmpeg rawvideo pipe and advances through it frame
    by frame; only re-seeks (restarting ffmpeg) on backward jumps or jumps
    more than ~2s ahead.  Requires ffmpeg/ffprobe on PATH.  Not thread-safe:
    read_at() mutates the pipe position and the single-frame cache.
    """
    def __init__(self, path: str, fps: float = 30):
        self.path = Path(path)
        self.fps = fps  # Output fps for the stream
        self._frame_size = None    # (width, height) from ffprobe
        self._duration = None      # seconds, or None if ffprobe gave none
        self._proc = None  # Persistent ffmpeg process
        self._stream_time = 0.0  # Current position in stream
        self._frame_time = 1.0 / fps  # Time per frame at output fps
        self._last_read_time = -1  # t of the cached frame below
        self._cached_frame = None
        # Get video info
        cmd = ["ffprobe", "-v", "quiet", "-print_format", "json",
               "-show_streams", str(self.path)]
        result = subprocess.run(cmd, capture_output=True, text=True)
        info = json.loads(result.stdout)
        for stream in info.get("streams", []):
            if stream.get("codec_type") == "video":
                self._frame_size = (stream.get("width", 720), stream.get("height", 720))
                # Try direct duration field first
                if "duration" in stream:
                    self._duration = float(stream["duration"])
                # Fall back to tags.DURATION (webm format: "00:01:00.124000000")
                elif "tags" in stream and "DURATION" in stream["tags"]:
                    dur_str = stream["tags"]["DURATION"]
                    parts = dur_str.split(":")
                    if len(parts) == 3:
                        h, m, s = parts
                        self._duration = int(h) * 3600 + int(m) * 60 + float(s)
                break
        if not self._frame_size:
            # ffprobe found no video stream; fall back to a default size.
            self._frame_size = (720, 720)
    def _start_stream(self, seek_time: float = 0):
        """Start or restart the ffmpeg streaming process at seek_time."""
        if self._proc:
            self._proc.kill()
            self._proc = None
        w, h = self._frame_size
        # Raw RGB24 frames on stdout, resampled to the requested output fps.
        cmd = [
            "ffmpeg", "-v", "quiet",
            "-ss", f"{seek_time:.3f}",
            "-i", str(self.path),
            "-f", "rawvideo", "-pix_fmt", "rgb24",
            "-s", f"{w}x{h}",
            "-r", str(self.fps),  # Output at specified fps
            "-"
        ]
        self._proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
        self._stream_time = seek_time
    def _read_frame_from_stream(self) -> np.ndarray:
        """Read one raw frame from the pipe; None on EOF or dead process."""
        w, h = self._frame_size
        frame_size = w * h * 3  # bytes per RGB24 frame
        if not self._proc or self._proc.poll() is not None:
            return None
        data = self._proc.stdout.read(frame_size)
        if len(data) < frame_size:
            return None
        # .copy() detaches the array from the read buffer.
        return np.frombuffer(data, dtype=np.uint8).reshape((h, w, 3)).copy()
    def read(self) -> np.ndarray:
        """Read frame (uses last cached or t=0)."""
        if self._cached_frame is not None:
            return self._cached_frame
        return self.read_at(0)
    def read_at(self, t: float) -> np.ndarray:
        """Read frame at specific time using streaming with smart seeking."""
        # Cache check - return same frame for same time
        if t == self._last_read_time and self._cached_frame is not None:
            return self._cached_frame
        w, h = self._frame_size
        # Loop time if video is shorter
        seek_time = t
        if self._duration and self._duration > 0:
            seek_time = t % self._duration
        # Decide whether to seek or continue streaming
        # Seek if: no stream, going backwards (more than 1 frame), or jumping more than 2 seconds ahead
        # Allow small backward tolerance to handle floating point and timing jitter
        need_seek = (
            self._proc is None or
            self._proc.poll() is not None or
            seek_time < self._stream_time - self._frame_time or  # More than 1 frame backward
            seek_time > self._stream_time + 2.0
        )
        if need_seek:
            # NOTE(review): leftover debug logging to stderr on every seek —
            # consider routing through the logging module or removing.
            import sys
            reason = "no proc" if self._proc is None else "proc dead" if self._proc.poll() is not None else "backward" if seek_time < self._stream_time else "jump"
            print(f"SEEK {self.path.name}: t={t:.4f} seek={seek_time:.4f} stream={self._stream_time:.4f} ({reason})", file=sys.stderr)
            self._start_stream(seek_time)
        # Skip frames to reach target time
        while self._stream_time + self._frame_time <= seek_time:
            frame = self._read_frame_from_stream()
            if frame is None:
                # Stream ended, restart from seek point
                self._start_stream(seek_time)
                break
            self._stream_time += self._frame_time
        # Read the target frame
        frame = self._read_frame_from_stream()
        if frame is None:
            # NOTE(review): debug logging; also falls back to a black frame.
            import sys
            print(f"NULL FRAME {self.path.name}: t={t:.2f} seek={seek_time:.2f}", file=sys.stderr)
            frame = np.zeros((h, w, 3), dtype=np.uint8)
        else:
            self._stream_time += self._frame_time
        self._last_read_time = t
        self._cached_frame = frame
        return frame
    def skip(self):
        """No-op for seek-based reading."""
        pass
    @property
    def size(self):
        # (width, height) tuple as reported by ffprobe (or the 720x720 default).
        return self._frame_size
    def close(self):
        # Terminate the ffmpeg child process, if any.
        if self._proc:
            self._proc.kill()
            self._proc = None
class AudioAnalyzer:
    """Audio analyzer for energy and beat detection.

    Decodes the whole file to mono float32 PCM via ffmpeg up front, then
    answers energy/beat queries by time.  Beat detection keeps running
    state (spectral-flux history, beat counter), so queries are expected
    to arrive with non-decreasing t; results for a given t are cached so
    multiple effects sampling the same frame agree.
    """
    def __init__(self, path: str, sample_rate: int = 22050):
        self.path = Path(path)
        self.sample_rate = sample_rate
        # Load audio via ffmpeg (mono, float32 little-endian at sample_rate)
        cmd = ["ffmpeg", "-v", "quiet", "-i", str(self.path),
               "-f", "f32le", "-ac", "1", "-ar", str(sample_rate), "-"]
        result = subprocess.run(cmd, capture_output=True)
        self._audio = np.frombuffer(result.stdout, dtype=np.float32)
        # Get duration (defaults to 60s when ffprobe reports none)
        cmd = ["ffprobe", "-v", "quiet", "-print_format", "json",
               "-show_format", str(self.path)]
        info = json.loads(subprocess.run(cmd, capture_output=True, text=True).stdout)
        self.duration = float(info.get("format", {}).get("duration", 60))
        # Beat detection state
        self._flux_history = []       # recent (t, flux) pairs
        self._last_beat_time = -1     # debounce: min 0.1s between beats
        self._beat_count = 0
        self._last_beat_check_time = -1
        # Cache beat result for current time (so multiple scans see same result)
        self._beat_cache_time = -1
        self._beat_cache_result = False
    def get_energy(self, t: float) -> float:
        """Get energy level at time t (0-1): RMS over a ~1024-sample window."""
        idx = int(t * self.sample_rate)
        start = max(0, idx - 512)
        end = min(len(self._audio), idx + 512)
        if start >= end:
            return 0.0
        # The *3.0 gain is an empirical normalization, capped at 1.0.
        return min(1.0, np.sqrt(np.mean(self._audio[start:end] ** 2)) * 3.0)
    def get_beat(self, t: float) -> bool:
        """Check if there's a beat at time t (spectral-flux onset detection)."""
        # Return cached result if same time (multiple scans query same frame)
        if t == self._beat_cache_time:
            return self._beat_cache_result
        idx = int(t * self.sample_rate)
        size = 2048
        start, end = max(0, idx - size//2), min(len(self._audio), idx + size//2)
        if end - start < size/2:
            # Too close to the clip edges for a full analysis window.
            self._beat_cache_time = t
            self._beat_cache_result = False
            return False
        curr = self._audio[start:end]
        # Previous window, shifted back 512 samples, for the flux difference.
        pstart, pend = max(0, start - 512), max(0, end - 512)
        if pend <= pstart:
            self._beat_cache_time = t
            self._beat_cache_result = False
            return False
        prev = self._audio[pstart:pend]
        # Spectral flux: positive magnitude increase between Hann-windowed FFTs.
        curr_spec = np.abs(np.fft.rfft(curr * np.hanning(len(curr))))
        prev_spec = np.abs(np.fft.rfft(prev * np.hanning(len(prev))))
        n = min(len(curr_spec), len(prev_spec))
        flux = np.sum(np.maximum(0, curr_spec[:n] - prev_spec[:n])) / (n + 1)
        self._flux_history.append((t, flux))
        if len(self._flux_history) > 50:
            self._flux_history = self._flux_history[-50:]
        if len(self._flux_history) < 5:
            # Not enough history for a meaningful threshold yet.
            self._beat_cache_time = t
            self._beat_cache_result = False
            return False
        recent = [f for _, f in self._flux_history[-20:]]
        # Adaptive threshold: mean + 1.5 sigma of recent flux values.
        threshold = np.mean(recent) + 1.5 * np.std(recent)
        is_beat = flux > threshold and (t - self._last_beat_time) > 0.1
        if is_beat:
            self._last_beat_time = t
            # Only count each distinct (advancing) time once.
            if t > self._last_beat_check_time:
                self._beat_count += 1
                self._last_beat_check_time = t
        # Cache result for this time
        self._beat_cache_time = t
        self._beat_cache_result = is_beat
        return is_beat
    def get_beat_count(self, t: float) -> int:
        """Get cumulative beat count up to time t."""
        # Ensure beat detection has run up to this time
        self.get_beat(t)
        return self._beat_count
# === Primitives ===
def prim_make_video_source(path: str, fps: float = 30):
    """Create a VideoSource from a file path; fps sets the output frame rate."""
    return VideoSource(path, fps)
def prim_source_read(source: VideoSource, t: float = None):
    """Read a frame from a video source.

    When *t* is given, reads the frame at that time (with the source's
    loop/seek logic); otherwise returns the last cached frame or t=0.

    Fix: removed leftover per-frame debug printing to stderr.  Its guard,
    ``int(t * 10) % 10 == 0``, did not match its "every second" intent —
    it fired for every t in [k, k+0.1), i.e. several frames per second at
    30 fps, spamming stderr on every playback.
    """
    if t is not None:
        return source.read_at(t)
    return source.read()
# Thin wrappers exposing VideoSource / AudioAnalyzer methods as primitives.
def prim_source_skip(source: VideoSource):
    """Skip a frame (keep pipe in sync). Currently a no-op for seek-based reads."""
    source.skip()
def prim_source_size(source: VideoSource):
    """Get (width, height) of source."""
    return source.size
def prim_make_audio_analyzer(path: str):
    """Create an audio analyzer from a file path (decodes the whole file)."""
    return AudioAnalyzer(path)
def prim_audio_energy(analyzer: AudioAnalyzer, t: float) -> float:
    """Get energy level (0-1) at time t."""
    return analyzer.get_energy(t)
def prim_audio_beat(analyzer: AudioAnalyzer, t: float) -> bool:
    """Check if there's a beat at time t (stateful; query with increasing t)."""
    return analyzer.get_beat(t)
def prim_audio_beat_count(analyzer: AudioAnalyzer, t: float) -> int:
    """Get cumulative beat count up to time t."""
    return analyzer.get_beat_count(t)
def prim_audio_duration(analyzer: AudioAnalyzer) -> float:
    """Get audio duration in seconds."""
    return analyzer.duration

3043
sexp_effects/primitives.py Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,236 @@
#!/usr/bin/env python3
"""
Test the S-expression effect interpreter.
"""
import numpy as np
import sys
from pathlib import Path
# Add parent to path
sys.path.insert(0, str(Path(__file__).parent.parent))
from sexp_effects import (
get_interpreter,
load_effects_dir,
run_effect,
list_effects,
parse,
)
def test_parser():
    """Test S-expression parser."""
    print("Testing parser...")
    # Atoms parse to native Python values.
    atom_cases = (
        ("42", 42),
        ("3.14", 3.14),
        ('"hello"', "hello"),
        ("true", True),
    )
    for source, expected in atom_cases:
        assert parse(source) == expected
    # Lists: head is a symbol object, tail elements are values.
    call = parse("(+ 1 2)")
    assert call[0].name == "+"
    assert call[1] == 1
    # Nested forms.
    nested = parse("(define x (+ 1 2))")
    assert nested[0].name == "define"
    print(" Parser OK")
def test_interpreter_basics():
    """Test basic interpreter operations: arithmetic, comparison, let, lambda, if."""
    print("Testing interpreter basics...")
    interp = get_interpreter()
    # Math
    assert interp.eval(parse("(+ 1 2)")) == 3
    assert interp.eval(parse("(* 3 4)")) == 12
    assert interp.eval(parse("(- 10 3)")) == 7
    # Comparison
    assert interp.eval(parse("(< 1 2)")) == True
    assert interp.eval(parse("(> 1 2)")) == False
    # Let binding
    assert interp.eval(parse("(let ((x 5)) x)")) == 5
    assert interp.eval(parse("(let ((x 5) (y 3)) (+ x y))")) == 8
    # Lambda
    result = interp.eval(parse("((lambda (x) (* x 2)) 5)"))
    assert result == 10
    # If
    assert interp.eval(parse("(if true 1 2)")) == 1
    assert interp.eval(parse("(if false 1 2)")) == 2
    print(" Interpreter basics OK")
def test_primitives():
    """Test image primitives through the interpreter's global environment."""
    print("Testing primitives...")
    interp = get_interpreter()
    # Create test image with a single known non-black pixel
    img = np.zeros((100, 100, 3), dtype=np.uint8)
    img[50, 50] = [255, 128, 64]
    interp.global_env.set('test_img', img)
    # Width/height
    assert interp.eval(parse("(width test_img)")) == 100
    assert interp.eval(parse("(height test_img)")) == 100
    # Pixel
    pixel = interp.eval(parse("(pixel test_img 50 50)"))
    assert pixel == [255, 128, 64]
    # RGB
    color = interp.eval(parse("(rgb 100 150 200)"))
    assert color == [100, 150, 200]
    # Luminance of a gray color should be (approximately) its gray level
    lum = interp.eval(parse("(luminance (rgb 100 100 100))"))
    assert abs(lum - 100) < 1
    print(" Primitives OK")
def test_effect_loading():
    """Test loading effects from .sexp files in the sibling effects/ directory."""
    print("Testing effect loading...")
    # Load all effects
    effects_dir = Path(__file__).parent / "effects"
    load_effects_dir(str(effects_dir))
    effects = list_effects()
    print(f" Loaded {len(effects)} effects: {', '.join(sorted(effects))}")
    assert len(effects) > 0
    print(" Effect loading OK")
def test_effect_execution():
    """Run every loaded effect on a random frame; returns (passed, failed).

    Unlike the other tests this one does not assert on failure — it collects
    per-effect failures and reports them so main() can summarize.
    """
    print("Testing effect execution...")
    # Create test image
    img = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
    # Load effects
    effects_dir = Path(__file__).parent / "effects"
    load_effects_dir(str(effects_dir))
    # Test each effect: must return an ndarray of the same shape
    effects = list_effects()
    passed = 0
    failed = []
    for name in sorted(effects):
        try:
            result, state = run_effect(name, img.copy(), {'_time': 0.5}, {})
            assert isinstance(result, np.ndarray)
            assert result.shape == img.shape
            passed += 1
            print(f" {name}: OK")
        except Exception as e:
            failed.append((name, str(e)))
            print(f" {name}: FAILED - {e}")
    print(f" Passed: {passed}/{len(effects)}")
    if failed:
        print(f" Failed: {[f[0] for f in failed]}")
    return passed, failed
def test_ascii_fx_zone():
    """Test the ascii_fx_zone effect with zone expressions and numeric params."""
    print("Testing ascii_fx_zone...")
    interp = get_interpreter()
    # Load the effect
    effects_dir = Path(__file__).parent / "effects"
    load_effects_dir(str(effects_dir))
    # Create gradient test frame (left-to-right luminance ramp)
    frame = np.zeros((120, 160, 3), dtype=np.uint8)
    for x in range(160):
        frame[:, x] = int(x / 160 * 255)
    frame = np.stack([frame[:,:,0]]*3, axis=2)
    # Test 1: Basic without expressions
    result, _ = run_effect('ascii_fx_zone', frame, {'cols': 20}, {})
    assert result.shape == frame.shape
    print(" Basic run: OK")
    # Test 2: With zone-lum expression (parsed sexp passed as a parameter)
    expr = parse('(* zone-lum 180)')
    result, _ = run_effect('ascii_fx_zone', frame, {
        'cols': 20,
        'char_hue': expr
    }, {})
    assert result.shape == frame.shape
    print(" Zone-lum expression: OK")
    # Test 3: With multiple expressions
    scale_expr = parse('(+ 0.5 (* zone-lum 0.5))')
    rot_expr = parse('(* zone-row-norm 30)')
    result, _ = run_effect('ascii_fx_zone', frame, {
        'cols': 20,
        'char_scale': scale_expr,
        'char_rotation': rot_expr
    }, {})
    assert result.shape == frame.shape
    print(" Multiple expressions: OK")
    # Test 4: With numeric literals instead of expressions
    result, _ = run_effect('ascii_fx_zone', frame, {
        'cols': 20,
        'char_hue': 90,
        'char_scale': 1.2
    }, {})
    assert result.shape == frame.shape
    print(" Numeric literals: OK")
    # Test 5: Zone position expressions
    col_expr = parse('(* zone-col-norm 360)')
    result, _ = run_effect('ascii_fx_zone', frame, {
        'cols': 20,
        'char_hue': col_expr
    }, {})
    assert result.shape == frame.shape
    print(" Zone position expression: OK")
    print(" ascii_fx_zone OK")
def main():
    """Run the full interpreter test suite and print a summary."""
    banner = "=" * 60
    print(banner)
    print("S-Expression Effect Interpreter Tests")
    print(banner)
    # Hard-assert suites first, then the tolerant per-effect run.
    for suite in (test_parser, test_interpreter_basics, test_primitives,
                  test_effect_loading, test_ascii_fx_zone):
        suite()
    passed, failed = test_effect_execution()
    print(banner)
    if failed:
        print(f"Tests completed with {len(failed)} failures")
    else:
        print("All tests passed!")
    print(banner)
if __name__ == "__main__":
    main()

44
streaming/__init__.py Normal file
View File

@@ -0,0 +1,44 @@
"""
Streaming video compositor for real-time effect processing.
This module provides a frame-by-frame streaming architecture that:
- Reads from multiple video sources with automatic looping
- Applies effects inline (no intermediate files)
- Composites layers with time-varying weights
- Outputs to display, file, or stream
Usage:
from streaming import StreamingCompositor, VideoSource, AudioAnalyzer
compositor = StreamingCompositor(
sources=["video1.mp4", "video2.mp4"],
effects_per_source=[...],
compositor_config={...},
)
# With live audio
audio = AudioAnalyzer(device=0)
compositor.run(output="output.mp4", duration=60, audio=audio)
# With preview window
compositor.run(output="preview", duration=60)
Backends:
- numpy: Works everywhere, ~3-5 fps (default)
- glsl: Requires GPU, 30+ fps real-time (future)
"""
from .sources import VideoSource, ImageSource
from .compositor import StreamingCompositor
from .backends import NumpyBackend, get_backend
from .output import DisplayOutput, FileOutput
# Public API re-exported by `from streaming import *`.
__all__ = [
    "StreamingCompositor",
    "VideoSource",
    "ImageSource",
    "NumpyBackend",
    "get_backend",
    "DisplayOutput",
    "FileOutput",
]

486
streaming/audio.py Normal file
View File

@@ -0,0 +1,486 @@
"""
Live audio analysis for reactive effects.
Provides real-time audio features:
- Energy (RMS amplitude)
- Beat detection
- Frequency bands (bass, mid, high)
"""
import numpy as np
from typing import Optional
import threading
import time
class AudioAnalyzer:
"""
Real-time audio analyzer using sounddevice.
Captures audio from microphone/line-in and computes
features in real-time for effect parameter bindings.
Example:
analyzer = AudioAnalyzer(device=0)
analyzer.start()
# In compositor loop:
energy = analyzer.get_energy()
beat = analyzer.get_beat()
analyzer.stop()
"""
def __init__(
self,
device: int = None,
sample_rate: int = 44100,
block_size: int = 1024,
buffer_seconds: float = 0.5,
):
"""
Initialize audio analyzer.
Args:
device: Audio input device index (None = default)
sample_rate: Audio sample rate
block_size: Samples per block
buffer_seconds: Ring buffer duration
"""
self.sample_rate = sample_rate
self.block_size = block_size
self.device = device
# Ring buffer for recent audio
buffer_size = int(sample_rate * buffer_seconds)
self._buffer = np.zeros(buffer_size, dtype=np.float32)
self._buffer_pos = 0
self._lock = threading.Lock()
# Beat detection state
self._last_energy = 0
self._energy_history = []
self._last_beat_time = 0
self._beat_threshold = 1.5 # Energy ratio for beat detection
self._min_beat_interval = 0.1 # Min seconds between beats
# Stream state
self._stream = None
self._running = False
def _audio_callback(self, indata, frames, time_info, status):
"""Called by sounddevice for each audio block."""
with self._lock:
# Add to ring buffer
data = indata[:, 0] if len(indata.shape) > 1 else indata
n = len(data)
if self._buffer_pos + n <= len(self._buffer):
self._buffer[self._buffer_pos:self._buffer_pos + n] = data
else:
# Wrap around
first = len(self._buffer) - self._buffer_pos
self._buffer[self._buffer_pos:] = data[:first]
self._buffer[:n - first] = data[first:]
self._buffer_pos = (self._buffer_pos + n) % len(self._buffer)
def start(self):
"""Start audio capture."""
try:
import sounddevice as sd
except ImportError:
print("Warning: sounddevice not installed. Audio analysis disabled.")
print("Install with: pip install sounddevice")
return
self._stream = sd.InputStream(
device=self.device,
channels=1,
samplerate=self.sample_rate,
blocksize=self.block_size,
callback=self._audio_callback,
)
self._stream.start()
self._running = True
def stop(self):
"""Stop audio capture."""
if self._stream:
self._stream.stop()
self._stream.close()
self._stream = None
self._running = False
def get_energy(self) -> float:
"""
Get current audio energy (RMS amplitude).
Returns:
Energy value normalized to 0-1 range (approximately)
"""
with self._lock:
# Use recent samples
recent = 2048
if self._buffer_pos >= recent:
data = self._buffer[self._buffer_pos - recent:self._buffer_pos]
else:
data = np.concatenate([
self._buffer[-(recent - self._buffer_pos):],
self._buffer[:self._buffer_pos]
])
# RMS energy
rms = np.sqrt(np.mean(data ** 2))
# Normalize (typical mic input is quite low)
normalized = min(1.0, rms * 10)
return normalized
def get_beat(self) -> bool:
"""
Detect if current moment is a beat.
Simple onset detection based on energy spikes.
Returns:
True if beat detected, False otherwise
"""
current_energy = self.get_energy()
now = time.time()
# Update energy history
self._energy_history.append(current_energy)
if len(self._energy_history) > 20:
self._energy_history.pop(0)
# Need enough history
if len(self._energy_history) < 5:
self._last_energy = current_energy
return False
# Average recent energy
avg_energy = np.mean(self._energy_history[:-1])
# Beat if current energy is significantly above average
is_beat = (
current_energy > avg_energy * self._beat_threshold and
now - self._last_beat_time > self._min_beat_interval and
current_energy > self._last_energy # Rising edge
)
if is_beat:
self._last_beat_time = now
self._last_energy = current_energy
return is_beat
def get_spectrum(self, bands: int = 3) -> np.ndarray:
"""
Get frequency spectrum divided into bands.
Args:
bands: Number of frequency bands (default 3: bass, mid, high)
Returns:
Array of band energies, normalized to 0-1
"""
with self._lock:
# Use recent samples for FFT
n = 2048
if self._buffer_pos >= n:
data = self._buffer[self._buffer_pos - n:self._buffer_pos]
else:
data = np.concatenate([
self._buffer[-(n - self._buffer_pos):],
self._buffer[:self._buffer_pos]
])
# FFT
fft = np.abs(np.fft.rfft(data * np.hanning(len(data))))
# Divide into bands
band_size = len(fft) // bands
result = np.zeros(bands)
for i in range(bands):
start = i * band_size
end = start + band_size
result[i] = np.mean(fft[start:end])
# Normalize
max_val = np.max(result)
if max_val > 0:
result = result / max_val
return result
    @property
    def is_running(self) -> bool:
        # True after start() succeeds; cleared by stop().
        return self._running
    def __enter__(self):
        """Context-manager entry: start capture and return the analyzer."""
        self.start()
        return self
    def __exit__(self, *args):
        """Context-manager exit: always stop capture; exceptions propagate."""
        self.stop()
class FileAudioAnalyzer:
    """
    Audio analyzer that reads from a file (for testing/development).
    Pre-computes analysis and plays back in sync with video.

    Does no signal processing itself: it replays values from pre-computed
    tracks in ``analysis_data``, e.g.
    ``{"energy": {"times": [...], "values": [...]}, "beats": {"times": [...]}}``.
    Track times are expected to be sorted ascending.
    """
    def __init__(self, path: str, analysis_data: dict = None):
        """
        Initialize from audio file.
        Args:
            path: Path to audio file
            analysis_data: Pre-computed analysis (times, values, etc.)
        """
        self.path = path
        self.analysis_data = analysis_data or {}
        self._current_time = 0.0
    def set_time(self, t: float):
        """Set current playback time (seconds)."""
        self._current_time = t
    def get_energy(self) -> float:
        """Get energy at current time from pre-computed data."""
        track = self.analysis_data.get("energy", {})
        return self._interpolate(track, self._current_time)
    def get_beat(self) -> bool:
        """Check if current time is within 50ms of a pre-computed beat."""
        track = self.analysis_data.get("beats", {})
        times = track.get("times", [])
        return any(abs(beat_time - self._current_time) < 0.05 for beat_time in times)
    def _interpolate(self, track: dict, t: float) -> float:
        """
        Linearly interpolate a track's value at time t.

        Clamps to the first/last value outside the covered range and
        returns 0.0 for empty tracks. Uses an O(log n) bisect lookup
        instead of a linear scan since this runs once per frame.
        """
        from bisect import bisect_right
        times = track.get("times", [])
        values = track.get("values", [])
        if not times or not values:
            return 0.0
        # Clamp outside the covered time range.
        if t <= times[0]:
            return values[0]
        if t >= times[-1]:
            return values[-1]
        # Rightmost knot at or before t; t is strictly inside the range here.
        i = bisect_right(times, t) - 1
        alpha = (t - times[i]) / (times[i + 1] - times[i])
        return values[i] * (1 - alpha) + values[i + 1] * alpha
    @property
    def is_running(self) -> bool:
        # File-backed playback is always considered "running".
        return True
class StreamingAudioAnalyzer:
    """
    Real-time audio analyzer that streams from a file.
    Reads audio in sync with video time and computes features on-the-fly.
    No pre-computation needed - analysis happens as frames are processed.

    Requires `ffprobe` and `ffmpeg` on PATH. The full decoded mono PCM
    track is held in memory after the first feature query.
    """
    def __init__(self, path: str, sample_rate: int = 22050, hop_length: int = 512):
        """
        Initialize streaming audio analyzer.
        Args:
            path: Path to audio file
            sample_rate: Sample rate for analysis
            hop_length: Hop length for feature extraction

        Raises:
            KeyError/ValueError: if ffprobe output lacks a parseable duration.
        """
        import subprocess
        import json
        self.path = path
        self.sample_rate = sample_rate
        self.hop_length = hop_length
        self._current_time = 0.0
        # Get audio duration via ffprobe's JSON output
        cmd = ["ffprobe", "-v", "quiet", "-print_format", "json",
               "-show_format", str(path)]
        result = subprocess.run(cmd, capture_output=True, text=True)
        info = json.loads(result.stdout)
        self.duration = float(info["format"]["duration"])
        # Audio buffer and state
        self._audio_data = None
        # NOTE: here _energy_history holds (time, flux) tuples (see get_beat),
        # unlike the live analyzer where it holds raw energies.
        self._energy_history = []
        self._last_energy = 0
        self._last_beat_time = -1
        self._beat_threshold = 1.5
        self._min_beat_interval = 0.15
        # Load audio lazily
        self._loaded = False
    def _load_audio(self):
        """Load audio data on first use (decodes the whole file once)."""
        if self._loaded:
            return
        import subprocess
        # Use ffmpeg to decode audio to raw PCM
        cmd = [
            "ffmpeg", "-v", "quiet",
            "-i", str(self.path),
            "-f", "f32le",  # 32-bit float, little-endian
            "-ac", "1",  # mono
            "-ar", str(self.sample_rate),
            "-"
        ]
        result = subprocess.run(cmd, capture_output=True)
        self._audio_data = np.frombuffer(result.stdout, dtype=np.float32)
        self._loaded = True
    def set_time(self, t: float):
        """Set current playback time (seconds); all features are sampled here."""
        self._current_time = t
    def get_energy(self) -> float:
        """Compute RMS energy in a small window centered on the current time."""
        self._load_audio()
        if self._audio_data is None or len(self._audio_data) == 0:
            return 0.0
        # Get sample index for current time
        sample_idx = int(self._current_time * self.sample_rate)
        window_size = self.hop_length * 2
        start = max(0, sample_idx - window_size // 2)
        end = min(len(self._audio_data), sample_idx + window_size // 2)
        if start >= end:
            return 0.0
        # RMS energy
        chunk = self._audio_data[start:end]
        rms = np.sqrt(np.mean(chunk ** 2))
        # Normalize to 0-1 range (approximate)
        energy = min(1.0, rms * 3.0)
        self._last_energy = energy
        return energy
    def get_beat(self) -> bool:
        """Detect beat using spectral flux (change in frequency content)."""
        self._load_audio()
        if self._audio_data is None or len(self._audio_data) == 0:
            return False
        # Get audio chunks for current and previous frame
        sample_idx = int(self._current_time * self.sample_rate)
        chunk_size = self.hop_length * 2
        # Current chunk
        start = max(0, sample_idx - chunk_size // 2)
        end = min(len(self._audio_data), sample_idx + chunk_size // 2)
        if end - start < chunk_size // 2:
            return False
        current_chunk = self._audio_data[start:end]
        # Previous chunk (one hop back)
        prev_start = max(0, start - self.hop_length)
        prev_end = max(0, end - self.hop_length)
        if prev_end <= prev_start:
            return False
        prev_chunk = self._audio_data[prev_start:prev_end]
        # Compute spectra (Hann-windowed magnitude)
        current_spec = np.abs(np.fft.rfft(current_chunk * np.hanning(len(current_chunk))))
        prev_spec = np.abs(np.fft.rfft(prev_chunk * np.hanning(len(prev_chunk))))
        # Spectral flux: sum of positive differences (onset = new frequencies appearing)
        min_len = min(len(current_spec), len(prev_spec))
        diff = current_spec[:min_len] - prev_spec[:min_len]
        flux = np.sum(np.maximum(0, diff))  # Only count increases
        # Normalize by spectrum size
        flux = flux / (min_len + 1)
        # Update flux history; keep only the last 1.5 seconds of entries
        self._energy_history.append((self._current_time, flux))
        while self._energy_history and self._energy_history[0][0] < self._current_time - 1.5:
            self._energy_history.pop(0)
        if len(self._energy_history) < 3:
            return False
        # Adaptive threshold based on recent flux values
        flux_values = [f for t, f in self._energy_history]
        mean_flux = np.mean(flux_values)
        std_flux = np.std(flux_values) + 0.001  # Avoid division by zero
        # Beat if flux is above mean (more sensitive threshold)
        threshold = mean_flux + std_flux * 0.3  # Lower = more sensitive
        min_interval = 0.1  # Allow up to 600 BPM
        time_ok = self._current_time - self._last_beat_time > min_interval
        is_beat = flux > threshold and time_ok
        if is_beat:
            self._last_beat_time = self._current_time
        return is_beat
    def get_spectrum(self, bands: int = 3) -> np.ndarray:
        """Get frequency spectrum at current time, split into `bands` bands."""
        self._load_audio()
        if self._audio_data is None or len(self._audio_data) == 0:
            return np.zeros(bands)
        sample_idx = int(self._current_time * self.sample_rate)
        n = 2048
        start = max(0, sample_idx - n // 2)
        end = min(len(self._audio_data), sample_idx + n // 2)
        if end - start < n // 2:
            return np.zeros(bands)
        chunk = self._audio_data[start:end]
        # FFT
        fft = np.abs(np.fft.rfft(chunk * np.hanning(len(chunk))))
        # Divide into bands
        band_size = len(fft) // bands
        result = np.zeros(bands)
        for i in range(bands):
            s, e = i * band_size, (i + 1) * band_size
            result[i] = np.mean(fft[s:e])
        # Normalize so the strongest band is 1.0
        max_val = np.max(result)
        if max_val > 0:
            result = result / max_val
        return result
    @property
    def is_running(self) -> bool:
        # File-backed analysis is always "running".
        return True

308
streaming/backends.py Normal file
View File

@@ -0,0 +1,308 @@
"""
Effect processing backends.
Provides abstraction over different rendering backends:
- numpy: CPU-based, works everywhere, ~3-5 fps
- glsl: GPU-based, requires OpenGL, 30+ fps (future)
"""
import numpy as np
from abc import ABC, abstractmethod
from typing import List, Dict, Any, Optional
from pathlib import Path
class Backend(ABC):
    """Abstract base class for effect processing backends.

    Implementations take a set of per-source frames, apply each source's
    effect chain, and blend the results into a single output frame.
    """
    @abstractmethod
    def process_frame(
        self,
        frames: List[np.ndarray],
        effects_per_frame: List[List[Dict]],
        compositor_config: Dict,
        t: float,
        analysis_data: Dict,
    ) -> np.ndarray:
        """
        Process multiple input frames through effects and composite.
        Args:
            frames: List of input frames (one per source)
            effects_per_frame: List of effect chains (one per source)
            compositor_config: How to blend the layers
            t: Current time in seconds
            analysis_data: Analysis data for binding resolution
        Returns:
            Composited output frame
        """
        pass
    @abstractmethod
    def load_effect(self, effect_path: Path) -> Any:
        """Load an effect definition; returns a backend-specific handle."""
        pass
class NumpyBackend(Backend):
    """
    CPU-based effect processing using NumPy.
    Uses existing sexp_effects interpreter for effect execution.
    Works on any system, but limited to ~3-5 fps for complex effects.

    Fast, common effects (zoom/rotate/brightness/invert) are implemented
    natively in numpy/cv2; anything else falls back to the interpreter.
    """
    def __init__(self, recipe_dir: Path = None, minimal_primitives: bool = True):
        # recipe_dir: base directory for resolving effect files.
        # minimal_primitives: forwarded to the sexp interpreter factory.
        self.recipe_dir = recipe_dir or Path(".")
        self.minimal_primitives = minimal_primitives
        self._interpreter = None
        self._loaded_effects = {}
    def _get_interpreter(self):
        """Lazy-load the sexp interpreter (imported on first use)."""
        if self._interpreter is None:
            from sexp_effects import get_interpreter
            self._interpreter = get_interpreter(minimal_primitives=self.minimal_primitives)
        return self._interpreter
    def load_effect(self, effect_path: Path) -> Any:
        """Load an effect from a sexp file (cached by path); returns its name."""
        effect_key = str(effect_path)
        if effect_key not in self._loaded_effects:
            interp = self._get_interpreter()
            interp.load_effect(str(effect_path))
            self._loaded_effects[effect_key] = effect_path.stem
        return self._loaded_effects[effect_key]
    def _resolve_binding(self, value: Any, t: float, analysis_data: Dict) -> Any:
        """Resolve a parameter binding to its value at time t.

        Non-dict values pass through unchanged. A binding dict names an
        analysis track (`source`/`_bind`), is sampled by linear
        interpolation at t, then optionally mapped through `range`.
        """
        if not isinstance(value, dict):
            return value
        if "_binding" in value or "_bind" in value:
            source = value.get("source") or value.get("_bind")
            # NOTE(review): 'feature' is read but never used — only the
            # track's "values" array is ever sampled. Confirm intent.
            feature = value.get("feature", "values")
            range_map = value.get("range")
            track = analysis_data.get(source, {})
            times = track.get("times", [])
            values = track.get("values", [])
            if not times or not values:
                return 0.0
            # Find value at time t (linear interpolation)
            if t <= times[0]:
                val = values[0]
            elif t >= times[-1]:
                val = values[-1]
            else:
                # Linear scan for the bracketing pair of knots
                for i in range(len(times) - 1):
                    if times[i] <= t <= times[i + 1]:
                        alpha = (t - times[i]) / (times[i + 1] - times[i])
                        val = values[i] * (1 - alpha) + values[i + 1] * alpha
                        break
                else:
                    val = values[-1]
            # Apply range mapping: [lo, hi] -> lo + val * (hi - lo)
            if range_map and len(range_map) == 2:
                val = range_map[0] + val * (range_map[1] - range_map[0])
            return val
        return value
    def _apply_effect(
        self,
        frame: np.ndarray,
        effect_name: str,
        params: Dict,
        t: float,
        analysis_data: Dict,
    ) -> np.ndarray:
        """Apply a single effect to a frame.

        Resolves bindings, tries the fast native path, then the sexp
        interpreter; unknown effects pass the frame through unchanged.
        """
        # Resolve bindings in params (skipping structural/meta keys)
        resolved_params = {"_time": t}
        for key, value in params.items():
            if key in ("effect", "effect_path", "cid", "analysis_refs"):
                continue
            resolved_params[key] = self._resolve_binding(value, t, analysis_data)
        # Try fast native effects first
        result = self._apply_native_effect(frame, effect_name, resolved_params)
        if result is not None:
            return result
        # Fall back to sexp interpreter for complex effects
        interp = self._get_interpreter()
        if effect_name in interp.effects:
            result, _ = interp.run_effect(effect_name, frame, resolved_params, {})
            return result
        # Unknown effect - pass through
        return frame
    def _apply_native_effect(
        self,
        frame: np.ndarray,
        effect_name: str,
        params: Dict,
    ) -> Optional[np.ndarray]:
        """Fast native numpy effects for real-time streaming.

        Returns None when `effect_name` has no native implementation, so
        the caller can fall back to the interpreter.
        """
        import cv2
        if effect_name == "zoom":
            amount = float(params.get("amount", 1.0))
            if abs(amount - 1.0) < 0.01:
                return frame
            h, w = frame.shape[:2]
            # Crop center and resize
            # NOTE(review): amounts < 1 make new_w/new_h larger than the
            # frame, producing negative crop offsets — this path only
            # supports zoom-in; confirm callers never pass amount < 1.
            new_w, new_h = int(w / amount), int(h / amount)
            x1, y1 = (w - new_w) // 2, (h - new_h) // 2
            cropped = frame[y1:y1+new_h, x1:x1+new_w]
            return cv2.resize(cropped, (w, h))
        elif effect_name == "rotate":
            angle = float(params.get("angle", 0))
            if abs(angle) < 0.5:
                return frame
            h, w = frame.shape[:2]
            center = (w // 2, h // 2)
            matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
            return cv2.warpAffine(frame, matrix, (w, h))
        elif effect_name == "brightness":
            amount = float(params.get("amount", 1.0))
            return np.clip(frame * amount, 0, 255).astype(np.uint8)
        elif effect_name == "invert":
            amount = float(params.get("amount", 1.0))
            if amount < 0.5:
                return frame
            return 255 - frame
        # Not a native effect
        return None
    def process_frame(
        self,
        frames: List[np.ndarray],
        effects_per_frame: List[List[Dict]],
        compositor_config: Dict,
        t: float,
        analysis_data: Dict,
    ) -> np.ndarray:
        """
        Process frames through effects and composite.

        Empty input yields a black 1280x720 frame; a single source skips
        compositing entirely.
        """
        if not frames:
            return np.zeros((720, 1280, 3), dtype=np.uint8)
        processed = []
        # Apply effects to each input frame
        for i, (frame, effects) in enumerate(zip(frames, effects_per_frame)):
            result = frame.copy()
            for effect_config in effects:
                effect_name = effect_config.get("effect", "")
                if effect_name:
                    result = self._apply_effect(
                        result, effect_name, effect_config, t, analysis_data
                    )
            processed.append(result)
        # Composite layers
        if len(processed) == 1:
            return processed[0]
        return self._composite(processed, compositor_config, t, analysis_data)
    def _composite(
        self,
        frames: List[np.ndarray],
        config: Dict,
        t: float,
        analysis_data: Dict,
    ) -> np.ndarray:
        """Composite multiple frames into one weighted blend.

        Weights may be binding dicts; they are resolved at t and then
        normalized to sum to 1 (equal weights if they sum to zero).
        """
        mode = config.get("mode", "alpha")
        weights = config.get("weights", [1.0 / len(frames)] * len(frames))
        # Resolve weight bindings
        resolved_weights = []
        for w in weights:
            resolved_weights.append(self._resolve_binding(w, t, analysis_data))
        # Normalize weights
        total = sum(resolved_weights)
        if total > 0:
            resolved_weights = [w / total for w in resolved_weights]
        else:
            resolved_weights = [1.0 / len(frames)] * len(frames)
        # Resize frames to match first frame (stretch, no letterboxing here)
        target_h, target_w = frames[0].shape[:2]
        resized = []
        for frame in frames:
            if frame.shape[:2] != (target_h, target_w):
                import cv2
                frame = cv2.resize(frame, (target_w, target_h))
            resized.append(frame.astype(np.float32))
        # Weighted blend in float, then clamp back to uint8
        result = np.zeros_like(resized[0])
        for frame, weight in zip(resized, resolved_weights):
            result += frame * weight
        return np.clip(result, 0, 255).astype(np.uint8)
class GLSLBackend(Backend):
    """
    GPU-based effect processing using OpenGL/GLSL.
    Requires GPU with OpenGL 3.3+ support (or Mesa software renderer).
    Achieves 30+ fps real-time processing.
    TODO: Implement when ready for GPU acceleration.
    """
    def __init__(self):
        # Unconditionally unavailable for now; the method stubs below are
        # unreachable because construction always fails.
        raise NotImplementedError(
            "GLSL backend not yet implemented. Use NumpyBackend for now."
        )
    def load_effect(self, effect_path: Path) -> Any:
        # Stub to satisfy the Backend ABC; returns None.
        pass
    def process_frame(
        self,
        frames: List[np.ndarray],
        effects_per_frame: List[List[Dict]],
        compositor_config: Dict,
        t: float,
        analysis_data: Dict,
    ) -> np.ndarray:
        # Stub to satisfy the Backend ABC; returns None.
        pass
def get_backend(name: str = "numpy", **kwargs) -> Backend:
    """
    Get a backend by name.
    Args:
        name: "numpy" or "glsl"
        **kwargs: Backend-specific options
    Returns:
        Backend instance
    Raises:
        ValueError: if `name` is not a known backend.
    """
    # Guard-clause dispatch; backends are referenced lazily so an unknown
    # name fails with ValueError before touching either class.
    if name == "numpy":
        return NumpyBackend(**kwargs)
    if name == "glsl":
        return GLSLBackend(**kwargs)
    raise ValueError(f"Unknown backend: {name}")

595
streaming/compositor.py Normal file
View File

@@ -0,0 +1,595 @@
"""
Streaming video compositor.
Main entry point for the streaming pipeline. Combines:
- Multiple video sources (with looping)
- Per-source effect chains
- Layer compositing
- Optional live audio analysis
- Output to display/file/stream
"""
import time
import sys
import numpy as np
from typing import List, Dict, Any, Optional, Union
from pathlib import Path
from .sources import Source, VideoSource
from .backends import Backend, NumpyBackend, get_backend
from .output import Output, DisplayOutput, FileOutput, MultiOutput
class StreamingCompositor:
    """
    Real-time streaming video compositor.
    Reads frames from multiple sources, applies effects, composites layers,
    and outputs the result - all frame-by-frame without intermediate files.
    Example:
        compositor = StreamingCompositor(
            sources=["video1.mp4", "video2.mp4"],
            effects_per_source=[
                [{"effect": "rotate", "angle": 45}],
                [{"effect": "zoom", "amount": 1.5}],
            ],
            compositor_config={"mode": "alpha", "weights": [0.5, 0.5]},
        )
        compositor.run(output="preview", duration=60)
    """
    def __init__(
        self,
        sources: List[Union[str, Source]],
        effects_per_source: List[List[Dict]] = None,
        compositor_config: Dict = None,
        analysis_data: Dict = None,
        backend: str = "numpy",
        recipe_dir: Path = None,
        fps: float = 30,
        audio_source: str = None,
    ):
        """
        Initialize the streaming compositor.
        Args:
            sources: List of video paths or Source objects
            effects_per_source: List of effect chains, one per source
            compositor_config: How to blend layers (mode, weights)
            analysis_data: Pre-computed analysis data for bindings
            backend: "numpy" or "glsl"
            recipe_dir: Directory for resolving relative effect paths
            fps: Output frame rate
            audio_source: Path to audio file for streaming analysis
        Raises:
            ValueError: on unknown source types or a source/effects
                length mismatch.
        """
        self.fps = fps
        self.recipe_dir = recipe_dir or Path(".")
        self.analysis_data = analysis_data or {}
        # Initialize streaming audio analyzer if audio source provided
        self._audio_analyzer = None
        self._audio_source = audio_source
        if audio_source:
            from .audio import StreamingAudioAnalyzer
            self._audio_analyzer = StreamingAudioAnalyzer(audio_source)
            print(f"Streaming audio: {audio_source}", file=sys.stderr)
        # Initialize sources: wrap raw paths in VideoSource at the output fps
        self.sources: List[Source] = []
        for src in sources:
            if isinstance(src, Source):
                self.sources.append(src)
            elif isinstance(src, (str, Path)):
                self.sources.append(VideoSource(str(src), target_fps=fps))
            else:
                raise ValueError(f"Unknown source type: {type(src)}")
        # Effect chains (default: no effects)
        self.effects_per_source = effects_per_source or [[] for _ in self.sources]
        if len(self.effects_per_source) != len(self.sources):
            raise ValueError(
                f"effects_per_source length ({len(self.effects_per_source)}) "
                f"must match sources length ({len(self.sources)})"
            )
        # Compositor config (default: equal blend)
        self.compositor_config = compositor_config or {
            "mode": "alpha",
            "weights": [1.0 / len(self.sources)] * len(self.sources),
        }
        # Initialize backend
        self.backend: Backend = get_backend(
            backend,
            recipe_dir=self.recipe_dir,
        )
        # Load effects
        self._load_effects()
    def _load_effects(self):
        """Pre-load all effect definitions referenced via `effect_path`."""
        for effects in self.effects_per_source:
            for effect_config in effects:
                effect_path = effect_config.get("effect_path")
                if effect_path:
                    full_path = self.recipe_dir / effect_path
                    # Missing files are silently skipped here.
                    if full_path.exists():
                        self.backend.load_effect(full_path)
    def _create_output(
        self,
        output: Union[str, Output],
        size: tuple,
    ) -> Output:
        """Create output target from string or Output object.

        "preview" -> window display, "null" -> discard, any other string
        -> file path; an Output instance is passed through as-is.
        """
        if isinstance(output, Output):
            return output
        if output == "preview":
            return DisplayOutput("Streaming Preview", size,
                                 audio_source=self._audio_source, fps=self.fps)
        elif output == "null":
            from .output import NullOutput
            return NullOutput()
        elif isinstance(output, str):
            return FileOutput(output, size, fps=self.fps, audio_source=self._audio_source)
        else:
            raise ValueError(f"Unknown output type: {output}")
    def run(
        self,
        output: Union[str, Output] = "preview",
        duration: float = None,
        audio_analyzer=None,
        show_fps: bool = True,
        recipe_executor=None,
    ):
        """
        Run the streaming compositor.
        Args:
            output: Output target - "preview", filename, or Output object
            duration: Duration in seconds (None = run until quit)
            audio_analyzer: Optional AudioAnalyzer for live audio reactivity
            show_fps: Show FPS counter in console
            recipe_executor: Optional StreamingRecipeExecutor for full recipe logic
        """
        # Determine output size from first source
        output_size = self.sources[0].size
        # Create output
        out = self._create_output(output, output_size)
        # Determine duration
        if duration is None:
            # Run until stopped (or min source duration if not looping)
            duration = min(s.duration for s in self.sources)
            if duration == float('inf'):
                duration = 3600  # 1 hour max for live sources
        total_frames = int(duration * self.fps)
        frame_time = 1.0 / self.fps
        print(f"Streaming: {len(self.sources)} sources -> {output}", file=sys.stderr)
        print(f"Duration: {duration:.1f}s, {total_frames} frames @ {self.fps}fps", file=sys.stderr)
        print(f"Output size: {output_size[0]}x{output_size[1]}", file=sys.stderr)
        print(f"Press 'q' to quit (if preview)", file=sys.stderr)
        # Frame loop
        start_time = time.time()
        frame_count = 0
        fps_update_interval = 30  # Update FPS display every N frames
        last_fps_time = start_time
        last_fps_count = 0
        try:
            for frame_num in range(total_frames):
                if not out.is_open:
                    print(f"\nOutput closed at frame {frame_num}", file=sys.stderr)
                    break
                t = frame_num * frame_time
                try:
                    # Update analysis data from streaming audio (file-based
                    # analyzer takes precedence over a live one)
                    energy = 0.0
                    is_beat = False
                    if self._audio_analyzer:
                        self._update_from_audio(self._audio_analyzer, t)
                        energy = self.analysis_data.get("live_energy", {}).get("values", [0])[0]
                        is_beat = self.analysis_data.get("live_beat", {}).get("values", [0])[0] > 0.5
                    elif audio_analyzer:
                        self._update_from_audio(audio_analyzer, t)
                        energy = self.analysis_data.get("live_energy", {}).get("values", [0])[0]
                        is_beat = self.analysis_data.get("live_beat", {}).get("values", [0])[0] > 0.5
                    # Read frames from all sources
                    frames = [src.read_frame(t) for src in self.sources]
                    # Process through recipe executor if provided
                    if recipe_executor:
                        result = self._process_with_executor(
                            frames, recipe_executor, energy, is_beat, t
                        )
                    else:
                        # Simple backend processing
                        result = self.backend.process_frame(
                            frames,
                            self.effects_per_source,
                            self.compositor_config,
                            t,
                            self.analysis_data,
                        )
                    # Output
                    out.write(result, t)
                    frame_count += 1
                    # FPS display (instantaneous rate over the last interval)
                    if show_fps and frame_count % fps_update_interval == 0:
                        now = time.time()
                        elapsed = now - last_fps_time
                        if elapsed > 0:
                            current_fps = (frame_count - last_fps_count) / elapsed
                            progress = frame_num / total_frames * 100
                            print(
                                f"\r  {progress:5.1f}% | {current_fps:5.1f} fps | "
                                f"frame {frame_num}/{total_frames}",
                                end="", file=sys.stderr
                            )
                            last_fps_time = now
                            last_fps_count = frame_count
                except Exception as e:
                    # Any per-frame failure aborts the run with a traceback.
                    print(f"\nError at frame {frame_num}, t={t:.1f}s: {e}", file=sys.stderr)
                    import traceback
                    traceback.print_exc()
                    break
        except KeyboardInterrupt:
            print("\nInterrupted", file=sys.stderr)
        finally:
            # Always release the output and any closeable sources.
            out.close()
            for src in self.sources:
                if hasattr(src, 'close'):
                    src.close()
        # Final stats
        elapsed = time.time() - start_time
        avg_fps = frame_count / elapsed if elapsed > 0 else 0
        print(f"\nCompleted: {frame_count} frames in {elapsed:.1f}s ({avg_fps:.1f} fps avg)", file=sys.stderr)
    def _process_with_executor(
        self,
        frames: List[np.ndarray],
        executor,
        energy: float,
        is_beat: bool,
        t: float,
    ) -> np.ndarray:
        """
        Process frames using the recipe executor for full pipeline.
        Implements:
        1. process-pair: two clips per source with effects, blended
        2. cycle-crossfade: dynamic composition with zoom and weights
        3. Final effects: whole-spin, ripple
        """
        import cv2
        # Target size from first source
        target_h, target_w = frames[0].shape[:2]
        # Resize all frames to target size (letterbox to preserve aspect ratio)
        resized_frames = []
        for frame in frames:
            fh, fw = frame.shape[:2]
            if (fh, fw) != (target_h, target_w):
                # Calculate scale to fit while preserving aspect ratio
                scale = min(target_w / fw, target_h / fh)
                new_w, new_h = int(fw * scale), int(fh * scale)
                resized = cv2.resize(frame, (new_w, new_h))
                # Center on black canvas
                canvas = np.zeros((target_h, target_w, 3), dtype=np.uint8)
                x_off = (target_w - new_w) // 2
                y_off = (target_h - new_h) // 2
                canvas[y_off:y_off+new_h, x_off:x_off+new_w] = resized
                resized_frames.append(canvas)
            else:
                resized_frames.append(frame)
        frames = resized_frames
        # Update executor state
        executor.on_frame(energy, is_beat, t)
        # Get weights to know which sources are active
        weights = executor.get_cycle_weights()
        # Process each source as a "pair" (clip A and B with different effects)
        processed_pairs = []
        for i, frame in enumerate(frames):
            # Skip sources with zero weight (but still need placeholder)
            if i < len(weights) and weights[i] < 0.001:
                processed_pairs.append(None)
                continue
            # Get effect params for clip A and B
            params_a = executor.get_effect_params(i, "a", energy)
            params_b = executor.get_effect_params(i, "b", energy)
            pair_params = executor.get_pair_params(i)
            # Process clip A
            clip_a = self._apply_clip_effects(frame.copy(), params_a, t)
            # Process clip B
            clip_b = self._apply_clip_effects(frame.copy(), params_b, t)
            # Blend A and B using pair_mix opacity
            opacity = pair_params["blend_opacity"]
            blended = cv2.addWeighted(
                clip_a, 1 - opacity,
                clip_b, opacity,
                0
            )
            # Apply pair rotation
            h, w = blended.shape[:2]
            center = (w // 2, h // 2)
            angle = pair_params["pair_rotation"]
            if abs(angle) > 0.5:
                matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
                blended = cv2.warpAffine(blended, matrix, (w, h))
            processed_pairs.append(blended)
        # Cycle-crossfade composition
        weights = executor.get_cycle_weights()
        zooms = executor.get_cycle_zooms()
        # Apply zoom per pair and composite
        h, w = target_h, target_w
        result = np.zeros((h, w, 3), dtype=np.float32)
        for idx, (pair, weight, zoom) in enumerate(zip(processed_pairs, weights, zooms)):
            # Skip zero-weight sources
            if pair is None or weight < 0.001:
                continue
            # NOTE(review): orig_shape is assigned but never used.
            orig_shape = pair.shape
            # Apply zoom
            if zoom > 1.01:
                # Zoom in: crop center and resize up
                new_w, new_h = int(w / zoom), int(h / zoom)
                if new_w > 0 and new_h > 0:
                    x1, y1 = (w - new_w) // 2, (h - new_h) // 2
                    cropped = pair[y1:y1+new_h, x1:x1+new_w]
                    pair = cv2.resize(cropped, (w, h))
            elif zoom < 0.99:
                # Zoom out: shrink video and center on black
                scaled_w, scaled_h = int(w * zoom), int(h * zoom)
                if scaled_w > 0 and scaled_h > 0:
                    shrunk = cv2.resize(pair, (scaled_w, scaled_h))
                    canvas = np.zeros((h, w, 3), dtype=np.uint8)
                    x_off, y_off = (w - scaled_w) // 2, (h - scaled_h) // 2
                    canvas[y_off:y_off+scaled_h, x_off:x_off+scaled_w] = shrunk
                    pair = canvas.copy()
            # Draw colored border - size indicates zoom level
            border_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0)]
            color = border_colors[idx % 4]
            thickness = max(3, int(10 * weight))  # Thicker border = higher weight
            pair = np.ascontiguousarray(pair)
            pair[:thickness, :] = color
            pair[-thickness:, :] = color
            pair[:, :thickness] = color
            pair[:, -thickness:] = color
            result += pair.astype(np.float32) * weight
        result = np.clip(result, 0, 255).astype(np.uint8)
        # Apply final effects (whole-spin, ripple)
        final_params = executor.get_final_effects(energy)
        # Whole spin
        spin_angle = final_params["whole_spin_angle"]
        if abs(spin_angle) > 0.5:
            center = (w // 2, h // 2)
            matrix = cv2.getRotationMatrix2D(center, spin_angle, 1.0)
            result = cv2.warpAffine(result, matrix, (w, h))
        # Ripple effect
        amp = final_params["ripple_amplitude"]
        if amp > 1:
            result = self._apply_ripple(result, amp,
                                        final_params["ripple_cx"],
                                        final_params["ripple_cy"],
                                        t)
        return result
    def _apply_clip_effects(self, frame: np.ndarray, params: dict, t: float) -> np.ndarray:
        """Apply per-clip effects: rotate, zoom, invert, hue_shift, ascii."""
        import cv2
        h, w = frame.shape[:2]
        # Rotate (skipped below a half-degree to avoid pointless warps)
        angle = params["rotate_angle"]
        if abs(angle) > 0.5:
            center = (w // 2, h // 2)
            matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
            frame = cv2.warpAffine(frame, matrix, (w, h))
        # Zoom (center crop + resize, with clamped crop bounds)
        zoom = params["zoom_amount"]
        if abs(zoom - 1.0) > 0.01:
            new_w, new_h = int(w / zoom), int(h / zoom)
            if new_w > 0 and new_h > 0:
                x1, y1 = (w - new_w) // 2, (h - new_h) // 2
                x1, y1 = max(0, x1), max(0, y1)
                x2, y2 = min(w, x1 + new_w), min(h, y1 + new_h)
                if x2 > x1 and y2 > y1:
                    cropped = frame[y1:y2, x1:x2]
                    frame = cv2.resize(cropped, (w, h))
        # Invert (treated as an on/off toggle)
        if params["invert_amount"] > 0.5:
            frame = 255 - frame
        # Hue shift (OpenCV hue is 0-179, hence degrees / 2)
        hue_deg = params["hue_degrees"]
        if abs(hue_deg) > 1:
            hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
            hsv[:, :, 0] = (hsv[:, :, 0].astype(np.int32) + int(hue_deg / 2)) % 180
            frame = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
        # ASCII art (treated as an on/off toggle)
        if params["ascii_mix"] > 0.5:
            char_size = max(4, int(params["ascii_char_size"]))
            frame = self._apply_ascii(frame, char_size)
        return frame
    def _apply_ascii(self, frame: np.ndarray, char_size: int) -> np.ndarray:
        """Apply ASCII art effect.

        Downsamples the frame into char-sized cells, maps each cell's
        luminance to a character, and renders the grid with PIL.
        """
        import cv2
        from PIL import Image, ImageDraw, ImageFont
        h, w = frame.shape[:2]
        chars = " .:-=+*#%@"
        # Get font (fall back to PIL's builtin if DejaVu is unavailable)
        try:
            font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", char_size)
        except:
            font = ImageFont.load_default()
        # Sample cells using area interpolation (fast block average)
        rows = h // char_size
        cols = w // char_size
        if rows < 1 or cols < 1:
            return frame
        # Crop to exact grid and downsample
        cropped = frame[:rows * char_size, :cols * char_size]
        cell_colors = cv2.resize(cropped, (cols, rows), interpolation=cv2.INTER_AREA)
        # Compute luminance (Rec. 601 weights, assuming RGB channel order)
        luminances = (0.299 * cell_colors[:, :, 0] +
                      0.587 * cell_colors[:, :, 1] +
                      0.114 * cell_colors[:, :, 2]) / 255.0
        # Create output image
        out_h = rows * char_size
        out_w = cols * char_size
        output = Image.new('RGB', (out_w, out_h), (0, 0, 0))
        draw = ImageDraw.Draw(output)
        # Draw characters
        for r in range(rows):
            for c in range(cols):
                lum = luminances[r, c]
                color = tuple(cell_colors[r, c])
                # Map luminance to character
                idx = int(lum * (len(chars) - 1))
                char = chars[idx]
                # Draw character
                x = c * char_size
                y = r * char_size
                draw.text((x, y), char, fill=color, font=font)
        # Convert back to numpy and resize to original
        result = np.array(output)
        if result.shape[:2] != (h, w):
            result = cv2.resize(result, (w, h), interpolation=cv2.INTER_LINEAR)
        return result
    def _apply_ripple(self, frame: np.ndarray, amplitude: float,
                      cx: float, cy: float, t: float = 0) -> np.ndarray:
        """Apply ripple distortion effect.

        cx/cy are the ripple center as fractions of width/height.
        """
        import cv2
        h, w = frame.shape[:2]
        center_x, center_y = cx * w, cy * h
        max_dim = max(w, h)
        # Create coordinate grids
        y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32)
        # Distance from center
        dx = x_coords - center_x
        dy = y_coords - center_y
        dist = np.sqrt(dx*dx + dy*dy)
        # Ripple parameters (matching recipe: frequency=8, decay=2, speed=5)
        freq = 8
        decay = 2
        speed = 5
        phase = t * speed * 2 * np.pi
        # Ripple displacement (matching original formula)
        ripple = np.sin(2 * np.pi * freq * dist / max_dim + phase) * amplitude
        # Apply decay
        if decay > 0:
            ripple = ripple * np.exp(-dist * decay / max_dim)
        # Displace along radial direction (zero displacement at the center)
        with np.errstate(divide='ignore', invalid='ignore'):
            norm_dx = np.where(dist > 0, dx / dist, 0)
            norm_dy = np.where(dist > 0, dy / dist, 0)
        map_x = (x_coords + ripple * norm_dx).astype(np.float32)
        map_y = (y_coords + ripple * norm_dy).astype(np.float32)
        return cv2.remap(frame, map_x, map_y, cv2.INTER_LINEAR,
                         borderMode=cv2.BORDER_REFLECT)
    def _update_from_audio(self, analyzer, t: float):
        """Update analysis data from audio analyzer (streaming or live).

        Overwrites the single-sample "live_energy"/"live_beat" tracks each
        frame so bindings always resolve to the current value.
        """
        # Set time for file-based streaming analyzers
        if hasattr(analyzer, 'set_time'):
            analyzer.set_time(t)
        # Get current audio features
        energy = analyzer.get_energy() if hasattr(analyzer, 'get_energy') else 0
        beat = analyzer.get_beat() if hasattr(analyzer, 'get_beat') else False
        # Update analysis tracks - these can be referenced by effect bindings
        self.analysis_data["live_energy"] = {
            "times": [t],
            "values": [energy],
            "duration": float('inf'),
        }
        self.analysis_data["live_beat"] = {
            "times": [t],
            "values": [1.0 if beat else 0.0],
            "duration": float('inf'),
        }
def quick_preview(
    sources: List[str],
    effects: List[List[Dict]] = None,
    duration: float = 10,
    fps: float = 30,
):
    """
    Convenience wrapper: blend *sources* into a live preview window.

    Example:
        quick_preview(["video1.mp4", "video2.mp4"], duration=30)
    """
    StreamingCompositor(
        sources=sources,
        effects_per_source=effects,
        fps=fps,
    ).run(output="preview", duration=duration)

125
streaming/demo.py Normal file
View File

@@ -0,0 +1,125 @@
#!/usr/bin/env python3
"""
Demo script for streaming compositor.
Usage:
# Preview two videos blended
python -m streaming.demo preview video1.mp4 video2.mp4
# Record output to file
python -m streaming.demo record video1.mp4 video2.mp4 -o output.mp4
# Benchmark (no output)
python -m streaming.demo benchmark video1.mp4 --duration 10
"""
import argparse
import sys
from pathlib import Path
# Add parent to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent))
from streaming import StreamingCompositor, VideoSource
from streaming.output import NullOutput
def demo_preview(sources: list, duration: float, effects: bool = False):
    """Preview sources, optionally with a couple of canned effects."""
    effects_config = None
    if effects:
        # One canned chain per source (at most two are defined).
        canned = [
            [{"effect": "rotate", "angle": 15}],
            [{"effect": "zoom", "amount": 1.2}],
        ]
        effects_config = canned[:len(sources)]
    compositor = StreamingCompositor(
        sources=sources,
        effects_per_source=effects_config,
        recipe_dir=Path(__file__).parent.parent,
    )
    compositor.run(output="preview", duration=duration)
def demo_record(sources: list, output_path: str, duration: float):
    """Render the blended sources into a video file at `output_path`."""
    StreamingCompositor(
        sources=sources,
        recipe_dir=Path(__file__).parent.parent,
    ).run(output=output_path, duration=duration)
def demo_benchmark(sources: list, duration: float):
    """Measure raw processing speed by discarding every rendered frame."""
    StreamingCompositor(
        sources=sources,
        recipe_dir=Path(__file__).parent.parent,
    ).run(output="null", duration=duration)
def demo_audio_reactive(sources: list, duration: float):
    """Preview sources with zoom driven by live audio energy.

    Falls back to a plain preview when no audio device is available.
    """
    from streaming.audio import AudioAnalyzer
    # One energy-bound zoom chain per source (fresh dicts for each).
    effects_config = []
    for _ in sources:
        effects_config.append([{
            "effect": "zoom",
            "amount": {"_binding": True, "source": "live_energy", "feature": "values", "range": [1.0, 1.5]},
        }])
    compositor = StreamingCompositor(
        sources=sources,
        effects_per_source=effects_config,
        recipe_dir=Path(__file__).parent.parent,
    )
    # Try the live analyzer; any failure degrades to a non-reactive preview.
    try:
        with AudioAnalyzer() as audio:
            print("Audio analyzer started. Make some noise!", file=sys.stderr)
            compositor.run(output="preview", duration=duration, audio_analyzer=audio)
    except Exception as e:
        print(f"Audio not available: {e}", file=sys.stderr)
        print("Running without audio...", file=sys.stderr)
        compositor.run(output="preview", duration=duration)
def main():
    """CLI entry point: parse arguments and dispatch to a demo mode."""
    arg_parser = argparse.ArgumentParser(description="Streaming compositor demo")
    arg_parser.add_argument("mode", choices=["preview", "record", "benchmark", "audio"],
                            help="Demo mode")
    arg_parser.add_argument("sources", nargs="+", help="Video source files")
    arg_parser.add_argument("-o", "--output", help="Output file (for record mode)")
    arg_parser.add_argument("-d", "--duration", type=float, default=30,
                            help="Duration in seconds")
    arg_parser.add_argument("--effects", action="store_true",
                            help="Apply simple effects (for preview)")
    opts = arg_parser.parse_args()
    # Bail out on the first missing source file.
    for src in opts.sources:
        if not Path(src).exists():
            print(f"Error: Source not found: {src}", file=sys.stderr)
            sys.exit(1)
    # Dispatch to the selected demo mode.
    if opts.mode == "record":
        if not opts.output:
            print("Error: --output required for record mode", file=sys.stderr)
            sys.exit(1)
        demo_record(opts.sources, opts.output, opts.duration)
    elif opts.mode == "preview":
        demo_preview(opts.sources, opts.duration, opts.effects)
    elif opts.mode == "benchmark":
        demo_benchmark(opts.sources, opts.duration)
    else:  # audio
        demo_audio_reactive(opts.sources, opts.duration)
# Script entry point: python -m streaming.demo <mode> <sources...>
if __name__ == "__main__":
    main()

369
streaming/output.py Normal file
View File

@@ -0,0 +1,369 @@
"""
Output targets for streaming compositor.
Supports:
- Display window (preview)
- File output (recording)
- Stream output (RTMP, etc.) - future
"""
import numpy as np
import subprocess
from abc import ABC, abstractmethod
from typing import Tuple, Optional
from pathlib import Path
class Output(ABC):
    """Interface shared by every streaming output target.

    Concrete subclasses deliver frames to a display window, a file,
    several sinks at once, or nowhere at all (benchmarking).
    """

    @abstractmethod
    def write(self, frame: np.ndarray, t: float):
        """Deliver one frame stamped with presentation time `t`."""
        pass

    @abstractmethod
    def close(self):
        """Release any resources held by this output."""
        pass

    @property
    @abstractmethod
    def is_open(self) -> bool:
        """Whether this output can still accept frames."""
        pass
class DisplayOutput(Output):
    """
    Display frames using mpv (handles Wayland properly).
    Useful for live preview. Press 'q' to quit.

    mpv is started lazily on the first write() so the window size can be
    taken from the actual frames rather than guessed up front. Optionally
    plays an audio file in parallel via ffplay.
    """
    def __init__(self, title: str = "Streaming Preview", size: Tuple[int, int] = None,
                 audio_source: str = None, fps: float = 30):
        """
        Args:
            title: mpv window title.
            size: Size hint; currently unused (frames define the size).
            audio_source: Optional audio file to play alongside via ffplay.
            fps: Frame rate advertised to mpv's rawvideo demuxer.
        """
        self.title = title
        self.size = size
        self.audio_source = audio_source
        self.fps = fps
        self._is_open = True
        self._process = None        # mpv subprocess, started on first frame
        self._audio_process = None  # ffplay subprocess when audio_source set

    def _start_mpv(self, frame_size: Tuple[int, int]):
        """Start mpv process for display (frame_size is (width, height))."""
        import sys
        w, h = frame_size
        cmd = [
            "mpv",
            "--no-cache",
            "--demuxer=rawvideo",
            f"--demuxer-rawvideo-w={w}",
            f"--demuxer-rawvideo-h={h}",
            "--demuxer-rawvideo-mp-format=rgb24",
            f"--demuxer-rawvideo-fps={self.fps}",
            f"--title={self.title}",
            "-",
        ]
        print(f"Starting mpv: {' '.join(cmd)}", file=sys.stderr)
        self._process = subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        # Start audio playback if we have an audio source
        if self.audio_source:
            audio_cmd = [
                "ffplay", "-nodisp", "-autoexit", "-loglevel", "quiet",
                str(self.audio_source)
            ]
            print(f"Starting audio: {self.audio_source}", file=sys.stderr)
            self._audio_process = subprocess.Popen(
                audio_cmd,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )

    def write(self, frame: np.ndarray, t: float):
        """Display frame (converted to contiguous uint8 RGB if needed)."""
        if not self._is_open:
            return
        # Ensure frame is correct format
        if frame.dtype != np.uint8:
            frame = np.clip(frame, 0, 255).astype(np.uint8)
        if not frame.flags['C_CONTIGUOUS']:
            frame = np.ascontiguousarray(frame)
        # Start mpv on first frame
        if self._process is None:
            self._start_mpv((frame.shape[1], frame.shape[0]))
        # Check if mpv is still running
        if self._process.poll() is not None:
            self._is_open = False
            return
        try:
            self._process.stdin.write(frame.tobytes())
            self._process.stdin.flush()  # Prevent buffering
        except BrokenPipeError:
            self._is_open = False

    def close(self):
        """Close the display and audio."""
        if self._process:
            try:
                self._process.stdin.close()
            except (OSError, ValueError):
                # Pipe already broken or closed; nothing left to flush.
                # (Was a bare `except:`, which also masked KeyboardInterrupt.)
                pass
            self._process.terminate()
            self._process.wait()
        if self._audio_process:
            self._audio_process.terminate()
            self._audio_process.wait()
        self._is_open = False

    @property
    def is_open(self) -> bool:
        """False once mpv has exited or a write failed."""
        if self._process and self._process.poll() is not None:
            self._is_open = False
        return self._is_open
class FileOutput(Output):
    """
    Write frames to a video file using ffmpeg.

    An ffmpeg process is spawned in __init__; raw RGB frames are piped to
    its stdin and encoded on the fly. Optionally muxes in an audio file.
    """
    def __init__(
        self,
        path: str,
        size: Tuple[int, int],
        fps: float = 30,
        codec: str = "libx264",
        crf: int = 18,
        preset: str = "fast",
        audio_source: str = None,
    ):
        """
        Args:
            path: Output file path.
            size: Output frame size as (width, height).
            fps: Output frame rate.
            codec: ffmpeg video codec name.
            crf: Constant rate factor (lower = higher quality).
            preset: ffmpeg encoder preset.
            audio_source: Optional audio file muxed into the output.
        """
        self.path = Path(path)
        self.size = size
        self.fps = fps
        self._is_open = True
        # Build ffmpeg command
        cmd = [
            "ffmpeg", "-y",
            "-f", "rawvideo",
            "-vcodec", "rawvideo",
            "-pix_fmt", "rgb24",
            "-s", f"{size[0]}x{size[1]}",
            "-r", str(fps),
            "-i", "-",
        ]
        # Add audio input if provided
        if audio_source:
            cmd.extend(["-i", str(audio_source)])
            # Explicitly map: video from input 0 (rawvideo), audio from input 1
            cmd.extend(["-map", "0:v", "-map", "1:a"])
        cmd.extend([
            "-c:v", codec,
            "-preset", preset,
            "-crf", str(crf),
            "-pix_fmt", "yuv420p",
        ])
        # Add audio codec if we have audio
        if audio_source:
            cmd.extend(["-c:a", "aac", "-b:a", "192k", "-shortest"])
        cmd.append(str(self.path))
        import sys
        print(f"FileOutput cmd: {' '.join(cmd)}", file=sys.stderr)
        self._process = subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stderr=None,  # Show errors for debugging
        )

    def write(self, frame: np.ndarray, t: float):
        """Write frame to video file (resized to the configured size)."""
        if not self._is_open or self._process.poll() is not None:
            self._is_open = False
            return
        # Resize if needed
        if frame.shape[1] != self.size[0] or frame.shape[0] != self.size[1]:
            import cv2
            frame = cv2.resize(frame, self.size)
        try:
            self._process.stdin.write(frame.tobytes())
        except BrokenPipeError:
            self._is_open = False

    def close(self):
        """Close the video file and wait for ffmpeg to finish encoding."""
        if self._process:
            try:
                self._process.stdin.close()
            except (OSError, ValueError):
                # ffmpeg already died and the pipe is gone; still fall through
                # to wait() so the process is reaped. (Previously an unguarded
                # close() here could raise and skip the wait entirely.)
                pass
            self._process.wait()
        self._is_open = False

    @property
    def is_open(self) -> bool:
        """True while ffmpeg is running and no write has failed."""
        return self._is_open and self._process.poll() is None
class MultiOutput(Output):
    """
    Fan a single frame stream out to several outputs at once.

    Handy for recording to disk while also watching a live preview.
    """
    def __init__(self, outputs: list):
        self.outputs = outputs

    def write(self, frame: np.ndarray, t: float):
        """Forward the frame to every sink that is still accepting."""
        for sink in self.outputs:
            if sink.is_open:
                sink.write(frame, t)

    def close(self):
        """Close all wrapped outputs."""
        for sink in self.outputs:
            sink.close()

    @property
    def is_open(self) -> bool:
        """Open as long as at least one wrapped output remains open."""
        return any(sink.is_open for sink in self.outputs)
class NullOutput(Output):
    """
    Swallow frames without writing them anywhere (for benchmarking).
    """
    def __init__(self):
        self._is_open = True
        self.frame_count = 0  # number of frames "written" so far

    def write(self, frame: np.ndarray, t: float):
        """Count the frame and discard it."""
        self.frame_count += 1

    def close(self):
        """Mark the output as closed."""
        self._is_open = False

    @property
    def is_open(self) -> bool:
        return self._is_open
class PipeOutput(Output):
    """
    Pipe frames directly to mpv.
    Launches mpv with rawvideo demuxer and writes frames to stdin.

    Unlike DisplayOutput, the output size is fixed up front and incoming
    frames are resized to match it.
    """
    def __init__(self, size: Tuple[int, int], fps: float = 30, audio_source: str = None):
        """
        Args:
            size: Fixed output size as (width, height).
            fps: Frame rate advertised to mpv's rawvideo demuxer.
            audio_source: Optional audio file played alongside via ffplay.
        """
        self.size = size
        self.fps = fps
        self.audio_source = audio_source
        self._is_open = True
        self._process = None
        self._audio_process = None
        self._started = False  # guards one-time lazy startup

    def _start(self):
        """Start mpv and audio on first frame."""
        if self._started:
            return
        self._started = True
        import sys
        w, h = self.size
        # Start mpv
        cmd = [
            "mpv", "--no-cache",
            "--demuxer=rawvideo",
            f"--demuxer-rawvideo-w={w}",
            f"--demuxer-rawvideo-h={h}",
            "--demuxer-rawvideo-mp-format=rgb24",
            f"--demuxer-rawvideo-fps={self.fps}",
            "--title=Streaming",
            "-"
        ]
        print(f"Starting mpv: {w}x{h} @ {self.fps}fps", file=sys.stderr)
        self._process = subprocess.Popen(
            cmd,
            stdin=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
        )
        # Start audio
        if self.audio_source:
            audio_cmd = [
                "ffplay", "-nodisp", "-autoexit", "-loglevel", "quiet",
                str(self.audio_source)
            ]
            print(f"Starting audio: {self.audio_source}", file=sys.stderr)
            self._audio_process = subprocess.Popen(
                audio_cmd,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )

    def write(self, frame: np.ndarray, t: float):
        """Write frame to mpv (resized/converted to the configured format)."""
        if not self._is_open:
            return
        self._start()
        # Check mpv still running
        if self._process.poll() is not None:
            self._is_open = False
            return
        # Resize if needed
        if frame.shape[1] != self.size[0] or frame.shape[0] != self.size[1]:
            import cv2
            frame = cv2.resize(frame, self.size)
        # Ensure correct format
        if frame.dtype != np.uint8:
            frame = np.clip(frame, 0, 255).astype(np.uint8)
        if not frame.flags['C_CONTIGUOUS']:
            frame = np.ascontiguousarray(frame)
        try:
            self._process.stdin.write(frame.tobytes())
            self._process.stdin.flush()
        except BrokenPipeError:
            self._is_open = False

    def close(self):
        """Close mpv and audio."""
        if self._process:
            try:
                self._process.stdin.close()
            except (OSError, ValueError):
                # Pipe already broken or closed; nothing left to flush.
                # (Was a bare `except:`, which also masked KeyboardInterrupt.)
                pass
            self._process.terminate()
            self._process.wait()
        if self._audio_process:
            self._audio_process.terminate()
            self._audio_process.wait()
        self._is_open = False

    @property
    def is_open(self) -> bool:
        """False once mpv has exited or a write failed."""
        if self._process and self._process.poll() is not None:
            self._is_open = False
        return self._is_open

846
streaming/pipeline.py Normal file
View File

@@ -0,0 +1,846 @@
"""
Streaming pipeline executor.
Directly executes compiled sexp recipes frame-by-frame.
No adapter layer - frames and analysis flow through the DAG.
"""
import sys
import time
import numpy as np
from pathlib import Path
from typing import Dict, List, Any, Optional
from dataclasses import dataclass, field
from .sources import VideoSource
from .audio import StreamingAudioAnalyzer
from .output import DisplayOutput, FileOutput
from .sexp_interp import SexpInterpreter
@dataclass
class FrameContext:
    """Context passed through the pipeline for each frame."""
    t: float  # Current time (seconds into the stream)
    energy: float = 0.0       # latest audio energy reading (0 when no analyzer)
    is_beat: bool = False     # whether a beat is active at time t
    beat_count: int = 0       # cumulative beats seen; drives scan/SLICE_ON stepping
    # Live analysis tracks keyed by name (e.g. "live_energy", "live_beat"),
    # each mapping to a dict with a "values" list; read by effect bindings.
    analysis: Dict[str, Any] = field(default_factory=dict)
class StreamingPipeline:
    """
    Executes a compiled sexp recipe as a streaming pipeline.
    Frames flow through the DAG directly - no adapter needed.
    Each node is evaluated lazily when its output is requested.
    """
    def __init__(self, compiled_recipe, recipe_dir: Path = None, fps: float = 30, seed: int = 42,
                 output_size: tuple = None):
        # NOTE(review): compiled_recipe is assumed to expose `.nodes` (a list
        # of dicts with 'id'/'type'/'config'/'inputs') as used throughout this
        # class, and `.encoding` as used by run_pipeline() -- confirm against
        # artdag.sexp.compiler's output type.
        self.recipe = compiled_recipe
        self.recipe_dir = recipe_dir or Path(".")
        self.fps = fps
        self.seed = seed  # base seed for per-scan RNGs (see _init_scans)
        # Build node lookup
        self.nodes = {n['id']: n for n in compiled_recipe.nodes}
        # Runtime state
        self.sources: Dict[str, VideoSource] = {}
        self.audio_analyzer: Optional[StreamingAudioAnalyzer] = None
        self.audio_source_path: Optional[str] = None
        # Sexp interpreter for expressions
        self.interp = SexpInterpreter()
        # Scan state (node_id -> current value)
        self.scan_state: Dict[str, Any] = {}
        self.scan_emit: Dict[str, Any] = {}  # node_id -> last emitted value
        # SLICE_ON state
        self.slice_on_acc: Dict[str, Any] = {}     # node_id -> accumulator dict
        self.slice_on_result: Dict[str, Any] = {}  # node_id -> last lambda result
        # Frame cache for current timestep (cleared each frame)
        self._frame_cache: Dict[str, np.ndarray] = {}
        # Context for current frame
        self.ctx = FrameContext(t=0.0)
        # Output size (w, h) - set after sources are initialized
        self._output_size = output_size
        # Initialize
        self._init_sources()
        self._init_scans()
        self._init_slice_on()
        # Set output size from first source if not specified
        if self._output_size is None and self.sources:
            first_source = next(iter(self.sources.values()))
            # NOTE(review): relies on VideoSource._size being a (w, h) tuple,
            # consistent with its use in run() below -- confirm in sources.py.
            self._output_size = first_source._size
    def _init_sources(self):
        """Initialize video and audio sources from the recipe's SOURCE nodes.

        Video files become VideoSource entries keyed by node id; the last
        audio file encountered becomes the pipeline's soundtrack/analyzer.
        """
        for node in self.recipe.nodes:
            if node.get('type') == 'SOURCE':
                config = node.get('config', {})
                path = config.get('path')
                if path:
                    full_path = (self.recipe_dir / path).resolve()
                    suffix = full_path.suffix.lower()
                    if suffix in ('.mp4', '.webm', '.mov', '.avi', '.mkv'):
                        if not full_path.exists():
                            print(f"Warning: video not found: {full_path}", file=sys.stderr)
                            continue
                        self.sources[node['id']] = VideoSource(
                            str(full_path),
                            target_fps=self.fps
                        )
                    elif suffix in ('.mp3', '.wav', '.flac', '.ogg', '.m4a', '.aac'):
                        if not full_path.exists():
                            print(f"Warning: audio not found: {full_path}", file=sys.stderr)
                            continue
                        self.audio_source_path = str(full_path)
                        self.audio_analyzer = StreamingAudioAnalyzer(str(full_path))
    def _init_scans(self):
        """Initialize scan nodes with their initial state.

        Each SCAN gets its own seeded RNG so results are reproducible per
        pipeline seed; the default seed varies by scan order (seed_offset).
        """
        import random
        seed_offset = 0
        for node in self.recipe.nodes:
            if node.get('type') == 'SCAN':
                config = node.get('config', {})
                # Create RNG for this scan
                scan_seed = config.get('seed', self.seed + seed_offset)
                rng = random.Random(scan_seed)
                seed_offset += 1
                # Evaluate initial value
                init_expr = config.get('init', 0)
                init_value = self.interp.eval(init_expr, {})
                self.scan_state[node['id']] = {
                    'value': init_value,
                    'rng': rng,
                    'config': config,
                }
                # Compute initial emit
                self._update_scan_emit(node['id'])
    def _update_scan_emit(self, node_id: str):
        """Update the emit value for a scan."""
        state = self.scan_state[node_id]
        config = state['config']
        # Accept either 'emit_expr' or the shorter 'emit' config key.
        emit_expr = config.get('emit_expr', config.get('emit', None))
        if emit_expr is None:
            # No emit expression - emit the value directly
            self.scan_emit[node_id] = state['value']
            return
        # Build environment from state
        env = {}
        if isinstance(state['value'], dict):
            # Dict-valued state exposes each key as a variable.
            env.update(state['value'])
        else:
            env['acc'] = state['value']
        env['beat_count'] = self.ctx.beat_count
        env['time'] = self.ctx.t
        # Set RNG for interpreter
        self.interp.rng = state['rng']
        self.scan_emit[node_id] = self.interp.eval(emit_expr, env)
    def _step_scan(self, node_id: str):
        """Step a scan forward on beat."""
        state = self.scan_state[node_id]
        config = state['config']
        step_expr = config.get('step_expr', config.get('step', None))
        if step_expr is None:
            return
        # Build environment
        env = {}
        if isinstance(state['value'], dict):
            env.update(state['value'])
        else:
            env['acc'] = state['value']
        env['beat_count'] = self.ctx.beat_count
        env['time'] = self.ctx.t
        # Set RNG
        self.interp.rng = state['rng']
        # Evaluate step
        new_value = self.interp.eval(step_expr, env)
        state['value'] = new_value
        # Update emit
        self._update_scan_emit(node_id)
    def _init_slice_on(self):
        """Initialize SLICE_ON nodes."""
        for node in self.recipe.nodes:
            if node.get('type') == 'SLICE_ON':
                config = node.get('config', {})
                init = config.get('init', {})
                # Copy so recipe config is never mutated by accumulator updates.
                self.slice_on_acc[node['id']] = dict(init)
                # Evaluate initial state
                self._eval_slice_on(node['id'])
    def _eval_slice_on(self, node_id: str):
        """Evaluate a SLICE_ON node's Lambda.

        Stores the lambda's result (layers + compose weights) and carries
        the accumulator forward. Errors are logged, not raised, so a bad
        recipe lambda degrades rather than kills the stream.
        """
        node = self.nodes[node_id]
        config = node.get('config', {})
        fn = config.get('fn')
        videos = config.get('videos', [])
        if not fn:
            return
        acc = self.slice_on_acc[node_id]
        n_videos = len(videos)
        # Set up environment
        self.interp.globals['videos'] = list(range(n_videos))
        try:
            from .sexp_interp import eval_slice_on_lambda
            result = eval_slice_on_lambda(
                fn, acc, self.ctx.beat_count, 0, 1,
                list(range(n_videos)), self.interp
            )
            self.slice_on_result[node_id] = result
            # Update accumulator
            if 'acc' in result:
                self.slice_on_acc[node_id] = result['acc']
        except Exception as e:
            print(f"SLICE_ON eval error: {e}", file=sys.stderr)
    def _on_beat(self):
        """Called when a beat is detected (rising edge; see render_frame)."""
        self.ctx.beat_count += 1
        # Step all scans
        for node_id in self.scan_state:
            self._step_scan(node_id)
        # Step all SLICE_ON nodes
        for node_id in self.slice_on_acc:
            self._eval_slice_on(node_id)
    def _get_frame(self, node_id: str) -> Optional[np.ndarray]:
        """
        Get the output frame for a node at current time.
        Recursively evaluates inputs as needed.
        Results are cached for the current timestep.
        """
        if node_id in self._frame_cache:
            return self._frame_cache[node_id]
        node = self.nodes.get(node_id)
        if not node:
            return None
        node_type = node.get('type')
        if node_type == 'SOURCE':
            frame = self._eval_source(node)
        elif node_type == 'SEGMENT':
            frame = self._eval_segment(node)
        elif node_type == 'EFFECT':
            frame = self._eval_effect(node)
        elif node_type == 'SLICE_ON':
            frame = self._eval_slice_on_frame(node)
        else:
            # Unknown node type - try to pass through input
            inputs = node.get('inputs', [])
            frame = self._get_frame(inputs[0]) if inputs else None
        self._frame_cache[node_id] = frame
        return frame
    def _eval_source(self, node: dict) -> Optional[np.ndarray]:
        """Evaluate a SOURCE node."""
        source = self.sources.get(node['id'])
        if source:
            return source.read_frame(self.ctx.t)
        return None
    def _eval_segment(self, node: dict) -> Optional[np.ndarray]:
        """Evaluate a SEGMENT node (time segment of source)."""
        inputs = node.get('inputs', [])
        if not inputs:
            return None
        config = node.get('config', {})
        start = config.get('start', 0)
        duration = config.get('duration')
        # Resolve any bindings
        if isinstance(start, dict):
            start = self._resolve_binding(start) if start.get('_binding') else 0
        if isinstance(duration, dict):
            duration = self._resolve_binding(duration) if duration.get('_binding') else None
        # Adjust time for segment
        t_local = self.ctx.t + (start if isinstance(start, (int, float)) else 0)
        if duration and isinstance(duration, (int, float)):
            t_local = t_local % duration  # Loop within segment
        # Get source frame at adjusted time
        source_id = inputs[0]
        source = self.sources.get(source_id)
        if source:
            return source.read_frame(t_local)
        # Input is not a raw source: fall back to normal node evaluation
        # (note the shifted time cannot be applied in that case).
        return self._get_frame(source_id)
    def _eval_effect(self, node: dict) -> Optional[np.ndarray]:
        """Evaluate an EFFECT node.

        Supported effects: rotate, zoom, invert, hue_shift, blend,
        blend_multi, ripple. Unknown effect names pass the frame through
        unchanged. Small parameter values are treated as no-ops to skip
        needless image work.
        """
        import cv2
        inputs = node.get('inputs', [])
        config = node.get('config', {})
        effect_name = config.get('effect')
        # Get input frame(s)
        input_frames = [self._get_frame(inp) for inp in inputs]
        input_frames = [f for f in input_frames if f is not None]
        if not input_frames:
            return None
        frame = input_frames[0]
        # Resolve bindings in config
        params = self._resolve_config(config)
        # Apply effect based on name
        if effect_name == 'rotate':
            angle = params.get('angle', 0)
            if abs(angle) > 0.5:
                h, w = frame.shape[:2]
                center = (w // 2, h // 2)
                matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
                frame = cv2.warpAffine(frame, matrix, (w, h))
        elif effect_name == 'zoom':
            amount = params.get('amount', 1.0)
            if abs(amount - 1.0) > 0.01:
                frame = self._apply_zoom(frame, amount)
        elif effect_name == 'invert':
            amount = params.get('amount', 0)
            if amount > 0.01:
                inverted = 255 - frame
                frame = cv2.addWeighted(frame, 1 - amount, inverted, amount, 0)
        elif effect_name == 'hue_shift':
            degrees = params.get('degrees', 0)
            if abs(degrees) > 1:
                # OpenCV hue channel is 0-179, i.e. degrees/2.
                hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
                hsv[:, :, 0] = (hsv[:, :, 0].astype(int) + int(degrees / 2)) % 180
                frame = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
        elif effect_name == 'blend':
            if len(input_frames) >= 2:
                opacity = params.get('opacity', 0.5)
                frame = cv2.addWeighted(input_frames[0], 1 - opacity,
                                        input_frames[1], opacity, 0)
        elif effect_name == 'blend_multi':
            weights = params.get('weights', [])
            if len(input_frames) > 1 and weights:
                h, w = input_frames[0].shape[:2]
                result = np.zeros((h, w, 3), dtype=np.float32)
                for f, wt in zip(input_frames, weights):
                    if f is not None and wt > 0.001:
                        if f.shape[:2] != (h, w):
                            f = cv2.resize(f, (w, h))
                        result += f.astype(np.float32) * wt
                frame = np.clip(result, 0, 255).astype(np.uint8)
        elif effect_name == 'ripple':
            amp = params.get('amplitude', 0)
            if amp > 1:
                frame = self._apply_ripple(frame, amp,
                                           params.get('center_x', 0.5),
                                           params.get('center_y', 0.5),
                                           params.get('frequency', 8),
                                           params.get('decay', 2),
                                           params.get('speed', 5))
        return frame
    def _eval_slice_on_frame(self, node: dict) -> Optional[np.ndarray]:
        """Evaluate a SLICE_ON node - returns composited frame.

        Uses the per-beat lambda result (layers + compose weights) computed
        in _eval_slice_on; falls back to the first video when no result or
        weights exist yet.
        """
        import cv2
        config = node.get('config', {})
        video_ids = config.get('videos', [])
        result = self.slice_on_result.get(node['id'], {})
        if not result:
            # No result yet - return first video
            if video_ids:
                return self._get_frame(video_ids[0])
            return None
        # Get layers and compose info
        layers = result.get('layers', [])
        compose = result.get('compose', {})
        weights = compose.get('weights', [])
        if not layers or not weights:
            if video_ids:
                return self._get_frame(video_ids[0])
            return None
        # Get frames for each layer
        frames = []
        for i, layer in enumerate(layers):
            video_idx = layer.get('video', i)
            if video_idx < len(video_ids):
                frame = self._get_frame(video_ids[video_idx])
                # Apply layer effects (zoom)
                effects = layer.get('effects', [])
                for eff in effects:
                    eff_name = eff.get('effect')
                    # Effect name may come through as an enum-like object.
                    if hasattr(eff_name, 'name'):
                        eff_name = eff_name.name
                    if eff_name == 'zoom':
                        zoom_amt = eff.get('amount', 1.0)
                        if frame is not None:
                            frame = self._apply_zoom(frame, zoom_amt)
                frames.append(frame)
            else:
                frames.append(None)
        # Composite with weights - use consistent output size
        if self._output_size:
            w, h = self._output_size
        else:
            # Fallback to first non-None frame size
            for f in frames:
                if f is not None:
                    h, w = f.shape[:2]
                    break
            else:
                return None
        output = np.zeros((h, w, 3), dtype=np.float32)
        for frame, weight in zip(frames, weights):
            if frame is None or weight < 0.001:
                continue
            # Resize to output size
            if frame.shape[1] != w or frame.shape[0] != h:
                frame = cv2.resize(frame, (w, h))
            output += frame.astype(np.float32) * weight
        # Normalize weights
        total_weight = sum(wt for wt in weights if wt > 0.001)
        if total_weight > 0 and abs(total_weight - 1.0) > 0.01:
            output /= total_weight
        return np.clip(output, 0, 255).astype(np.uint8)
    def _resolve_config(self, config: dict) -> dict:
        """Resolve bindings in effect config to actual values.

        Structural/metadata keys are dropped; `_binding` and `_expr` dicts
        are evaluated; plain values pass through unchanged.
        """
        resolved = {}
        for key, value in config.items():
            if key in ('effect', 'effect_path', 'effect_cid', 'effects_registry',
                       'analysis_refs', 'inputs', 'cid'):
                continue
            if isinstance(value, dict) and value.get('_binding'):
                resolved[key] = self._resolve_binding(value)
            elif isinstance(value, dict) and value.get('_expr'):
                resolved[key] = self._resolve_expr(value)
            else:
                resolved[key] = value
        return resolved
    def _resolve_binding(self, binding: dict) -> Any:
        """Resolve a binding to its current value.

        Lookup order: scan emit values, then live analysis tracks, then the
        current audio energy as a last resort. An optional [lo, hi] range
        linearly maps the raw value.
        """
        source_id = binding.get('source')
        feature = binding.get('feature', 'values')
        range_map = binding.get('range')
        # Get raw value from scan or analysis
        if source_id in self.scan_emit:
            value = self.scan_emit[source_id]
        elif source_id in self.ctx.analysis:
            data = self.ctx.analysis[source_id]
            # Analysis tracks hold single-sample lists; take element 0.
            value = data.get(feature, data.get('values', [0]))[0] if isinstance(data, dict) else data
        else:
            # Fallback to energy
            value = self.ctx.energy
        # Extract feature from dict
        if isinstance(value, dict) and feature in value:
            value = value[feature]
        # Apply range mapping
        if range_map and isinstance(value, (int, float)):
            lo, hi = range_map
            # NOTE(review): assumes the raw value is normalized to [0, 1];
            # values outside that range extrapolate beyond [lo, hi].
            value = lo + value * (hi - lo)
        return value
    def _resolve_expr(self, expr: dict) -> Any:
        """Resolve a compiled expression."""
        env = {
            'energy': self.ctx.energy,
            'beat_count': self.ctx.beat_count,
            't': self.ctx.t,
        }
        # Add scan values
        for scan_id, value in self.scan_emit.items():
            # Use short form if available
            env[scan_id] = value
        # Extract the actual expression from _expr wrapper
        actual_expr = expr.get('_expr', expr)
        return self.interp.eval(actual_expr, env)
    def _apply_zoom(self, frame: np.ndarray, amount: float) -> np.ndarray:
        """Apply zoom to frame.

        amount > 1 crops the center and scales up; amount < 1 shrinks the
        frame onto a black canvas; values within ~1% of 1.0 are a no-op.
        """
        import cv2
        h, w = frame.shape[:2]
        if amount > 1.01:
            # Zoom in: crop center
            new_w, new_h = int(w / amount), int(h / amount)
            if new_w > 0 and new_h > 0:
                x1, y1 = (w - new_w) // 2, (h - new_h) // 2
                cropped = frame[y1:y1+new_h, x1:x1+new_w]
                return cv2.resize(cropped, (w, h))
        elif amount < 0.99:
            # Zoom out: shrink and center
            scaled_w, scaled_h = int(w * amount), int(h * amount)
            if scaled_w > 0 and scaled_h > 0:
                shrunk = cv2.resize(frame, (scaled_w, scaled_h))
                canvas = np.zeros((h, w, 3), dtype=np.uint8)
                x_off, y_off = (w - scaled_w) // 2, (h - scaled_h) // 2
                canvas[y_off:y_off+scaled_h, x_off:x_off+scaled_w] = shrunk
                return canvas
        return frame
    def _apply_ripple(self, frame: np.ndarray, amplitude: float,
                      cx: float, cy: float, frequency: float,
                      decay: float, speed: float) -> np.ndarray:
        """Apply ripple effect (radial sine displacement around cx, cy).

        cx/cy are fractions of frame width/height; the displacement decays
        exponentially with distance and animates with self.ctx.t * speed.
        """
        import cv2
        h, w = frame.shape[:2]
        # Create coordinate grids
        y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32)
        # Normalize to center
        center_x, center_y = w * cx, h * cy
        dx = x_coords - center_x
        dy = y_coords - center_y
        dist = np.sqrt(dx**2 + dy**2)
        # Ripple displacement
        phase = self.ctx.t * speed
        ripple = amplitude * np.sin(dist / frequency - phase) * np.exp(-dist * decay / max(w, h))
        # Displace coordinates
        angle = np.arctan2(dy, dx)
        map_x = (x_coords + ripple * np.cos(angle)).astype(np.float32)
        map_y = (y_coords + ripple * np.sin(angle)).astype(np.float32)
        return cv2.remap(frame, map_x, map_y, cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)
    def _find_output_node(self) -> Optional[str]:
        """Find the final output node (MUX or last EFFECT).

        Returns None when the recipe has neither a MUX node nor an EFFECT
        following a SLICE_ON, in which case render_frame() yields nothing.
        """
        # Look for MUX node
        for node in self.recipe.nodes:
            if node.get('type') == 'MUX':
                return node['id']
        # Otherwise find last EFFECT after SLICE_ON
        last_effect = None
        found_slice_on = False
        for node in self.recipe.nodes:
            if node.get('type') == 'SLICE_ON':
                found_slice_on = True
            elif node.get('type') == 'EFFECT' and found_slice_on:
                last_effect = node['id']
        return last_effect
    def render_frame(self, t: float) -> Optional[np.ndarray]:
        """Render a single frame at time t.

        Updates audio-driven context (energy, beat edge detection that
        triggers _on_beat), then lazily evaluates the DAG from the output
        node down. Returns None when no output node exists or it produced
        no frame.
        """
        # Clear frame cache
        self._frame_cache.clear()
        # Update context
        self.ctx.t = t
        # Update audio analysis
        if self.audio_analyzer:
            self.audio_analyzer.set_time(t)
            energy = self.audio_analyzer.get_energy()
            is_beat = self.audio_analyzer.get_beat()
            # Beat edge detection
            was_beat = self.ctx.is_beat
            self.ctx.energy = energy
            self.ctx.is_beat = is_beat
            if is_beat and not was_beat:
                self._on_beat()
            # Store in analysis dict
            self.ctx.analysis['live_energy'] = {'values': [energy]}
            self.ctx.analysis['live_beat'] = {'values': [1.0 if is_beat else 0.0]}
        # Find output node and render
        output_node = self._find_output_node()
        if output_node:
            frame = self._get_frame(output_node)
            # Normalize to output size
            if frame is not None and self._output_size:
                w, h = self._output_size
                if frame.shape[1] != w or frame.shape[0] != h:
                    import cv2
                    frame = cv2.resize(frame, (w, h))
            return frame
        return None
    def run(self, output: str = "preview", duration: float = None):
        """
        Run the pipeline.
        Args:
            output: "preview", filename, or Output object
            duration: Duration in seconds (default: audio duration or 60s)
        """
        # Determine duration
        if duration is None:
            if self.audio_analyzer:
                duration = self.audio_analyzer.duration
            else:
                duration = 60.0
        # Create output
        if output == "preview":
            # Get frame size from first source
            first_source = next(iter(self.sources.values()), None)
            if first_source:
                w, h = first_source._size
            else:
                w, h = 720, 720
            out = DisplayOutput(size=(w, h), fps=self.fps, audio_source=self.audio_source_path)
        elif isinstance(output, str):
            first_source = next(iter(self.sources.values()), None)
            if first_source:
                w, h = first_source._size
            else:
                w, h = 720, 720
            out = FileOutput(output, size=(w, h), fps=self.fps, audio_source=self.audio_source_path)
        else:
            out = output
        frame_time = 1.0 / self.fps
        n_frames = int(duration * self.fps)
        print(f"Streaming: {len(self.sources)} sources -> {output}", file=sys.stderr)
        print(f"Duration: {duration:.1f}s, {n_frames} frames @ {self.fps}fps", file=sys.stderr)
        start_time = time.time()
        frame_count = 0
        try:
            for frame_num in range(n_frames):
                t = frame_num * frame_time
                frame = self.render_frame(t)
                if frame is not None:
                    out.write(frame, t)
                    frame_count += 1
                # Progress
                if frame_num % 50 == 0:
                    elapsed = time.time() - start_time
                    # Local `fps` here is the measured throughput, not self.fps.
                    fps = frame_count / elapsed if elapsed > 0 else 0
                    pct = 100 * frame_num / n_frames
                    print(f"\r{pct:5.1f}% | {fps:5.1f} fps | frame {frame_num}/{n_frames}",
                          end="", file=sys.stderr)
        except KeyboardInterrupt:
            print("\nInterrupted", file=sys.stderr)
        finally:
            out.close()
            for src in self.sources.values():
                src.close()
        elapsed = time.time() - start_time
        avg_fps = frame_count / elapsed if elapsed > 0 else 0
        print(f"\nCompleted: {frame_count} frames in {elapsed:.1f}s ({avg_fps:.1f} fps avg)",
              file=sys.stderr)
def run_pipeline(recipe_path: str, output: str = "preview",
                 duration: float = None, fps: float = None):
    """
    Run a recipe through the streaming pipeline.
    No adapter layer - directly executes the compiled recipe.
    """
    from pathlib import Path
    import sys
    # Make the artdag package importable for the sexp compiler.
    sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag"))
    from artdag.sexp.compiler import compile_string
    recipe_file = Path(recipe_path)
    compiled = compile_string(recipe_file.read_text(), {}, recipe_dir=recipe_file.parent)
    StreamingPipeline(
        compiled,
        recipe_dir=recipe_file.parent,
        fps=fps or compiled.encoding.get('fps', 30),
    ).run(output=output, duration=duration)
def run_pipeline_piped(recipe_path: str, duration: float = None, fps: float = None):
    """
    Run pipeline and pipe directly to mpv with audio.

    Args:
        recipe_path: Path to the .sexp recipe file.
        duration: Seconds to render; defaults to the audio duration, or
            60s when the recipe has no audio source.
        fps: Frame rate override; defaults to the recipe's encoding fps.
    """
    import subprocess
    from pathlib import Path
    import sys
    # Make the artdag package importable for the sexp compiler.
    sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag"))
    from artdag.sexp.compiler import compile_string
    recipe_path = Path(recipe_path)
    recipe_text = recipe_path.read_text()
    compiled = compile_string(recipe_text, {}, recipe_dir=recipe_path.parent)
    pipeline = StreamingPipeline(
        compiled,
        recipe_dir=recipe_path.parent,
        fps=fps or compiled.encoding.get('fps', 30),
    )
    # Get frame info
    first_source = next(iter(pipeline.sources.values()), None)
    if first_source:
        w, h = first_source._size
    else:
        w, h = 720, 720
    # Determine duration
    if duration is None:
        if pipeline.audio_analyzer:
            duration = pipeline.audio_analyzer.duration
        else:
            duration = 60.0
    actual_fps = fps or compiled.encoding.get('fps', 30)
    n_frames = int(duration * actual_fps)
    frame_time = 1.0 / actual_fps
    print(f"Streaming {n_frames} frames @ {actual_fps}fps to mpv", file=sys.stderr)
    # Start mpv
    mpv_cmd = [
        "mpv", "--no-cache",
        "--demuxer=rawvideo",
        f"--demuxer-rawvideo-w={w}",
        f"--demuxer-rawvideo-h={h}",
        "--demuxer-rawvideo-mp-format=rgb24",
        f"--demuxer-rawvideo-fps={actual_fps}",
        "--title=Streaming Pipeline",
        "-"
    ]
    mpv = subprocess.Popen(mpv_cmd, stdin=subprocess.PIPE, stderr=subprocess.DEVNULL)
    # Start audio if available
    audio_proc = None
    if pipeline.audio_source_path:
        audio_cmd = ["ffplay", "-nodisp", "-autoexit", "-loglevel", "quiet",
                     pipeline.audio_source_path]
        audio_proc = subprocess.Popen(audio_cmd, stdout=subprocess.DEVNULL,
                                      stderr=subprocess.DEVNULL)
    try:
        import cv2
        for frame_num in range(n_frames):
            if mpv.poll() is not None:
                break  # mpv closed
            t = frame_num * frame_time
            frame = pipeline.render_frame(t)
            if frame is not None:
                # Ensure consistent frame size
                if frame.shape[1] != w or frame.shape[0] != h:
                    frame = cv2.resize(frame, (w, h))
                if not frame.flags['C_CONTIGUOUS']:
                    frame = np.ascontiguousarray(frame)
                try:
                    mpv.stdin.write(frame.tobytes())
                    mpv.stdin.flush()
                except BrokenPipeError:
                    break
    except KeyboardInterrupt:
        pass
    finally:
        # Tear down mpv, ffplay, and all video sources on any exit path.
        if mpv.stdin:
            mpv.stdin.close()
        mpv.terminate()
        if audio_proc:
            audio_proc.terminate()
        for src in pipeline.sources.values():
            src.close()
if __name__ == "__main__":
    import argparse
    cli = argparse.ArgumentParser(description="Run sexp recipe through streaming pipeline")
    cli.add_argument("recipe", help="Path to .sexp recipe file")
    cli.add_argument("-o", "--output", default="pipe",
                     help="Output: 'pipe' (mpv), 'preview', or filename (default: pipe)")
    cli.add_argument("-d", "--duration", type=float, default=None,
                     help="Duration in seconds (default: audio duration)")
    cli.add_argument("--fps", type=float, default=None,
                     help="Frame rate (default: from recipe)")
    parsed = cli.parse_args()
    # "pipe" streams straight into mpv with synchronized audio; anything
    # else goes through the generic output handling in run_pipeline().
    if parsed.output == "pipe":
        run_pipeline_piped(parsed.recipe, duration=parsed.duration, fps=parsed.fps)
    else:
        run_pipeline(parsed.recipe, output=parsed.output,
                     duration=parsed.duration, fps=parsed.fps)

470
streaming/recipe_adapter.py Normal file
View File

@@ -0,0 +1,470 @@
"""
Adapter to run sexp recipes through the streaming compositor.
Bridges the gap between:
- Existing recipe format (sexp files with stages, effects, analysis)
- Streaming compositor (sources, effect chains, compositor config)
"""
import sys
from pathlib import Path
from typing import Dict, List, Any, Optional
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag"))
from .compositor import StreamingCompositor
from .sources import VideoSource
from .audio import FileAudioAnalyzer
class RecipeAdapter:
    """
    Adapts a compiled sexp recipe to run through the streaming compositor.

    Bridges the batch recipe format (stages, effects, analysis) onto the
    real-time streaming pipeline.

    Example:
        adapter = RecipeAdapter("effects/quick_test.sexp")
        adapter.run(output="preview", duration=60)
    """
    def __init__(
        self,
        recipe_path: str,
        params: Optional[Dict[str, Any]] = None,
        backend: str = "numpy",
    ):
        """
        Load and prepare a recipe for streaming.

        Args:
            recipe_path: Path to .sexp recipe file
            params: Parameter overrides (None means no overrides)
            backend: "numpy" or "glsl"
        """
        self.recipe_path = Path(recipe_path)
        self.recipe_dir = self.recipe_path.parent
        # NOTE: any falsy value (None or {}) becomes a fresh empty dict.
        self.params = params or {}
        self.backend = backend
        # Compile recipe eagerly so sources/effects are indexed up front.
        self._compile()
def _compile(self):
    """Compile the recipe text and index its sources, analyzers and effects.

    Populates:
        self.compiled: CompiledRecipe from the sexp compiler.
        self.sources: SOURCE node id -> Path (recipe_dir / config path).
        self.effects_registry: effect name -> Path of its implementation.
        self.analyzers: ANALYZE node id -> {"analyzer", "path"} config.
    """
    from artdag.sexp.compiler import compile_string
    recipe_text = self.recipe_path.read_text()
    self.compiled = compile_string(recipe_text, self.params, recipe_dir=self.recipe_dir)
    # Extract key info
    self.sources = {}  # name -> path
    self.effects_registry = {}  # effect_name -> path
    self.analyzers = {}  # name -> analyzer info
    # Walk nodes to find sources and structure
    # nodes is a list in CompiledRecipe
    for node in self.compiled.nodes:
        node_type = node.get("type", "")
        if node_type == "SOURCE":
            config = node.get("config", {})
            path = config.get("path")
            if path:
                # Source paths are stored relative to the recipe file.
                self.sources[node["id"]] = self.recipe_dir / path
        elif node_type == "ANALYZE":
            config = node.get("config", {})
            self.analyzers[node["id"]] = {
                "analyzer": config.get("analyzer"),
                "path": config.get("analyzer_path"),
            }
    # Get effects registry from compiled recipe
    # registry has 'effects' sub-dict
    effects_dict = self.compiled.registry.get("effects", {})
    for name, info in effects_dict.items():
        if info.get("path"):
            self.effects_registry[name] = Path(info["path"])
def run_analysis(self) -> Dict[str, Any]:
    """
    Run analysis phase (energy, beats, etc.).

    Delegates to the planner, which executes analyzers as a side effect of
    plan creation; per-node results are captured via the on_analysis
    callback and merged with the plan's named analysis tracks.

    Returns:
        Dict of analysis track name -> {times, values, duration}
        # NOTE(review): value schema inferred from the 'times' key used
        # below — confirm against the planner's analyzer output format.
    """
    print(f"Running analysis...", file=sys.stderr)
    # Use existing planner's analysis execution
    from artdag.sexp.planner import create_plan
    analysis_data = {}
    def on_analysis(node_id: str, results: dict):
        # Record raw per-node results keyed by node id.
        analysis_data[node_id] = results
        print(f" {node_id[:16]}...: {len(results.get('times', []))} samples", file=sys.stderr)
    # Create plan (runs analysis as side effect)
    plan = create_plan(
        self.compiled,
        inputs={},
        recipe_dir=self.recipe_dir,
        on_analysis=on_analysis,
    )
    # Also store named analysis tracks (may overwrite node-id entries).
    for name, data in plan.analysis.items():
        analysis_data[name] = data
    return analysis_data
def build_compositor(
    self,
    analysis_data: Optional[Dict[str, Any]] = None,
    fps: Optional[float] = None,
) -> StreamingCompositor:
    """
    Build a streaming compositor from the recipe.

    This is a simplified version that handles common patterns.
    Complex recipes may need manual configuration.

    Args:
        analysis_data: Pre-computed analysis data (passed through to the
            compositor; None/empty means fully live analysis).
        fps: Frame rate override; falls back to the recipe's encoding
            fps (default 30).

    Returns:
        Configured StreamingCompositor
    """
    # Extract video and audio sources in SLICE_ON input order
    video_sources = []
    audio_source = None
    # Find audio source first: the first source with an audio extension wins.
    for node_id, path in self.sources.items():
        suffix = path.suffix.lower()
        if suffix in ('.mp3', '.wav', '.flac', '.ogg', '.m4a', '.aac'):
            audio_source = str(path)
            break
    # Find SLICE_ON node to get correct video order
    slice_on_inputs = None
    for node in self.compiled.nodes:
        if node.get('type') == 'SLICE_ON':
            # Use 'videos' config key which has the correct order
            config = node.get('config', {})
            slice_on_inputs = config.get('videos', [])
            break
    if slice_on_inputs:
        # Trace each SLICE_ON input back to its SOURCE
        node_lookup = {n['id']: n for n in self.compiled.nodes}
        def trace_to_source(node_id, visited=None):
            """Trace a node back to its SOURCE, return source path."""
            # 'visited' guards against cycles in the node graph.
            if visited is None:
                visited = set()
            if node_id in visited:
                return None
            visited.add(node_id)
            node = node_lookup.get(node_id)
            if not node:
                return None
            if node.get('type') == 'SOURCE':
                return self.sources.get(node_id)
            # Recurse through inputs; the first SOURCE found wins.
            for inp in node.get('inputs', []):
                result = trace_to_source(inp, visited)
                if result:
                    return result
            return None
        # Build video_sources in SLICE_ON input order
        for inp_id in slice_on_inputs:
            source_path = trace_to_source(inp_id)
            if source_path:
                suffix = source_path.suffix.lower()
                if suffix in ('.mp4', '.webm', '.mov', '.avi', '.mkv'):
                    video_sources.append(str(source_path))
    # Fallback to definition order if no SLICE_ON
    if not video_sources:
        for node_id, path in self.sources.items():
            suffix = path.suffix.lower()
            if suffix in ('.mp4', '.webm', '.mov', '.avi', '.mkv'):
                video_sources.append(str(path))
    if not video_sources:
        raise ValueError("No video sources found in recipe")
    # Build effect chains - use live audio bindings (matching video_sources count)
    effects_per_source = self._build_streaming_effects(n_sources=len(video_sources))
    # Build compositor config from recipe
    compositor_config = self._extract_compositor_config(analysis_data)
    return StreamingCompositor(
        sources=video_sources,
        effects_per_source=effects_per_source,
        compositor_config=compositor_config,
        analysis_data=analysis_data or {},
        backend=self.backend,
        recipe_dir=self.recipe_dir,
        fps=fps or self.compiled.encoding.get("fps", 30),
        audio_source=audio_source,
    )
def _build_streaming_effects(self, n_sources: int = None) -> List[List[Dict]]:
"""
Build effect chains for streaming with live audio bindings.
Replicates the recipe's effect pipeline:
- Per source: rotate, zoom, invert, hue_shift, ascii_art
- All driven by live_energy and live_beat
"""
if n_sources is None:
n_sources = len([p for p in self.sources.values()
if p.suffix.lower() in ('.mp4', '.webm', '.mov', '.avi', '.mkv')])
effects_per_source = []
for i in range(n_sources):
# Alternate rotation direction per source
rot_dir = 1 if i % 2 == 0 else -1
effects = [
# Rotate - energy drives angle
{
"effect": "rotate",
"effect_path": str(self.effects_registry.get("rotate", "")),
"angle": {
"_binding": True,
"source": "live_energy",
"feature": "values",
"range": [0, 45 * rot_dir],
},
},
# Zoom - energy drives amount
{
"effect": "zoom",
"effect_path": str(self.effects_registry.get("zoom", "")),
"amount": {
"_binding": True,
"source": "live_energy",
"feature": "values",
"range": [1.0, 1.5] if i % 2 == 0 else [1.0, 0.7],
},
},
# Invert - beat triggers
{
"effect": "invert",
"effect_path": str(self.effects_registry.get("invert", "")),
"amount": {
"_binding": True,
"source": "live_beat",
"feature": "values",
"range": [0, 1],
},
},
# Hue shift - energy drives hue
{
"effect": "hue_shift",
"effect_path": str(self.effects_registry.get("hue_shift", "")),
"degrees": {
"_binding": True,
"source": "live_energy",
"feature": "values",
"range": [0, 180],
},
},
# ASCII art - energy drives char size, beat triggers mix
{
"effect": "ascii_art",
"effect_path": str(self.effects_registry.get("ascii_art", "")),
"char_size": {
"_binding": True,
"source": "live_energy",
"feature": "values",
"range": [4, 32],
},
"mix": {
"_binding": True,
"source": "live_beat",
"feature": "values",
"range": [0, 1],
},
},
]
effects_per_source.append(effects)
return effects_per_source
def _extract_effects(self) -> List[List[Dict]]:
"""Extract effect chains for each source (legacy, pre-computed analysis)."""
# Simplified: find EFFECT nodes and their configs
effects_per_source = []
for node_id, path in self.sources.items():
if path.suffix.lower() not in ('.mp4', '.webm', '.mov', '.avi', '.mkv'):
continue
# Find effects that depend on this source
# This is simplified - real implementation would trace the DAG
effects = []
for node in self.compiled.nodes:
if node.get("type") == "EFFECT":
config = node.get("config", {})
effect_name = config.get("effect")
if effect_name and effect_name in self.effects_registry:
effect_config = {
"effect": effect_name,
"effect_path": str(self.effects_registry[effect_name]),
}
# Copy only effect params (filter out internal fields)
internal_fields = (
"effect", "effect_path", "cid", "effect_cid",
"effects_registry", "analysis_refs", "inputs",
)
for k, v in config.items():
if k not in internal_fields:
effect_config[k] = v
effects.append(effect_config)
break # One effect per source for now
effects_per_source.append(effects)
return effects_per_source
def _extract_compositor_config(self, analysis_data: Dict) -> Dict:
"""Extract compositor configuration."""
# Look for blend_multi or similar composition nodes
for node in self.compiled.nodes:
if node.get("type") == "EFFECT":
config = node.get("config", {})
if config.get("effect") == "blend_multi":
return {
"mode": config.get("mode", "alpha"),
"weights": config.get("weights", []),
}
# Default: equal blend
n_sources = len([p for p in self.sources.values()
if p.suffix.lower() in ('.mp4', '.webm', '.mov', '.avi', '.mkv')])
return {
"mode": "alpha",
"weights": [1.0 / n_sources] * n_sources if n_sources > 0 else [1.0],
}
def run(
    self,
    output: str = "preview",
    duration: Optional[float] = None,
    fps: Optional[float] = None,
):
    """
    Run the recipe through streaming compositor.

    Everything streams: video frames read on-demand, audio analyzed in
    real-time. No pre-computation.

    Args:
        output: "preview", filename, or Output object
        duration: Duration in seconds (default: audio duration; live
            mode when no audio analyzer is available)
        fps: Frame rate (default from recipe, or 30)
    """
    # Fix: dropped the unused `from .recipe_executor import
    # StreamingRecipeExecutor` — this path interprets the recipe with
    # SexpStreamingExecutor below and never referenced it.
    compositor = self.build_compositor(analysis_data={}, fps=fps)
    # Use audio duration if not specified
    if duration is None:
        if compositor._audio_analyzer:
            duration = compositor._audio_analyzer.duration
            print(f"Using audio duration: {duration:.1f}s", file=sys.stderr)
        else:
            # Live mode - run until quit
            print("Live mode - press 'q' to quit", file=sys.stderr)
    # Create sexp executor that interprets the recipe
    from .sexp_executor import SexpStreamingExecutor
    executor = SexpStreamingExecutor(self.compiled, seed=42)
    compositor.run(output=output, duration=duration, recipe_executor=executor)
def run_recipe(
    recipe_path: str,
    output: str = "preview",
    duration: float = None,
    params: Dict = None,
    fps: float = None,
):
    """
    Run a recipe through streaming compositor.

    Everything streams in real-time (video frames, audio analysis) with
    no pre-computation, so playback starts immediately.

    Example:
        run_recipe("effects/quick_test.sexp", output="preview", duration=30)
        run_recipe("effects/quick_test.sexp", output="preview", fps=5)  # Lower fps for slow systems
    """
    RecipeAdapter(recipe_path, params=params).run(
        output=output, duration=duration, fps=fps)
def run_recipe_piped(
    recipe_path: str,
    duration: Optional[float] = None,
    params: Optional[Dict] = None,
    fps: Optional[float] = None,
):
    """
    Run recipe and pipe directly to mpv.

    Args:
        recipe_path: Path to .sexp recipe file.
        duration: Seconds to stream; None defers to the compositor's
            default (audio duration / live mode).
        params: Recipe parameter overrides.
        fps: Frame rate override; falls back to the recipe's encoding fps.
    """
    from .output import PipeOutput
    adapter = RecipeAdapter(recipe_path, params=params)
    compositor = adapter.build_compositor(analysis_data={}, fps=fps)
    # Get frame size from the first source; fall back to a square default.
    if compositor.sources:
        first_source = compositor.sources[0]
        # NOTE(review): reads the source's private _size — assumed to be
        # (width, height); confirm against the source implementation.
        w, h = first_source._size
    else:
        w, h = 720, 720
    actual_fps = fps or adapter.compiled.encoding.get('fps', 30)
    # Create pipe output (raw frames to mpv, audio muxed from the source).
    pipe_out = PipeOutput(
        size=(w, h),
        fps=actual_fps,
        audio_source=compositor._audio_source
    )
    # Create executor that interprets the compiled recipe per frame.
    from .sexp_executor import SexpStreamingExecutor
    executor = SexpStreamingExecutor(adapter.compiled, seed=42)
    # Run with pipe output
    compositor.run(output=pipe_out, duration=duration, recipe_executor=executor)
if __name__ == "__main__":
    # CLI entry point for the adapter: pipe to mpv by default, otherwise
    # hand off to the preview/file path.
    import argparse

    cli = argparse.ArgumentParser(description="Run sexp recipe with streaming compositor")
    cli.add_argument("recipe", help="Path to .sexp recipe file")
    cli.add_argument("-o", "--output", default="pipe",
                     help="Output: 'pipe' (mpv), 'preview', or filename (default: pipe)")
    cli.add_argument("-d", "--duration", type=float, default=None,
                     help="Duration in seconds (default: audio duration)")
    cli.add_argument("--fps", type=float, default=None,
                     help="Frame rate (default: from recipe)")
    opts = cli.parse_args()

    if opts.output == "pipe":
        run_recipe_piped(opts.recipe, duration=opts.duration, fps=opts.fps)
    else:
        run_recipe(opts.recipe, output=opts.output, duration=opts.duration, fps=opts.fps)

View File

@@ -0,0 +1,415 @@
"""
Streaming recipe executor.
Implements the full recipe logic for real-time streaming:
- Scans (state machines that evolve on beats)
- Process-pair template (two clips with sporadic effects, blended)
- Cycle-crossfade (dynamic composition cycling through video pairs)
"""
import random
import numpy as np
from typing import Dict, List, Any, Optional
from dataclasses import dataclass, field
@dataclass
class ScanState:
    """State for a scan (beat-driven state machine)."""
    # Current state: an int counter or a state dict, depending on the scan.
    value: Any = 0
    # Private RNG so each scan evolves independently and reproducibly.
    rng: random.Random = field(default_factory=random.Random)
class StreamingScans:
    """
    Real-time scan executor.

    Scans are state machines that evolve on each beat; they drive effect
    parameters such as invert triggers and hue shifts.
    """
    def __init__(self, seed: int = 42, n_sources: int = 4):
        """Create the scan set for *n_sources* video pairs, seeded from *seed*."""
        self.master_seed = seed
        self.n_sources = n_sources
        self.scans: Dict[str, ScanState] = {}
        # Beat bookkeeping, used to estimate the beat interval when stepping.
        self.beat_count = 0
        self.current_time = self.last_beat_time = 0.0
        self._init_scans()
def _init_scans(self):
    """Create every scan, each with a deterministic per-scan RNG seed."""
    per_pair = ["inv_a_{0}", "inv_b_{0}", "hue_a_{0}", "hue_b_{0}",
                "ascii_a_{0}", "ascii_b_{0}", "pair_mix_{0}", "pair_rot_{0}"]
    # Per-pair scans first (in pair order), then the global scans.
    names = [tpl.format(i) for i in range(self.n_sources) for tpl in per_pair]
    names += ["whole_spin", "ripple_gate", "cycle"]
    for offset, name in enumerate(names):
        # Seed offset by position so every scan draws its own sequence.
        self.scans[name] = ScanState(
            value=self._init_value(name),
            rng=random.Random(self.master_seed + offset),
        )
def _init_value(self, name: str) -> Any:
"""Get initial value for a scan."""
if name.startswith("inv_") or name.startswith("ascii_"):
return 0 # Counter for remaining beats
elif name.startswith("hue_"):
return {"rem": 0, "hue": 0}
elif name.startswith("pair_mix"):
return {"rem": 0, "opacity": 0.5}
elif name.startswith("pair_rot"):
pair_idx = int(name.split("_")[-1])
rot_dir = 1 if pair_idx % 2 == 0 else -1
return {"beat": 0, "clen": 25, "dir": rot_dir, "angle": 0}
elif name == "whole_spin":
return {
"phase": 0, # 0 = waiting, 1 = spinning
"beat": 0, # beats into current phase
"plen": 20, # beats in this phase
"dir": 1, # spin direction
"total_angle": 0.0, # cumulative angle after all spins
"spin_start_angle": 0.0, # angle when current spin started
"spin_start_time": 0.0, # time when current spin started
"spin_end_time": 0.0, # estimated time when spin ends
}
elif name == "ripple_gate":
return {"rem": 0, "cx": 0.5, "cy": 0.5}
elif name == "cycle":
return {"cycle": 0, "beat": 0, "clen": 60}
return 0
def on_beat(self):
    """Advance every scan by one beat."""
    self.beat_count += 1
    # Interval estimated from the gap to the previous beat; 0.5s is the
    # fallback before the second beat has arrived.
    if self.last_beat_time > 0:
        interval = self.current_time - self.last_beat_time
    else:
        interval = 0.5
    self.last_beat_time = self.current_time
    for key, state in self.scans.items():
        state.value = self._step_scan(key, state.value, state.rng, interval)
def _step_scan(self, name: str, value: Any, rng: random.Random, beat_interval: float = 0.5) -> Any:
    """Step a scan forward by one beat.

    Args:
        name: Scan name; its prefix (or exact name) selects the rule.
        value: Current state (int counter or state dict, per _init_value).
        rng: The scan's private RNG. The order of rng calls is part of
            the deterministic contract — do not reorder them.
        beat_interval: Estimated seconds between beats; used only to
            convert a whole-spin's beat length into wall-clock duration.

    Returns:
        The new scan state.
    """
    # Invert scan: 10% chance, lasts 1-5 beats
    if name.startswith("inv_"):
        if value > 0:
            return value - 1
        elif rng.random() < 0.1:
            return rng.randint(1, 5)
        return 0
    # Hue scan: 10% chance, random hue 30-330, lasts 1-5 beats
    elif name.startswith("hue_"):
        if value["rem"] > 0:
            return {"rem": value["rem"] - 1, "hue": value["hue"]}
        elif rng.random() < 0.1:
            return {"rem": rng.randint(1, 5), "hue": rng.uniform(30, 330)}
        return {"rem": 0, "hue": 0}
    # ASCII scan: 5% chance, lasts 1-3 beats
    elif name.startswith("ascii_"):
        if value > 0:
            return value - 1
        elif rng.random() < 0.05:
            return rng.randint(1, 3)
        return 0
    # Pair mix: changes every 1-11 beats
    elif name.startswith("pair_mix"):
        if value["rem"] > 0:
            return {"rem": value["rem"] - 1, "opacity": value["opacity"]}
        return {"rem": rng.randint(1, 11), "opacity": rng.choice([0, 0.5, 1.0])}
    # Pair rotation: full rotation every 20-30 beats
    elif name.startswith("pair_rot"):
        beat = value["beat"]
        clen = value["clen"]
        dir_ = value["dir"]
        angle = value["angle"]
        if beat + 1 < clen:
            # Advance a constant fraction of a full turn per beat.
            new_angle = angle + dir_ * (360 / clen)
            return {"beat": beat + 1, "clen": clen, "dir": dir_, "angle": new_angle}
        else:
            # Cycle complete: pick a new length and reverse direction.
            return {"beat": 0, "clen": rng.randint(20, 30), "dir": -dir_, "angle": angle}
    # Whole spin: sporadic 720 degree spins (cumulative - stays rotated)
    elif name == "whole_spin":
        phase = value["phase"]
        beat = value["beat"]
        plen = value["plen"]
        dir_ = value["dir"]
        total_angle = value.get("total_angle", 0.0)
        spin_start_angle = value.get("spin_start_angle", 0.0)
        spin_start_time = value.get("spin_start_time", 0.0)
        spin_end_time = value.get("spin_end_time", 0.0)
        if phase == 1:
            # Currently spinning
            if beat + 1 < plen:
                return {
                    "phase": 1, "beat": beat + 1, "plen": plen, "dir": dir_,
                    "total_angle": total_angle,
                    "spin_start_angle": spin_start_angle,
                    "spin_start_time": spin_start_time,
                    "spin_end_time": spin_end_time,
                }
            else:
                # Spin complete - update total_angle with final spin
                new_total = spin_start_angle + dir_ * 720.0
                return {
                    "phase": 0, "beat": 0, "plen": rng.randint(20, 40), "dir": dir_,
                    "total_angle": new_total,
                    "spin_start_angle": new_total,
                    "spin_start_time": self.current_time,
                    "spin_end_time": self.current_time,
                }
        else:
            # Waiting phase
            if beat + 1 < plen:
                return {
                    "phase": 0, "beat": beat + 1, "plen": plen, "dir": dir_,
                    "total_angle": total_angle,
                    "spin_start_angle": spin_start_angle,
                    "spin_start_time": spin_start_time,
                    "spin_end_time": spin_end_time,
                }
            else:
                # Start new spin: random direction, 10-25 beats long; the
                # end time lets get_emit interpolate smoothly in wall-clock.
                new_dir = 1 if rng.random() < 0.5 else -1
                new_plen = rng.randint(10, 25)
                spin_duration = new_plen * beat_interval
                return {
                    "phase": 1, "beat": 0, "plen": new_plen, "dir": new_dir,
                    "total_angle": total_angle,
                    "spin_start_angle": total_angle,
                    "spin_start_time": self.current_time,
                    "spin_end_time": self.current_time + spin_duration,
                }
    # Ripple gate: 5% chance, lasts 1-20 beats
    elif name == "ripple_gate":
        if value["rem"] > 0:
            return {"rem": value["rem"] - 1, "cx": value["cx"], "cy": value["cy"]}
        elif rng.random() < 0.05:
            return {"rem": rng.randint(1, 20),
                    "cx": rng.uniform(0.1, 0.9),
                    "cy": rng.uniform(0.1, 0.9)}
        return {"rem": 0, "cx": 0.5, "cy": 0.5}
    # Cycle: track which video pair is active
    elif name == "cycle":
        beat = value["beat"]
        clen = value["clen"]
        cycle = value["cycle"]
        if beat + 1 < clen:
            return {"cycle": cycle, "beat": beat + 1, "clen": clen}
        else:
            # Move to next pair, vary cycle length (deterministic in beat_count)
            return {"cycle": (cycle + 1) % 4, "beat": 0,
                    "clen": 40 + (self.beat_count * 7) % 41}
    # Unknown scan names pass through unchanged.
    return value
def get_emit(self, name: str) -> float:
    """Map a scan's internal state to the value consumed by effects."""
    state = self.scans[name].value
    if name.startswith(("inv_", "ascii_")):
        # Gate: 1 while the counter is running, else 0.
        return 1.0 if state > 0 else 0.0
    if name.startswith("hue_"):
        return state["hue"] if state["rem"] > 0 else 0.0
    if name.startswith("pair_mix"):
        return state["opacity"]
    if name.startswith("pair_rot"):
        return state["angle"]
    if name == "ripple_gate":
        return 1.0 if state["rem"] > 0 else 0.0
    if name == "cycle":
        # The cycle scan emits its whole state dict, not a scalar.
        return state
    if name == "whole_spin":
        if state.get("phase", 0) != 1:
            # Not spinning: hold the cumulative rotation.
            return state.get("total_angle", 0.0)
        # Mid-spin: interpolate the 720-degree sweep over wall-clock time.
        start_t = state.get("spin_start_time", 0.0)
        end_t = state.get("spin_end_time", start_t + 1.0)
        base = state.get("spin_start_angle", 0.0)
        direction = state.get("dir", 1)
        span = end_t - start_t
        frac = (self.current_time - start_t) / span if span > 0 else 1.0
        frac = min(1.0, max(0.0, frac))  # clamp to 0-1
        return base + frac * 720.0 * direction
    return 0.0
class StreamingRecipeExecutor:
    """
    Executes a recipe in streaming mode.

    Implements:
    - process-pair: two video clips with opposite effects, blended
    - cycle-crossfade: dynamic cycling through video pairs
    - final effects: whole-spin rotation, ripple
    """
    def __init__(self, n_sources: int = 4, seed: int = 42):
        """Create scan state machines for *n_sources* video pairs."""
        self.n_sources = n_sources
        self.scans = StreamingScans(seed, n_sources=n_sources)
        # Edge-detector memory so a sustained beat flag fires only once.
        self.last_beat_detected = False
        self.current_time = 0.0
def on_frame(self, energy: float, is_beat: bool, t: float = 0.0):
    """Feed one frame of audio analysis; steps scans on beat rising edges."""
    self.current_time = self.scans.current_time = t
    # Only a False -> True transition of the beat flag triggers a step.
    rising_edge = is_beat and not self.last_beat_detected
    if rising_edge:
        self.scans.on_beat()
    self.last_beat_detected = is_beat
def get_effect_params(self, source_idx: int, clip: str, energy: float) -> Dict:
    """
    Get effect parameters for a source clip.

    Args:
        source_idx: Which video source (0-3)
        clip: "a" or "b" (each source has two clips)
        energy: Current audio energy (0-1)
    """
    tag = f"_{source_idx}"
    # Even sources rotate clip "a" clockwise and zoom it in; odd sources
    # (and the opposite clip) get the mirrored treatment.
    forward = (source_idx % 2 == 0) == (clip == "a")
    rot_lo, rot_hi = (0, 45) if forward else (0, -45)
    zoom_lo, zoom_hi = (1, 1.5) if forward else (1, 0.5)
    return {
        "rotate_angle": rot_lo + energy * (rot_hi - rot_lo),
        "zoom_amount": zoom_lo + energy * (zoom_hi - zoom_lo),
        "invert_amount": self.scans.get_emit(f"inv_{clip}{tag}"),
        "hue_degrees": self.scans.get_emit(f"hue_{clip}{tag}"),
        "ascii_mix": 0,  # Disabled - too slow without GPU
        "ascii_char_size": 4 + energy * 28,  # 4-32
    }
def get_pair_params(self, source_idx: int) -> Dict:
    """Return blend opacity and rotation for video pair *source_idx*."""
    mix = self.scans.get_emit(f"pair_mix_{source_idx}")
    rotation = self.scans.get_emit(f"pair_rot_{source_idx}")
    return {"blend_opacity": mix, "pair_rotation": rotation}
def get_cycle_weights(self) -> List[float]:
    """Get normalized blend weights for the cycle-crossfade composition."""
    st = self.scans.get_emit("cycle")
    active, beat, clen = st["cycle"], st["beat"], st["clen"]
    n = self.n_sources
    phase3 = beat * 3

    def raw_weight(p: int) -> float:
        # The active pair holds 0.9 for the first third of the cycle,
        # fades to 0.05 across the middle third; the next pair (whose
        # predecessor is the active one) fades the opposite way.
        if p == active:
            if phase3 < clen:
                return 0.9
            if phase3 < clen * 2:
                return 0.9 - ((phase3 - clen) / clen) * 0.85
            return 0.05
        if (p + n - 1) % n == active:
            if phase3 < clen:
                return 0.05
            if phase3 < clen * 2:
                return 0.05 + ((phase3 - clen) / clen) * 0.85
            return 0.9
        return 0.05

    weights = [raw_weight(p) for p in range(n)]
    total = sum(weights)
    # Normalize so the blend sums to 1 (skip if degenerate).
    return [w / total for w in weights] if total > 0 else weights
def get_cycle_zooms(self) -> List[float]:
    """Get per-pair zoom amounts for the cycle-crossfade composition."""
    st = self.scans.get_emit("cycle")
    active, beat, clen = st["cycle"], st["beat"], st["clen"]
    n = self.n_sources
    phase3 = beat * 3

    def raw_zoom(p: int) -> float:
        # Outgoing (active) pair grows 1.0 -> 2.0 through the middle
        # third, then drops away; the incoming pair starts big (3.0)
        # and shrinks back to 1.0 as it takes over.
        if p == active:
            if phase3 < clen:
                return 1.0
            if phase3 < clen * 2:
                return 1.0 + ((phase3 - clen) / clen) * 1.0
            return 0.1
        if (p + n - 1) % n == active:
            if phase3 < clen:
                return 3.0  # Start big
            if phase3 < clen * 2:
                return 3.0 - ((phase3 - clen) / clen) * 2.0  # Shrink to 1.0
            return 1.0
        return 0.1

    return [raw_zoom(p) for p in range(n)]
def get_final_effects(self, energy: float) -> Dict:
    """Whole-frame rotation and ripple parameters for the final composite."""
    gate = self.scans.get_emit("ripple_gate")
    centre = self.scans.scans["ripple_gate"].value
    return {
        "whole_spin_angle": self.scans.get_emit("whole_spin"),
        # Amplitude spans 5-50 with energy, gated on/off by the scan.
        "ripple_amplitude": gate * (5 + energy * 45),
        "ripple_cx": centre["cx"],
        "ripple_cy": centre["cy"],
    }

678
streaming/sexp_executor.py Normal file
View File

@@ -0,0 +1,678 @@
"""
Streaming S-expression executor.
Executes compiled sexp recipes in real-time by:
- Evaluating scan expressions on each beat
- Resolving bindings to get effect parameter values
- Applying effects frame-by-frame
- Evaluating SLICE_ON Lambda for cycle crossfade
"""
import random
import numpy as np
from typing import Dict, List, Any, Optional
from dataclasses import dataclass, field
from .sexp_interp import SexpInterpreter, eval_slice_on_lambda
@dataclass
class ScanState:
    """Runtime state for a scan."""
    node_id: str           # compiled-node id this scan was built from
    name: Optional[str]    # optional friendly name from the recipe
    value: Any             # current accumulator (shape defined by the recipe)
    rng: random.Random     # per-scan RNG for reproducible randomness
    init_expr: dict        # expression AST producing the initial value
    step_expr: dict        # expression AST evaluated once per beat
    emit_expr: dict        # expression AST mapping state -> emitted value
class ExprEvaluator:
    """
    Evaluates compiled expression ASTs.

    An expression is a dict carrying ``_expr: True`` plus:
      - ``op``:   operation name
      - ``args``: operand expressions
      - ``name``: variable name (for ``var``)
      - ``keys``: result keys (for ``dict``)
    Anything else (plain values and plain dicts) evaluates to itself.
    """

    # Operators taking exactly two evaluated operands (comparisons +
    # arithmetic), dispatched by name.
    _BINARY = {
        '<': lambda a, b: a < b,
        '>': lambda a, b: a > b,
        '<=': lambda a, b: a <= b,
        '>=': lambda a, b: a >= b,
        '=': lambda a, b: a == b,
        '!=': lambda a, b: a != b,
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
        '/': lambda a, b: a / b,
        'mod': lambda a, b: a % b,
    }

    def __init__(self, rng: random.Random = None):
        # Fall back to an unseeded RNG when none is supplied.
        self.rng = rng or random.Random()

    def eval(self, expr: Any, env: Dict[str, Any]) -> Any:
        """Evaluate *expr* in *env*.

        Raises KeyError for unknown variables and ValueError for unknown
        operations. Operands are always evaluated left-to-right, so RNG
        consumption order is stable.
        """
        if not isinstance(expr, dict) or not expr.get('_expr'):
            # Literals and plain (non-expression) dicts pass through.
            return expr

        op = expr.get('op')
        args = expr.get('args', [])

        binary = self._BINARY.get(op)
        if binary is not None:
            return binary(self.eval(args[0], env), self.eval(args[1], env))

        if op == 'var':
            name = expr.get('name')
            if name in env:
                return env[name]
            raise KeyError(f"Unknown variable: {name}")
        if op == 'dict':
            return dict(zip(expr.get('keys', []),
                            (self.eval(a, env) for a in args)))
        if op == 'get':
            container = self.eval(args[0], env)
            key = args[1]
            return container.get(key) if isinstance(container, dict) else container[key]
        if op == 'if':
            # Condition, then-branch, optional else-branch (None if absent).
            if self.eval(args[0], env):
                return self.eval(args[1], env)
            if len(args) > 2:
                return self.eval(args[2], env)
            return None
        if op == 'rand':
            return self.rng.random()
        if op == 'rand-int':
            return self.rng.randint(self.eval(args[0], env), self.eval(args[1], env))
        if op == 'rand-range':
            return self.rng.uniform(self.eval(args[0], env), self.eval(args[1], env))
        if op == 'and':
            return all(self.eval(a, env) for a in args)
        if op == 'or':
            return any(self.eval(a, env) for a in args)
        if op == 'not':
            return not self.eval(args[0], env)
        raise ValueError(f"Unknown operation: {op}")
class SexpStreamingExecutor:
    """
    Executes a compiled sexp recipe in streaming mode.

    Reads scan definitions, effect chains, and bindings from the compiled
    recipe and evaluates them frame-by-frame.
    """
    def __init__(self, compiled_recipe, seed: int = 42):
        """Prepare scans and the SLICE_ON interpreter for *compiled_recipe*."""
        self.recipe = compiled_recipe
        self.master_seed = seed
        # Fast node lookup by id.
        self.nodes = {node['id']: node for node in compiled_recipe.nodes}
        # Beat/energy state must exist before _init_scans runs.
        self.beat_count = 0
        self.current_time = 0.0
        self.last_beat_time = 0.0
        self.last_beat_detected = False
        self.energy = 0.0
        # Scan state machines and their current emitted values (by node id).
        self.scans: Dict[str, ScanState] = {}
        self.scan_outputs: Dict[str, Any] = {}
        self._init_scans()
        # SLICE_ON Lambda drives the cycle-crossfade composition.
        self.sexp_interp = SexpInterpreter(random.Random(seed))
        self._slice_on_lambda = None
        self._slice_on_acc = None
        self._slice_on_result = None  # last evaluation: {layers, compose, acc}
        self._init_slice_on()
def _init_slice_on(self):
"""Initialize SLICE_ON Lambda for cycle crossfade."""
for node in self.recipe.nodes:
if node.get('type') == 'SLICE_ON':
config = node.get('config', {})
self._slice_on_lambda = config.get('fn')
init = config.get('init', {})
self._slice_on_acc = {
'cycle': init.get('cycle', 0),
'beat': init.get('beat', 0),
'clen': init.get('clen', 60),
}
# Evaluate initial state
self._eval_slice_on()
break
def _eval_slice_on(self):
    """Evaluate the SLICE_ON Lambda with current state.

    Stores the result in self._slice_on_result (expected to contain
    layers/compose/acc) and threads the returned accumulator into the
    next evaluation. Errors are logged to stderr and swallowed so a bad
    Lambda cannot kill the render loop.
    """
    if not self._slice_on_lambda:
        return
    n = len(self._get_video_sources())
    videos = list(range(n))  # Placeholder video indices
    try:
        result = eval_slice_on_lambda(
            self._slice_on_lambda,
            self._slice_on_acc,
            self.beat_count,
            0.0,  # start time (not used for weights)
            1.0,  # end time (not used for weights)
            videos,
            self.sexp_interp,
        )
        self._slice_on_result = result
        # Update accumulator for next beat
        if 'acc' in result:
            self._slice_on_acc = result['acc']
    except Exception as e:
        import sys
        print(f"SLICE_ON eval error: {e}", file=sys.stderr)
def _init_scans(self):
    """Instantiate a ScanState for every SCAN node in the recipe."""
    offset = 0
    for node in self.recipe.nodes:
        if node.get('type') != 'SCAN':
            continue
        node_id = node['id']
        config = node.get('config', {})
        # Each scan owns an RNG; an explicit recipe seed wins over the
        # offset-derived one (offset advances for every SCAN regardless).
        rng = random.Random(config.get('seed', self.master_seed + offset))
        offset += 1
        init_expr = config.get('init', 0)
        self.scans[node_id] = ScanState(
            node_id=node_id,
            name=node.get('name'),
            value=ExprEvaluator(rng).eval(init_expr, {}),
            rng=rng,
            init_expr=init_expr,
            step_expr=config.get('step_expr', {}),
            emit_expr=config.get('emit_expr', {}),
        )
        # Compute the initial emitted value immediately.
        self._update_emit(node_id)
def _update_emit(self, node_id: str):
    """Re-evaluate a scan's emit expression and cache the result."""
    scan = self.scans[node_id]
    env = self._build_scan_env(scan)
    self.scan_outputs[node_id] = ExprEvaluator(scan.rng).eval(scan.emit_expr, env)
def _build_scan_env(self, scan: ScanState) -> Dict[str, Any]:
    """Expose scan state plus beat/time context for expression evaluation.

    Dict-valued state is spread into individual variables; scalar state
    is reachable as 'acc'. 'beat_count' and 'time' are always present.
    """
    env = dict(scan.value) if isinstance(scan.value, dict) else {'acc': scan.value}
    env['beat_count'] = self.beat_count
    env['time'] = self.current_time
    return env
def on_beat(self):
    """Advance every scan (and the cycle state machine) by one beat.

    Fix: removed the computed-but-unused local `beat_interval` — nothing
    in this method consumed it; only the `last_beat_time` update has an
    observable effect.
    """
    self.beat_count += 1
    # Record when this beat arrived (kept so interval estimation based
    # on last_beat_time remains possible elsewhere).
    self.last_beat_time = self.current_time
    # Step each scan: evaluate its step expression in the current
    # environment, store the new state, then refresh its emitted value.
    for node_id, scan in self.scans.items():
        evaluator = ExprEvaluator(scan.rng)
        env = self._build_scan_env(scan)
        scan.value = evaluator.eval(scan.step_expr, env)
        self._update_emit(node_id)
    # Advance the cycle-crossfade state.
    self._step_cycle()
def on_frame(self, energy: float, is_beat: bool, t: float = 0.0):
    """Record per-frame audio analysis; fires on_beat on beat rising edges."""
    self.current_time = t
    self.energy = energy
    # Edge detection: only a False -> True transition steps the scans.
    rising = is_beat and not self.last_beat_detected
    if rising:
        self.on_beat()
    self.last_beat_detected = is_beat
def resolve_binding(self, binding: dict) -> Any:
    """Resolve a parameter binding to its current runtime value.

    Non-binding values pass through unchanged. Binding dicts are looked
    up in the scan outputs (falling back to current energy for unknown
    sources, e.g. analyzer references), optionally narrowed to a single
    feature, then linearly mapped into the binding's [lo, hi] range.
    """
    if not (isinstance(binding, dict) and binding.get('_binding')):
        return binding
    # Raw value: scan output if known, else current energy as fallback
    value = self.scan_outputs.get(binding.get('source'), self.energy)
    # Narrow dict-shaped values to the requested feature
    feature = binding.get('feature', 'values')
    if isinstance(value, dict) and feature in value:
        value = value[feature]
    # Linear range mapping for numeric values
    span = binding.get('range')
    if span and isinstance(value, (int, float)):
        lo, hi = span
        value = lo + value * (hi - lo)
    return value
def get_effect_params(self, effect_node: dict) -> Dict[str, Any]:
    """Get resolved parameters for an effect node.

    Copies the node's config, skipping bookkeeping keys, and resolves
    any binding dicts to their current values.

    NOTE(review): a later method in this class re-defines
    ``get_effect_params`` with a different signature
    (source_idx, clip, energy), which shadows this definition at class
    creation time - confirm which one callers expect.
    """
    config = effect_node.get('config', {})
    params = {}
    for key, value in config.items():
        # Skip internal fields
        if key in ('effect', 'effect_path', 'effect_cid', 'effects_registry', 'analysis_refs'):
            continue
        # Resolve bindings
        params[key] = self.resolve_binding(value)
    return params
def get_scan_value(self, name: str) -> Any:
    """Return the emitted output of the first scan with *name*, or None."""
    match = next(
        (nid for nid, scan in self.scans.items() if scan.name == name),
        None,
    )
    return self.scan_outputs.get(match) if match is not None else None
def get_all_scan_values(self) -> Dict[str, Any]:
    """Return a mapping of scan name -> emitted output for all named scans."""
    return {
        scan.name: self.scan_outputs.get(node_id)
        for node_id, scan in self.scans.items()
        if scan.name
    }
# === Compositor interface methods ===
def _get_video_sources(self) -> List[str]:
    """Get list of video source node IDs.

    Collects all SOURCE nodes from the recipe, then drops the last one.
    NOTE(review): dropping the last entry assumes the audio source is
    always emitted last by the compiler - TODO confirm; the comment
    below suggests file-extension inspection was the intended check.
    """
    sources = []
    for node in self.recipe.nodes:
        if node.get('type') == 'SOURCE':
            sources.append(node['id'])
    # Filter to video only (exclude audio - last one is usually audio)
    # Look at file extensions in the paths
    return sources[:-1] if len(sources) > 1 else sources
def _trace_effect_chain(self, start_id: str, stop_at_blend: bool = True) -> List[dict]:
    """Trace effect chain from a node, returning effects in order.

    Walks the recipe graph forward: at each step, finds the first node
    that consumes the current node as an input. EFFECT nodes are
    collected; SEGMENT nodes are traversed without being collected.
    Stops at a 'blend' effect when *stop_at_blend* is set, at a dead
    end, or after 20 hops (guard against cycles).
    """
    chain = []
    current_id = start_id
    for _ in range(20):  # Max depth
        # Find node that uses current as input
        next_node = None
        for node in self.recipe.nodes:
            if current_id in node.get('inputs', []):
                if node.get('type') == 'EFFECT':
                    effect_type = node.get('config', {}).get('effect')
                    chain.append(node)
                    if stop_at_blend and effect_type == 'blend':
                        return chain
                    next_node = node
                    break
                elif node.get('type') == 'SEGMENT':
                    # Pass through segments without recording them
                    next_node = node
                    break
        if next_node is None:
            break
        current_id = next_node['id']
    return chain
def _find_clip_chains(self, source_idx: int) -> tuple:
    """Find effect chains for clip A and clip B from a source.

    Locates the SEGMENT node fed by the selected video source, then
    walks each EFFECT branch that consumes the segment forward until a
    'blend' effect (or a dead end) terminates it. Returns
    ``(chain_a, chain_b)``; either may be empty when the structure is
    not present.

    Fix over the original: removed a dead
    ``self._trace_effect_chain(segment_id)`` call whose result was
    never used.
    """
    sources = self._get_video_sources()
    if source_idx >= len(sources):
        return [], []
    source_id = sources[source_idx]
    # Find the SEGMENT node consuming this source
    segment_id = None
    for node in self.recipe.nodes:
        if node.get('type') == 'SEGMENT' and source_id in node.get('inputs', []):
            segment_id = node['id']
            break
    if not segment_id:
        return [], []
    # Each EFFECT node consuming the segment starts one branch (clip A / B)
    chains = []
    for node in self.recipe.nodes:
        if segment_id in node.get('inputs', []) and node.get('type') == 'EFFECT':
            branch_chain = [node]
            current = node['id']
            for _ in range(10):  # depth guard
                found = False
                for n in self.recipe.nodes:
                    if current in n.get('inputs', []) and n.get('type') == 'EFFECT':
                        branch_chain.append(n)
                        # The blend terminates the branch (found stays
                        # False so the outer loop exits too)
                        if n.get('config', {}).get('effect') == 'blend':
                            break
                        current = n['id']
                        found = True
                        break
                if not found:
                    break
            chains.append(branch_chain)
    # Return first two chains as A and B
    chain_a = chains[0] if len(chains) > 0 else []
    chain_b = chains[1] if len(chains) > 1 else []
    return chain_a, chain_b
def get_effect_params(self, source_idx: int, clip: str, energy: float) -> Dict:
    """Get effect parameters for a source clip (compositor interface).

    Resolves the per-clip effect chain into a flat dict of compositor
    parameters. Bindings with a range are mapped directly from the
    current *energy*; other bindings go through resolve_binding().

    NOTE(review): this definition shadows the earlier
    ``get_effect_params(self, effect_node)`` method in this class -
    confirm the earlier variant has no remaining callers.
    """
    # Get the correct chain for this clip
    chain_a, chain_b = self._find_clip_chains(source_idx)
    chain = chain_a if clip == 'a' else chain_b
    # Default params
    params = {
        "rotate_angle": 0,
        "zoom_amount": 1.0,
        "invert_amount": 0,
        "hue_degrees": 0,
        "ascii_mix": 0,
        "ascii_char_size": 8,
    }
    # Resolve from effects in chain
    for eff in chain:
        config = eff.get('config', {})
        effect_type = config.get('effect')
        if effect_type == 'rotate':
            angle_binding = config.get('angle')
            if angle_binding:
                if isinstance(angle_binding, dict) and angle_binding.get('_binding'):
                    # Bound to analyzer - use energy with range
                    range_map = angle_binding.get('range')
                    if range_map:
                        lo, hi = range_map
                        params["rotate_angle"] = lo + energy * (hi - lo)
                    else:
                        params["rotate_angle"] = self.resolve_binding(angle_binding)
                else:
                    # Literal angle (ignore non-numeric values)
                    params["rotate_angle"] = angle_binding if isinstance(angle_binding, (int, float)) else 0
        elif effect_type == 'zoom':
            angle_binding = None  # noqa: intentional no-op removed
            amount_binding = config.get('amount')
            if amount_binding:
                if isinstance(amount_binding, dict) and amount_binding.get('_binding'):
                    range_map = amount_binding.get('range')
                    if range_map:
                        lo, hi = range_map
                        params["zoom_amount"] = lo + energy * (hi - lo)
                    else:
                        params["zoom_amount"] = self.resolve_binding(amount_binding)
                else:
                    params["zoom_amount"] = amount_binding if isinstance(amount_binding, (int, float)) else 1.0
        elif effect_type == 'invert':
            amount_binding = config.get('amount')
            if amount_binding:
                val = self.resolve_binding(amount_binding)
                params["invert_amount"] = val if isinstance(val, (int, float)) else 0
        elif effect_type == 'hue_shift':
            deg_binding = config.get('degrees')
            if deg_binding:
                val = self.resolve_binding(deg_binding)
                params["hue_degrees"] = val if isinstance(val, (int, float)) else 0
        elif effect_type == 'ascii_art':
            mix_binding = config.get('mix')
            if mix_binding:
                val = self.resolve_binding(mix_binding)
                params["ascii_mix"] = val if isinstance(val, (int, float)) else 0
            size_binding = config.get('char_size')
            if size_binding:
                if isinstance(size_binding, dict) and size_binding.get('_binding'):
                    range_map = size_binding.get('range')
                    if range_map:
                        lo, hi = range_map
                        params["ascii_char_size"] = lo + energy * (hi - lo)
    return params
def get_pair_params(self, source_idx: int) -> Dict:
    """Get blend and rotation params for a video pair (compositor interface).

    Finds the blend node terminating clip A's chain and resolves its
    opacity binding, then looks for a rotate effect that consumes the
    blend output (treated as the whole-pair rotation).
    """
    params = {
        "blend_opacity": 0.5,
        "pair_rotation": 0,
    }
    # Find the blend node for this source
    chain_a, _ = self._find_clip_chains(source_idx)
    # The last effect in chain_a should be the blend
    blend_node = None
    for eff in reversed(chain_a):
        if eff.get('config', {}).get('effect') == 'blend':
            blend_node = eff
            break
    if blend_node:
        config = blend_node.get('config', {})
        opacity_binding = config.get('opacity')
        if opacity_binding:
            val = self.resolve_binding(opacity_binding)
            if isinstance(val, (int, float)):
                params["blend_opacity"] = val
        # Find rotate after blend (pair rotation)
        blend_id = blend_node['id']
        for node in self.recipe.nodes:
            if blend_id in node.get('inputs', []) and node.get('type') == 'EFFECT':
                if node.get('config', {}).get('effect') == 'rotate':
                    angle_binding = node.get('config', {}).get('angle')
                    if angle_binding:
                        val = self.resolve_binding(angle_binding)
                        if isinstance(val, (int, float)):
                            params["pair_rotation"] = val
                # Only the first consumer of the blend is inspected
                break
    return params
def _get_cycle_state(self) -> dict:
    """Return the current cycle state, lazily initializing it.

    On first access, seeds the state from the first SLICE_ON node's
    ``init`` config, falling back to cycle 0 / beat 0 / clen 60.
    """
    if not hasattr(self, '_cycle_state'):
        defaults = {'cycle': 0, 'beat': 0, 'clen': 60}
        slice_nodes = [n for n in self.recipe.nodes if n.get('type') == 'SLICE_ON']
        if slice_nodes:
            init = slice_nodes[0].get('config', {}).get('init', {})
            self._cycle_state = {k: init.get(k, d) for k, d in defaults.items()}
        else:
            self._cycle_state = defaults
    return self._cycle_state
def _step_cycle(self):
    """Step the cycle state forward on beat by evaluating SLICE_ON Lambda.

    Delegates to ``self._eval_slice_on()`` (defined elsewhere in this
    class), which re-runs the compiled SLICE_ON lambda via the
    interpreter.
    """
    # Use interpreter to evaluate the Lambda
    self._eval_slice_on()
def get_cycle_weights(self) -> List[float]:
    """Blend weights per video source for the cycle crossfade.

    Uses the interpreted SLICE_ON result when it provides one weight
    per source and a positive total (weights are normalized to sum to
    1); otherwise falls back to equal weights.
    """
    n = len(self._get_video_sources())
    if n == 0:
        return [1.0]
    result = self._slice_on_result
    if result:
        weights = result.get('compose', {}).get('weights', [])
        if len(weights) == n:
            total = sum(weights)
            if total > 0:
                return [w / total for w in weights]
    # Fallback: equal weights
    return [1.0 / n] * n
def get_cycle_zooms(self) -> List[float]:
    """Get zoom amounts for cycle-crossfade from SLICE_ON result.

    Reads one zoom amount per layer from the interpreted SLICE_ON
    result (first 'zoom' effect found in each layer's effect list);
    falls back to 1.0 for every source when the result shape does not
    match the number of video sources.
    """
    n = len(self._get_video_sources())
    if n == 0:
        return [1.0]
    # Get zooms from interpreted result (layers -> effects -> zoom amount)
    if self._slice_on_result:
        layers = self._slice_on_result.get('layers', [])
        if layers and len(layers) == n:
            zooms = []
            for layer in layers:
                effects = layer.get('effects', [])
                zoom_amt = 1.0
                for eff in effects:
                    # The effect name may be a plain string or a Symbol-like
                    # object with a .name attribute, depending on how the
                    # lambda result was built
                    if eff.get('effect') == 'zoom' or (hasattr(eff.get('effect'), 'name') and eff.get('effect').name == 'zoom'):
                        zoom_amt = eff.get('amount', 1.0)
                        break
                zooms.append(zoom_amt)
            return zooms
    # Fallback
    return [1.0] * n
def _get_final_rotate_scan_id(self) -> str:
    """Find the scan ID that drives the final rotation (after SLICE_ON).

    Memoized on first call via ``self._final_rotate_scan_id``.
    Returns None (despite the str annotation) when no SLICE_ON node or
    no bound rotate effect after it is found.
    """
    if hasattr(self, '_final_rotate_scan_id'):
        return self._final_rotate_scan_id
    # Find SLICE_ON node index
    slice_on_idx = None
    for i, node in enumerate(self.recipe.nodes):
        if node.get('type') == 'SLICE_ON':
            slice_on_idx = i
            break
    # Find rotate effect after SLICE_ON
    if slice_on_idx is not None:
        for node in self.recipe.nodes[slice_on_idx + 1:]:
            if node.get('type') == 'EFFECT':
                config = node.get('config', {})
                if config.get('effect') == 'rotate':
                    angle_binding = config.get('angle', {})
                    if isinstance(angle_binding, dict) and angle_binding.get('_binding'):
                        self._final_rotate_scan_id = angle_binding.get('source')
                        return self._final_rotate_scan_id
    # Negative result is cached too, so the graph walk happens once
    self._final_rotate_scan_id = None
    return None
def get_final_effects(self, energy: float) -> Dict:
    """Get final composition effects (compositor interface).

    Combines the whole-frame spin angle (from the scan bound to the
    final rotate effect) with ripple parameters taken from named scans
    ('ripple-gate', 'ripple-cx', 'ripple-cy'). Ripple amplitude is
    gated and scaled by the current energy.
    """
    # Get named scans
    scan_values = self.get_all_scan_values()
    # Whole spin - get from the specific scan bound to final rotate effect
    whole_spin = 0
    final_rotate_scan_id = self._get_final_rotate_scan_id()
    if final_rotate_scan_id and final_rotate_scan_id in self.scan_outputs:
        val = self.scan_outputs[final_rotate_scan_id]
        if isinstance(val, dict) and 'angle' in val:
            whole_spin = val['angle']
        elif isinstance(val, (int, float)):
            whole_spin = val
    # Ripple
    ripple_gate = scan_values.get('ripple-gate', 0)
    ripple_cx = scan_values.get('ripple-cx', 0.5)
    ripple_cy = scan_values.get('ripple-cy', 0.5)
    if isinstance(ripple_gate, dict):
        # Dict-shaped gate: use its 'gate' field, or fully open if absent
        ripple_gate = ripple_gate.get('gate', 0) if 'gate' in ripple_gate else 1
    return {
        "whole_spin_angle": whole_spin,
        # Gate scales an energy-driven amplitude in [5, 50]
        "ripple_amplitude": ripple_gate * (5 + energy * 45),
        "ripple_cx": ripple_cx if isinstance(ripple_cx, (int, float)) else 0.5,
        "ripple_cy": ripple_cy if isinstance(ripple_cy, (int, float)) else 0.5,
    }

376
streaming/sexp_interp.py Normal file
View File

@@ -0,0 +1,376 @@
"""
S-expression interpreter for streaming execution.
Evaluates sexp expressions including:
- let bindings
- lambda definitions and calls
- Arithmetic, comparison, logic operators
- dict/list operations
- Random number generation
"""
import random
from typing import Any, Dict, List, Callable
from dataclasses import dataclass
@dataclass
class Lambda:
    """Runtime lambda value produced by evaluating a (lambda ...) form."""
    params: List[str]        # parameter names, in call order
    body: Any                # unevaluated body expression
    closure: Dict[str, Any]  # environment captured at definition time
class Symbol:
    """Symbol reference.

    A bare identifier in a sexp; resolved against the environment (then
    globals) at evaluation time.
    """

    def __init__(self, name: str):
        self.name = name

    def __repr__(self):
        return f"Symbol({self.name})"
class SexpInterpreter:
    """
    Interprets S-expressions in real-time.
    Handles the full sexp language used in recipes.
    """

    def __init__(self, rng: random.Random = None):
        # Dedicated RNG so evaluation is reproducible when a seeded
        # Random is supplied; otherwise a fresh unseeded one is used.
        self.rng = rng or random.Random()
        # Global bindings, visible from every evaluation environment
        self.globals: Dict[str, Any] = {}
def eval(self, expr: Any, env: Dict[str, Any] = None) -> Any:
    """Evaluate an expression in the given environment.

    Dispatch order: literals pass through; Symbols are looked up in
    *env* then globals; dicts are either compiled expressions (tagged
    with '_expr') or plain data evaluated value-by-value; non-empty
    lists/tuples are sexp forms; empty sequences evaluate to [].
    """
    if env is None:
        env = {}
    # Literals
    if isinstance(expr, (int, float, str, bool)) or expr is None:
        return expr
    # Symbol lookup (also accepts duck-typed Symbols from the parser,
    # matched by class name)
    if isinstance(expr, Symbol) or (hasattr(expr, 'name') and hasattr(expr, '__class__') and expr.__class__.__name__ == 'Symbol'):
        name = expr.name if hasattr(expr, 'name') else str(expr)
        if name in env:
            return env[name]
        if name in self.globals:
            return self.globals[name]
        raise NameError(f"Undefined symbol: {name}")
    # Compiled expression dict (from compiler)
    if isinstance(expr, dict):
        if expr.get('_expr'):
            return self._eval_compiled_expr(expr, env)
        # Plain dict - evaluate values that might be expressions
        result = {}
        for k, v in expr.items():
            # Some keys should keep Symbol values as strings (effect names, modes)
            if k in ('effect', 'mode') and hasattr(v, 'name'):
                result[k] = v.name
            else:
                result[k] = self.eval(v, env)
        return result
    # List expression (sexp)
    if isinstance(expr, (list, tuple)) and len(expr) > 0:
        return self._eval_list(expr, env)
    # Empty list
    if isinstance(expr, (list, tuple)):
        return []
    return expr
def _eval_compiled_expr(self, expr: dict, env: Dict[str, Any]) -> Any:
    """Evaluate a compiled expression dict.

    The dict carries an 'op' plus operands in 'args' (all operands are
    themselves expressions, evaluated recursively). Raises NameError
    for unknown variables and ValueError for unknown ops.
    """
    op = expr.get('op')
    args = expr.get('args', [])
    if op == 'var':
        name = expr.get('name')
        if name in env:
            return env[name]
        if name in self.globals:
            return self.globals[name]
        raise NameError(f"Undefined: {name}")
    elif op == 'dict':
        # Keys are literal; values come from evaluating args pairwise
        keys = expr.get('keys', [])
        values = [self.eval(a, env) for a in args]
        return dict(zip(keys, values))
    elif op == 'get':
        obj = self.eval(args[0], env)
        key = args[1]
        return obj.get(key) if isinstance(obj, dict) else obj[key]
    elif op == 'if':
        cond = self.eval(args[0], env)
        if cond:
            return self.eval(args[1], env)
        elif len(args) > 2:
            return self.eval(args[2], env)
        return None
    # Comparison
    elif op == '<':
        return self.eval(args[0], env) < self.eval(args[1], env)
    elif op == '>':
        return self.eval(args[0], env) > self.eval(args[1], env)
    elif op == '<=':
        return self.eval(args[0], env) <= self.eval(args[1], env)
    elif op == '>=':
        return self.eval(args[0], env) >= self.eval(args[1], env)
    elif op == '=':
        return self.eval(args[0], env) == self.eval(args[1], env)
    elif op == '!=':
        return self.eval(args[0], env) != self.eval(args[1], env)
    # Arithmetic (binary only)
    elif op == '+':
        return self.eval(args[0], env) + self.eval(args[1], env)
    elif op == '-':
        return self.eval(args[0], env) - self.eval(args[1], env)
    elif op == '*':
        return self.eval(args[0], env) * self.eval(args[1], env)
    elif op == '/':
        return self.eval(args[0], env) / self.eval(args[1], env)
    elif op == 'mod':
        return self.eval(args[0], env) % self.eval(args[1], env)
    # Random (drawn from the interpreter's seeded RNG)
    elif op == 'rand':
        return self.rng.random()
    elif op == 'rand-int':
        return self.rng.randint(self.eval(args[0], env), self.eval(args[1], env))
    elif op == 'rand-range':
        return self.rng.uniform(self.eval(args[0], env), self.eval(args[1], env))
    # Logic (variadic)
    elif op == 'and':
        return all(self.eval(a, env) for a in args)
    elif op == 'or':
        return any(self.eval(a, env) for a in args)
    elif op == 'not':
        return not self.eval(args[0], env)
    else:
        raise ValueError(f"Unknown op: {op}")
def _eval_list(self, expr: list, env: Dict[str, Any]) -> Any:
    """Evaluate a list expression (sexp form).

    Dispatches on the head symbol: special forms (let, lambda, if,
    dict, get, len, range, map), built-in operators, and finally
    user-defined function calls. A list whose head is a dict is treated
    as plain data and evaluated element-wise.
    """
    if len(expr) == 0:
        return []
    head = expr[0]
    # Get head name
    if isinstance(head, Symbol) or (hasattr(head, 'name') and hasattr(head, '__class__')):
        head_name = head.name if hasattr(head, 'name') else str(head)
    elif isinstance(head, str):
        head_name = head
    else:
        # Not a symbol - check if it's a data list or function call
        if isinstance(head, dict):
            # List of dicts - evaluate each element as data
            return [self.eval(item, env) for item in expr]
        # Otherwise evaluate as function call
        fn = self.eval(head, env)
        args = [self.eval(a, env) for a in expr[1:]]
        return self._call(fn, args, env)
    # Special forms
    if head_name == 'let':
        return self._eval_let(expr, env)
    elif head_name in ('lambda', 'fn'):
        return self._eval_lambda(expr, env)
    elif head_name == 'if':
        return self._eval_if(expr, env)
    elif head_name == 'dict':
        return self._eval_dict(expr, env)
    elif head_name == 'get':
        obj = self.eval(expr[1], env)
        # A missing key operand raises IndexError either way; evaluate
        # it unconditionally
        key = self.eval(expr[2], env)
        if isinstance(key, str):
            return obj.get(key) if isinstance(obj, dict) else getattr(obj, key, None)
        return obj[key]
    elif head_name == 'len':
        return len(self.eval(expr[1], env))
    elif head_name == 'range':
        start = self.eval(expr[1], env)
        end = self.eval(expr[2], env) if len(expr) > 2 else start
        if len(expr) == 2:
            # (range n) -> 0..n-1
            return list(range(end))
        return list(range(start, end))
    elif head_name == 'map':
        fn = self.eval(expr[1], env)
        lst = self.eval(expr[2], env)
        return [self._call(fn, [x], env) for x in lst]
    elif head_name == 'mod':
        return self.eval(expr[1], env) % self.eval(expr[2], env)
    # Arithmetic
    elif head_name == '+':
        return self.eval(expr[1], env) + self.eval(expr[2], env)
    elif head_name == '-':
        if len(expr) == 2:
            # Unary minus
            return -self.eval(expr[1], env)
        return self.eval(expr[1], env) - self.eval(expr[2], env)
    elif head_name == '*':
        return self.eval(expr[1], env) * self.eval(expr[2], env)
    elif head_name == '/':
        return self.eval(expr[1], env) / self.eval(expr[2], env)
    # Comparison
    elif head_name == '<':
        return self.eval(expr[1], env) < self.eval(expr[2], env)
    elif head_name == '>':
        return self.eval(expr[1], env) > self.eval(expr[2], env)
    elif head_name == '<=':
        return self.eval(expr[1], env) <= self.eval(expr[2], env)
    elif head_name == '>=':
        return self.eval(expr[1], env) >= self.eval(expr[2], env)
    elif head_name == '=':
        return self.eval(expr[1], env) == self.eval(expr[2], env)
    # Logic
    elif head_name == 'and':
        return all(self.eval(a, env) for a in expr[1:])
    elif head_name == 'or':
        return any(self.eval(a, env) for a in expr[1:])
    elif head_name == 'not':
        return not self.eval(expr[1], env)
    # Function call
    else:
        # BUG FIX: the original used `env.get(name) or self.globals.get(name)`,
        # so a falsy local binding (0, False, "") wrongly fell through to
        # globals (or raised NameError). Use explicit membership instead.
        if head_name in env:
            fn = env[head_name]
        else:
            fn = self.globals.get(head_name)
        if fn is None:
            raise NameError(f"Undefined function: {head_name}")
        args = [self.eval(a, env) for a in expr[1:]]
        return self._call(fn, args, env)
def _eval_let(self, expr: list, env: Dict[str, Any]) -> Any:
    """Evaluate (let [bindings...] body).

    Bindings are flat name/value pairs evaluated left to right; each
    binding is visible to the ones that follow it (let* semantics).
    The body is evaluated in the extended environment.
    """
    bindings, body = expr[1], expr[2]
    scope = dict(env)
    for idx in range(0, len(bindings), 2):
        target = bindings[idx]
        # Names may be Symbols, parser symbol objects, or plain values
        if isinstance(target, Symbol) or hasattr(target, 'name'):
            target = target.name if hasattr(target, 'name') else str(target)
        scope[target] = self.eval(bindings[idx + 1], scope)
    return self.eval(body, scope)
def _eval_lambda(self, expr: list, env: Dict[str, Any]) -> Lambda:
    """Evaluate (lambda [params] body) into a Lambda closure value.

    Parameter tokens may be Symbols (or any object with a ``name``
    attribute); anything else is stringified. The current environment
    is captured by copy as the closure.
    """
    raw_params, body = expr[1], expr[2]
    names = [
        (p.name if hasattr(p, 'name') else str(p))
        for p in raw_params
    ]
    return Lambda(params=names, body=body, closure=dict(env))
def _eval_if(self, expr: list, env: Dict[str, Any]) -> Any:
    """Evaluate (if cond then else?); a missing else yields None."""
    if self.eval(expr[1], env):
        return self.eval(expr[2], env)
    if len(expr) > 3:
        return self.eval(expr[3], env)
    return None
def _eval_dict(self, expr: list, env: Dict[str, Any]) -> dict:
    """Evaluate (dict :key val ...).

    Keys may be keyword objects, ':'-prefixed strings, or anything with
    a ``name`` attribute; values are evaluated.

    NOTE(review): the first branch (``hasattr(key, 'name')``) likely
    also matches Keyword objects, which would make the explicit
    Keyword branch below unreachable - confirm against the parser's
    Keyword type.
    """
    result = {}
    i = 1
    while i < len(expr):
        key = expr[i]
        # Handle keyword syntax (:key) and Keyword objects
        if hasattr(key, 'name'):
            key = key.name
        elif hasattr(key, '__class__') and key.__class__.__name__ == 'Keyword':
            key = str(key).lstrip(':')
        elif isinstance(key, str) and key.startswith(':'):
            key = key[1:]
        value = self.eval(expr[i + 1], env)
        result[key] = value
        i += 2
    return result
def _call(self, fn: Any, args: List[Any], env: Dict[str, Any]) -> Any:
    """Call a function with arguments.

    Supports three callable shapes: this module's Lambda (evaluated in
    its captured closure), duck-typed parser lambdas (anything with
    ``params`` and ``body``, evaluated in the caller's env plus any
    closure), and ordinary Python callables. Extra arguments beyond the
    parameter list are silently dropped by zip().
    """
    if isinstance(fn, Lambda):
        # Our own Lambda type
        call_env = dict(fn.closure)
        for param, arg in zip(fn.params, args):
            call_env[param] = arg
        return self.eval(fn.body, call_env)
    elif hasattr(fn, 'params') and hasattr(fn, 'body'):
        # Lambda from parser (artdag.sexp.parser.Lambda)
        call_env = dict(env)
        if hasattr(fn, 'closure') and fn.closure:
            call_env.update(fn.closure)
        # Get param names
        params = []
        for p in fn.params:
            if hasattr(p, 'name'):
                params.append(p.name)
            else:
                params.append(str(p))
        for param, arg in zip(params, args):
            call_env[param] = arg
        return self.eval(fn.body, call_env)
    elif callable(fn):
        return fn(*args)
    else:
        raise TypeError(f"Not callable: {type(fn).__name__}")
def eval_slice_on_lambda(lambda_obj, acc: dict, i: int, start: float, end: float,
                         videos: list, interp: SexpInterpreter = None) -> dict:
    """
    Evaluate a SLICE_ON lambda function.
    Args:
        lambda_obj: The Lambda object from the compiled recipe
        acc: Current accumulator state
        i: Beat index
        start: Slice start time
        end: Slice end time
        videos: List of video inputs
        interp: Interpreter to use (a fresh one is created when None)
    Returns:
        Dict with 'layers', 'compose', 'acc' keys
    """
    if interp is None:
        interp = SexpInterpreter()
    # Set up global 'videos' for (len videos) to work
    interp.globals['videos'] = videos
    # Build initial environment with lambda parameters; start from the
    # lambda's closure (if any) so captured names remain visible
    env = dict(lambda_obj.closure) if hasattr(lambda_obj, 'closure') and lambda_obj.closure else {}
    env['videos'] = videos
    # Call the lambda with (acc, i, start, end)
    result = interp._call(lambda_obj, [acc, i, start, end], env)
    return result

281
streaming/sources.py Normal file
View File

@@ -0,0 +1,281 @@
"""
Video and image sources with looping support.
"""
import numpy as np
import subprocess
import json
from pathlib import Path
from typing import Optional, Tuple
from abc import ABC, abstractmethod
class Source(ABC):
    """Abstract base class for frame sources.

    A source yields RGB frames addressed by time; concrete classes
    decide how time maps to frames (looping video, static image, live
    capture).
    """

    @abstractmethod
    def read_frame(self, t: float) -> np.ndarray:
        """Read frame at time t (with looping if needed)."""
        pass

    @property
    @abstractmethod
    def duration(self) -> float:
        """Source duration in seconds."""
        pass

    @property
    @abstractmethod
    def size(self) -> Tuple[int, int]:
        """Frame size as (width, height)."""
        pass

    @property
    @abstractmethod
    def fps(self) -> float:
        """Frames per second."""
        pass
class VideoSource(Source):
    """
    Video file source with automatic looping.
    Reads frames on-demand, seeking as needed. When time exceeds
    duration, wraps around (loops).
    """

    def __init__(self, path: str, target_fps: float = 30):
        """Open a video file for frame-at-time reads.

        Raises FileNotFoundError for a missing path, RuntimeError when
        ffprobe fails, and ValueError when the file has no video stream.
        """
        self.path = Path(path)
        self.target_fps = target_fps
        # Initialize decode state first (before _probe which could fail)
        # so close()/__del__ never see missing attributes
        self._process: Optional[subprocess.Popen] = None
        self._current_start: Optional[float] = None
        self._frame_buffer: Optional[np.ndarray] = None
        self._buffer_time: Optional[float] = None
        self._duration = None
        self._size = None
        self._fps = None
        if not self.path.exists():
            raise FileNotFoundError(f"Video not found: {path}")
        self._probe()

    def _probe(self):
        """Get video metadata (duration, size, fps) via ffprobe."""
        cmd = [
            "ffprobe", "-v", "quiet",
            "-print_format", "json",
            "-show_format", "-show_streams",
            str(self.path)
        ]
        result = subprocess.run(cmd, capture_output=True, text=True)
        if result.returncode != 0:
            # Fail with a clear error rather than a JSONDecodeError on
            # empty output below
            raise RuntimeError(f"ffprobe failed for {self.path}")
        data = json.loads(result.stdout)
        # Get duration
        self._duration = float(data["format"]["duration"])
        # Get video stream info
        for stream in data["streams"]:
            if stream["codec_type"] == "video":
                self._size = (int(stream["width"]), int(stream["height"]))
                # Parse fps from r_frame_rate (e.g., "30/1" or "30000/1001")
                fps_parts = stream.get("r_frame_rate", "30/1").split("/")
                self._fps = float(fps_parts[0]) / float(fps_parts[1])
                break
        if self._size is None:
            # Audio-only or unreadable file: fail now with a useful
            # message instead of crashing later when _size is unpacked
            raise ValueError(f"No video stream in {self.path}")

    @property
    def duration(self) -> float:
        """Duration of the underlying file in seconds."""
        return self._duration

    @property
    def size(self) -> Tuple[int, int]:
        """(width, height) of decoded frames."""
        return self._size

    @property
    def fps(self) -> float:
        """Native frame rate of the file."""
        return self._fps

    def _start_decode(self, start_time: float):
        """Start ffmpeg decode process from given time, stopping any old one."""
        if self._process:
            try:
                self._process.stdout.close()
            except OSError:
                pass  # pipe may already be closed
            self._process.terminate()
            try:
                self._process.wait(timeout=1)
            except subprocess.TimeoutExpired:
                self._process.kill()
                self._process.wait()
        w, h = self._size
        cmd = [
            "ffmpeg", "-v", "quiet",
            "-ss", str(start_time),
            "-i", str(self.path),
            "-f", "rawvideo",
            "-pix_fmt", "rgb24",
            "-r", str(self.target_fps),
            "-"
        ]
        self._process = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
            bufsize=w * h * 3 * 4,  # Buffer a few frames
        )
        self._current_start = start_time
        self._buffer_time = start_time

    def read_frame(self, t: float) -> np.ndarray:
        """
        Read frame at time t.
        If t exceeds duration, wraps around (loops).
        Seeks if needed, otherwise reads sequentially.
        """
        # Wrap time for looping
        t_wrapped = t % self._duration
        # Check if we need to seek (loop point or large time jump)
        need_seek = (
            self._process is None or
            self._buffer_time is None or
            abs(t_wrapped - self._buffer_time) > 1.0 / self.target_fps * 2
        )
        if need_seek:
            self._start_decode(t_wrapped)
        # Read frame
        w, h = self._size
        frame_size = w * h * 3
        # Try to read with retries for seek settling
        for attempt in range(3):
            raw = self._process.stdout.read(frame_size)
            if len(raw) == frame_size:
                break
            # End of stream or seek not ready - restart from beginning
            self._start_decode(0)
        if len(raw) < frame_size:
            # Still no data - return last frame or black
            if self._frame_buffer is not None:
                return self._frame_buffer.copy()
            return np.zeros((h, w, 3), dtype=np.uint8)
        frame = np.frombuffer(raw, dtype=np.uint8).reshape((h, w, 3))
        self._frame_buffer = frame  # Cache for fallback
        self._buffer_time = t_wrapped + 1.0 / self.target_fps
        return frame

    def close(self):
        """Clean up resources."""
        if self._process:
            self._process.terminate()
            self._process.wait()
            self._process = None

    def __del__(self):
        self.close()

    def __repr__(self):
        return f"VideoSource({self.path.name}, {self._size[0]}x{self._size[1]}, {self._duration:.1f}s)"
class ImageSource(Source):
    """
    Static image source (returns same frame for any time).
    Useful for backgrounds, overlays, etc.
    """

    def __init__(self, path: str):
        self.path = Path(path)
        if not self.path.exists():
            raise FileNotFoundError(f"Image not found: {path}")
        # Load image (cv2 imported lazily so it is only required when used)
        import cv2
        frame = cv2.imread(str(self.path))
        if frame is None:
            # cv2.imread signals unreadable/corrupt files by returning
            # None instead of raising - surface an explicit error rather
            # than crashing inside cvtColor
            raise ValueError(f"Could not decode image: {path}")
        self._frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        self._size = (self._frame.shape[1], self._frame.shape[0])

    @property
    def duration(self) -> float:
        return float('inf')  # Images last forever

    @property
    def size(self) -> Tuple[int, int]:
        return self._size

    @property
    def fps(self) -> float:
        return 30.0  # Arbitrary

    def read_frame(self, t: float) -> np.ndarray:
        """Return a copy of the loaded frame; t is ignored."""
        return self._frame.copy()

    def __repr__(self):
        return f"ImageSource({self.path.name}, {self._size[0]}x{self._size[1]})"
class LiveSource(Source):
    """
    Live video capture source (webcam, capture card, etc.).
    Time parameter is ignored - always returns latest frame.
    """

    def __init__(self, device: int = 0, size: Tuple[int, int] = (1280, 720), fps: float = 30):
        import cv2
        self._cap = cv2.VideoCapture(device)
        # Fail fast: check the device opened BEFORE configuring it.
        # (The original checked isOpened() only after several set()/get()
        # calls, which silently return False/0 on a closed capture.)
        if not self._cap.isOpened():
            raise RuntimeError(f"Could not open video device {device}")
        self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, size[0])
        self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, size[1])
        self._cap.set(cv2.CAP_PROP_FPS, fps)
        # Get actual settings (the driver may not honor the requests)
        self._size = (
            int(self._cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(self._cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        )
        self._fps = self._cap.get(cv2.CAP_PROP_FPS)

    @property
    def duration(self) -> float:
        return float('inf')  # Live - no duration

    @property
    def size(self) -> Tuple[int, int]:
        return self._size

    @property
    def fps(self) -> float:
        return self._fps

    def read_frame(self, t: float) -> np.ndarray:
        """Read latest frame (t is ignored for live sources)."""
        import cv2
        ret, frame = self._cap.read()
        if not ret:
            # Capture hiccup: return a black frame rather than failing
            return np.zeros((self._size[1], self._size[0], 3), dtype=np.uint8)
        return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    def close(self):
        self._cap.release()

    def __del__(self):
        self.close()

    def __repr__(self):
        return f"LiveSource({self._size[0]}x{self._size[1]}, {self._fps}fps)"

1081
streaming/stream_sexp.py Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,863 @@
"""
Fully Generic Streaming S-expression Interpreter.
The interpreter knows NOTHING about video, audio, or any domain.
All domain logic comes from primitives loaded via (require-primitives ...).
Built-in forms:
- Control: if, cond, let, let*, lambda, ->
- Arithmetic: +, -, *, /, mod, map-range
- Comparison: <, >, =, <=, >=, and, or, not
- Data: dict, get, list, nth, len, quote
- Random: rand, rand-int, rand-range
- Scan: bind (access scan state)
Everything else comes from primitives or effects.
Context (ctx) is passed explicitly to frame evaluation:
- ctx.t: current time
- ctx.frame-num: current frame number
- ctx.fps: frames per second
"""
import sys
import time
import json
import hashlib
import math
import numpy as np
from pathlib import Path
from dataclasses import dataclass
from typing import Dict, List, Any, Optional, Tuple
# Try pip-installed artdag first, fall back to local path
try:
from artdag.sexp.parser import parse, parse_all, Symbol, Keyword
except ImportError:
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag"))
from artdag.sexp.parser import parse, parse_all, Symbol, Keyword
@dataclass
class Context:
    """Runtime context passed to frame evaluation."""
    t: float = 0.0       # current time in seconds
    frame_num: int = 0   # index of the current frame
    fps: float = 30.0    # frames per second of the stream
class StreamInterpreter:
    """
    Fully generic streaming sexp interpreter.
    No domain-specific knowledge - just evaluates expressions
    and calls primitives.
    """

    def __init__(self, sexp_path: str):
        # Parse the stream program and derive its top-level config
        self.sexp_path = Path(sexp_path)
        self.sexp_dir = self.sexp_path.parent
        text = self.sexp_path.read_text()
        self.ast = parse(text)
        self.config = self._parse_config()
        # Global environment for def bindings
        self.globals: Dict[str, Any] = {}
        # Scans
        self.scans: Dict[str, dict] = {}
        # Audio playback path (for syncing output)
        self.audio_playback: Optional[str] = None
        # Registries for external definitions
        self.primitives: Dict[str, Any] = {}
        self.effects: Dict[str, dict] = {}
        self.macros: Dict[str, dict] = {}
        # Default search root for (require-primitives ...) libraries
        self.primitive_lib_dir = self.sexp_dir.parent / "sexp_effects" / "primitive_libs"
        self.frame_pipeline = None
        # External config files (set before run())
        self.sources_config: Optional[Path] = None
        self.audio_config: Optional[Path] = None
        # Seeded RNG so runs are reproducible for a given :seed
        import random
        self.rng = random.Random(self.config.get('seed', 42))
def _load_config_file(self, config_path):
    """Load a config file and process its definitions.

    Recognized top-level forms: (require-primitives "lib"),
    (def name expr) - evaluated into globals - and
    (audio-playback "path"). Missing files only warn; other forms are
    silently ignored.
    """
    config_path = Path(config_path)  # Accept str or Path
    if not config_path.exists():
        print(f"Warning: config file not found: {config_path}", file=sys.stderr)
        return
    text = config_path.read_text()
    ast = parse_all(text)
    for form in ast:
        if not isinstance(form, list) or not form:
            continue
        if not isinstance(form[0], Symbol):
            continue
        cmd = form[0].name
        if cmd == 'require-primitives':
            lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"')
            self._load_primitives(lib_name)
        elif cmd == 'def':
            name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
            value = self._eval(form[2], self.globals)
            self.globals[name] = value
            print(f"Config: {name}", file=sys.stderr)
        elif cmd == 'audio-playback':
            # Path relative to working directory (consistent with other paths)
            path = str(form[1]).strip('"')
            self.audio_playback = str(Path(path).resolve())
            print(f"Audio playback: {self.audio_playback}", file=sys.stderr)
def _parse_config(self) -> dict:
    """Parse config from (stream name :key val ...).

    Returns the defaults (fps/seed/width/height) when the AST does not
    start with a 'stream' form. Scanning starts at index 2 (past the
    'stream' symbol and the stream name) and stops at the first nested
    list, i.e. the first body form.
    """
    config = {'fps': 30, 'seed': 42, 'width': 720, 'height': 720}
    if not self.ast or not isinstance(self.ast[0], Symbol):
        return config
    if self.ast[0].name != 'stream':
        return config
    i = 2
    while i < len(self.ast):
        if isinstance(self.ast[i], Keyword):
            # Keyword followed by its value (None when the value is missing)
            config[self.ast[i].name] = self.ast[i + 1] if i + 1 < len(self.ast) else None
            i += 2
        elif isinstance(self.ast[i], list):
            break
        else:
            i += 1
    return config
def _load_primitives(self, lib_name: str):
    """Load primitives from a Python library file.

    Searches three candidate locations for ``<lib_name>.py``, imports
    it, and registers every ``prim_*`` function (and any entries in a
    module-level PRIMITIVES dict) under the namespaced, dash-cased name
    ``<lib>:<name>``. A missing library only warns.
    """
    import importlib.util
    lib_paths = [
        self.primitive_lib_dir / f"{lib_name}.py",
        self.sexp_dir / "primitive_libs" / f"{lib_name}.py",
        self.sexp_dir.parent / "sexp_effects" / "primitive_libs" / f"{lib_name}.py",
    ]
    lib_path = None
    for p in lib_paths:
        if p.exists():
            lib_path = p
            break
    if not lib_path:
        print(f"Warning: primitive library '{lib_name}' not found", file=sys.stderr)
        return
    spec = importlib.util.spec_from_file_location(lib_name, lib_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    count = 0
    for name in dir(module):
        if name.startswith('prim_'):
            func = getattr(module, name)
            prim_name = name[5:]
            dash_name = prim_name.replace('_', '-')
            # Register ONLY with namespace (geometry:ripple-displace)
            self.primitives[f"{lib_name}:{dash_name}"] = func
            count += 1
    if hasattr(module, 'PRIMITIVES'):
        prims = getattr(module, 'PRIMITIVES')
        if isinstance(prims, dict):
            for name, func in prims.items():
                # Register ONLY with namespace
                dash_name = name.replace('_', '-')
                self.primitives[f"{lib_name}:{dash_name}"] = func
                count += 1
    print(f"Loaded primitives: {lib_name} ({count} functions)", file=sys.stderr)
def _load_effect(self, effect_path: Path):
    """Load and register an effect from a .sexp file.

    Parses every top-level form in the file and dispatches on its head
    symbol: require-primitives, define-effect, defmacro, effect (recursive
    include with :path resolved relative to *this* file), include (same),
    and scan. Missing files print a warning and return without raising.
    """
    if not effect_path.exists():
        print(f"Warning: effect file not found: {effect_path}", file=sys.stderr)
        return
    text = effect_path.read_text()
    ast = parse_all(text)
    for form in ast:
        # Only (symbol ...) forms are directives; skip atoms/empties.
        if not isinstance(form, list) or not form:
            continue
        if not isinstance(form[0], Symbol):
            continue
        cmd = form[0].name
        if cmd == 'require-primitives':
            lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"')
            self._load_primitives(lib_name)
        elif cmd == 'define-effect':
            # (define-effect name :params ((pname :default v ...) ...) body)
            name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
            params = {}
            body = None
            i = 2
            while i < len(form):
                if isinstance(form[i], Keyword):
                    if form[i].name == 'params' and i + 1 < len(form):
                        # Each pdef is (pname :key val ...); collect the
                        # keyword pairs into a per-parameter info dict.
                        for pdef in form[i + 1]:
                            if isinstance(pdef, list) and pdef:
                                pname = pdef[0].name if isinstance(pdef[0], Symbol) else str(pdef[0])
                                pinfo = {'default': 0}
                                j = 1
                                while j < len(pdef):
                                    if isinstance(pdef[j], Keyword) and j + 1 < len(pdef):
                                        pinfo[pdef[j].name] = pdef[j + 1]
                                        j += 2
                                    else:
                                        j += 1
                                params[pname] = pinfo
                    i += 2
                else:
                    # First non-keyword form is the effect body (last one wins).
                    body = form[i]
                    i += 1
            self.effects[name] = {'params': params, 'body': body}
            print(f"Effect: {name}", file=sys.stderr)
        elif cmd == 'defmacro':
            # (defmacro name (params...) body) — body stored unevaluated.
            name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
            params = [p.name if isinstance(p, Symbol) else str(p) for p in form[2]]
            body = form[3]
            self.macros[name] = {'params': params, 'body': body}
        elif cmd == 'effect':
            # Handle (effect name :path "...") in included files - recursive
            i = 2
            while i < len(form):
                if isinstance(form[i], Keyword) and form[i].name == 'path':
                    path = str(form[i + 1]).strip('"')
                    # Resolve relative to the file being loaded
                    full = (effect_path.parent / path).resolve()
                    self._load_effect(full)
                    i += 2
                else:
                    i += 1
        elif cmd == 'include':
            # Handle (include :path "...") in included files - recursive
            i = 1
            while i < len(form):
                if isinstance(form[i], Keyword) and form[i].name == 'path':
                    path = str(form[i + 1]).strip('"')
                    full = (effect_path.parent / path).resolve()
                    self._load_effect(full)
                    i += 2
                else:
                    i += 1
        elif cmd == 'scan':
            # Handle scans from included files
            # (scan name trigger :init expr :step expr) — :init is evaluated
            # now against globals; :step stays unevaluated until triggered.
            name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
            trigger_expr = form[2]
            init_val, step_expr = {}, None
            i = 3
            while i < len(form):
                if isinstance(form[i], Keyword):
                    if form[i].name == 'init' and i + 1 < len(form):
                        init_val = self._eval(form[i + 1], self.globals)
                    elif form[i].name == 'step' and i + 1 < len(form):
                        step_expr = form[i + 1]
                    i += 2
                else:
                    i += 1
            self.scans[name] = {
                'state': dict(init_val) if isinstance(init_val, dict) else {'acc': init_val},
                'init': init_val,
                'step': step_expr,
                'trigger': trigger_expr,
            }
            print(f"Scan: {name}", file=sys.stderr)
def _init(self):
    """Initialize from sexp - load primitives, effects, defs, scans.

    Walks the children of the recipe form (presumably the nested lists of
    the top-level ``(stream ...)`` form — see _parse_config) and dispatches
    each directive. External config files are loaded first and take
    precedence: a ``def`` or ``audio-playback`` already set by config is
    skipped here.
    """
    # Load external config files first (they can override recipe definitions)
    if self.sources_config:
        self._load_config_file(self.sources_config)
    if self.audio_config:
        self._load_config_file(self.audio_config)
    for form in self.ast:
        # Only (symbol ...) forms are directives; atoms/keywords are skipped.
        if not isinstance(form, list) or not form:
            continue
        if not isinstance(form[0], Symbol):
            continue
        cmd = form[0].name
        if cmd == 'require-primitives':
            lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"')
            self._load_primitives(lib_name)
        elif cmd == 'effect':
            # (effect name :path "...") — paths resolve against the recipe dir.
            name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
            i = 2
            while i < len(form):
                if isinstance(form[i], Keyword) and form[i].name == 'path':
                    path = str(form[i + 1]).strip('"')
                    full = (self.sexp_dir / path).resolve()
                    self._load_effect(full)
                    i += 2
                else:
                    i += 1
        elif cmd == 'include':
            # (include :path "...") — same resolution as 'effect'.
            i = 1
            while i < len(form):
                if isinstance(form[i], Keyword) and form[i].name == 'path':
                    path = str(form[i + 1]).strip('"')
                    full = (self.sexp_dir / path).resolve()
                    self._load_effect(full)
                    i += 2
                else:
                    i += 1
        elif cmd == 'audio-playback':
            # (audio-playback "path") - set audio file for playback sync
            # Skip if already set by config file
            if self.audio_playback is None:
                path = str(form[1]).strip('"')
                self.audio_playback = str((self.sexp_dir / path).resolve())
                print(f"Audio playback: {self.audio_playback}", file=sys.stderr)
        elif cmd == 'def':
            # (def name expr) - evaluate and store in globals
            # Skip if already defined by config file
            name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
            if name in self.globals:
                print(f"Def: {name} (from config, skipped)", file=sys.stderr)
                continue
            value = self._eval(form[2], self.globals)
            self.globals[name] = value
            print(f"Def: {name}", file=sys.stderr)
        elif cmd == 'defmacro':
            # (defmacro name (params...) body) — body stored unevaluated.
            name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
            params = [p.name if isinstance(p, Symbol) else str(p) for p in form[2]]
            body = form[3]
            self.macros[name] = {'params': params, 'body': body}
        elif cmd == 'scan':
            # (scan name trigger :init expr :step expr) — :init evaluated now
            # against globals; :step left unevaluated until the trigger fires.
            name = form[1].name if isinstance(form[1], Symbol) else str(form[1])
            trigger_expr = form[2]
            init_val, step_expr = {}, None
            i = 3
            while i < len(form):
                if isinstance(form[i], Keyword):
                    if form[i].name == 'init' and i + 1 < len(form):
                        init_val = self._eval(form[i + 1], self.globals)
                    elif form[i].name == 'step' and i + 1 < len(form):
                        step_expr = form[i + 1]
                    i += 2
                else:
                    i += 1
            self.scans[name] = {
                'state': dict(init_val) if isinstance(init_val, dict) else {'acc': init_val},
                'init': init_val,
                'step': step_expr,
                'trigger': trigger_expr,
            }
            print(f"Scan: {name}", file=sys.stderr)
        elif cmd == 'frame':
            # (frame pipeline-expr) — the per-frame expression run by run().
            self.frame_pipeline = form[1] if len(form) > 1 else None
def _eval(self, expr, env: dict) -> Any:
    """Evaluate an expression.

    Dispatch order (which IS the language's semantics, so order matters):
    literals; symbols (built-in constants, then env, then globals, then
    scan state); keywords (self-evaluate to their name); dict literals;
    closure calls from env/globals; the '->' threading macro; special
    forms (bind, arithmetic, comparison, logic, let, dict/list ops);
    user-defined effects; registered primitives (exact name, then the
    dash->underscore variant); macros. Unknown forms are returned as-is.
    Primitive exceptions are printed to stderr and yield None rather than
    aborting the frame.
    """
    # Primitives
    if isinstance(expr, (int, float)):
        return expr
    if isinstance(expr, str):
        return expr
    # NOTE: bool is a subclass of int, so True/False are already returned
    # by the (int, float) check above; this branch is unreachable.
    if isinstance(expr, bool):
        return expr
    if isinstance(expr, Symbol):
        name = expr.name
        # Built-in constants
        if name == 'pi':
            return math.pi
        if name == 'true':
            return True
        if name == 'false':
            return False
        if name == 'nil':
            return None
        # Environment lookup
        if name in env:
            return env[name]
        # Global lookup
        if name in self.globals:
            return self.globals[name]
        # Scan state lookup
        if name in self.scans:
            return self.scans[name]['state']
        raise NameError(f"Undefined variable: {name}")
    if isinstance(expr, Keyword):
        return expr.name
    if not isinstance(expr, list) or not expr:
        return expr
    # Dict literal {:key val ...}
    if isinstance(expr[0], Keyword):
        result = {}
        i = 0
        while i < len(expr):
            if isinstance(expr[i], Keyword):
                result[expr[i].name] = self._eval(expr[i + 1], env) if i + 1 < len(expr) else None
                i += 2
            else:
                i += 1
        return result
    head = expr[0]
    if not isinstance(head, Symbol):
        # A list not headed by a symbol evaluates element-wise.
        return [self._eval(e, env) for e in expr]
    op = head.name
    args = expr[1:]
    # Check for closure call
    # (env binding shadows a global of the same name; missing args bind None)
    if op in env:
        val = env[op]
        if isinstance(val, dict) and val.get('_type') == 'closure':
            closure = val
            closure_env = dict(closure['env'])
            for i, pname in enumerate(closure['params']):
                closure_env[pname] = self._eval(args[i], env) if i < len(args) else None
            return self._eval(closure['body'], closure_env)
    if op in self.globals:
        val = self.globals[op]
        if isinstance(val, dict) and val.get('_type') == 'closure':
            closure = val
            closure_env = dict(closure['env'])
            for i, pname in enumerate(closure['params']):
                closure_env[pname] = self._eval(args[i], env) if i < len(args) else None
            return self._eval(closure['body'], closure_env)
    # Threading macro
    # (-> x (f a) g) == (g (f x a)): result is spliced as first argument.
    if op == '->':
        result = self._eval(args[0], env)
        for form in args[1:]:
            if isinstance(form, list) and form:
                new_form = [form[0], result] + form[1:]
                result = self._eval(new_form, env)
            else:
                result = self._eval([form, result], env)
        return result
    # === Binding ===
    # (bind scan) -> whole state dict; (bind scan :key) -> state[key] or 0.
    if op == 'bind':
        scan_name = args[0].name if isinstance(args[0], Symbol) else str(args[0])
        if scan_name in self.scans:
            state = self.scans[scan_name]['state']
            if len(args) > 1:
                key = args[1].name if isinstance(args[1], Keyword) else str(args[1])
                return state.get(key, 0)
            return state
        return 0
    # === Arithmetic ===
    if op == '+':
        return sum(self._eval(a, env) for a in args)
    if op == '-':
        # Unary minus negates; n-ary subtracts the tail from the head.
        vals = [self._eval(a, env) for a in args]
        return vals[0] - sum(vals[1:]) if len(vals) > 1 else -vals[0]
    if op == '*':
        result = 1
        for a in args:
            result *= self._eval(a, env)
        return result
    if op == '/':
        # Division by zero (and the single-argument form) yields 0, not an error.
        vals = [self._eval(a, env) for a in args]
        return vals[0] / vals[1] if len(vals) > 1 and vals[1] != 0 else 0
    if op == 'mod':
        vals = [self._eval(a, env) for a in args]
        return vals[0] % vals[1] if len(vals) > 1 and vals[1] != 0 else 0
    # === Comparison ===
    if op == '<':
        return self._eval(args[0], env) < self._eval(args[1], env)
    if op == '>':
        return self._eval(args[0], env) > self._eval(args[1], env)
    if op == '=':
        return self._eval(args[0], env) == self._eval(args[1], env)
    if op == '<=':
        return self._eval(args[0], env) <= self._eval(args[1], env)
    if op == '>=':
        return self._eval(args[0], env) >= self._eval(args[1], env)
    if op == 'and':
        # Short-circuits; returns a bare True/False, not the last operand.
        for arg in args:
            if not self._eval(arg, env):
                return False
        return True
    if op == 'or':
        # Short-circuits and returns the first truthy operand (Lisp-style).
        result = False
        for arg in args:
            result = self._eval(arg, env)
            if result:
                return result
        return result
    if op == 'not':
        return not self._eval(args[0], env)
    # === Logic ===
    if op == 'if':
        cond = self._eval(args[0], env)
        if cond:
            return self._eval(args[1], env)
        return self._eval(args[2], env) if len(args) > 2 else None
    if op == 'cond':
        # (cond pred1 val1 pred2 val2 ...) — flat pairs, no else clause.
        i = 0
        while i < len(args) - 1:
            pred = self._eval(args[i], env)
            if pred:
                return self._eval(args[i + 1], env)
            i += 2
        return None
    if op == 'lambda':
        # Captures a snapshot of the current env (closure by value).
        params = args[0]
        body = args[1]
        param_names = [p.name if isinstance(p, Symbol) else str(p) for p in params]
        return {'_type': 'closure', 'params': param_names, 'body': body, 'env': dict(env)}
    if op == 'let' or op == 'let*':
        # Supports both ((n v) (n v)) and flat (n v n v) binding forms;
        # later bindings can see earlier ones (let* semantics in both cases).
        bindings = args[0]
        body = args[1]
        new_env = dict(env)
        if bindings and isinstance(bindings[0], list):
            for binding in bindings:
                if isinstance(binding, list) and len(binding) >= 2:
                    name = binding[0].name if isinstance(binding[0], Symbol) else str(binding[0])
                    val = self._eval(binding[1], new_env)
                    new_env[name] = val
        else:
            i = 0
            while i < len(bindings):
                name = bindings[i].name if isinstance(bindings[i], Symbol) else str(bindings[i])
                val = self._eval(bindings[i + 1], new_env)
                new_env[name] = val
                i += 2
        return self._eval(body, new_env)
    # === Dict ===
    if op == 'dict':
        result = {}
        i = 0
        while i < len(args):
            if isinstance(args[i], Keyword):
                key = args[i].name
                val = self._eval(args[i + 1], env) if i + 1 < len(args) else None
                result[key] = val
                i += 2
            else:
                i += 1
        return result
    if op == 'get':
        # Missing keys and non-dict objects both yield 0, not an error.
        obj = self._eval(args[0], env)
        key = args[1].name if isinstance(args[1], Keyword) else self._eval(args[1], env)
        if isinstance(obj, dict):
            return obj.get(key, 0)
        return 0
    # === List ===
    if op == 'list':
        return [self._eval(a, env) for a in args]
    if op == 'quote':
        return args[0] if args else None
    if op == 'nth':
        # Out-of-range index yields None rather than raising.
        lst = self._eval(args[0], env)
        idx = int(self._eval(args[1], env))
        if isinstance(lst, (list, tuple)) and 0 <= idx < len(lst):
            return lst[idx]
        return None
    if op == 'len':
        val = self._eval(args[0], env)
        return len(val) if hasattr(val, '__len__') else 0
    if op == 'map':
        seq = self._eval(args[0], env)
        fn = self._eval(args[1], env)
        if not isinstance(seq, (list, tuple)):
            return []
        # Handle closure (lambda from sexp)
        if isinstance(fn, dict) and fn.get('_type') == 'closure':
            results = []
            for item in seq:
                closure_env = dict(fn['env'])
                if fn['params']:
                    closure_env[fn['params'][0]] = item
                results.append(self._eval(fn['body'], closure_env))
            return results
        # Handle Python callable
        if callable(fn):
            return [fn(item) for item in seq]
        return []
    # === Effects ===
    if op in self.effects:
        # Effect call: defaults first, then keyword args; positional arg 0
        # binds to 'frame', remaining positionals map to params in order.
        effect = self.effects[op]
        effect_env = dict(env)
        param_names = list(effect['params'].keys())
        for pname, pdef in effect['params'].items():
            effect_env[pname] = pdef.get('default', 0)
        positional_idx = 0
        i = 0
        while i < len(args):
            if isinstance(args[i], Keyword):
                pname = args[i].name
                if pname in effect['params'] and i + 1 < len(args):
                    effect_env[pname] = self._eval(args[i + 1], env)
                i += 2
            else:
                val = self._eval(args[i], env)
                if positional_idx == 0:
                    effect_env['frame'] = val
                elif positional_idx - 1 < len(param_names):
                    effect_env[param_names[positional_idx - 1]] = val
                positional_idx += 1
                i += 1
        return self._eval(effect['body'], effect_env)
    # === Primitives ===
    if op in self.primitives:
        prim_func = self.primitives[op]
        evaluated_args = []
        kwargs = {}
        i = 0
        while i < len(args):
            if isinstance(args[i], Keyword):
                k = args[i].name
                v = self._eval(args[i + 1], env) if i + 1 < len(args) else None
                kwargs[k] = v
                i += 2
            else:
                evaluated_args.append(self._eval(args[i], env))
                i += 1
        try:
            if kwargs:
                return prim_func(*evaluated_args, **kwargs)
            return prim_func(*evaluated_args)
        except Exception as e:
            # Fail soft: a bad primitive call drops the result, not the stream.
            print(f"Primitive {op} error: {e}", file=sys.stderr)
            return None
    # === Macros (function-like: args evaluated before binding) ===
    if op in self.macros:
        macro = self.macros[op]
        macro_env = dict(env)
        for i, pname in enumerate(macro['params']):
            # Evaluate args in calling environment before binding
            macro_env[pname] = self._eval(args[i], env) if i < len(args) else None
        return self._eval(macro['body'], macro_env)
    # Underscore variant lookup
    # (dash->underscore fallback for primitives registered with snake_case)
    prim_name = op.replace('-', '_')
    if prim_name in self.primitives:
        prim_func = self.primitives[prim_name]
        evaluated_args = []
        kwargs = {}
        i = 0
        while i < len(args):
            if isinstance(args[i], Keyword):
                k = args[i].name.replace('-', '_')
                v = self._eval(args[i + 1], env) if i + 1 < len(args) else None
                kwargs[k] = v
                i += 2
            else:
                evaluated_args.append(self._eval(args[i], env))
                i += 1
        try:
            if kwargs:
                return prim_func(*evaluated_args, **kwargs)
            return prim_func(*evaluated_args)
        except Exception as e:
            print(f"Primitive {op} error: {e}", file=sys.stderr)
            return None
    # Unknown - return as-is
    return expr
def _step_scans(self, ctx: Context, env: dict):
    """Advance each registered scan whose trigger evaluates truthy.

    The step expression is evaluated in the scan's current state merged
    with the frame environment (*env* wins on key clashes). A dict result
    replaces the state wholesale; any other result is wrapped as
    ``{'acc': value}``.
    """
    for scan in self.scans.values():
        if not self._eval(scan['trigger'], env):
            continue
        step_env = {**scan['state'], **env}
        new_state = self._eval(scan['step'], step_env)
        scan['state'] = new_state if isinstance(new_state, dict) else {'acc': new_state}
def run(self, duration: float = None, output: str = "pipe"):
    """Run the streaming pipeline.

    Initializes the recipe, then renders frames at the configured fps and
    writes each one to the selected output sink.

    Args:
        duration: Seconds to stream. When None, falls back to the first
            global exposing a ``.duration`` attribute (typically loaded
            audio), else 60 seconds.
        output: "pipe" for stdout piping, "preview" for a display window,
            anything else is treated as an output file path.
    """
    # Import output classes - handle both package and direct execution
    try:
        from .output import PipeOutput, DisplayOutput, FileOutput
    except ImportError:
        from output import PipeOutput, DisplayOutput, FileOutput
    from collections import deque
    self._init()
    if not self.frame_pipeline:
        print("Error: no (frame ...) pipeline defined", file=sys.stderr)
        return
    w = self.config.get('width', 720)
    h = self.config.get('height', 720)
    fps = self.config.get('fps', 30)
    if duration is None:
        # Try to get duration from audio if available
        for name, val in self.globals.items():
            if hasattr(val, 'duration'):
                duration = val.duration
                print(f"Using audio duration: {duration:.1f}s", file=sys.stderr)
                break
        else:
            duration = 60.0
    n_frames = int(duration * fps)
    frame_time = 1.0 / fps
    target_ms = 1000 * frame_time  # loop-invariant; hoisted out of the frame loop
    print(f"Streaming {n_frames} frames @ {fps}fps", file=sys.stderr)
    # Create context
    ctx = Context(fps=fps)
    # Output (with optional audio sync)
    audio = self.audio_playback
    if output == "pipe":
        out = PipeOutput(size=(w, h), fps=fps, audio_source=audio)
    elif output == "preview":
        out = DisplayOutput(size=(w, h), fps=fps, audio_source=audio)
    else:
        out = FileOutput(output, size=(w, h), fps=fps, audio_source=audio)
    try:
        # Rolling window of recent frame render times. Bounded (previously
        # an unbounded list growing one float per frame for the life of the
        # stream) — the progress line only ever averaged the last 30 anyway.
        frame_times = deque(maxlen=30)
        for frame_num in range(n_frames):
            if not out.is_open:
                break
            frame_start = time.time()
            ctx.t = frame_num * frame_time
            ctx.frame_num = frame_num
            # Build frame environment with context
            frame_env = {
                'ctx': {
                    't': ctx.t,
                    'frame-num': ctx.frame_num,
                    'fps': ctx.fps,
                },
                't': ctx.t,  # Also expose t directly for convenience
                'frame-num': ctx.frame_num,
            }
            # Step scans
            self._step_scans(ctx, frame_env)
            # Evaluate pipeline
            result = self._eval(self.frame_pipeline, frame_env)
            # Only array-like results (anything with .shape) are written out.
            if result is not None and hasattr(result, 'shape'):
                out.write(result, ctx.t)
            frame_times.append(time.time() - frame_start)
            # Progress with timing
            if frame_num % 30 == 0:
                pct = 100 * frame_num / n_frames
                avg_ms = 1000 * sum(frame_times) / max(1, len(frame_times))
                print(f"\r{pct:5.1f}% [{avg_ms:.0f}ms/frame, target {target_ms:.0f}ms]", end="", file=sys.stderr, flush=True)
    finally:
        out.close()
    print("\nDone", file=sys.stderr)
def run_stream(sexp_path: str, duration: float = None, output: str = "pipe", fps: float = None,
               sources_config: str = None, audio_config: str = None):
    """Construct a StreamInterpreter for *sexp_path*, apply the optional
    fps/config overrides, and stream it to the requested output."""
    interpreter = StreamInterpreter(sexp_path)
    if fps:
        interpreter.config['fps'] = fps
    # Optional external config files, attached as Paths when provided.
    for attr, value in (('sources_config', sources_config),
                        ('audio_config', audio_config)):
        if value:
            setattr(interpreter, attr, Path(value))
    interpreter.run(duration=duration, output=output)
if __name__ == "__main__":
    # CLI entry point: stream a recipe file directly from the command line.
    import argparse
    parser = argparse.ArgumentParser(description="Run streaming sexp (generic interpreter)")
    parser.add_argument("sexp", help="Path to .sexp file")
    # Seconds to stream; None defers to the recipe/audio default.
    parser.add_argument("-d", "--duration", type=float, default=None)
    # "pipe", "preview", or an output file path.
    parser.add_argument("-o", "--output", default="pipe")
    # Overrides the fps declared in the recipe's (stream ...) form.
    parser.add_argument("--fps", type=float, default=None)
    parser.add_argument("--sources", dest="sources_config", help="Path to sources config .sexp file")
    parser.add_argument("--audio", dest="audio_config", help="Path to audio config .sexp file")
    args = parser.parse_args()
    run_stream(args.sexp, duration=args.duration, output=args.output, fps=args.fps,
               sources_config=args.sources_config, audio_config=args.audio_config)

View File

@@ -1,26 +1,10 @@
# art-celery/tasks - Celery tasks for 3-phase execution # art-celery/tasks - Celery tasks for streaming video rendering
# #
# Tasks for the Art DAG distributed execution system: # Tasks:
# 1. analyze_input - Extract features from input media # 1. run_stream - Execute a streaming S-expression recipe
# 2. execute_step - Execute a single step from the plan
# 3. run_plan - Orchestrate execution of a full plan
#
# S-expression tasks:
# 4. execute_step_sexp - Execute step from S-expression
# 5. run_plan_sexp - Run plan from S-expression
from .analyze import analyze_input, analyze_inputs from .streaming import run_stream
from .execute import execute_step
from .orchestrate import run_plan, run_recipe
from .execute_sexp import execute_step_sexp, run_plan_sexp
__all__ = [ __all__ = [
"analyze_input", "run_stream",
"analyze_inputs",
"execute_step",
"run_plan",
"run_recipe",
# S-expression tasks
"execute_step_sexp",
"run_plan_sexp",
] ]

View File

@@ -1,132 +0,0 @@
"""
Analysis tasks for extracting features from input media.
Phase 1 of the 3-phase execution model.
"""
import json
import logging
import os
from pathlib import Path
from typing import Dict, List, Optional
from celery import current_task
# Import from the Celery app
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from celery_app import app
# Import artdag analysis module
try:
from artdag.analysis import Analyzer, AnalysisResult
except ImportError:
# artdag not installed, will fail at runtime
Analyzer = None
AnalysisResult = None
logger = logging.getLogger(__name__)
# Cache directory for analysis results
CACHE_DIR = Path(os.environ.get('CACHE_DIR', str(Path.home() / ".artdag" / "cache")))
ANALYSIS_CACHE_DIR = CACHE_DIR / 'analysis'
@app.task(bind=True, name='tasks.analyze_input')
def analyze_input(
self,
input_hash: str,
input_path: str,
features: List[str],
) -> dict:
"""
Analyze a single input file.
Args:
input_hash: Content hash of the input
input_path: Path to the input file
features: List of features to extract
Returns:
Dict with analysis results
"""
if Analyzer is None:
raise ImportError("artdag.analysis not available")
logger.info(f"Analyzing {input_hash[:16]}... for features: {features}")
# Create analyzer with caching
ANALYSIS_CACHE_DIR.mkdir(parents=True, exist_ok=True)
analyzer = Analyzer(cache_dir=ANALYSIS_CACHE_DIR)
try:
result = analyzer.analyze(
input_hash=input_hash,
features=features,
input_path=Path(input_path),
)
return {
"status": "completed",
"input_hash": input_hash,
"cache_id": result.cache_id,
"features": features,
"result": result.to_dict(),
}
except Exception as e:
logger.error(f"Analysis failed for {input_hash}: {e}")
return {
"status": "failed",
"input_hash": input_hash,
"error": str(e),
}
@app.task(bind=True, name='tasks.analyze_inputs')
def analyze_inputs(
self,
inputs: Dict[str, str],
features: List[str],
) -> dict:
"""
Analyze multiple inputs in parallel.
Args:
inputs: Dict mapping input_hash to file path
features: List of features to extract from all inputs
Returns:
Dict with all analysis results
"""
if Analyzer is None:
raise ImportError("artdag.analysis not available")
logger.info(f"Analyzing {len(inputs)} inputs for features: {features}")
ANALYSIS_CACHE_DIR.mkdir(parents=True, exist_ok=True)
analyzer = Analyzer(cache_dir=ANALYSIS_CACHE_DIR)
results = {}
errors = []
for input_hash, input_path in inputs.items():
try:
result = analyzer.analyze(
input_hash=input_hash,
features=features,
input_path=Path(input_path),
)
results[input_hash] = result.to_dict()
except Exception as e:
logger.error(f"Analysis failed for {input_hash}: {e}")
errors.append({"input_hash": input_hash, "error": str(e)})
return {
"status": "completed" if not errors else "partial",
"results": results,
"errors": errors,
"total": len(inputs),
"successful": len(results),
}

View File

@@ -1,381 +0,0 @@
"""
Step execution task.
Phase 3 of the 3-phase execution model.
Executes individual steps from an execution plan with IPFS-backed caching.
"""
import json
import logging
import os
import socket
from pathlib import Path
from typing import Dict, List, Optional
from celery import current_task
# Import from the Celery app
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from celery_app import app
from claiming import (
get_claimer,
claim_task,
complete_task,
fail_task,
ClaimStatus,
)
from cache_manager import get_cache_manager, L1CacheManager
# Import artdag
try:
from artdag import Cache, NodeType
from artdag.executor import get_executor
from artdag.planning import ExecutionStep
except ImportError:
Cache = None
NodeType = None
get_executor = None
ExecutionStep = None
logger = logging.getLogger(__name__)
def get_worker_id() -> str:
"""Get a unique identifier for this worker."""
hostname = socket.gethostname()
pid = os.getpid()
return f"{hostname}:{pid}"
@app.task(bind=True, name='tasks.execute_step')
def execute_step(
self,
step_json: str,
plan_id: str,
input_cache_ids: Dict[str, str],
) -> dict:
"""
Execute a single step from an execution plan.
Uses hash-based claiming to prevent duplicate work.
Results are stored in IPFS-backed cache.
Args:
step_json: JSON-serialized ExecutionStep
plan_id: ID of the parent execution plan
input_cache_ids: Mapping from input step_id to their cache_id
Returns:
Dict with execution result
"""
if ExecutionStep is None:
raise ImportError("artdag.planning not available")
step = ExecutionStep.from_json(step_json)
worker_id = get_worker_id()
task_id = self.request.id
logger.info(f"Executing step {step.step_id} ({step.node_type}) cache_id={step.cache_id[:16]}...")
# Get L1 cache manager (IPFS-backed)
cache_mgr = get_cache_manager()
# Check if already cached (by cache_id as cid)
cached_path = cache_mgr.get_by_cid(step.cache_id)
if cached_path:
logger.info(f"Step {step.step_id} already cached at {cached_path}")
# Mark as cached in claiming system
claimer = get_claimer()
claimer.mark_cached(step.cache_id, str(cached_path))
return {
"status": "cached",
"step_id": step.step_id,
"cache_id": step.cache_id,
"output_path": str(cached_path),
}
# Try to claim the task
if not claim_task(step.cache_id, worker_id, task_id):
# Another worker is handling it
logger.info(f"Step {step.step_id} claimed by another worker, waiting...")
claimer = get_claimer()
result = claimer.wait_for_completion(step.cache_id, timeout=600)
if result and result.status == ClaimStatus.COMPLETED:
return {
"status": "completed_by_other",
"step_id": step.step_id,
"cache_id": step.cache_id,
"output_path": result.output_path,
}
elif result and result.status == ClaimStatus.CACHED:
return {
"status": "cached",
"step_id": step.step_id,
"cache_id": step.cache_id,
"output_path": result.output_path,
}
elif result and result.status == ClaimStatus.FAILED:
return {
"status": "failed",
"step_id": step.step_id,
"cache_id": step.cache_id,
"error": result.error,
}
else:
return {
"status": "timeout",
"step_id": step.step_id,
"cache_id": step.cache_id,
"error": "Timeout waiting for other worker",
}
# We have the claim, update to running
claimer = get_claimer()
claimer.update_status(step.cache_id, worker_id, ClaimStatus.RUNNING)
try:
# Handle SOURCE nodes
if step.node_type == "SOURCE":
cid = step.config.get("cid")
if not cid:
raise ValueError(f"SOURCE step missing cid")
# Look up in cache
path = cache_mgr.get_by_cid(cid)
if not path:
raise ValueError(f"SOURCE input not found in cache: {cid[:16]}...")
output_path = str(path)
complete_task(step.cache_id, worker_id, output_path)
return {
"status": "completed",
"step_id": step.step_id,
"cache_id": step.cache_id,
"output_path": output_path,
}
# Handle _LIST virtual nodes
if step.node_type == "_LIST":
item_paths = []
for item_id in step.config.get("items", []):
item_cache_id = input_cache_ids.get(item_id)
if item_cache_id:
path = cache_mgr.get_by_cid(item_cache_id)
if path:
item_paths.append(str(path))
complete_task(step.cache_id, worker_id, json.dumps(item_paths))
return {
"status": "completed",
"step_id": step.step_id,
"cache_id": step.cache_id,
"output_path": None,
"item_paths": item_paths,
}
# Handle COMPOUND nodes (collapsed effect chains)
if step.node_type == "COMPOUND":
filter_chain = step.config.get("filter_chain", [])
if not filter_chain:
raise ValueError("COMPOUND step has empty filter_chain")
# Resolve input paths
input_paths = []
for input_step_id in step.input_steps:
input_cache_id = input_cache_ids.get(input_step_id)
if not input_cache_id:
raise ValueError(f"No cache_id for input step: {input_step_id}")
path = cache_mgr.get_by_cid(input_cache_id)
if not path:
raise ValueError(f"Input not in cache: {input_cache_id[:16]}...")
input_paths.append(Path(path))
if not input_paths:
raise ValueError("COMPOUND step has no inputs")
# Build FFmpeg filter graph from chain
import subprocess
import tempfile
filters = []
for filter_item in filter_chain:
filter_type = filter_item.get("type", "")
filter_config = filter_item.get("config", {})
if filter_type == "TRANSFORM":
effects = filter_config.get("effects", {})
for eff_name, eff_value in effects.items():
if eff_name == "saturation":
filters.append(f"eq=saturation={eff_value}")
elif eff_name == "brightness":
filters.append(f"eq=brightness={eff_value}")
elif eff_name == "contrast":
filters.append(f"eq=contrast={eff_value}")
elif eff_name == "hue":
filters.append(f"hue=h={eff_value}")
elif filter_type == "RESIZE":
width = filter_config.get("width", -1)
height = filter_config.get("height", -1)
mode = filter_config.get("mode", "fit")
if mode == "fit":
filters.append(f"scale={width}:{height}:force_original_aspect_ratio=decrease")
elif mode == "fill":
filters.append(f"scale={width}:{height}:force_original_aspect_ratio=increase,crop={width}:{height}")
else:
filters.append(f"scale={width}:{height}")
output_dir = Path(tempfile.mkdtemp())
output_path = output_dir / f"compound_{step.cache_id[:16]}.mp4"
cmd = ["ffmpeg", "-y", "-i", str(input_paths[0])]
# Handle segment timing
for filter_item in filter_chain:
if filter_item.get("type") == "SEGMENT":
seg_config = filter_item.get("config", {})
if "start" in seg_config:
cmd.extend(["-ss", str(seg_config["start"])])
if "end" in seg_config:
duration = seg_config["end"] - seg_config.get("start", 0)
cmd.extend(["-t", str(duration)])
elif "duration" in seg_config:
cmd.extend(["-t", str(seg_config["duration"])])
if filters:
cmd.extend(["-vf", ",".join(filters)])
cmd.extend(["-c:v", "libx264", "-c:a", "aac", str(output_path)])
logger.info(f"Running COMPOUND FFmpeg: {' '.join(cmd)}")
result = subprocess.run(cmd, capture_output=True, text=True)
if result.returncode != 0:
raise RuntimeError(f"FFmpeg failed: {result.stderr}")
cached_file, ipfs_cid = cache_mgr.put(
source_path=output_path,
node_type="COMPOUND",
node_id=step.cache_id,
)
logger.info(f"COMPOUND step {step.step_id} completed with {len(filter_chain)} filters")
complete_task(step.cache_id, worker_id, str(cached_file.path))
import shutil
if output_dir.exists():
shutil.rmtree(output_dir, ignore_errors=True)
return {
"status": "completed",
"step_id": step.step_id,
"cache_id": step.cache_id,
"output_path": str(cached_file.path),
"cid": cached_file.cid,
"ipfs_cid": ipfs_cid,
"filter_count": len(filter_chain),
}
# Get executor for this node type
try:
node_type = NodeType[step.node_type]
except KeyError:
node_type = step.node_type
executor = get_executor(node_type)
if executor is None:
raise ValueError(f"No executor for node type: {step.node_type}")
# Resolve input paths from cache
input_paths = []
for input_step_id in step.input_steps:
input_cache_id = input_cache_ids.get(input_step_id)
if not input_cache_id:
raise ValueError(f"No cache_id for input step: {input_step_id}")
path = cache_mgr.get_by_cid(input_cache_id)
if not path:
raise ValueError(f"Input not in cache: {input_cache_id[:16]}...")
input_paths.append(Path(path))
# Create temp output path
import tempfile
output_dir = Path(tempfile.mkdtemp())
output_path = output_dir / f"output_{step.cache_id[:16]}.mp4"
# Execute
logger.info(f"Running executor for {step.node_type} with {len(input_paths)} inputs")
result_path = executor.execute(step.config, input_paths, output_path)
# Store in IPFS-backed cache
cached_file, ipfs_cid = cache_mgr.put(
source_path=result_path,
node_type=step.node_type,
node_id=step.cache_id,
)
logger.info(f"Step {step.step_id} completed, IPFS CID: {ipfs_cid}")
# Mark completed
complete_task(step.cache_id, worker_id, str(cached_file.path))
# Build outputs list (for multi-output support)
outputs = []
if step.outputs:
# Use pre-defined outputs from step
for output_def in step.outputs:
outputs.append({
"name": output_def.name,
"cache_id": output_def.cache_id,
"media_type": output_def.media_type,
"index": output_def.index,
"path": str(cached_file.path),
"cid": cached_file.cid,
"ipfs_cid": ipfs_cid,
})
else:
# Single output (backwards compat)
outputs.append({
"name": step.name or step.step_id,
"cache_id": step.cache_id,
"media_type": "video/mp4",
"index": 0,
"path": str(cached_file.path),
"cid": cached_file.cid,
"ipfs_cid": ipfs_cid,
})
# Cleanup temp
if output_dir.exists():
import shutil
shutil.rmtree(output_dir, ignore_errors=True)
return {
"status": "completed",
"step_id": step.step_id,
"name": step.name,
"cache_id": step.cache_id,
"output_path": str(cached_file.path),
"cid": cached_file.cid,
"ipfs_cid": ipfs_cid,
"outputs": outputs,
}
except Exception as e:
logger.error(f"Step {step.step_id} failed: {e}")
fail_task(step.cache_id, worker_id, str(e))
return {
"status": "failed",
"step_id": step.step_id,
"cache_id": step.cache_id,
"error": str(e),
}

View File

@@ -1,582 +0,0 @@
"""
S-expression step execution task.
Executes individual steps received as S-expressions.
The S-expression is the canonical format - workers verify
cache_ids by hashing the received S-expression.
"""
import json
import logging
import os
import socket
from pathlib import Path
from typing import Dict, Optional
from celery import current_task
# Import from the Celery app
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from celery_app import app
from claiming import (
get_claimer,
claim_task,
complete_task,
fail_task,
ClaimStatus,
)
from cache_manager import get_cache_manager
# Import artdag S-expression modules
try:
from artdag.sexp import parse, Symbol, Keyword
from artdag import NodeType
from artdag.executor import get_executor
except ImportError:
parse = None
Symbol = None
Keyword = None
NodeType = None
get_executor = None
logger = logging.getLogger(__name__)
def get_worker_id() -> str:
    """Return an identifier unique to this worker: ``hostname:pid``."""
    return f"{socket.gethostname()}:{os.getpid()}"
def sexp_to_config(sexp) -> Dict:
    """
    Convert parsed S-expression to config dict.
    Input: (effect :hash "abc123" :inputs ["step1"])
    Output: {"node_type": "EFFECT", "hash": "abc123", "inputs": ["step1"]}

    The head symbol becomes the upper-cased ``node_type``; keyword/value
    pairs become dict entries (dashes mapped to underscores).  Positional
    arguments and a trailing keyword with no value are silently skipped,
    matching the original parser's behavior.
    """
    if not isinstance(sexp, list) or not sexp:
        raise ValueError(f"Invalid step S-expression: {sexp}")
    head = sexp[0]
    head_name = head.name if isinstance(head, Symbol) else str(head)
    config = {"node_type": head_name.upper()}
    idx = 1
    while idx < len(sexp):
        token = sexp[idx]
        if isinstance(token, Keyword) and idx + 1 < len(sexp):
            # Keyword followed by a value: record it and consume both.
            config[token.name.replace('-', '_')] = sexp[idx + 1]
            idx += 2
        else:
            # Positional argument, or keyword with no value: skip it.
            idx += 1
    return config
@app.task(bind=True, name='tasks.execute_step_sexp')
def execute_step_sexp(
    self,
    step_sexp: str,
    step_id: str,
    cache_id: str,
    plan_id: str,
    input_cache_ids: Dict[str, str],
) -> dict:
    """
    Execute a single step from an S-expression.

    The step is received as a serialized S-expression string.
    Workers can verify the cache_id by hashing the S-expression.

    Flow: parse -> cache check -> claim (or wait on another worker's
    claim) -> dispatch by node type (SOURCE / EFFECT / COMPOUND /
    generic executor) -> store result in the IPFS-backed cache.

    Args:
        step_sexp: Serialized S-expression for the step
        step_id: Human-readable step identifier
        cache_id: Expected cache_id (SHA3-256 of step_sexp)
        plan_id: ID of the parent execution plan
        input_cache_ids: Mapping from input step_id to their cache_id

    Returns:
        Dict with execution result; "status" is one of "completed",
        "cached", "completed_by_other", "timeout", or "failed".
    """
    if parse is None:
        raise ImportError("artdag.sexp not available")
    worker_id = get_worker_id()
    task_id = self.request.id
    logger.info(f"Executing step {step_id} cache_id={cache_id[:16]}...")
    logger.debug(f"Step S-expression: {step_sexp[:100]}...")
    # Parse the S-expression; a parse failure is reported as a result dict
    # rather than raised, so the caller sees a structured failure.
    try:
        parsed = parse(step_sexp)
        config = sexp_to_config(parsed)
        node_type = config.pop("node_type")
    except Exception as e:
        logger.error(f"Failed to parse step S-expression: {e}")
        return {
            "status": "failed",
            "step_id": step_id,
            "cache_id": cache_id,
            "error": f"Parse error: {e}",
        }
    # Get cache manager
    cache_mgr = get_cache_manager()
    # Check if already cached — if so we skip claiming entirely.
    cached_path = cache_mgr.get_by_cid(cache_id)
    if cached_path:
        logger.info(f"Step {step_id} already cached at {cached_path}")
        claimer = get_claimer()
        claimer.mark_cached(cache_id, str(cached_path))
        return {
            "status": "cached",
            "step_id": step_id,
            "cache_id": cache_id,
            "output_path": str(cached_path),
        }
    # Try to claim the task; if another worker holds the claim, block
    # (up to 10 minutes) until they finish and relay their outcome.
    if not claim_task(cache_id, worker_id, task_id):
        logger.info(f"Step {step_id} claimed by another worker, waiting...")
        claimer = get_claimer()
        result = claimer.wait_for_completion(cache_id, timeout=600)
        if result and result.status == ClaimStatus.COMPLETED:
            return {
                "status": "completed_by_other",
                "step_id": step_id,
                "cache_id": cache_id,
                "output_path": result.output_path,
            }
        elif result and result.status == ClaimStatus.CACHED:
            return {
                "status": "cached",
                "step_id": step_id,
                "cache_id": cache_id,
                "output_path": result.output_path,
            }
        elif result and result.status == ClaimStatus.FAILED:
            return {
                "status": "failed",
                "step_id": step_id,
                "cache_id": cache_id,
                "error": result.error,
            }
        else:
            return {
                "status": "timeout",
                "step_id": step_id,
                "cache_id": cache_id,
                "error": "Timeout waiting for other worker",
            }
    # We have the claim, update to running
    claimer = get_claimer()
    claimer.update_status(cache_id, worker_id, ClaimStatus.RUNNING)
    try:
        # Handle SOURCE nodes: resolve content already in the cache; no
        # computation is performed.
        if node_type == "SOURCE":
            # Support both :cid (new IPFS) and :hash (legacy)
            content_id = config.get("cid") or config.get("hash")
            if not content_id:
                raise ValueError("SOURCE step missing :cid or :hash")
            path = cache_mgr.get_by_cid(content_id)
            if not path:
                raise ValueError(f"SOURCE input not found: {content_id[:16]}...")
            output_path = str(path)
            complete_task(cache_id, worker_id, output_path)
            return {
                "status": "completed",
                "step_id": step_id,
                "cache_id": cache_id,
                "output_path": output_path,
            }
        # Handle EFFECT nodes
        if node_type == "EFFECT":
            effect_hash = config.get("cid") or config.get("hash")
            if not effect_hash:
                raise ValueError("EFFECT step missing :cid")
            # Get input paths
            inputs = config.get("inputs", [])
            input_paths = []
            for inp in inputs:
                inp_cache_id = input_cache_ids.get(inp, inp)
                path = cache_mgr.get_by_cid(inp_cache_id)
                if not path:
                    raise ValueError(f"Input not found: {inp_cache_id[:16]}...")
                input_paths.append(Path(path))
            # Get executor
            try:
                executor = get_executor(NodeType.SOURCE)  # Effects use SOURCE executor for now
            except:
                executor = None
            if executor is None:
                # Fallback: copy input to output (identity-like behavior)
                if input_paths:
                    output_path = str(input_paths[0])
                    complete_task(cache_id, worker_id, output_path)
                    return {
                        "status": "completed",
                        "step_id": step_id,
                        "cache_id": cache_id,
                        "output_path": output_path,
                    }
                raise ValueError(f"No executor for EFFECT and no inputs")
            # NOTE(review): when an executor IS found, this branch falls
            # through to the generic executor path below — presumably
            # intentional, but worth confirming.
        # Handle COMPOUND nodes (collapsed effect chains)
        if node_type == "COMPOUND":
            filter_chain = config.get("filter_chain", [])
            if not filter_chain:
                raise ValueError("COMPOUND step has empty filter_chain")
            # Get input paths
            inputs = config.get("inputs", [])
            input_paths = []
            for inp in inputs:
                inp_cache_id = input_cache_ids.get(inp, inp)
                path = cache_mgr.get_by_cid(inp_cache_id)
                if not path:
                    raise ValueError(f"Input not found: {inp_cache_id[:16]}...")
                input_paths.append(Path(path))
            if not input_paths:
                raise ValueError("COMPOUND step has no inputs")
            # Build FFmpeg filter graph from chain; each chain entry maps
            # to zero or more -vf filter expressions.
            filters = []
            for i, filter_item in enumerate(filter_chain):
                filter_type = filter_item.get("type", "")
                filter_config = filter_item.get("config", {})
                if filter_type == "EFFECT":
                    # Effect - for now identity-like, can be extended
                    effect_hash = filter_config.get("cid") or filter_config.get("hash") or filter_config.get("effect")
                    # TODO: resolve effect to actual FFmpeg filter
                    # For now, skip identity-like effects
                    pass
                elif filter_type == "TRANSFORM":
                    # Transform effects map to FFmpeg filters
                    effects = filter_config.get("effects", {})
                    for eff_name, eff_value in effects.items():
                        if eff_name == "saturation":
                            filters.append(f"eq=saturation={eff_value}")
                        elif eff_name == "brightness":
                            filters.append(f"eq=brightness={eff_value}")
                        elif eff_name == "contrast":
                            filters.append(f"eq=contrast={eff_value}")
                        elif eff_name == "hue":
                            filters.append(f"hue=h={eff_value}")
                elif filter_type == "RESIZE":
                    width = filter_config.get("width", -1)
                    height = filter_config.get("height", -1)
                    mode = filter_config.get("mode", "fit")
                    if mode == "fit":
                        filters.append(f"scale={width}:{height}:force_original_aspect_ratio=decrease")
                    elif mode == "fill":
                        filters.append(f"scale={width}:{height}:force_original_aspect_ratio=increase,crop={width}:{height}")
                    else:
                        filters.append(f"scale={width}:{height}")
                elif filter_type == "SEGMENT":
                    # Segment handled via -ss and -t, not filter
                    pass
            # Create temp output
            import tempfile
            import subprocess
            output_dir = Path(tempfile.mkdtemp())
            output_path = output_dir / f"compound_{cache_id[:16]}.mp4"
            # Build FFmpeg command — only the first input is used here.
            input_path = input_paths[0]
            cmd = ["ffmpeg", "-y", "-i", str(input_path)]
            # Handle segment timing if present (-ss/-t come from SEGMENT
            # entries rather than the filter graph).
            for filter_item in filter_chain:
                if filter_item.get("type") == "SEGMENT":
                    seg_config = filter_item.get("config", {})
                    if "start" in seg_config:
                        cmd.extend(["-ss", str(seg_config["start"])])
                    if "end" in seg_config:
                        duration = seg_config["end"] - seg_config.get("start", 0)
                        cmd.extend(["-t", str(duration)])
                    elif "duration" in seg_config:
                        cmd.extend(["-t", str(seg_config["duration"])])
            # Add filter graph if any
            if filters:
                cmd.extend(["-vf", ",".join(filters)])
            # Output options
            cmd.extend(["-c:v", "libx264", "-c:a", "aac", str(output_path)])
            logger.info(f"Running COMPOUND FFmpeg: {' '.join(cmd)}")
            result = subprocess.run(cmd, capture_output=True, text=True)
            if result.returncode != 0:
                raise RuntimeError(f"FFmpeg failed: {result.stderr}")
            # Store in cache
            cached_file, ipfs_cid = cache_mgr.put(
                source_path=output_path,
                node_type="COMPOUND",
                node_id=cache_id,
            )
            logger.info(f"COMPOUND step {step_id} completed with {len(filter_chain)} filters, IPFS CID: {ipfs_cid}")
            complete_task(cache_id, worker_id, str(cached_file.path))
            # Cleanup temp
            if output_dir.exists():
                import shutil
                shutil.rmtree(output_dir, ignore_errors=True)
            return {
                "status": "completed",
                "step_id": step_id,
                "cache_id": cache_id,
                "output_path": str(cached_file.path),
                "cid": cached_file.cid,
                "ipfs_cid": ipfs_cid,
                "filter_count": len(filter_chain),
            }
        # Get executor for other node types; fall back to the raw string
        # when the name is not a NodeType enum member.
        try:
            node_type_enum = NodeType[node_type]
        except (KeyError, TypeError):
            node_type_enum = node_type
        executor = get_executor(node_type_enum)
        if executor is None:
            raise ValueError(f"No executor for node type: {node_type}")
        # Resolve input paths
        inputs = config.pop("inputs", [])
        input_paths = []
        for inp in inputs:
            inp_cache_id = input_cache_ids.get(inp, inp)
            path = cache_mgr.get_by_cid(inp_cache_id)
            if not path:
                raise ValueError(f"Input not found: {inp_cache_id[:16]}...")
            input_paths.append(Path(path))
        # Create temp output
        import tempfile
        output_dir = Path(tempfile.mkdtemp())
        output_path = output_dir / f"output_{cache_id[:16]}.mp4"
        # Execute
        logger.info(f"Running executor for {node_type} with {len(input_paths)} inputs")
        result_path = executor.execute(config, input_paths, output_path)
        # Store in cache
        cached_file, ipfs_cid = cache_mgr.put(
            source_path=result_path,
            node_type=node_type,
            node_id=cache_id,
        )
        logger.info(f"Step {step_id} completed, IPFS CID: {ipfs_cid}")
        complete_task(cache_id, worker_id, str(cached_file.path))
        # Cleanup temp
        if output_dir.exists():
            import shutil
            shutil.rmtree(output_dir, ignore_errors=True)
        return {
            "status": "completed",
            "step_id": step_id,
            "cache_id": cache_id,
            "output_path": str(cached_file.path),
            "cid": cached_file.cid,
            "ipfs_cid": ipfs_cid,
        }
    except Exception as e:
        # Any failure releases the claim so other workers may retry.
        logger.error(f"Step {step_id} failed: {e}")
        fail_task(cache_id, worker_id, str(e))
        return {
            "status": "failed",
            "step_id": step_id,
            "cache_id": cache_id,
            "error": str(e),
        }
@app.task(bind=True, name='tasks.run_plan_sexp')
def run_plan_sexp(
    self,
    plan_sexp: str,
    run_id: Optional[str] = None,
) -> dict:
    """
    Execute a complete S-expression execution plan.

    Parses the plan form manually: top-level keywords (:id, :recipe,
    :recipe-hash, :output) set plan metadata; an (inputs ...) sub-form
    populates the inputs map; each (step ...) sub-form is converted to a
    PlanStep.  The assembled plan is then handed to PlanScheduler.

    Args:
        plan_sexp: Serialized S-expression plan
        run_id: Optional run ID for tracking

    Returns:
        Dict with execution results
    """
    if parse is None:
        raise ImportError("artdag.sexp not available")
    from artdag.sexp.scheduler import PlanScheduler
    from artdag.sexp.planner import ExecutionPlanSexp, PlanStep
    logger.info(f"Running plan from S-expression (run_id={run_id})")
    # Parse the plan S-expression
    parsed = parse(plan_sexp)
    # Extract plan metadata and steps
    plan_id = None
    recipe_id = None
    recipe_hash = None
    inputs = {}
    steps = []
    output_step_id = None
    # Index 0 is the plan head symbol; walk the remaining items.
    i = 1
    while i < len(parsed):
        item = parsed[i]
        if isinstance(item, Keyword):
            key = item.name
            if i + 1 < len(parsed):
                value = parsed[i + 1]
                if key == "id":
                    plan_id = value
                elif key == "recipe":
                    recipe_id = value
                elif key == "recipe-hash":
                    recipe_hash = value
                elif key == "output":
                    output_step_id = value
                i += 2
            else:
                # Trailing keyword with no value: skip it.
                i += 1
        elif isinstance(item, list) and len(item) > 0:
            head = item[0]
            if isinstance(head, Symbol):
                if head.name == "inputs":
                    # Parse inputs block: each entry is (name value).
                    for j in range(1, len(item)):
                        inp = item[j]
                        if isinstance(inp, list) and len(inp) >= 2:
                            name = inp[0].name if isinstance(inp[0], Symbol) else str(inp[0])
                            value = inp[1]
                            inputs[name] = value
                elif head.name == "step":
                    # Parse step: (step <id> :cache-id H :level N <node-form>)
                    step_id = item[1] if len(item) > 1 else None
                    step_cache_id = None
                    step_level = 0
                    step_node = None
                    j = 2
                    while j < len(item):
                        sub = item[j]
                        if isinstance(sub, Keyword):
                            if sub.name == "cache-id" and j + 1 < len(item):
                                step_cache_id = item[j + 1]
                                j += 2
                            elif sub.name == "level" and j + 1 < len(item):
                                step_level = item[j + 1]
                                j += 2
                            else:
                                j += 1
                        elif isinstance(sub, list):
                            # The nested list is the node definition itself.
                            step_node = sub
                            j += 1
                        else:
                            j += 1
                    # Steps missing any of id/cache-id/node are silently dropped.
                    if step_id and step_cache_id and step_node:
                        # Convert step_node to config
                        config = sexp_to_config(step_node)
                        node_type = config.pop("node_type")
                        step_inputs = config.pop("inputs", [])
                        steps.append(PlanStep(
                            step_id=step_id,
                            node_type=node_type,
                            config=config,
                            inputs=step_inputs if isinstance(step_inputs, list) else [],
                            cache_id=step_cache_id,
                            level=step_level,
                        ))
            i += 1
        else:
            i += 1
    # Create plan object; missing metadata degrades to placeholder values,
    # and the output defaults to the last parsed step.
    plan = ExecutionPlanSexp(
        plan_id=plan_id or "unknown",
        recipe_id=recipe_id or "unknown",
        recipe_hash=recipe_hash or "",
        steps=steps,
        output_step_id=output_step_id or (steps[-1].step_id if steps else ""),
        inputs=inputs,
    )
    # Create scheduler and run
    cache_mgr = get_cache_manager()
    scheduler = PlanScheduler(
        cache_manager=cache_mgr,
        celery_app=app,
        execute_task_name='tasks.execute_step_sexp',
    )
    result = scheduler.schedule(plan)
    return {
        "status": result.status,
        "run_id": run_id,
        "plan_id": result.plan_id,
        "output_cache_id": result.output_cache_id,
        "output_path": result.output_path,
        "output_ipfs_cid": result.output_ipfs_cid,
        "steps_completed": result.steps_completed,
        "steps_cached": result.steps_cached,
        "steps_failed": result.steps_failed,
        "error": result.error,
    }

View File

@@ -1,514 +0,0 @@
"""
Plan orchestration tasks.
Coordinates the full 3-phase execution:
1. Analyze inputs
2. Generate plan
3. Execute steps level by level
Uses IPFS-backed cache for durability.
"""
import json
import logging
import os
from pathlib import Path
from typing import Dict, List, Optional
from celery import current_task, group, chain
# Import from the Celery app
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from celery_app import app
from claiming import get_claimer
from cache_manager import get_cache_manager
# Import artdag modules
try:
from artdag import Cache
from artdag.analysis import Analyzer, AnalysisResult
from artdag.planning import RecipePlanner, ExecutionPlan, Recipe
except ImportError:
Cache = None
Analyzer = None
AnalysisResult = None
RecipePlanner = None
ExecutionPlan = None
Recipe = None
from .execute import execute_step
logger = logging.getLogger(__name__)
# Cache directories
CACHE_DIR = Path(os.environ.get('CACHE_DIR', str(Path.home() / ".artdag" / "cache")))
ANALYSIS_CACHE_DIR = CACHE_DIR / 'analysis'
PLAN_CACHE_DIR = CACHE_DIR / 'plans'
@app.task(bind=True, name='tasks.run_plan')
def run_plan(
    self,
    plan_json: str,
    run_id: Optional[str] = None,
) -> dict:
    """
    Execute a complete execution plan.

    Runs steps level by level, with parallel execution within each level.
    Results are stored in IPFS-backed cache.  Any single step failure
    aborts the run and returns a partial results map.

    Args:
        plan_json: JSON-serialized ExecutionPlan
        run_id: Optional run ID for tracking

    Returns:
        Dict with execution results
    """
    if ExecutionPlan is None:
        raise ImportError("artdag.planning not available")
    plan = ExecutionPlan.from_json(plan_json)
    cache_mgr = get_cache_manager()
    logger.info(f"Executing plan {plan.plan_id[:16]}... ({len(plan.steps)} steps)")
    # Build initial cache_ids mapping (step_id -> cache_id)
    cache_ids = {}
    for step in plan.steps:
        cache_ids[step.step_id] = step.cache_id
    # Also map input hashes
    for name, cid in plan.input_hashes.items():
        cache_ids[name] = cid
    # Group steps by level
    steps_by_level = plan.get_steps_by_level()
    max_level = max(steps_by_level.keys()) if steps_by_level else 0
    results_by_step = {}
    total_cached = 0
    total_executed = 0
    for level in range(max_level + 1):
        level_steps = steps_by_level.get(level, [])
        if not level_steps:
            continue
        logger.info(f"Executing level {level}: {len(level_steps)} steps")
        # Check which steps need execution
        steps_to_run = []
        for step in level_steps:
            # Check if cached
            cached_path = cache_mgr.get_by_cid(step.cache_id)
            if cached_path:
                results_by_step[step.step_id] = {
                    "status": "cached",
                    "cache_id": step.cache_id,
                    "output_path": str(cached_path),
                }
                total_cached += 1
            else:
                steps_to_run.append(step)
        if not steps_to_run:
            logger.info(f"Level {level}: all steps cached")
            continue
        # Build input cache_ids for this level (snapshot so later levels
        # don't see ids added while this level is in flight)
        level_cache_ids = dict(cache_ids)
        # Execute steps in parallel via a Celery group
        tasks = [
            execute_step.s(step.to_json(), plan.plan_id, level_cache_ids)
            for step in steps_to_run
        ]
        job = group(tasks)
        async_results = job.apply_async()
        # Wait for completion.
        # NOTE(review): calling .get() inside a Celery task risks deadlock
        # if workers are saturated — confirm worker pool sizing allows it.
        try:
            step_results = async_results.get(timeout=3600)
        except Exception as e:
            logger.error(f"Level {level} execution failed: {e}")
            return {
                "status": "failed",
                "error": str(e),
                "level": level,
                "results": results_by_step,
                "run_id": run_id,
            }
        # Process results; a failed step aborts the whole run.
        for result in step_results:
            step_id = result.get("step_id")
            cache_id = result.get("cache_id")
            results_by_step[step_id] = result
            cache_ids[step_id] = cache_id
            if result.get("status") in ("completed", "cached", "completed_by_other"):
                total_executed += 1
            elif result.get("status") == "failed":
                logger.error(f"Step {step_id} failed: {result.get('error')}")
                return {
                    "status": "failed",
                    "error": f"Step {step_id} failed: {result.get('error')}",
                    "level": level,
                    "results": results_by_step,
                    "run_id": run_id,
                }
    # Get final output
    output_step = plan.get_step(plan.output_step)
    output_cache_id = output_step.cache_id if output_step else None
    output_path = None
    output_ipfs_cid = None
    output_name = plan.output_name
    if output_cache_id:
        output_path = cache_mgr.get_by_cid(output_cache_id)
        output_ipfs_cid = cache_mgr.get_ipfs_cid(output_cache_id)
    # Build list of all outputs with their names and artifacts
    all_outputs = []
    for step in plan.steps:
        step_result = results_by_step.get(step.step_id, {})
        step_outputs = step_result.get("outputs", [])
        # If no outputs in result, build from step definition
        if not step_outputs and step.outputs:
            for output_def in step.outputs:
                output_cache_path = cache_mgr.get_by_cid(output_def.cache_id)
                output_ipfs = cache_mgr.get_ipfs_cid(output_def.cache_id) if output_cache_path else None
                all_outputs.append({
                    "name": output_def.name,
                    "step_id": step.step_id,
                    "step_name": step.name,
                    "cache_id": output_def.cache_id,
                    "media_type": output_def.media_type,
                    "path": str(output_cache_path) if output_cache_path else None,
                    "ipfs_cid": output_ipfs,
                    "status": "cached" if output_cache_path else "missing",
                })
        else:
            for output in step_outputs:
                all_outputs.append({
                    **output,
                    "step_id": step.step_id,
                    "step_name": step.name,
                    "status": "completed",
                })
    return {
        "status": "completed",
        "run_id": run_id,
        "plan_id": plan.plan_id,
        "plan_name": plan.name,
        "recipe_name": plan.recipe_name,
        "output_name": output_name,
        "output_cache_id": output_cache_id,
        "output_path": str(output_path) if output_path else None,
        "output_ipfs_cid": output_ipfs_cid,
        "total_steps": len(plan.steps),
        "cached": total_cached,
        "executed": total_executed,
        "results": results_by_step,
        "outputs": all_outputs,
    }
def _extract_analysis_from_recipe(compiled_recipe) -> List[Dict]:
"""
Extract analysis nodes from a compiled recipe.
Finds all (analyze ...) nodes and returns their configurations.
Analysis nodes are identified by type "ANALYZE" or by having
an "analyze" config key.
"""
analysis_nodes = []
nodes = compiled_recipe.nodes
if isinstance(nodes, dict):
nodes = list(nodes.values())
for node in nodes:
node_type = node.get("type", "").upper()
config = node.get("config", {})
# Check if this is an analysis node
if node_type == "ANALYZE" or config.get("analyze"):
analysis_nodes.append({
"node_id": node.get("id"),
"input_ref": config.get("input") or config.get("source"),
"feature": config.get("feature") or config.get("analyze"),
"config": config,
})
return analysis_nodes
@app.task(bind=True, name='tasks.run_recipe')
def run_recipe(
    self,
    recipe_sexp: str,
    input_hashes: Dict[str, str],
    run_id: Optional[str] = None,
) -> dict:
    """
    Run a complete recipe through all phases.

    The recipe S-expression declares what analysis is needed.
    Analysis nodes in the recipe are executed first, then their
    outputs are used to generate the execution plan.

    1. Parse: Compile recipe S-expression
    2. Analyze: Run analysis nodes from recipe
    3. Plan: Generate execution plan using analysis results
    4. Execute: Run the plan

    Args:
        recipe_sexp: Recipe S-expression content
        input_hashes: Mapping from input name to content hash
        run_id: Optional run ID for tracking

    Returns:
        Dict with final results
    """
    # Import S-expression compiler
    try:
        from artdag.sexp import compile_string
    except ImportError:
        raise ImportError("artdag.sexp not available")
    if Analyzer is None:
        raise ImportError("artdag.analysis not available")
    cache_mgr = get_cache_manager()
    logger.info(f"Running recipe with {len(input_hashes)} inputs")
    # Phase 1: Parse recipe
    logger.info("Phase 1: Parsing recipe S-expression...")
    try:
        compiled = compile_string(recipe_sexp)
    except Exception as e:
        return {"status": "failed", "error": f"Recipe parse error: {e}"}
    logger.info(f"Parsed recipe: {compiled.name}")
    # Phase 2: Run analysis nodes from recipe
    logger.info("Phase 2: Running analysis from recipe...")
    analysis_nodes = _extract_analysis_from_recipe(compiled)
    logger.info(f"Found {len(analysis_nodes)} analysis nodes in recipe")
    ANALYSIS_CACHE_DIR.mkdir(parents=True, exist_ok=True)
    analyzer = Analyzer(cache_dir=ANALYSIS_CACHE_DIR)
    analysis_results = {}
    for analysis_node in analysis_nodes:
        input_ref = analysis_node["input_ref"]
        feature = analysis_node["feature"]
        node_id = analysis_node["node_id"]
        # Resolve input reference to content hash; unresolvable analysis
        # inputs are skipped with a warning rather than failing the run.
        cid = input_hashes.get(input_ref)
        if not cid:
            logger.warning(f"Analysis node {node_id}: input '{input_ref}' not in input_hashes")
            continue
        path = cache_mgr.get_by_cid(cid)
        if not path:
            logger.warning(f"Analysis node {node_id}: content {cid[:16]}... not in cache")
            continue
        try:
            # Run analysis for the specific feature
            features = [feature] if feature else ["beats", "energy"]
            result = analyzer.analyze(
                input_hash=cid,
                features=features,
                input_path=Path(path),
            )
            # Store result keyed by node_id so plan can reference it
            analysis_results[node_id] = result
            # Also store by cid for compatibility
            analysis_results[cid] = result
            logger.info(f"Analysis {node_id}: feature={feature}, tempo={result.tempo}")
        except Exception as e:
            logger.warning(f"Analysis failed for {node_id}: {e}")
    logger.info(f"Completed {len(analysis_results)} analysis results")
    # Phase 3: Generate plan
    logger.info("Phase 3: Generating execution plan...")
    # Use the S-expression planner if available
    try:
        from artdag.sexp.planner import create_plan
        plan = create_plan(compiled, inputs=input_hashes)
    except ImportError:
        # Fall back to legacy planner
        if RecipePlanner is None:
            raise ImportError("No planner available")
        recipe = Recipe.from_dict(compiled.to_dict())
        planner = RecipePlanner(use_tree_reduction=True)
        plan = planner.plan(
            recipe=recipe,
            input_hashes=input_hashes,
            analysis=analysis_results,
        )
    logger.info(f"Generated plan with {len(plan.steps)} steps")
    # Save plan as S-expression through cache manager (goes to IPFS)
    import tempfile
    plan_content = plan.to_sexp_string() if hasattr(plan, 'to_sexp_string') else plan.to_json()
    plan_suffix = ".sexp" if hasattr(plan, 'to_sexp_string') else ".json"
    with tempfile.NamedTemporaryFile(delete=False, suffix=plan_suffix, mode="w") as tmp:
        tmp.write(plan_content)
        tmp_path = Path(tmp.name)
    # Store in cache (content-addressed, auto-pins to IPFS)
    # Plan is just another node output - no special treatment needed
    cached, plan_ipfs_cid = cache_mgr.put(tmp_path, node_type="plan", move=True)
    plan_cache_id = plan_ipfs_cid or cached.cid  # Prefer IPFS CID
    logger.info(f"Plan cached: cid={plan_cache_id}, ipfs={plan_ipfs_cid}")
    # Phase 4: Execute.
    # NOTE(review): this invokes the run_plan task synchronously in-process
    # (direct call, not .delay) — presumably intentional; confirm.
    logger.info("Phase 4: Executing plan...")
    result = run_plan(plan.to_json(), run_id=run_id)
    return {
        "status": result.get("status"),
        "run_id": run_id,
        "recipe": compiled.name,
        "plan_id": plan.plan_id,
        "plan_cache_id": plan_cache_id,
        "plan_ipfs_cid": plan_ipfs_cid,
        "output_path": result.get("output_path"),
        "output_cache_id": result.get("output_cache_id"),
        "output_ipfs_cid": result.get("output_ipfs_cid"),
        "analysis_count": len(analysis_results),
        "total_steps": len(plan.steps),
        "cached": result.get("cached", 0),
        "executed": result.get("executed", 0),
        "error": result.get("error"),
    }
@app.task(bind=True, name='tasks.generate_plan')
def generate_plan(
    self,
    recipe_sexp: str,
    input_hashes: Dict[str, str],
) -> dict:
    """
    Generate an execution plan without executing it.

    Useful for:
    - Previewing what will be executed
    - Checking cache status
    - Debugging recipe issues

    Args:
        recipe_sexp: Recipe S-expression content
        input_hashes: Mapping from input name to content hash

    Returns:
        Dict with plan details, including per-step cache status
    """
    try:
        from artdag.sexp import compile_string
    except ImportError:
        raise ImportError("artdag.sexp not available")
    if Analyzer is None:
        raise ImportError("artdag.analysis not available")
    cache_mgr = get_cache_manager()
    # Parse recipe
    try:
        compiled = compile_string(recipe_sexp)
    except Exception as e:
        return {"status": "failed", "error": f"Recipe parse error: {e}"}
    # Extract and run analysis nodes from recipe (same flow as run_recipe,
    # but unresolvable inputs are skipped silently here).
    analysis_nodes = _extract_analysis_from_recipe(compiled)
    ANALYSIS_CACHE_DIR.mkdir(parents=True, exist_ok=True)
    analyzer = Analyzer(cache_dir=ANALYSIS_CACHE_DIR)
    analysis_results = {}
    for analysis_node in analysis_nodes:
        input_ref = analysis_node["input_ref"]
        feature = analysis_node["feature"]
        node_id = analysis_node["node_id"]
        cid = input_hashes.get(input_ref)
        if not cid:
            continue
        path = cache_mgr.get_by_cid(cid)
        if path:
            try:
                features = [feature] if feature else ["beats", "energy"]
                result = analyzer.analyze(
                    input_hash=cid,
                    features=features,
                    input_path=Path(path),
                )
                analysis_results[node_id] = result
                analysis_results[cid] = result
            except Exception as e:
                logger.warning(f"Analysis failed for {node_id}: {e}")
    # Generate plan (prefer the S-expression planner, fall back to legacy)
    try:
        from artdag.sexp.planner import create_plan
        plan = create_plan(compiled, inputs=input_hashes)
    except ImportError:
        if RecipePlanner is None:
            raise ImportError("No planner available")
        recipe = Recipe.from_dict(compiled.to_dict())
        planner = RecipePlanner(use_tree_reduction=True)
        plan = planner.plan(
            recipe=recipe,
            input_hashes=input_hashes,
            analysis=analysis_results,
        )
    # Check cache status for each step
    steps_status = []
    for step in plan.steps:
        cached = cache_mgr.has_content(step.cache_id)
        steps_status.append({
            "step_id": step.step_id,
            "node_type": step.node_type,
            "cache_id": step.cache_id,
            "level": step.level,
            "cached": cached,
        })
    cached_count = sum(1 for s in steps_status if s["cached"])
    return {
        "status": "planned",
        "recipe": compiled.name,
        "plan_id": plan.plan_id,
        "total_steps": len(plan.steps),
        "cached_steps": cached_count,
        "pending_steps": len(plan.steps) - cached_count,
        "steps": steps_status,
        "plan_json": plan.to_json(),
    }

303
tasks/streaming.py Normal file
View File

@@ -0,0 +1,303 @@
"""
Streaming video rendering task.
Executes S-expression recipes for frame-by-frame video processing.
Supports CID and friendly name references for assets.
"""
import hashlib
import logging
import os
import sys
import tempfile
from pathlib import Path
from typing import Dict, Optional
from celery import current_task
# Add parent directory to path for imports
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from celery_app import app
from cache_manager import get_cache_manager
logger = logging.getLogger(__name__)
def resolve_asset(ref: str, actor_id: Optional[str] = None) -> Optional[Path]:
    """
    Resolve an asset reference (CID or friendly name) to a file path.

    Resolution order:
      1. Treat ``ref`` as a CID and look it up in the local cache.
      2. If that fails and ``actor_id`` is given, resolve ``ref`` as a
         friendly name via the database, then look up the resulting CID.

    Args:
        ref: CID or friendly name (e.g., "my-video" or "QmXyz...")
        actor_id: User ID for friendly name resolution

    Returns:
        Path to the asset file, or None if not found
    """
    cache_mgr = get_cache_manager()
    # Try as direct CID first
    path = cache_mgr.get_by_cid(ref)
    if path and path.exists():
        logger.info(f"Resolved {ref[:16]}... as CID to {path}")
        return path
    # Try as friendly name if actor_id provided
    if actor_id:
        import asyncio
        from database import resolve_friendly_name
        try:
            # asyncio.run creates, runs, and closes a fresh event loop —
            # replaces the manual new_event_loop/set_event_loop/close dance
            # and also cancels leftover tasks on exit.
            cid = asyncio.run(resolve_friendly_name(actor_id, ref))
            if cid:
                path = cache_mgr.get_by_cid(cid)
                if path and path.exists():
                    logger.info(f"Resolved '{ref}' via friendly name to {path}")
                    return path
        except Exception as e:
            # Best-effort: name-resolution failures fall through to "not found".
            logger.warning(f"Failed to resolve friendly name '{ref}': {e}")
    logger.warning(f"Could not resolve asset reference: {ref}")
    return None
class CIDVideoSource:
    """
    Video source that resolves CIDs to file paths.
    Wraps the streaming VideoSource to work with cached assets.

    The underlying VideoSource is created lazily on first access, so
    constructing this wrapper never touches the cache or filesystem.
    """
    def __init__(self, cid: str, fps: float = 30, actor_id: Optional[str] = None):
        self.cid = cid            # CID or friendly name of the video asset
        self.fps = fps            # frames per second passed to VideoSource
        self.actor_id = actor_id  # user ID for friendly-name resolution
        self._source = None       # lazily-created VideoSource instance

    def _ensure_source(self):
        """Resolve the reference and open the underlying VideoSource once."""
        if self._source is None:
            path = resolve_asset(self.cid, self.actor_id)
            if not path:
                raise ValueError(f"Could not resolve video source: {self.cid}")
            # Fix: the original imported VideoSource twice — first from
            # streaming.stream_sexp_generic, then immediately shadowed it
            # with the primitives-library import. The dead first import
            # (a latent ImportError) has been removed.
            from sexp_effects.primitive_libs.streaming import VideoSource
            self._source = VideoSource(str(path), self.fps)

    def read_at(self, t: float):
        self._ensure_source()
        return self._source.read_at(t)

    def read(self):
        self._ensure_source()
        return self._source.read()

    @property
    def size(self):
        self._ensure_source()
        return self._source.size

    @property
    def duration(self):
        self._ensure_source()
        # NOTE(review): reaches into the wrapped source's private
        # _duration attribute — confirm VideoSource has no public getter.
        return self._source._duration

    def close(self):
        # Safe to call before any frame was read (no-op when unopened).
        if self._source:
            self._source.close()
class CIDAudioAnalyzer:
    """
    Audio analyzer that resolves CIDs to file paths.

    The wrapped AudioAnalyzer is built lazily on first query, so
    construction is cheap and never touches the cache.
    """
    def __init__(self, cid: str, actor_id: Optional[str] = None):
        self.cid = cid
        self.actor_id = actor_id
        self._analyzer = None

    def _ensure_analyzer(self):
        """Resolve the reference and instantiate AudioAnalyzer once."""
        if self._analyzer is not None:
            return
        resolved = resolve_asset(self.cid, self.actor_id)
        if not resolved:
            raise ValueError(f"Could not resolve audio source: {self.cid}")
        from sexp_effects.primitive_libs.streaming import AudioAnalyzer
        self._analyzer = AudioAnalyzer(str(resolved))

    def get_energy(self, t: float) -> float:
        self._ensure_analyzer()
        return self._analyzer.get_energy(t)

    def get_beat(self, t: float) -> bool:
        self._ensure_analyzer()
        return self._analyzer.get_beat(t)

    def get_beat_count(self, t: float) -> int:
        self._ensure_analyzer()
        return self._analyzer.get_beat_count(t)

    @property
    def duration(self):
        self._ensure_analyzer()
        return self._analyzer.duration
def create_cid_primitives(actor_id: Optional[str] = None):
    """
    Create CID-aware primitive functions.

    Returns dict of primitives that resolve CIDs before creating sources;
    each closure captures ``actor_id`` for friendly-name resolution.
    """
    return {
        'streaming:make-video-source': lambda cid, fps=30: CIDVideoSource(cid, fps, actor_id),
        'streaming:make-audio-analyzer': lambda cid: CIDAudioAnalyzer(cid, actor_id),
    }
@app.task(bind=True, name='tasks.run_stream')
def run_stream(
    self,
    recipe_sexp: str,
    output_name: str = "output.mp4",
    duration: Optional[float] = None,
    fps: Optional[float] = None,
    actor_id: Optional[str] = None,
    sources_sexp: Optional[str] = None,
    audio_sexp: Optional[str] = None,
) -> dict:
    """
    Execute a streaming S-expression recipe.

    Args:
        recipe_sexp: The recipe S-expression content
        output_name: Name for the output file
        duration: Optional duration override (seconds)
        fps: Optional FPS override
        actor_id: User ID for friendly name resolution
        sources_sexp: Optional sources config S-expression
        audio_sexp: Optional audio config S-expression

    Returns:
        Dict with output_cid, output_path, and status. Failures are
        reported as a {"status": "failed", ...} dict rather than raised,
        so Celery records the task as SUCCESS with an error payload.
    """
    # Hoisted out of the `finally` block so cleanup never depends on a
    # late import succeeding.
    import shutil

    task_id = self.request.id
    logger.info(f"Starting stream task {task_id}")
    self.update_state(state='INITIALIZING', meta={'progress': 0})

    # Get the app directory for primitive/effect paths
    app_dir = Path(__file__).parent.parent  # celery/
    sexp_effects_dir = app_dir / "sexp_effects"
    effects_dir = app_dir / "effects"
    templates_dir = app_dir / "templates"

    # Create temp directory for work
    work_dir = Path(tempfile.mkdtemp(prefix="stream_"))
    recipe_path = work_dir / "recipe.sexp"
    output_path = work_dir / output_name

    try:
        # Symlink the effect/template directories into the work dir so the
        # recipe's relative paths resolve. This happens INSIDE the
        # try/finally (unlike before) so a symlink failure cannot leak the
        # temp directory.
        (work_dir / "sexp_effects").symlink_to(sexp_effects_dir)
        (work_dir / "effects").symlink_to(effects_dir)
        (work_dir / "templates").symlink_to(templates_dir)

        # Write recipe to temp file
        recipe_path.write_text(recipe_sexp)

        # Write optional config files
        sources_path = None
        if sources_sexp:
            sources_path = work_dir / "sources.sexp"
            sources_path.write_text(sources_sexp)
        audio_path = None
        if audio_sexp:
            audio_path = work_dir / "audio.sexp"
            audio_path.write_text(audio_sexp)

        self.update_state(state='RENDERING', meta={'progress': 5})

        # Import the streaming interpreter lazily: it is heavyweight and
        # only needed when a stream task actually runs.
        from streaming.stream_sexp_generic import StreamInterpreter

        interp = StreamInterpreter(str(recipe_path))
        # Set primitive library directory explicitly
        interp.primitive_lib_dir = sexp_effects_dir / "primitive_libs"
        if fps:
            interp.config['fps'] = fps
        if sources_path:
            interp.sources_config = sources_path
        if audio_path:
            interp.audio_config = audio_path

        # Override primitives with CID-aware versions so asset references
        # are resolved through the cache instead of the local filesystem.
        cid_prims = create_cid_primitives(actor_id)
        interp.primitives.update(cid_prims)

        # Run rendering to file
        logger.info(f"Rendering to {output_path}")
        interp.run(duration=duration, output=str(output_path))

        self.update_state(state='CACHING', meta={'progress': 90})

        # Store output in cache
        if output_path.exists():
            cache_mgr = get_cache_manager()
            cached_file, ipfs_cid = cache_mgr.put(
                source_path=output_path,
                node_type="STREAM_OUTPUT",
                node_id=f"stream_{task_id}",
            )
            logger.info(f"Stream output cached: CID={cached_file.cid}, IPFS={ipfs_cid}")
            return {
                "status": "completed",
                "task_id": task_id,
                "output_cid": cached_file.cid,
                "ipfs_cid": ipfs_cid,
                "output_path": str(cached_file.path),
            }
        else:
            return {
                "status": "failed",
                "task_id": task_id,
                "error": "Output file not created",
            }
    except Exception as e:
        # logger.exception records the traceback via the logging framework;
        # replaces the old logger.error + traceback.print_exc pair, which
        # wrote the trace to stderr where log aggregation may miss it.
        logger.exception(f"Stream task {task_id} failed: {e}")
        return {
            "status": "failed",
            "task_id": task_id,
            "error": str(e),
        }
    finally:
        # Cleanup temp directory
        if work_dir.exists():
            shutil.rmtree(work_dir, ignore_errors=True)

View File

@@ -0,0 +1,25 @@
;; Crossfade with Zoom Transition
;;
;; Macro for transitioning between two frames with a zoom effect.
;; Active frame zooms out while next frame zooms in.
;; NOTE(review): the code ramps the active frame's :amount from 1.0 to 2.0
;; and the next frame's from 0.1 to 1.0 as fade-amt goes 0 -> 1; whether
;; that reads as "out"/"in" depends on the zoom effect's :amount
;; semantics — confirm against the zoom effect's definition.
;;
;; Required context:
;;   - zoom effect must be loaded
;;   - blend effect must be loaded
;;
;; Parameters:
;;   active-frame: current frame
;;   next-frame: frame to transition to
;;   fade-amt: transition progress (0 = all active, 1 = all next)
;;
;; Usage:
;;   (include :path "../templates/crossfade-zoom.sexp")
;;   ...
;;   (crossfade-zoom active-frame next-frame 0.5)
(defmacro crossfade-zoom (active-frame next-frame fade-amt)
  (let [active-zoom (+ 1.0 fade-amt)        ; 1.0 -> 2.0 over the fade
        active-zoomed (zoom active-frame :amount active-zoom)
        next-zoom (+ 0.1 (* fade-amt 0.9))  ; 0.1 -> 1.0 over the fade
        next-zoomed (zoom next-frame :amount next-zoom)]
    ;; :opacity fade-amt => 0 shows only the active frame, 1 only the next
    (blend active-zoomed next-zoomed :opacity fade-amt)))

Some files were not shown because too many files have changed in this diff Show More