From bb458aa924dacf5f07f50c4abefe3cb184dafd98 Mon Sep 17 00:00:00 2001 From: giles Date: Mon, 2 Feb 2026 19:10:11 +0000 Subject: [PATCH] Replace batch DAG system with streaming architecture - Remove legacy_tasks.py, hybrid_state.py, render.py - Remove old task modules (analyze, execute, execute_sexp, orchestrate) - Add streaming interpreter from test repo - Add sexp_effects with primitives and video effects - Add streaming Celery task with CID-based asset resolution - Support both CID and friendly name references for assets - Add .dockerignore to prevent local clones from conflicting Co-Authored-By: Claude Opus 4.5 --- .dockerignore | 22 + app/templates/runs/artifacts.html | 62 + app/templates/runs/plan.html | 99 + celery_app.py | 6 +- configs/audio-dizzy.sexp | 17 + configs/audio-halleluwah.sexp | 17 + configs/sources-default.sexp | 38 + configs/sources-woods-half.sexp | 19 + configs/sources-woods.sexp | 39 + effects/quick_test_explicit.sexp | 150 ++ hybrid_state.py | 294 --- legacy_tasks.py | 1219 --------- render.py | 65 - sexp_effects/__init__.py | 32 + sexp_effects/effects/ascii_art.sexp | 17 + sexp_effects/effects/ascii_art_fx.sexp | 52 + sexp_effects/effects/ascii_fx_zone.sexp | 102 + sexp_effects/effects/ascii_zones.sexp | 30 + sexp_effects/effects/blend.sexp | 31 + sexp_effects/effects/blend_multi.sexp | 58 + sexp_effects/effects/bloom.sexp | 16 + sexp_effects/effects/blur.sexp | 8 + sexp_effects/effects/brightness.sexp | 9 + sexp_effects/effects/color-adjust.sexp | 13 + sexp_effects/effects/color_cycle.sexp | 13 + sexp_effects/effects/contrast.sexp | 9 + sexp_effects/effects/crt.sexp | 30 + sexp_effects/effects/datamosh.sexp | 14 + sexp_effects/effects/echo.sexp | 19 + sexp_effects/effects/edge_detect.sexp | 9 + sexp_effects/effects/emboss.sexp | 13 + sexp_effects/effects/film_grain.sexp | 19 + sexp_effects/effects/fisheye.sexp | 16 + sexp_effects/effects/flip.sexp | 16 + sexp_effects/effects/grayscale.sexp | 7 + sexp_effects/effects/hue_shift.sexp | 12 + 
sexp_effects/effects/invert.sexp | 9 + sexp_effects/effects/kaleidoscope.sexp | 20 + sexp_effects/effects/layer.sexp | 36 + sexp_effects/effects/mirror.sexp | 33 + sexp_effects/effects/neon_glow.sexp | 23 + sexp_effects/effects/noise.sexp | 8 + sexp_effects/effects/outline.sexp | 24 + sexp_effects/effects/pixelate.sexp | 13 + sexp_effects/effects/pixelsort.sexp | 11 + sexp_effects/effects/posterize.sexp | 8 + sexp_effects/effects/resize-frame.sexp | 11 + sexp_effects/effects/rgb_split.sexp | 13 + sexp_effects/effects/ripple.sexp | 19 + sexp_effects/effects/rotate.sexp | 11 + sexp_effects/effects/saturation.sexp | 9 + sexp_effects/effects/scanlines.sexp | 15 + sexp_effects/effects/sepia.sexp | 7 + sexp_effects/effects/sharpen.sexp | 8 + sexp_effects/effects/strobe.sexp | 16 + sexp_effects/effects/swirl.sexp | 17 + sexp_effects/effects/threshold.sexp | 9 + sexp_effects/effects/tile_grid.sexp | 29 + sexp_effects/effects/trails.sexp | 20 + sexp_effects/effects/vignette.sexp | 23 + sexp_effects/effects/wave.sexp | 22 + sexp_effects/effects/zoom.sexp | 8 + sexp_effects/interpreter.py | 1016 ++++++++ sexp_effects/parser.py | 168 ++ sexp_effects/primitive_libs/__init__.py | 102 + sexp_effects/primitive_libs/arrays.py | 196 ++ sexp_effects/primitive_libs/ascii.py | 388 +++ sexp_effects/primitive_libs/blending.py | 116 + sexp_effects/primitive_libs/color.py | 137 + sexp_effects/primitive_libs/color_ops.py | 90 + sexp_effects/primitive_libs/core.py | 271 ++ sexp_effects/primitive_libs/drawing.py | 136 + sexp_effects/primitive_libs/filters.py | 119 + sexp_effects/primitive_libs/geometry.py | 143 + sexp_effects/primitive_libs/image.py | 144 + sexp_effects/primitive_libs/math.py | 164 ++ sexp_effects/primitive_libs/streaming.py | 304 +++ sexp_effects/primitives.py | 3043 ++++++++++++++++++++++ sexp_effects/test_interpreter.py | 236 ++ streaming/__init__.py | 44 + streaming/audio.py | 486 ++++ streaming/backends.py | 308 +++ streaming/compositor.py | 595 +++++ streaming/demo.py | 
125 + streaming/output.py | 369 +++ streaming/pipeline.py | 846 ++++++ streaming/recipe_adapter.py | 470 ++++ streaming/recipe_executor.py | 415 +++ streaming/sexp_executor.py | 678 +++++ streaming/sexp_interp.py | 376 +++ streaming/sources.py | 281 ++ streaming/stream_sexp.py | 1081 ++++++++ streaming/stream_sexp_generic.py | 863 ++++++ tasks/__init__.py | 26 +- tasks/analyze.py | 132 - tasks/execute.py | 381 --- tasks/execute_sexp.py | 582 ----- tasks/orchestrate.py | 514 ---- tasks/streaming.py | 303 +++ templates/crossfade-zoom.sexp | 25 + templates/cycle-crossfade.sexp | 65 + templates/process-pair.sexp | 112 + templates/scan-oscillating-spin.sexp | 28 + templates/scan-ripple-drops.sexp | 41 + templates/standard-effects.sexp | 22 + templates/standard-primitives.sexp | 14 + templates/stream-process-pair.sexp | 72 + 107 files changed, 15830 insertions(+), 3211 deletions(-) create mode 100644 .dockerignore create mode 100644 app/templates/runs/artifacts.html create mode 100644 app/templates/runs/plan.html create mode 100644 configs/audio-dizzy.sexp create mode 100644 configs/audio-halleluwah.sexp create mode 100644 configs/sources-default.sexp create mode 100644 configs/sources-woods-half.sexp create mode 100644 configs/sources-woods.sexp create mode 100644 effects/quick_test_explicit.sexp delete mode 100644 hybrid_state.py delete mode 100644 legacy_tasks.py delete mode 100755 render.py create mode 100644 sexp_effects/__init__.py create mode 100644 sexp_effects/effects/ascii_art.sexp create mode 100644 sexp_effects/effects/ascii_art_fx.sexp create mode 100644 sexp_effects/effects/ascii_fx_zone.sexp create mode 100644 sexp_effects/effects/ascii_zones.sexp create mode 100644 sexp_effects/effects/blend.sexp create mode 100644 sexp_effects/effects/blend_multi.sexp create mode 100644 sexp_effects/effects/bloom.sexp create mode 100644 sexp_effects/effects/blur.sexp create mode 100644 sexp_effects/effects/brightness.sexp create mode 100644 
sexp_effects/effects/color-adjust.sexp create mode 100644 sexp_effects/effects/color_cycle.sexp create mode 100644 sexp_effects/effects/contrast.sexp create mode 100644 sexp_effects/effects/crt.sexp create mode 100644 sexp_effects/effects/datamosh.sexp create mode 100644 sexp_effects/effects/echo.sexp create mode 100644 sexp_effects/effects/edge_detect.sexp create mode 100644 sexp_effects/effects/emboss.sexp create mode 100644 sexp_effects/effects/film_grain.sexp create mode 100644 sexp_effects/effects/fisheye.sexp create mode 100644 sexp_effects/effects/flip.sexp create mode 100644 sexp_effects/effects/grayscale.sexp create mode 100644 sexp_effects/effects/hue_shift.sexp create mode 100644 sexp_effects/effects/invert.sexp create mode 100644 sexp_effects/effects/kaleidoscope.sexp create mode 100644 sexp_effects/effects/layer.sexp create mode 100644 sexp_effects/effects/mirror.sexp create mode 100644 sexp_effects/effects/neon_glow.sexp create mode 100644 sexp_effects/effects/noise.sexp create mode 100644 sexp_effects/effects/outline.sexp create mode 100644 sexp_effects/effects/pixelate.sexp create mode 100644 sexp_effects/effects/pixelsort.sexp create mode 100644 sexp_effects/effects/posterize.sexp create mode 100644 sexp_effects/effects/resize-frame.sexp create mode 100644 sexp_effects/effects/rgb_split.sexp create mode 100644 sexp_effects/effects/ripple.sexp create mode 100644 sexp_effects/effects/rotate.sexp create mode 100644 sexp_effects/effects/saturation.sexp create mode 100644 sexp_effects/effects/scanlines.sexp create mode 100644 sexp_effects/effects/sepia.sexp create mode 100644 sexp_effects/effects/sharpen.sexp create mode 100644 sexp_effects/effects/strobe.sexp create mode 100644 sexp_effects/effects/swirl.sexp create mode 100644 sexp_effects/effects/threshold.sexp create mode 100644 sexp_effects/effects/tile_grid.sexp create mode 100644 sexp_effects/effects/trails.sexp create mode 100644 sexp_effects/effects/vignette.sexp create mode 100644 
sexp_effects/effects/wave.sexp create mode 100644 sexp_effects/effects/zoom.sexp create mode 100644 sexp_effects/interpreter.py create mode 100644 sexp_effects/parser.py create mode 100644 sexp_effects/primitive_libs/__init__.py create mode 100644 sexp_effects/primitive_libs/arrays.py create mode 100644 sexp_effects/primitive_libs/ascii.py create mode 100644 sexp_effects/primitive_libs/blending.py create mode 100644 sexp_effects/primitive_libs/color.py create mode 100644 sexp_effects/primitive_libs/color_ops.py create mode 100644 sexp_effects/primitive_libs/core.py create mode 100644 sexp_effects/primitive_libs/drawing.py create mode 100644 sexp_effects/primitive_libs/filters.py create mode 100644 sexp_effects/primitive_libs/geometry.py create mode 100644 sexp_effects/primitive_libs/image.py create mode 100644 sexp_effects/primitive_libs/math.py create mode 100644 sexp_effects/primitive_libs/streaming.py create mode 100644 sexp_effects/primitives.py create mode 100644 sexp_effects/test_interpreter.py create mode 100644 streaming/__init__.py create mode 100644 streaming/audio.py create mode 100644 streaming/backends.py create mode 100644 streaming/compositor.py create mode 100644 streaming/demo.py create mode 100644 streaming/output.py create mode 100644 streaming/pipeline.py create mode 100644 streaming/recipe_adapter.py create mode 100644 streaming/recipe_executor.py create mode 100644 streaming/sexp_executor.py create mode 100644 streaming/sexp_interp.py create mode 100644 streaming/sources.py create mode 100644 streaming/stream_sexp.py create mode 100644 streaming/stream_sexp_generic.py delete mode 100644 tasks/analyze.py delete mode 100644 tasks/execute.py delete mode 100644 tasks/execute_sexp.py delete mode 100644 tasks/orchestrate.py create mode 100644 tasks/streaming.py create mode 100644 templates/crossfade-zoom.sexp create mode 100644 templates/cycle-crossfade.sexp create mode 100644 templates/process-pair.sexp create mode 100644 
templates/scan-oscillating-spin.sexp create mode 100644 templates/scan-ripple-drops.sexp create mode 100644 templates/standard-effects.sexp create mode 100644 templates/standard-primitives.sexp create mode 100644 templates/stream-process-pair.sexp diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..f48a442 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,22 @@ +# Don't copy local clones - Dockerfile will clone fresh +artdag-effects/ + +# Python cache +__pycache__/ +*.py[cod] +*.egg-info/ +.pytest_cache/ + +# Virtual environments +.venv/ +venv/ + +# Local env +.env + +# Git +.git/ + +# IDE +.vscode/ +.idea/ diff --git a/app/templates/runs/artifacts.html b/app/templates/runs/artifacts.html new file mode 100644 index 0000000..874188c --- /dev/null +++ b/app/templates/runs/artifacts.html @@ -0,0 +1,62 @@ +{% extends "base.html" %} + +{% block title %}Run Artifacts{% endblock %} + +{% block content %} + + +

Run Artifacts

+ +{% if artifacts %} +
+ {% for artifact in artifacts %} +
+
+ + {{ artifact.role }} + + {{ artifact.step_name }} +
+ +
+

Content Hash

+

{{ artifact.hash }}

+
+ +
+ + {% if artifact.media_type == 'video' %}Video + {% elif artifact.media_type == 'image' %}Image + {% elif artifact.media_type == 'audio' %}Audio + {% else %}File{% endif %} + + {{ (artifact.size_bytes / 1024)|round(1) }} KB +
+ + +
+ {% endfor %} +
+{% else %} +
+

No artifacts found for this run.

+
+{% endif %} +{% endblock %} diff --git a/app/templates/runs/plan.html b/app/templates/runs/plan.html new file mode 100644 index 0000000..f50090d --- /dev/null +++ b/app/templates/runs/plan.html @@ -0,0 +1,99 @@ +{% extends "base.html" %} + +{% block title %}Run Plan - {{ run_id[:16] }}{% endblock %} + +{% block head %} + +{% endblock %} + +{% block content %} + + +

Execution Plan

+ +{% if plan %} +
+ +
+

DAG Visualization

+
+
+ + +
+

Steps ({{ plan.steps|length if plan.steps else 0 }})

+
+ {% for step in plan.get('steps', []) %} +
+
+ {{ step.name or step.id or 'Step ' ~ loop.index }} + + {{ step.status or ('cached' if step.cached else 'pending') }} + +
+ {% if step.cache_id %} +
+ {{ step.cache_id[:24] }}... +
+ {% endif %} +
+ {% else %} +

No steps defined

+ {% endfor %} +
+
+
+ + +{% else %} +
+

No execution plan available for this run.

+
+{% endif %} +{% endblock %} diff --git a/celery_app.py b/celery_app.py index 9f81107..f997330 100644 --- a/celery_app.py +++ b/celery_app.py @@ -1,8 +1,8 @@ """ Art DAG Celery Application -Distributed rendering for the Art DAG system. -Uses the foundational artdag language from GitHub. +Streaming video rendering for the Art DAG system. +Uses S-expression recipes with frame-by-frame processing. """ import os @@ -14,7 +14,7 @@ app = Celery( 'art_celery', broker=REDIS_URL, backend=REDIS_URL, - include=['legacy_tasks', 'tasks', 'tasks.analyze', 'tasks.execute', 'tasks.orchestrate', 'tasks.execute_sexp'] + include=['tasks', 'tasks.streaming'] ) app.conf.update( diff --git a/configs/audio-dizzy.sexp b/configs/audio-dizzy.sexp new file mode 100644 index 0000000..dc16087 --- /dev/null +++ b/configs/audio-dizzy.sexp @@ -0,0 +1,17 @@ +;; Audio Configuration - dizzy.mp3 +;; +;; Defines audio analyzer and playback for a recipe. +;; Pass to recipe with: --audio configs/audio-dizzy.sexp +;; +;; Provides: +;; - music: audio analyzer for beat/energy detection +;; - audio-playback: path for synchronized playback + +(require-primitives "streaming") + +;; Audio analyzer (provides beat detection and energy levels) +;; Paths relative to working directory (project root) +(def music (streaming:make-audio-analyzer "dizzy.mp3")) + +;; Audio playback path (for sync with video output) +(audio-playback "dizzy.mp3") diff --git a/configs/audio-halleluwah.sexp b/configs/audio-halleluwah.sexp new file mode 100644 index 0000000..5e4b812 --- /dev/null +++ b/configs/audio-halleluwah.sexp @@ -0,0 +1,17 @@ +;; Audio Configuration - dizzy.mp3 +;; +;; Defines audio analyzer and playback for a recipe. 
+;; Pass to recipe with: --audio configs/audio-halleluwah.sexp +;; +;; Provides: +;; - music: audio analyzer for beat/energy detection +;; - audio-playback: path for synchronized playback + +(require-primitives "streaming") + +;; Audio analyzer (provides beat detection and energy levels) +;; Paths relative to working directory (project root) +(def music (streaming:make-audio-analyzer "woods_half/halleluwah.webm")) + +;; Audio playback path (for sync with video output) +(audio-playback "woods_half/halleluwah.webm") diff --git a/configs/sources-default.sexp b/configs/sources-default.sexp new file mode 100644 index 0000000..754bd92 --- /dev/null +++ b/configs/sources-default.sexp @@ -0,0 +1,38 @@ +;; Default Sources Configuration +;; +;; Defines video sources and per-pair effect configurations. +;; Pass to recipe with: --sources configs/sources-default.sexp +;; +;; Required by recipes using process-pair macro: +;; - sources: array of video sources +;; - pair-configs: array of effect configurations per source + +(require-primitives "streaming") + +;; Video sources array +;; Paths relative to working directory (project root) +(def sources [ + (streaming:make-video-source "monday.webm" 30) + (streaming:make-video-source "escher.webm" 30) + (streaming:make-video-source "2.webm" 30) + (streaming:make-video-source "disruptors.webm" 30) + (streaming:make-video-source "4.mp4" 30) + (streaming:make-video-source "ecstacy.mp4" 30) + (streaming:make-video-source "dopple.webm" 30) + (streaming:make-video-source "5.mp4" 30) +]) + +;; Per-pair effect config: rotation direction, rotation ranges, zoom ranges +;; :dir = rotation direction (1 or -1) +;; :rot-a, :rot-b = max rotation angles for clip A and B +;; :zoom-a, :zoom-b = max zoom amounts for clip A and B +(def pair-configs [ + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 0: monday + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 1: escher + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 2: vid2 + 
{:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 3: disruptors (reversed) + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 4: vid4 + {:dir 1 :rot-a 30 :rot-b -30 :zoom-a 1.3 :zoom-b 0.7} ;; 5: ecstacy (smaller) + {:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 6: dopple (reversed) + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 7: vid5 +]) diff --git a/configs/sources-woods-half.sexp b/configs/sources-woods-half.sexp new file mode 100644 index 0000000..d2feff8 --- /dev/null +++ b/configs/sources-woods-half.sexp @@ -0,0 +1,19 @@ +;; Half-resolution Woods Sources (960x540) +;; +;; Pass to recipe with: --sources configs/sources-woods-half.sexp + +(require-primitives "streaming") + +(def sources [ + (streaming:make-video-source "woods_half/1.webm" 30) + (streaming:make-video-source "woods_half/2.webm" 30) + (streaming:make-video-source "woods_half/3.webm" 30) + (streaming:make-video-source "woods_half/4.webm" 30) +]) + +(def pair-configs [ + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} + {:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} +]) diff --git a/configs/sources-woods.sexp b/configs/sources-woods.sexp new file mode 100644 index 0000000..717bfd9 --- /dev/null +++ b/configs/sources-woods.sexp @@ -0,0 +1,39 @@ +;; Default Sources Configuration +;; +;; Defines video sources and per-pair effect configurations. 
+;; Pass to recipe with: --sources configs/sources-woods.sexp +;; +;; Required by recipes using process-pair macro: +;; - sources: array of video sources +;; - pair-configs: array of effect configurations per source + +(require-primitives "streaming") + +;; Video sources array +;; Paths relative to working directory (project root) +(def sources [ + (streaming:make-video-source "woods/1.webm" 10) + (streaming:make-video-source "woods/2.webm" 10) + (streaming:make-video-source "woods/3.webm" 10) + (streaming:make-video-source "woods/4.webm" 10) + (streaming:make-video-source "woods/5.webm" 10) + (streaming:make-video-source "woods/6.webm" 10) + (streaming:make-video-source "woods/7.webm" 10) + (streaming:make-video-source "woods/8.webm" 10) +]) + +;; Per-pair effect config: rotation direction, rotation ranges, zoom ranges +;; :dir = rotation direction (1 or -1) +;; :rot-a, :rot-b = max rotation angles for clip A and B +;; :zoom-a, :zoom-b = max zoom amounts for clip A and B +(def pair-configs [ + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 0: woods/1 + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 1: woods/2 + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 2: woods/3 + {:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 3: woods/4 (reversed) + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 4: woods/5 + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 5: woods/6 + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 6: woods/7 + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 7: woods/8 + +]) diff --git a/effects/quick_test_explicit.sexp b/effects/quick_test_explicit.sexp new file mode 100644 index 0000000..0a3698b --- /dev/null +++ b/effects/quick_test_explicit.sexp @@ -0,0 +1,150 @@ +;; Quick Test - Fully Explicit Streaming Version +;; +;; The interpreter is completely generic - knows nothing about video/audio. +;; All domain logic is explicit via primitives. 
+;; +;; Run with built-in sources/audio: +;; python3 -m streaming.stream_sexp_generic effects/quick_test_explicit.sexp --fps 30 +;; +;; Run with external config files: +;; python3 -m streaming.stream_sexp_generic effects/quick_test_explicit.sexp \ +;; --sources configs/sources-default.sexp \ +;; --audio configs/audio-dizzy.sexp \ +;; --fps 30 + +(stream "quick_test_explicit" + :fps 30 + :width 1920 + :height 1080 + :seed 42 + + ;; Load standard primitives and effects + (include :path "../templates/standard-primitives.sexp") + (include :path "../templates/standard-effects.sexp") + + ;; Load reusable templates + (include :path "../templates/stream-process-pair.sexp") + (include :path "../templates/crossfade-zoom.sexp") + + ;; === SOURCES AS ARRAY === + (def sources [ + (streaming:make-video-source "monday.webm" 30) + (streaming:make-video-source "escher.webm" 30) + (streaming:make-video-source "2.webm" 30) + (streaming:make-video-source "disruptors.webm" 30) + (streaming:make-video-source "4.mp4" 30) + (streaming:make-video-source "ecstacy.mp4" 30) + (streaming:make-video-source "dopple.webm" 30) + (streaming:make-video-source "5.mp4" 30) + ]) + + ;; Per-pair config: [rot-dir, rot-a-max, rot-b-max, zoom-a-max, zoom-b-max] + ;; Pairs 3,6: reversed (negative rot-a, positive rot-b, shrink zoom-a, grow zoom-b) + ;; Pair 5: smaller ranges + (def pair-configs [ + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 0: monday + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 1: escher + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 2: vid2 + {:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 3: disruptors (reversed) + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 4: vid4 + {:dir 1 :rot-a 30 :rot-b -30 :zoom-a 1.3 :zoom-b 0.7} ;; 5: ecstacy (smaller) + {:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 6: dopple (reversed) + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 7: vid5 + ]) + + ;; Audio analyzer + (def 
music (streaming:make-audio-analyzer "dizzy.mp3")) + + ;; Audio playback + (audio-playback "../dizzy.mp3") + + ;; === GLOBAL SCANS === + + ;; Cycle state: which source is active (recipe-specific) + ;; clen = beats per source (8-24 beats = ~4-12 seconds) + (scan cycle (streaming:audio-beat music t) + :init {:active 0 :beat 0 :clen 16} + :step (if (< (+ beat 1) clen) + (dict :active active :beat (+ beat 1) :clen clen) + (dict :active (mod (+ active 1) (len sources)) :beat 0 + :clen (+ 8 (mod (* (streaming:audio-beat-count music t) 7) 17))))) + + ;; Reusable scans from templates (require 'music' to be defined) + (include :path "../templates/scan-oscillating-spin.sexp") + (include :path "../templates/scan-ripple-drops.sexp") + + ;; === PER-PAIR STATE (dynamically sized based on sources) === + ;; Each pair has: inv-a, inv-b, hue-a, hue-b, mix, rot-angle + (scan pairs (streaming:audio-beat music t) + :init {:states (map (core:range (len sources)) (lambda (_) + {:inv-a 0 :inv-b 0 :hue-a 0 :hue-b 0 :hue-a-val 0 :hue-b-val 0 :mix 0.5 :mix-rem 5 :angle 0 :rot-beat 0 :rot-clen 25}))} + :step (dict :states (map states (lambda (p) + (let [;; Invert toggles (10% chance, lasts 1-4 beats) + new-inv-a (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- (get p :inv-a) 1))) + new-inv-b (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- (get p :inv-b) 1))) + ;; Hue shifts (10% chance, lasts 1-4 beats) - use countdown like invert + old-hue-a (get p :hue-a) + old-hue-b (get p :hue-b) + new-hue-a (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- old-hue-a 1))) + new-hue-b (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- old-hue-b 1))) + ;; Pick random hue value when triggering (stored separately) + new-hue-a-val (if (> new-hue-a old-hue-a) (+ 30 (* (core:rand) 300)) (get p :hue-a-val)) + new-hue-b-val (if (> new-hue-b old-hue-b) (+ 30 (* (core:rand) 300)) (get p :hue-b-val)) + ;; Mix (holds for 1-10 beats, then picks 0, 0.5, or 1) 
+ mix-rem (get p :mix-rem) + old-mix (get p :mix) + new-mix-rem (if (> mix-rem 0) (- mix-rem 1) (+ 1 (core:rand-int 1 10))) + new-mix (if (> mix-rem 0) old-mix (* (core:rand-int 0 2) 0.5)) + ;; Rotation (accumulates, reverses direction when cycle completes) + rot-beat (get p :rot-beat) + rot-clen (get p :rot-clen) + old-angle (get p :angle) + ;; Note: dir comes from pair-configs, but we store rotation state here + new-rot-beat (if (< (+ rot-beat 1) rot-clen) (+ rot-beat 1) 0) + new-rot-clen (if (< (+ rot-beat 1) rot-clen) rot-clen (+ 20 (core:rand-int 0 10))) + new-angle (+ old-angle (/ 360 rot-clen))] + (dict :inv-a new-inv-a :inv-b new-inv-b + :hue-a new-hue-a :hue-b new-hue-b + :hue-a-val new-hue-a-val :hue-b-val new-hue-b-val + :mix new-mix :mix-rem new-mix-rem + :angle new-angle :rot-beat new-rot-beat :rot-clen new-rot-clen)))))) + + ;; === FRAME PIPELINE === + (frame + (let [now t + e (streaming:audio-energy music now) + + ;; Get cycle state + active (bind cycle :active) + beat-pos (bind cycle :beat) + clen (bind cycle :clen) + + ;; Transition logic: last third of cycle crossfades to next + phase3 (* beat-pos 3) + fading (and (>= phase3 (* clen 2)) (< phase3 (* clen 3))) + fade-amt (if fading (/ (- phase3 (* clen 2)) clen) 0) + next-idx (mod (+ active 1) (len sources)) + + ;; Get pair states array (required by process-pair macro) + pair-states (bind pairs :states) + + ;; Process active pair using macro from template + active-frame (process-pair active) + + ;; Crossfade with zoom during transition (using macro) + result (if fading + (crossfade-zoom active-frame (process-pair next-idx) fade-amt) + active-frame) + + ;; Final: global spin + ripple + spun (rotate result :angle (bind spin :angle)) + rip-gate (bind ripple-state :gate) + rip-amp (* rip-gate (core:map-range e 0 1 5 50))] + + (ripple spun + :amplitude rip-amp + :center_x (bind ripple-state :cx) + :center_y (bind ripple-state :cy) + :frequency 8 + :decay 2 + :speed 5)))) diff --git a/hybrid_state.py 
b/hybrid_state.py deleted file mode 100644 index b351a7c..0000000 --- a/hybrid_state.py +++ /dev/null @@ -1,294 +0,0 @@ -""" -Hybrid State Manager: Local Redis + IPNS Sync. - -Provides fast local operations with eventual consistency across L1 nodes. - -- Local Redis: Fast reads/writes (microseconds) -- IPNS Sync: Background sync with other nodes (every N seconds) -- Duplicate work: Accepted, idempotent (same inputs → same CID) - -Usage: - from hybrid_state import get_state_manager - - state = get_state_manager() - - # Fast local lookup - cid = state.get_cached_cid(cache_id) - - # Fast local write (synced in background) - state.set_cached_cid(cache_id, output_cid) -""" - -import json -import logging -import os -import threading -import time -from typing import Dict, Optional - -import redis - -logger = logging.getLogger(__name__) - -# Configuration -REDIS_URL = os.environ.get("REDIS_URL", "redis://localhost:6379/5") -CLUSTER_KEY = os.environ.get("ARTDAG_CLUSTER_KEY", "default") -IPNS_SYNC_INTERVAL = int(os.environ.get("ARTDAG_IPNS_SYNC_INTERVAL", "30")) -IPNS_ENABLED = os.environ.get("ARTDAG_IPNS_SYNC", "").lower() in ("true", "1", "yes") - -# Redis keys -CACHE_KEY = "artdag:cid_cache" # hash: cache_id → output CID -ANALYSIS_KEY = "artdag:analysis_cache" # hash: input_hash:features → analysis CID -PLAN_KEY = "artdag:plan_cache" # hash: plan_id → plan CID -RUN_KEY = "artdag:run_cache" # hash: run_id → output CID -CLAIM_KEY_PREFIX = "artdag:claim:" # string: cache_id → worker (with TTL) - -# IPNS names (relative to cluster key) -IPNS_CACHE_NAME = "cache" -IPNS_ANALYSIS_NAME = "analysis" -IPNS_PLAN_NAME = "plans" - - -class HybridStateManager: - """ - Local Redis + async IPNS sync for distributed L1 coordination. 
- - Fast path (local Redis): - - get_cached_cid / set_cached_cid - - try_claim / release_claim - - Slow path (background IPNS sync): - - Periodically syncs local state with global IPNS state - - Merges remote state into local (pulls new entries) - - Publishes local state to IPNS (pushes updates) - """ - - def __init__( - self, - redis_url: str = REDIS_URL, - cluster_key: str = CLUSTER_KEY, - sync_interval: int = IPNS_SYNC_INTERVAL, - ipns_enabled: bool = IPNS_ENABLED, - ): - self.cluster_key = cluster_key - self.sync_interval = sync_interval - self.ipns_enabled = ipns_enabled - - # Connect to Redis - self._redis = redis.from_url(redis_url, decode_responses=True) - - # IPNS client (lazy import) - self._ipfs = None - - # Sync thread - self._sync_thread = None - self._stop_sync = threading.Event() - - # Start background sync if enabled - if self.ipns_enabled: - self._start_background_sync() - - @property - def ipfs(self): - """Lazy import of IPFS client.""" - if self._ipfs is None: - try: - import ipfs_client - self._ipfs = ipfs_client - except ImportError: - logger.warning("ipfs_client not available, IPNS sync disabled") - self._ipfs = False - return self._ipfs if self._ipfs else None - - # ========== CID Cache ========== - - def get_cached_cid(self, cache_id: str) -> Optional[str]: - """Get output CID for a cache_id. Fast local lookup.""" - return self._redis.hget(CACHE_KEY, cache_id) - - def set_cached_cid(self, cache_id: str, cid: str) -> None: - """Set output CID for a cache_id. 
Fast local write.""" - self._redis.hset(CACHE_KEY, cache_id, cid) - - def get_all_cached_cids(self) -> Dict[str, str]: - """Get all cached CIDs.""" - return self._redis.hgetall(CACHE_KEY) - - # ========== Analysis Cache ========== - - def get_analysis_cid(self, input_hash: str, features: list) -> Optional[str]: - """Get analysis CID for input + features.""" - key = f"{input_hash}:{','.join(sorted(features))}" - return self._redis.hget(ANALYSIS_KEY, key) - - def set_analysis_cid(self, input_hash: str, features: list, cid: str) -> None: - """Set analysis CID for input + features.""" - key = f"{input_hash}:{','.join(sorted(features))}" - self._redis.hset(ANALYSIS_KEY, key, cid) - - def get_all_analysis_cids(self) -> Dict[str, str]: - """Get all analysis CIDs.""" - return self._redis.hgetall(ANALYSIS_KEY) - - # ========== Plan Cache ========== - - def get_plan_cid(self, plan_id: str) -> Optional[str]: - """Get plan CID for a plan_id.""" - return self._redis.hget(PLAN_KEY, plan_id) - - def set_plan_cid(self, plan_id: str, cid: str) -> None: - """Set plan CID for a plan_id.""" - self._redis.hset(PLAN_KEY, plan_id, cid) - - def get_all_plan_cids(self) -> Dict[str, str]: - """Get all plan CIDs.""" - return self._redis.hgetall(PLAN_KEY) - - # ========== Run Cache ========== - - def get_run_cid(self, run_id: str) -> Optional[str]: - """Get output CID for a run_id.""" - return self._redis.hget(RUN_KEY, run_id) - - def set_run_cid(self, run_id: str, cid: str) -> None: - """Set output CID for a run_id.""" - self._redis.hset(RUN_KEY, run_id, cid) - - # ========== Claiming ========== - - def try_claim(self, cache_id: str, worker_id: str, ttl: int = 300) -> bool: - """ - Try to claim a cache_id for execution. - - Returns True if claimed, False if already claimed by another worker. - Uses Redis SETNX for atomic claim. 
- """ - key = f"{CLAIM_KEY_PREFIX}{cache_id}" - return self._redis.set(key, worker_id, nx=True, ex=ttl) - - def release_claim(self, cache_id: str) -> None: - """Release a claim.""" - key = f"{CLAIM_KEY_PREFIX}{cache_id}" - self._redis.delete(key) - - def get_claim(self, cache_id: str) -> Optional[str]: - """Get current claim holder for a cache_id.""" - key = f"{CLAIM_KEY_PREFIX}{cache_id}" - return self._redis.get(key) - - # ========== IPNS Sync ========== - - def _start_background_sync(self): - """Start background IPNS sync thread.""" - if self._sync_thread is not None: - return - - def sync_loop(): - logger.info(f"IPNS sync started (interval={self.sync_interval}s)") - while not self._stop_sync.wait(timeout=self.sync_interval): - try: - self._sync_with_ipns() - except Exception as e: - logger.warning(f"IPNS sync failed: {e}") - - self._sync_thread = threading.Thread(target=sync_loop, daemon=True) - self._sync_thread.start() - - def stop_sync(self): - """Stop background sync thread.""" - self._stop_sync.set() - if self._sync_thread: - self._sync_thread.join(timeout=5) - - def _sync_with_ipns(self): - """Sync local state with IPNS global state.""" - if not self.ipfs: - return - - logger.debug("Starting IPNS sync...") - - # Sync each cache type - self._sync_hash(CACHE_KEY, IPNS_CACHE_NAME) - self._sync_hash(ANALYSIS_KEY, IPNS_ANALYSIS_NAME) - self._sync_hash(PLAN_KEY, IPNS_PLAN_NAME) - - logger.debug("IPNS sync complete") - - def _sync_hash(self, redis_key: str, ipns_name: str): - """Sync a Redis hash with IPNS.""" - ipns_full_name = f"{self.cluster_key}/{ipns_name}" - - # Pull: resolve IPNS → get global state - global_state = {} - try: - global_cid = self.ipfs.name_resolve(ipns_full_name) - if global_cid: - global_bytes = self.ipfs.get_bytes(global_cid) - if global_bytes: - global_state = json.loads(global_bytes.decode('utf-8')) - logger.debug(f"Pulled {len(global_state)} entries from {ipns_name}") - except Exception as e: - logger.debug(f"Could not resolve 
{ipns_full_name}: {e}") - - # Merge global into local (add entries we don't have) - if global_state: - pipe = self._redis.pipeline() - for key, value in global_state.items(): - pipe.hsetnx(redis_key, key, value) - results = pipe.execute() - added = sum(1 for r in results if r) - if added: - logger.info(f"Merged {added} new entries from IPNS/{ipns_name}") - - # Push: get local state, merge with global, publish - local_state = self._redis.hgetall(redis_key) - if local_state: - merged = {**global_state, **local_state} - - # Only publish if we have new entries - if len(merged) > len(global_state): - try: - new_cid = self.ipfs.add_json(merged) - if new_cid: - # Note: name_publish can be slow - self.ipfs.name_publish(ipns_full_name, new_cid) - logger.info(f"Published {len(merged)} entries to IPNS/{ipns_name}") - except Exception as e: - logger.warning(f"Failed to publish to {ipns_full_name}: {e}") - - def force_sync(self): - """Force an immediate IPNS sync (blocking).""" - self._sync_with_ipns() - - # ========== Stats ========== - - def get_stats(self) -> Dict: - """Get cache statistics.""" - return { - "cached_cids": self._redis.hlen(CACHE_KEY), - "analysis_cids": self._redis.hlen(ANALYSIS_KEY), - "plan_cids": self._redis.hlen(PLAN_KEY), - "run_cids": self._redis.hlen(RUN_KEY), - "ipns_enabled": self.ipns_enabled, - "cluster_key": self.cluster_key[:16] + "..." 
if len(self.cluster_key) > 16 else self.cluster_key, - } - - -# Singleton instance -_state_manager: Optional[HybridStateManager] = None - - -def get_state_manager() -> HybridStateManager: - """Get the singleton state manager instance.""" - global _state_manager - if _state_manager is None: - _state_manager = HybridStateManager() - return _state_manager - - -def reset_state_manager(): - """Reset the singleton (for testing).""" - global _state_manager - if _state_manager: - _state_manager.stop_sync() - _state_manager = None diff --git a/legacy_tasks.py b/legacy_tasks.py deleted file mode 100644 index 5827fdd..0000000 --- a/legacy_tasks.py +++ /dev/null @@ -1,1219 +0,0 @@ -""" -Art DAG Celery Tasks - -Distributed rendering tasks for the Art DAG system. -Supports both single-effect runs and multi-step DAG execution. -""" - -import json -import logging -import os -import subprocess -import sys -from datetime import datetime, timezone -from pathlib import Path -from typing import Dict, List, Optional - -from celery import Task -from celery_app import app -from cache_manager import file_hash - -# Import artdag components -from artdag import DAG, Node, NodeType -from artdag.engine import Engine -from artdag.executor import register_executor, Executor, get_executor -from artdag.nodes.effect import register_effect -import artdag.nodes # Register all built-in executors (SOURCE, EFFECT, etc.) 
- -# Add effects to path (use env var in Docker, fallback to home dir locally) -EFFECTS_PATH = Path(os.environ.get("EFFECTS_PATH", str(Path.home() / "artdag-effects"))) -ARTDAG_PATH = Path(os.environ.get("ARTDAG_PATH", str(Path.home() / "art" / "artdag"))) - -logger = logging.getLogger(__name__) - - -def get_effects_commit() -> str: - """Get current git commit hash of effects repo.""" - try: - result = subprocess.run( - ["git", "rev-parse", "HEAD"], - cwd=EFFECTS_PATH, - capture_output=True, - text=True - ) - if result.returncode == 0: - return result.stdout.strip() - except Exception: - pass - return "unknown" - - -def get_artdag_commit() -> str: - """Get current git commit hash of artdag repo.""" - try: - result = subprocess.run( - ["git", "rev-parse", "HEAD"], - cwd=ARTDAG_PATH, - capture_output=True, - text=True - ) - if result.returncode == 0: - return result.stdout.strip() - except Exception: - pass - return "unknown" - - -sys.path.insert(0, str(EFFECTS_PATH / "dog")) - -# Register the dog effect with the EFFECT executor -# New format uses process() instead of effect_dog() -from effect import process as dog_process - -@register_effect("dog") -def _dog_effect(input_path: Path, output_path: Path, config: dict) -> Path: - """Dog effect wrapper - registered for DAG EFFECT nodes.""" - # Wrap for new whole-video API - return dog_process([input_path], output_path, config, None) - - -# Cache directory (shared between server and worker) -CACHE_DIR = Path(os.environ.get("CACHE_DIR", str(Path.home() / ".artdag" / "cache"))) - - -# ============ Executors for Effects ============ - -@register_executor("effect:dog") -class DogExecutor(Executor): - """Executor for the dog effect.""" - - def execute(self, config: Dict, inputs: List[Path], output_path: Path) -> Path: - from effect import process as dog_process - if len(inputs) != 1: - raise ValueError(f"Dog effect expects 1 input, got {len(inputs)}") - return dog_process(inputs, output_path, config, None) - - 
-@register_executor("effect:identity") -class IdentityExecutor(Executor): - """Executor for the identity effect (passthrough).""" - - def execute(self, config: Dict, inputs: List[Path], output_path: Path) -> Path: - from artdag.nodes.effect import effect_identity - if len(inputs) != 1: - raise ValueError(f"Identity effect expects 1 input, got {len(inputs)}") - return effect_identity(inputs[0], output_path, config) - - -@register_executor(NodeType.SOURCE) -class SourceExecutor(Executor): - """Executor for SOURCE nodes - loads content from cache by hash.""" - - def execute(self, config: Dict, inputs: List[Path], output_path: Path) -> Path: - # Source nodes load from cache by cid - cid = config.get("cid") - if not cid: - raise ValueError("SOURCE node requires cid in config") - - # Look up in cache - from cache_manager import get_cache_manager - cache_manager = get_cache_manager() - source_path = cache_manager.get_by_cid(cid) - - if not source_path or not source_path.exists(): - # Not in cache - fetch from IPFS - import logging - logger = logging.getLogger(__name__) - logger.info(f"SOURCE {cid[:16]}... not in cache, fetching from IPFS") - - import ipfs_client - fetch_path = CACHE_DIR / "ipfs_fetch" / cid - fetch_path.parent.mkdir(parents=True, exist_ok=True) - - if ipfs_client.get_file(cid, str(fetch_path)): - logger.info(f"SOURCE {cid[:16]}... 
fetched from IPFS to {fetch_path}") - source_path = fetch_path - else: - raise ValueError(f"Source content not in cache and IPFS fetch failed: {cid}") - - # For source nodes, we just return the path (no transformation) - # The engine will use this as input to subsequent nodes - return source_path - - -class RenderTask(Task): - """Base task with provenance tracking.""" - - def on_success(self, retval, task_id, args, kwargs): - """Record successful render.""" - print(f"Task {task_id} completed: {retval}") - - def on_failure(self, exc, task_id, args, kwargs, einfo): - """Record failed render.""" - print(f"Task {task_id} failed: {exc}") - - -@app.task(base=RenderTask, bind=True) -def render_effect(self, input_hash: str, effect_name: str, output_name: str) -> dict: - """ - Render an effect on an input asset. - - Args: - input_hash: SHA3-256 hash of input asset - effect_name: Name of effect (e.g., "dog", "identity") - output_name: Name for output asset - - Returns: - Provenance record with output hash - """ - from cache_manager import get_cache_manager - - # Registry hashes (for effects/infra metadata only) - REGISTRY = { - "effect:dog": { - "hash": "d048fe313433eb4e38f0e24194ffae91b896ca3e6eed3e50b2cc37b7be495555" - }, - "effect:identity": { - "hash": "640ea11ee881ebf4101af0a955439105ab11e763682b209e88ea08fc66e1cc03" - }, - "infra:artdag": { - "hash": "96a5972de216aee12ec794dcad5f9360da2e676171eabf24a46dfe1ee5fee4b0" - }, - "infra:giles-hp": { - "hash": "964bf6e69dc4e2493f42375013caffe26404ec3cf8eb5d9bc170cd42a361523b" - } - } - - # Input comes from cache by hash (supports both legacy and new cache locations) - cache_manager = get_cache_manager() - input_path = cache_manager.get_by_cid(input_hash) - if not input_path or not input_path.exists(): - raise ValueError(f"Input not in cache: {input_hash}") - - output_dir = CACHE_DIR - - # Verify input - actual_hash = file_hash(input_path) - if actual_hash != input_hash: - raise ValueError(f"Input hash mismatch: expected 
{input_hash}, got {actual_hash}") - - self.update_state(state='RENDERING', meta={'effect': effect_name, 'input': input_hash[:16]}) - - # Load and apply effect - if effect_name == "dog": - from effect import effect_dog, DOG_HASH - output_path = output_dir / f"{output_name}.mkv" - result = effect_dog(input_path, output_path, {}) - expected_hash = DOG_HASH - elif effect_name == "identity": - from artdag.nodes.effect import effect_identity - output_path = output_dir / f"{output_name}{input_path.suffix}" - result = effect_identity(input_path, output_path, {}) - expected_hash = input_hash - else: - raise ValueError(f"Unknown effect: {effect_name}") - - # Verify output - output_cid = file_hash(result) - if output_cid != expected_hash: - raise ValueError(f"Output hash mismatch: expected {expected_hash}, got {output_cid}") - - # Build effect info based on source - if effect_name == "identity": - # Identity is from artdag package on GitHub - artdag_commit = get_artdag_commit() - effect_info = { - "name": f"effect:{effect_name}", - "cid": REGISTRY[f"effect:{effect_name}"]["hash"], - "repo": "github", - "repo_commit": artdag_commit, - "repo_url": f"https://github.com/gilesbradshaw/art-dag/blob/{artdag_commit}/artdag/nodes/effect.py" - } - else: - # Other effects from rose-ash effects repo - effects_commit = get_effects_commit() - effect_info = { - "name": f"effect:{effect_name}", - "cid": REGISTRY[f"effect:{effect_name}"]["hash"], - "repo": "rose-ash", - "repo_commit": effects_commit, - "repo_url": f"https://git.rose-ash.com/art-dag/effects/src/commit/{effects_commit}/{effect_name}" - } - - # Build provenance - provenance = { - "task_id": self.request.id, - "rendered_at": datetime.now(timezone.utc).isoformat(), - "rendered_by": "@giles@artdag.rose-ash.com", - "output": { - "name": output_name, - "cid": output_cid, - }, - "inputs": [ - {"cid": input_hash} - ], - "effects": [effect_info], - "infrastructure": { - "software": {"name": "infra:artdag", "cid": 
REGISTRY["infra:artdag"]["hash"]}, - "hardware": {"name": "infra:giles-hp", "cid": REGISTRY["infra:giles-hp"]["hash"]} - } - } - - # Store provenance on IPFS - import ipfs_client - provenance_cid = ipfs_client.add_json(provenance) - if provenance_cid: - provenance["provenance_cid"] = provenance_cid - logger.info(f"Stored provenance on IPFS: {provenance_cid}") - else: - logger.warning("Failed to store provenance on IPFS") - - return provenance - - -@app.task(base=RenderTask, bind=True) -def execute_dag(self, dag_json: str, run_id: str = None) -> dict: - """ - Execute a multi-step DAG. - - Args: - dag_json: Serialized DAG as JSON string - run_id: Optional run ID for tracking - - Returns: - Execution result with output hash and node results - """ - from cache_manager import get_cache_manager - - # Parse DAG - try: - dag = DAG.from_json(dag_json) - except Exception as e: - raise ValueError(f"Invalid DAG JSON: {e}") - - # Validate DAG - errors = dag.validate() - if errors: - raise ValueError(f"Invalid DAG: {errors}") - - # Create engine with cache directory - engine = Engine(CACHE_DIR / "nodes") - - # Set up progress callback - def progress_callback(progress): - self.update_state( - state='EXECUTING', - meta={ - 'node_id': progress.node_id, - 'node_type': progress.node_type, - 'status': progress.status, - 'progress': progress.progress, - 'message': progress.message, - } - ) - logger.info(f"DAG progress: {progress.node_id} - {progress.status} - {progress.message}") - - engine.set_progress_callback(progress_callback) - - # Execute DAG - self.update_state(state='EXECUTING', meta={'status': 'starting', 'nodes': len(dag.nodes)}) - result = engine.execute(dag) - - if not result.success: - raise RuntimeError(f"DAG execution failed: {result.error}") - - # Index all node outputs by cid and upload to IPFS - cache_manager = get_cache_manager() - output_cid = None - node_hashes = {} # node_id -> cid mapping - node_ipfs_cids = {} # node_id -> ipfs_cid mapping - - # Process all node 
results (intermediates + output) - for node_id, node_path in result.node_results.items(): - if node_path and Path(node_path).exists(): - node = dag.nodes.get(node_id) - # Skip SOURCE nodes - they're already in cache - if node and (node.node_type == NodeType.SOURCE or str(node.node_type) == "SOURCE"): - cid = node.config.get("cid") - if cid: - node_hashes[node_id] = cid - continue - - # Determine node type for cache metadata - node_type_str = str(node.node_type) if node else "intermediate" - if "effect" in node_type_str.lower(): - cache_node_type = "effect_output" - else: - cache_node_type = "dag_intermediate" - - # Store in cache_manager (stored by IPFS CID, indexed by node_id) - cached, content_cid = cache_manager.put( - Path(node_path), - node_type=cache_node_type, - cache_id=node_id, - ) - # content_cid is always IPFS CID now (IPFS failures are fatal) - node_hashes[node_id] = content_cid - node_ipfs_cids[node_id] = content_cid - logger.info(f"Cached node {node_id}: IPFS CID {content_cid}") - - # Get output hash from the output node - # Use the same identifier that's in the cache index (IPFS CID if available) - if result.output_path and result.output_path.exists(): - local_hash = file_hash(result.output_path) - output_ipfs_cid = node_ipfs_cids.get(dag.output_id) - # Use IPFS CID as primary identifier if available, otherwise local hash - # This must match what's in the content_index from cache_manager.put() - output_cid = node_hashes.get(dag.output_id, local_hash) - - # Store output in database (for L2 to query IPFS CID) - import asyncio - import database - - # Store plan (DAG) to IPFS and local cache - plan_cid = None - try: - import ipfs_client - dag_dict = json.loads(dag_json) - plan_cid = ipfs_client.add_json(dag_dict) - if plan_cid: - logger.info(f"Stored plan to IPFS: {plan_cid}") - # Also store locally so it can be retrieved without IPFS - # Store directly in cache_dir (get_by_cid checks cache_dir/cid) - plan_path = CACHE_DIR / plan_cid - 
CACHE_DIR.mkdir(parents=True, exist_ok=True) - with open(plan_path, "w") as f: - json.dump(dag_dict, f, indent=2) - except Exception as e: - logger.warning(f"Failed to store plan to IPFS: {e}") - - async def save_to_db(): - if database.pool is None: - await database.init_db() - await database.create_cache_item(output_cid, output_ipfs_cid) - # Also save the run result - if run_id: - input_hashes_for_db = [ - node.config.get("cid") - for node in dag.nodes.values() - if (node.node_type == NodeType.SOURCE or str(node.node_type) == "SOURCE") - and node.config.get("cid") - ] - # Get actor_id and recipe from pending_runs (saved when run started) - actor_id = None - recipe_name = "dag" - pending = await database.get_pending_run(run_id) - if pending: - actor_id = pending.get("actor_id") - recipe_name = pending.get("recipe") or "dag" - - await database.save_run_cache( - run_id=run_id, - output_cid=output_cid, - recipe=recipe_name, - inputs=input_hashes_for_db, - ipfs_cid=output_ipfs_cid, - actor_id=actor_id, - plan_cid=plan_cid, - ) - - # Save output as media for the user - if actor_id: - await database.save_item_metadata( - cid=output_cid, - actor_id=actor_id, - item_type="media", - description=f"Output from recipe: {recipe_name}", - source_type="recipe", - source_note=f"run_id: {run_id}", - ) - - # Clean up pending run - if pending: - await database.complete_pending_run(run_id) - - try: - loop = asyncio.get_event_loop() - if loop.is_running(): - asyncio.ensure_future(save_to_db()) - else: - loop.run_until_complete(save_to_db()) - except RuntimeError: - asyncio.run(save_to_db()) - - # Record activity for deletion tracking - input_hashes = [] - intermediate_hashes = [] - for node_id, node in dag.nodes.items(): - if node.node_type == NodeType.SOURCE or str(node.node_type) == "SOURCE": - cid = node.config.get("cid") - if cid: - input_hashes.append(cid) - elif node_id != dag.output_id and node_id in node_hashes: - intermediate_hashes.append(node_hashes[node_id]) - - if 
input_hashes: - from artdag.activities import Activity - from datetime import datetime, timezone - activity = Activity( - activity_id=run_id or f"dag-{output_cid[:16]}", - input_ids=sorted(input_hashes), - output_id=output_cid, - intermediate_ids=intermediate_hashes, - created_at=datetime.now(timezone.utc).timestamp(), - status="completed", - ) - cache_manager.activity_store.add(activity) - - # Build provenance - input_hashes_for_provenance = [] - for node_id, node in dag.nodes.items(): - if node.node_type == NodeType.SOURCE or str(node.node_type) == "SOURCE": - cid = node.config.get("cid") - if cid: - input_hashes_for_provenance.append({"cid": cid}) - - provenance = { - "task_id": self.request.id, - "run_id": run_id, - "rendered_at": datetime.now(timezone.utc).isoformat(), - "output": { - "cid": output_cid, - "ipfs_cid": node_ipfs_cids.get(dag.output_id) if dag.output_id else None, - }, - "inputs": input_hashes_for_provenance, - "dag": dag_json, # Full DAG definition - "nodes": { - node_id: { - "cid": node_hashes.get(node_id), - "ipfs_cid": node_ipfs_cids.get(node_id), - } - for node_id in dag.nodes.keys() - if node_id in node_hashes - }, - "execution": { - "execution_time": result.execution_time, - "nodes_executed": result.nodes_executed, - "nodes_cached": result.nodes_cached, - } - } - - # Store provenance on IPFS - import ipfs_client - provenance_cid = ipfs_client.add_json(provenance) - if provenance_cid: - provenance["provenance_cid"] = provenance_cid - logger.info(f"Stored DAG provenance on IPFS: {provenance_cid}") - else: - logger.warning("Failed to store DAG provenance on IPFS") - - # Build result - return { - "success": True, - "run_id": run_id, - "output_cid": output_cid, - "output_ipfs_cid": node_ipfs_cids.get(dag.output_id) if dag.output_id else None, - "output_path": str(result.output_path) if result.output_path else None, - "execution_time": result.execution_time, - "nodes_executed": result.nodes_executed, - "nodes_cached": result.nodes_cached, - 
"node_results": { - node_id: str(path) for node_id, path in result.node_results.items() - }, - "node_hashes": node_hashes, # node_id -> cid - "node_ipfs_cids": node_ipfs_cids, # node_id -> ipfs_cid - "provenance_cid": provenance_cid, - } - - -@app.task(base=RenderTask, bind=True) -def execute_recipe(self, recipe_sexp: str, input_hashes: Dict[str, str], run_id: str = None) -> dict: - """ - Execute an S-expression recipe. - - The recipe S-expression unfolds into a plan S-expression with code-addressed - cache IDs computed before execution. Each plan node gets a deterministic hash - "bucket" based on the computation definition (Merkle tree), not the results. - - Phases: - 1. Parse: compile_string(recipe_sexp) -> CompiledRecipe - 2. Analyze: Extract and run analysis nodes from recipe - 3. Plan: create_plan(compiled, inputs) -> ExecutionPlanSexp with cache IDs - 4. Store: plan.to_string() -> store as S-expression - 5. Execute: Run steps level-by-level, checking cache by cache_id - 6. Return: Include plan_sexp in result - - Args: - recipe_sexp: Recipe as S-expression string - input_hashes: Mapping from input name to content hash (CID) - run_id: Optional run ID for tracking - - Returns: - Execution result with output CID, plan S-expression, and node results - """ - from cache_manager import get_cache_manager - import ipfs_client - - # Try to import S-expression modules - try: - from artdag.sexp import compile_string, CompileError, ParseError - from artdag.sexp.planner import create_plan, ExecutionPlanSexp, PlanStep - except ImportError as e: - raise ImportError(f"S-expression modules not available: {e}") - - cache_manager = get_cache_manager() - - logger.info(f"Executing recipe with {len(input_hashes)} inputs, run_id={run_id}") - - # ============ Phase 1: Parse ============ - self.update_state(state='PARSING', meta={'status': 'parsing recipe'}) - logger.info("Phase 1: Parsing recipe S-expression...") - - try: - compiled = compile_string(recipe_sexp) - except (ParseError, 
CompileError) as e: - raise ValueError(f"Recipe parse error: {e}") - - recipe_name = compiled.name or "unnamed" - logger.info(f"Parsed recipe: {recipe_name}") - - # ============ Phase 2: Analysis ============ - self.update_state(state='ANALYZING', meta={'status': 'running analysis'}) - logger.info("Phase 2: Running analysis nodes...") - - analysis_results = {} - # Extract analysis nodes from compiled recipe - for node in compiled.nodes: - node_type = node.get("type", "").upper() - config = node.get("config", {}) - - if node_type == "ANALYZE" or config.get("analyze"): - node_id = node.get("id") - input_ref = config.get("input") or config.get("source") - feature = config.get("feature") or config.get("analyze") - - # Resolve input reference to CID - cid = input_hashes.get(input_ref) - if not cid: - logger.warning(f"Analysis node {node_id}: input '{input_ref}' not in input_hashes") - continue - - # Get input file path - input_path = cache_manager.get_by_cid(cid) - if not input_path: - logger.warning(f"Analysis node {node_id}: content {cid[:16]}... 
not in cache") - continue - - # Run analysis - try: - from artdag.analysis import Analyzer - analysis_dir = CACHE_DIR / "analysis" - analysis_dir.mkdir(parents=True, exist_ok=True) - analyzer = Analyzer(cache_dir=analysis_dir) - - features = [feature] if feature else ["beats", "energy"] - result = analyzer.analyze( - input_hash=cid, - features=features, - input_path=Path(input_path), - ) - analysis_results[node_id] = result - analysis_results[cid] = result - logger.info(f"Analysis {node_id}: feature={feature}") - except Exception as e: - logger.warning(f"Analysis failed for {node_id}: {e}") - - logger.info(f"Completed {len(analysis_results)} analysis results") - - # ============ Phase 3: Generate Plan ============ - self.update_state(state='PLANNING', meta={'status': 'generating plan'}) - logger.info("Phase 3: Generating execution plan with code-addressed cache IDs...") - - plan = create_plan(compiled, inputs=input_hashes) - logger.info(f"Generated plan with {len(plan.steps)} steps, plan_id={plan.plan_id[:16]}...") - - # ============ Phase 4: Store Plan as S-expression ============ - plan_sexp = plan.to_string(pretty=True) - plan_cid = None - - try: - plan_cid = ipfs_client.add_string(plan_sexp) - if plan_cid: - logger.info(f"Stored plan to IPFS: {plan_cid}") - # Also store locally for fast retrieval - plan_path = CACHE_DIR / plan_cid - CACHE_DIR.mkdir(parents=True, exist_ok=True) - plan_path.write_text(plan_sexp) - - # Save plan_cid to database immediately so it's available even if run fails - if run_id: - import asyncio - import database - async def save_plan_cid(): - if database.pool is None: - await database.init_db() - await database.update_pending_run_plan(run_id, plan_cid) - try: - loop = asyncio.get_event_loop() - if loop.is_running(): - asyncio.ensure_future(save_plan_cid()) - else: - loop.run_until_complete(save_plan_cid()) - except RuntimeError: - asyncio.run(save_plan_cid()) - logger.info(f"Saved plan_cid to pending run: {run_id}") - except Exception as 
e: - logger.warning(f"Failed to store plan to IPFS: {e}") - - # ============ Phase 5: Execute Steps Level-by-Level ============ - self.update_state(state='EXECUTING', meta={'status': 'executing steps', 'total_steps': len(plan.steps)}) - logger.info("Phase 4: Executing plan steps...") - - # Group steps by level - steps_by_level: Dict[int, List[PlanStep]] = {} - for step in plan.steps: - level = step.level - steps_by_level.setdefault(level, []).append(step) - - max_level = max(steps_by_level.keys()) if steps_by_level else 0 - - step_results = {} # step_id -> {"status", "path", "cid", "ipfs_cid"} - cache_id_to_path = {} # cache_id -> output path (for resolving inputs) - total_cached = 0 - total_executed = 0 - - # Map input names to their cache_ids (inputs are their own cache_ids) - for name, cid in input_hashes.items(): - cache_id_to_path[cid] = cache_manager.get_by_cid(cid) - - for level in range(max_level + 1): - level_steps = steps_by_level.get(level, []) - if not level_steps: - continue - - logger.info(f"Executing level {level}: {len(level_steps)} steps") - - for step in level_steps: - self.update_state( - state='EXECUTING', - meta={ - 'step_id': step.step_id, - 'step_type': step.node_type, - 'level': level, - 'cache_id': step.cache_id[:16], - } - ) - - # Check if cached using code-addressed cache_id - cached_path = cache_manager.get_by_cid(step.cache_id) - if cached_path and cached_path.exists(): - logger.info(f"Step {step.step_id}: cached at {step.cache_id[:16]}...") - step_results[step.step_id] = { - "status": "cached", - "path": str(cached_path), - "cache_id": step.cache_id, - } - cache_id_to_path[step.cache_id] = cached_path - total_cached += 1 - continue - - # Execute the step - try: - # Resolve input paths from previous step cache_ids - input_paths = [] - for input_ref in step.inputs: - # input_ref is a step_id - find its cache_id and path - input_step = next((s for s in plan.steps if s.step_id == input_ref), None) - if input_step: - input_cache_id = 
input_step.cache_id - input_path = cache_id_to_path.get(input_cache_id) - if input_path: - input_paths.append(Path(input_path)) - else: - # Check if it's a source input - source_cid = step.config.get("cid") - if source_cid: - input_path = cache_manager.get_by_cid(source_cid) - if input_path: - input_paths.append(Path(input_path)) - else: - # Direct CID reference (source node) - source_cid = input_hashes.get(input_ref) or step.config.get("cid") - if source_cid: - input_path = cache_manager.get_by_cid(source_cid) - if input_path: - input_paths.append(Path(input_path)) - - # Handle SOURCE nodes - if step.node_type.upper() == "SOURCE": - source_cid = step.config.get("cid") - - # If source has :input true, resolve CID from input_hashes - if not source_cid and step.config.get("input"): - source_name = step.config.get("name", "") - # Try various key formats for lookup - name_variants = [ - source_name, - source_name.lower().replace(" ", "-"), - source_name.lower().replace(" ", "_"), - source_name.lower(), - ] - for variant in name_variants: - if variant in input_hashes: - source_cid = input_hashes[variant] - logger.info(f"Resolved SOURCE '{source_name}' -> {source_cid[:16]}... via '{variant}'") - break - - if not source_cid: - raise ValueError(f"SOURCE '{source_name}' not found in input_hashes. 
Available: {list(input_hashes.keys())}") - - if source_cid: - source_path = cache_manager.get_by_cid(source_cid) - if source_path: - step_results[step.step_id] = { - "status": "source", - "path": str(source_path), - "cache_id": step.cache_id, - "cid": source_cid, - } - cache_id_to_path[step.cache_id] = source_path - total_cached += 1 - continue - else: - raise ValueError(f"Source content not found in cache: {source_cid[:16]}...") - else: - raise ValueError(f"SOURCE step has no cid and no :input flag: {step.config}") - - # Handle COMPOUND nodes (collapsed effect chains) - if step.node_type.upper() == "COMPOUND": - import subprocess - import tempfile - - filter_chain = step.config.get("filter_chain", []) - if not filter_chain: - raise ValueError("COMPOUND step has empty filter_chain") - - # Get input path - if not input_paths: - raise ValueError("COMPOUND step has no inputs") - - # For COMPOUND with EFFECT filters, run effects sequentially - current_input = input_paths[0] - temp_files = [] - - for i, filter_item in enumerate(filter_chain): - filter_type = filter_item.get("type", "") - filter_config = filter_item.get("config", {}) - - if filter_type == "EFFECT": - effect_name = filter_config.get("effect") - effect_cid = filter_config.get("cid") - - if effect_name: - # Try specific executor first, fall back to generic EFFECT executor - effect_executor = get_executor(f"effect:{effect_name}") - if not effect_executor: - effect_executor = get_executor("EFFECT") - - if effect_executor: - temp_dir = Path(tempfile.mkdtemp()) - temp_output = temp_dir / f"compound_{i}_{effect_name}.mkv" - - logger.info(f"COMPOUND: Running effect {effect_name} (cid={effect_cid[:16] if effect_cid else 'built-in'}...) 
step {i+1}/{len(filter_chain)}") - result_path = effect_executor.execute(filter_config, [current_input], temp_output) - - current_input = result_path - temp_files.append(temp_dir) - else: - raise ValueError(f"COMPOUND: No executor for effect {effect_name}") - - # Store final result - output_dir = CACHE_DIR / "nodes" / step.cache_id - output_dir.mkdir(parents=True, exist_ok=True) - final_output = output_dir / "output.mkv" - - import shutil - shutil.copy2(current_input, final_output) - - # Upload to IPFS (stored by IPFS CID, indexed by cache_id) - cached, content_cid = cache_manager.put( - final_output, - node_type="COMPOUND", - cache_id=step.cache_id, - ) - - # Cleanup temp files - for temp_dir in temp_files: - if temp_dir.exists(): - shutil.rmtree(temp_dir, ignore_errors=True) - - step_results[step.step_id] = { - "status": "executed", - "path": str(final_output), - "cache_id": step.cache_id, - "cid": content_cid, - "filter_count": len(filter_chain), - } - cache_id_to_path[step.cache_id] = final_output - total_executed += 1 - logger.info(f"COMPOUND step {step.step_id}: {len(filter_chain)} effects -> {content_cid[:16]}...") - continue - - # Handle SEQUENCE nodes (concatenate clips) - if step.node_type.upper() == "SEQUENCE": - import subprocess - import tempfile - - if len(input_paths) < 2: - raise ValueError(f"SEQUENCE requires at least 2 inputs, got {len(input_paths)}") - - # Create concat list file for FFmpeg - temp_dir = Path(tempfile.mkdtemp()) - concat_list = temp_dir / "concat.txt" - with open(concat_list, "w") as f: - for inp in input_paths: - f.write(f"file '{inp}'\n") - - output_dir = CACHE_DIR / "nodes" / step.cache_id - output_dir.mkdir(parents=True, exist_ok=True) - final_output = output_dir / "output.mkv" - - # FFmpeg concat demuxer - cmd = [ - "ffmpeg", "-y", - "-f", "concat", - "-safe", "0", - "-i", str(concat_list), - "-c", "copy", - str(final_output) - ] - - logger.info(f"SEQUENCE: Concatenating {len(input_paths)} clips") - result = 
subprocess.run(cmd, capture_output=True, text=True) - - if result.returncode != 0: - # Try with re-encoding if copy fails - cmd = [ - "ffmpeg", "-y", - "-f", "concat", - "-safe", "0", - "-i", str(concat_list), - "-c:v", "libx264", "-c:a", "aac", - str(final_output) - ] - result = subprocess.run(cmd, capture_output=True, text=True) - if result.returncode != 0: - raise RuntimeError(f"FFmpeg concat failed: {result.stderr}") - - # Upload to IPFS (stored by IPFS CID, indexed by cache_id) - cached, content_cid = cache_manager.put( - final_output, - node_type="SEQUENCE", - cache_id=step.cache_id, - ) - - # Cleanup - import shutil - shutil.rmtree(temp_dir, ignore_errors=True) - - step_results[step.step_id] = { - "status": "executed", - "path": str(final_output), - "cache_id": step.cache_id, - "cid": content_cid, - "input_count": len(input_paths), - } - cache_id_to_path[step.cache_id] = final_output - total_executed += 1 - logger.info(f"SEQUENCE step {step.step_id}: {len(input_paths)} clips -> {content_cid[:16]}...") - continue - - # Handle EFFECT nodes - if step.node_type.upper() == "EFFECT": - effect_name = step.config.get("effect") - if not effect_name: - raise ValueError(f"EFFECT node missing 'effect' in config: {step.config}") - - # Try specific executor first (e.g., effect:dog) - executor = get_executor(f"effect:{effect_name}") - logger.info(f"EFFECT: get_executor('effect:{effect_name}') = {executor}") - if not executor: - # Fall back to generic EFFECT executor (handles IPFS effects) - executor = get_executor("EFFECT") - logger.info(f"EFFECT: Fallback get_executor('EFFECT') = {executor}") - if not executor: - raise ValueError(f"No executor for effect: {effect_name}") - - if len(input_paths) != 1: - raise ValueError(f"EFFECT expects 1 input, got {len(input_paths)}") - - output_dir = CACHE_DIR / "nodes" / step.cache_id - output_dir.mkdir(parents=True, exist_ok=True) - output_path = output_dir / "output.mkv" - - effect_cid = step.config.get("cid") - logger.info(f"EFFECT: 
Running {effect_name} (cid={effect_cid[:16] if effect_cid else 'built-in'}...)") - result_path = executor.execute(step.config, input_paths, output_path) - - cached, content_cid = cache_manager.put( - result_path, - node_type="EFFECT", - cache_id=step.cache_id, - ) - - step_results[step.step_id] = { - "status": "executed", - "path": str(result_path), - "cache_id": step.cache_id, - "cid": content_cid, - "effect": effect_name, - } - cache_id_to_path[step.cache_id] = result_path - total_executed += 1 - logger.info(f"EFFECT step {step.step_id}: {effect_name} -> {content_cid[:16]}...") - continue - - # Fallback: try to get executor for unknown node types - executor = get_executor(step.node_type.upper()) - if not executor: - executor = get_executor(step.node_type) - - if not executor: - raise ValueError(f"No executor for node type: {step.node_type}") - - # Determine output path - output_dir = CACHE_DIR / "nodes" / step.cache_id - output_dir.mkdir(parents=True, exist_ok=True) - output_path = output_dir / "output.mkv" - - # Execute - logger.info(f"Executing step {step.step_id} ({step.node_type}) with {len(input_paths)} inputs") - result_path = executor.execute(step.config, input_paths, output_path) - - # Store result in cache (by IPFS CID, indexed by cache_id) - cached, content_cid = cache_manager.put( - result_path, - node_type=step.node_type, - cache_id=step.cache_id, - ) - - step_results[step.step_id] = { - "status": "executed", - "path": str(result_path), - "cache_id": step.cache_id, - "cid": content_cid, - "ipfs_cid": content_cid if content_cid.startswith("Qm") or content_cid.startswith("bafy") else None, - } - cache_id_to_path[step.cache_id] = result_path - total_executed += 1 - - logger.info(f"Step {step.step_id}: executed -> {content_cid[:16]}...") - - except Exception as e: - logger.error(f"Step {step.step_id} failed: {e}") - return { - "success": False, - "run_id": run_id, - "error": f"Step {step.step_id} failed: {e}", - "step_results": step_results, - "plan_cid": 
plan_cid, - "plan_sexp": plan_sexp, - } - - # Get output from final step - output_step = next((s for s in plan.steps if s.step_id == plan.output_step_id), None) - output_cid = None - output_ipfs_cid = None - output_path = None - output_cache_id = None # Keep track of cache_id separately - - if output_step: - output_result = step_results.get(output_step.step_id, {}) - output_cache_id = output_result.get("cache_id") - output_ipfs_cid = output_result.get("cid") # cid should be IPFS CID now - output_path = output_result.get("path") - - # Upload final output to IPFS if not already there - if output_path and not output_ipfs_cid: - output_path_obj = Path(output_path) if isinstance(output_path, str) else output_path - if output_path_obj.exists(): - logger.info(f"Uploading final output to IPFS: {output_path}") - output_ipfs_cid = ipfs_client.add_file(str(output_path_obj)) - if output_ipfs_cid: - logger.info(f"Uploaded output to IPFS: {output_ipfs_cid}") - else: - logger.error(f"Failed to upload output to IPFS: {output_path}") - - # Use IPFS CID as the primary output identifier - output_cid = output_ipfs_cid - - # Fail if output couldn't be uploaded to IPFS - if not output_ipfs_cid: - logger.error(f"Recipe failed: Could not upload output to IPFS! output_cid={output_cid}, output_path={output_path}") - return { - "success": False, - "run_id": run_id, - "error": "Failed to upload output to IPFS", - "plan_cid": plan_cid, - "plan_sexp": plan_sexp, - "output_cid": output_cid, - "output_path": output_path, - "step_results": step_results, - "total_steps": len(plan.steps), - "cached": total_cached, - "executed": total_executed, - } - - # Fail if no output was produced - if not output_cid: - logger.error(f"Recipe produced no output! 
output_step={plan.output_step_id}, result={output_result if output_step else 'no output step'}") - return { - "success": False, - "run_id": run_id, - "error": "Recipe produced no output", - "plan_cid": plan_cid, - "plan_sexp": plan_sexp, - "step_results": step_results, - "total_steps": len(plan.steps), - "cached": total_cached, - "executed": total_executed, - } - - # ============ Phase 6: Store Results ============ - logger.info("Phase 5: Storing results...") - - # Store in database - import asyncio - import database - - async def save_to_db(): - if database.pool is None: - await database.init_db() - - # Get actor_id from pending run - actor_id = None - pending = await database.get_pending_run(run_id) if run_id else None - if pending: - actor_id = pending.get("actor_id") - - await database.save_run_cache( - run_id=run_id, - output_cid=output_cid, - recipe=recipe_name, - inputs=list(input_hashes.values()), - ipfs_cid=output_ipfs_cid, - actor_id=actor_id, - plan_cid=plan_cid, - ) - - # Save output as media for user - if actor_id and output_cid: - await database.save_item_metadata( - cid=output_cid, - actor_id=actor_id, - item_type="media", - description=f"Output from recipe: {recipe_name}", - source_type="recipe", - source_note=f"run_id: {run_id}", - ) - - # Complete pending run - if pending and run_id: - await database.complete_pending_run(run_id) - - try: - loop = asyncio.get_event_loop() - if loop.is_running(): - asyncio.ensure_future(save_to_db()) - else: - loop.run_until_complete(save_to_db()) - except RuntimeError: - asyncio.run(save_to_db()) - - # Build and store provenance - provenance = { - "task_id": self.request.id, - "run_id": run_id, - "rendered_at": datetime.now(timezone.utc).isoformat(), - "recipe": recipe_name, - "recipe_sexp": recipe_sexp, - "plan_sexp": plan_sexp, - "plan_cid": plan_cid, - "output": { - "cid": output_cid, - "ipfs_cid": output_ipfs_cid, - }, - "inputs": input_hashes, - "steps": { - step_id: { - "cache_id": result.get("cache_id"), - 
"cid": result.get("cid"), - "status": result.get("status"), - } - for step_id, result in step_results.items() - }, - "execution": { - "total_steps": len(plan.steps), - "cached": total_cached, - "executed": total_executed, - } - } - - provenance_cid = ipfs_client.add_json(provenance) - if provenance_cid: - logger.info(f"Stored provenance on IPFS: {provenance_cid}") - - logger.info(f"Recipe execution complete: output={output_cid[:16] if output_cid else 'none'}...") - - return { - "success": True, - "run_id": run_id, - "recipe": recipe_name, - "plan_cid": plan_cid, - "plan_sexp": plan_sexp, - "output_cid": output_cid, - "output_ipfs_cid": output_ipfs_cid, - "output_path": output_path, - "total_steps": len(plan.steps), - "cached": total_cached, - "executed": total_executed, - "step_results": step_results, - "provenance_cid": provenance_cid, - } - - -def build_effect_dag(input_hashes: List[str], effect_name: str) -> DAG: - """ - Build a simple DAG for applying an effect to inputs. - - Args: - input_hashes: List of input content hashes - effect_name: Name of effect to apply (e.g., "dog", "identity") - - Returns: - DAG ready for execution - """ - dag = DAG() - - # Add source nodes for each input - source_ids = [] - for i, cid in enumerate(input_hashes): - source_node = Node( - node_type=NodeType.SOURCE, - config={"cid": cid}, - name=f"source_{i}", - ) - dag.add_node(source_node) - source_ids.append(source_node.node_id) - - # Add effect node - effect_node = Node( - node_type=f"effect:{effect_name}", - config={}, - inputs=source_ids, - name=f"effect_{effect_name}", - ) - dag.add_node(effect_node) - dag.set_output(effect_node.node_id) - - return dag diff --git a/render.py b/render.py deleted file mode 100755 index 5f7ccd6..0000000 --- a/render.py +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env python3 -""" -CLI to submit render tasks to Art DAG Celery. 
- -Usage: - python render.py dog cat # Render cat through dog effect - python render.py identity cat # Render cat through identity effect - python render.py # General form -""" - -import argparse -import json -import sys - -from legacy_tasks import render_effect - -# Known asset hashes -ASSETS = { - "cat": "33268b6e167deaf018cc538de12dbe562612b33e89a749391cef855b320a269b", -} - - -def main(): - parser = argparse.ArgumentParser(description="Submit render task to Art DAG Celery") - parser.add_argument("effect", help="Effect to apply (e.g., dog, identity)") - parser.add_argument("input", help="Input asset name or hash") - parser.add_argument("--output", "-o", help="Output name (default: -from-)") - parser.add_argument("--sync", "-s", action="store_true", help="Wait for result") - - args = parser.parse_args() - - # Resolve input to hash - input_hash = ASSETS.get(args.input, args.input) - if len(input_hash) != 64: - print(f"Error: Unknown asset '{args.input}' and not a valid hash") - sys.exit(1) - - # Generate output name - output_name = args.output or f"{args.effect}-from-{args.input}-celery" - - print(f"Submitting render task:") - print(f" Effect: {args.effect}") - print(f" Input: {args.input} ({input_hash[:16]}...)") - print(f" Output: {output_name}") - - # Submit task - task = render_effect.delay(input_hash, args.effect, output_name) - print(f" Task ID: {task.id}") - - if args.sync: - print("\nWaiting for result...") - try: - result = task.get(timeout=300) - print("\nRender complete!") - print(json.dumps(result, indent=2)) - except Exception as e: - print(f"\nRender failed: {e}") - sys.exit(1) - else: - print("\nTask submitted. 
Check status with:") - print(f" celery -A celery_app inspect query_task {task.id}") - - -if __name__ == "__main__": - main() diff --git a/sexp_effects/__init__.py b/sexp_effects/__init__.py new file mode 100644 index 0000000..b001c71 --- /dev/null +++ b/sexp_effects/__init__.py @@ -0,0 +1,32 @@ +""" +S-Expression Effects System + +Safe, shareable effects defined in S-expressions. +""" + +from .parser import parse, parse_file, Symbol, Keyword +from .interpreter import ( + Interpreter, + get_interpreter, + load_effect, + load_effects_dir, + run_effect, + list_effects, + make_process_frame, +) +from .primitives import PRIMITIVES + +__all__ = [ + 'parse', + 'parse_file', + 'Symbol', + 'Keyword', + 'Interpreter', + 'get_interpreter', + 'load_effect', + 'load_effects_dir', + 'run_effect', + 'list_effects', + 'make_process_frame', + 'PRIMITIVES', +] diff --git a/sexp_effects/effects/ascii_art.sexp b/sexp_effects/effects/ascii_art.sexp new file mode 100644 index 0000000..5565872 --- /dev/null +++ b/sexp_effects/effects/ascii_art.sexp @@ -0,0 +1,17 @@ +;; ASCII Art effect - converts image to ASCII characters +(require-primitives "ascii") + +(define-effect ascii_art + :params ( + (char_size :type int :default 8 :range [4 32]) + (alphabet :type string :default "standard") + (color_mode :type string :default "color" :desc "color, mono, invert, or any color name/hex") + (background_color :type string :default "black" :desc "background color name/hex") + (invert_colors :type int :default 0 :desc "swap foreground and background colors") + (contrast :type float :default 1.5 :range [1 3]) + ) + (let* ((sample (cell-sample frame char_size)) + (colors (nth sample 0)) + (luminances (nth sample 1)) + (chars (luminance-to-chars luminances alphabet contrast))) + (render-char-grid frame chars colors char_size color_mode background_color invert_colors))) diff --git a/sexp_effects/effects/ascii_art_fx.sexp b/sexp_effects/effects/ascii_art_fx.sexp new file mode 100644 index 
0000000..2bb14be --- /dev/null +++ b/sexp_effects/effects/ascii_art_fx.sexp @@ -0,0 +1,52 @@ +;; ASCII Art FX - converts image to ASCII characters with per-character effects +(require-primitives "ascii") + +(define-effect ascii_art_fx + :params ( + ;; Basic parameters + (char_size :type int :default 8 :range [4 32] + :desc "Size of each character cell in pixels") + (alphabet :type string :default "standard" + :desc "Character set to use") + (color_mode :type string :default "color" + :choices [color mono invert] + :desc "Color mode: color, mono, invert, or any color name/hex") + (background_color :type string :default "black" + :desc "Background color name or hex value") + (invert_colors :type int :default 0 :range [0 1] + :desc "Swap foreground and background colors (0/1)") + (contrast :type float :default 1.5 :range [1 3] + :desc "Character selection contrast") + + ;; Per-character effects + (char_jitter :type float :default 0 :range [0 20] + :desc "Position jitter amount in pixels") + (char_scale :type float :default 1.0 :range [0.5 2.0] + :desc "Character scale factor") + (char_rotation :type float :default 0 :range [0 180] + :desc "Rotation amount in degrees") + (char_hue_shift :type float :default 0 :range [0 360] + :desc "Hue shift in degrees") + + ;; Modulation sources + (jitter_source :type string :default "none" + :choices [none luminance inv_luminance saturation position_x position_y position_diag random center_dist] + :desc "What drives jitter modulation") + (scale_source :type string :default "none" + :choices [none luminance inv_luminance saturation position_x position_y position_diag random center_dist] + :desc "What drives scale modulation") + (rotation_source :type string :default "none" + :choices [none luminance inv_luminance saturation position_x position_y position_diag random center_dist] + :desc "What drives rotation modulation") + (hue_source :type string :default "none" + :choices [none luminance inv_luminance saturation position_x 
position_y position_diag random center_dist] + :desc "What drives hue shift modulation") + ) + (let* ((sample (cell-sample frame char_size)) + (colors (nth sample 0)) + (luminances (nth sample 1)) + (chars (luminance-to-chars luminances alphabet contrast))) + (render-char-grid-fx frame chars colors luminances char_size + color_mode background_color invert_colors + char_jitter char_scale char_rotation char_hue_shift + jitter_source scale_source rotation_source hue_source))) diff --git a/sexp_effects/effects/ascii_fx_zone.sexp b/sexp_effects/effects/ascii_fx_zone.sexp new file mode 100644 index 0000000..69e5340 --- /dev/null +++ b/sexp_effects/effects/ascii_fx_zone.sexp @@ -0,0 +1,102 @@ +;; Composable ASCII Art with Per-Zone Expression-Driven Effects +;; Requires ascii primitive library for the ascii-fx-zone primitive + +(require-primitives "ascii") + +;; Two modes of operation: +;; +;; 1. EXPRESSION MODE: Use zone-* variables in expression parameters +;; Zone variables available: +;; zone-row, zone-col: Grid position (integers) +;; zone-row-norm, zone-col-norm: Normalized position (0-1) +;; zone-lum: Cell luminance (0-1) +;; zone-sat: Cell saturation (0-1) +;; zone-hue: Cell hue (0-360) +;; zone-r, zone-g, zone-b: RGB components (0-1) +;; +;; Example: +;; (ascii-fx-zone frame +;; :cols 80 +;; :char_hue (* zone-lum 180) +;; :char_rotation (* zone-col-norm 30)) +;; +;; 2. CELL EFFECT MODE: Pass a lambda to apply arbitrary effects per-cell +;; The lambda receives (cell-image zone-dict) and returns modified cell. +;; Zone dict contains: row, col, row-norm, col-norm, lum, sat, hue, r, g, b, +;; char, color, cell_size, plus any bound analysis values. 
+;; +;; Any loaded sexp effect can be called on cells - each cell is just a small frame: +;; (blur cell radius) - Gaussian blur +;; (rotate cell angle) - Rotate by angle degrees +;; (brightness cell factor) - Adjust brightness +;; (contrast cell factor) - Adjust contrast +;; (saturation cell factor) - Adjust saturation +;; (hue_shift cell degrees) - Shift hue +;; (rgb_split cell offset_x offset_y) - RGB channel split +;; (invert cell) - Invert colors +;; (pixelate cell block_size) - Pixelate +;; (wave cell amplitude freq) - Wave distortion +;; ... and any other loaded effect +;; +;; Example: +;; (ascii-fx-zone frame +;; :cols 60 +;; :cell_effect (lambda [cell zone] +;; (blur (rotate cell (* (get zone "energy") 45)) +;; (if (> (get zone "lum") 0.5) 3 0)))) + +(define-effect ascii_fx_zone + :params ( + (cols :type int :default 80 :range [20 200] + :desc "Number of character columns") + (char_size :type int :default nil :range [4 32] + :desc "Character cell size in pixels (overrides cols if set)") + (alphabet :type string :default "standard" + :desc "Character set: standard, blocks, simple, digits, or custom string") + (color_mode :type string :default "color" + :desc "Color mode: color, mono, invert, or any color name/hex") + (background :type string :default "black" + :desc "Background color name or hex value") + (contrast :type float :default 1.5 :range [0.5 3.0] + :desc "Contrast for character selection") + (char_hue :type any :default nil + :desc "Hue shift expression (evaluated per-zone with zone-* vars)") + (char_saturation :type any :default nil + :desc "Saturation multiplier expression (1.0 = unchanged)") + (char_brightness :type any :default nil + :desc "Brightness multiplier expression (1.0 = unchanged)") + (char_scale :type any :default nil + :desc "Character scale expression (1.0 = normal size)") + (char_rotation :type any :default nil + :desc "Character rotation expression (degrees)") + (char_jitter :type any :default nil + :desc "Position jitter 
expression (pixels)") + (cell_effect :type any :default nil + :desc "Lambda (cell zone) -> cell for arbitrary per-cell effects") + ;; Convenience params for staged recipes (avoids compile-time expression issues) + (energy :type float :default nil + :desc "Energy multiplier (0-1) from audio analysis bind") + (rotation_scale :type float :default 0 + :desc "Max rotation at top-right when energy=1 (degrees)") + ) + ;; The ascii-fx-zone special form handles expression params + ;; If energy + rotation_scale provided, it builds: energy * scale * position_factor + ;; where position_factor = 0 at bottom-left, 3 at top-right + ;; If cell_effect provided, each character is rendered to a cell image, + ;; passed to the lambda, and the result composited back + (ascii-fx-zone frame + :cols cols + :char_size char_size + :alphabet alphabet + :color_mode color_mode + :background background + :contrast contrast + :char_hue char_hue + :char_saturation char_saturation + :char_brightness char_brightness + :char_scale char_scale + :char_rotation char_rotation + :char_jitter char_jitter + :cell_effect cell_effect + :energy energy + :rotation_scale rotation_scale)) diff --git a/sexp_effects/effects/ascii_zones.sexp b/sexp_effects/effects/ascii_zones.sexp new file mode 100644 index 0000000..6bc441c --- /dev/null +++ b/sexp_effects/effects/ascii_zones.sexp @@ -0,0 +1,30 @@ +;; ASCII Zones effect - different character sets for different brightness zones +;; Dark areas use simple chars, mid uses standard, bright uses blocks +(require-primitives "ascii") + +(define-effect ascii_zones + :params ( + (char_size :type int :default 8 :range [4 32]) + (dark_threshold :type int :default 80 :range [0 128]) + (bright_threshold :type int :default 180 :range [128 255]) + (color_mode :type string :default "color") + ) + (let* ((sample (cell-sample frame char_size)) + (colors (nth sample 0)) + (luminances (nth sample 1)) + ;; Start with simple chars as base + (base-chars (luminance-to-chars luminances 
"simple" 1.2)) + ;; Map each cell to appropriate alphabet based on brightness zone + (zoned-chars (map-char-grid base-chars luminances + (lambda (r c ch lum) + (cond + ;; Bright zones: use block characters + ((> lum bright_threshold) + (alphabet-char "blocks" (floor (/ (- lum bright_threshold) 15)))) + ;; Dark zones: use simple sparse chars + ((< lum dark_threshold) + (alphabet-char " .-" (floor (/ lum 30)))) + ;; Mid zones: use standard ASCII + (else + (alphabet-char "standard" (floor (/ lum 4))))))))) + (render-char-grid frame zoned-chars colors char_size color_mode (list 0 0 0)))) diff --git a/sexp_effects/effects/blend.sexp b/sexp_effects/effects/blend.sexp new file mode 100644 index 0000000..bf7fefd --- /dev/null +++ b/sexp_effects/effects/blend.sexp @@ -0,0 +1,31 @@ +;; Blend effect - combines two video frames +;; Streaming-compatible: frame is background, overlay is second frame +;; Usage: (blend background overlay :opacity 0.5 :mode "alpha") +;; +;; Params: +;; mode - blend mode (add, multiply, screen, overlay, difference, lighten, darken, alpha) +;; opacity - blend amount (0-1) + +(require-primitives "image" "blending" "core") + +(define-effect blend + :params ( + (overlay :type frame :default nil) + (mode :type string :default "alpha") + (opacity :type float :default 0.5) + ) + (if (core:is-nil overlay) + frame + (let [a frame + b overlay + a-h (image:height a) + a-w (image:width a) + b-h (image:height b) + b-w (image:width b) + ;; Resize b to match a if needed + b-sized (if (and (= a-w b-w) (= a-h b-h)) + b + (image:resize b a-w a-h "linear"))] + (if (= mode "alpha") + (blending:blend-images a b-sized opacity) + (blending:blend-images a (blending:blend-mode a b-sized mode) opacity))))) diff --git a/sexp_effects/effects/blend_multi.sexp b/sexp_effects/effects/blend_multi.sexp new file mode 100644 index 0000000..1ee160f --- /dev/null +++ b/sexp_effects/effects/blend_multi.sexp @@ -0,0 +1,58 @@ +;; N-way weighted blend effect +;; Streaming-compatible: pass 
inputs as a list of frames +;; Usage: (blend_multi :inputs [(read a) (read b) (read c)] :weights [0.3 0.4 0.3]) +;; +;; Parameters: +;; inputs - list of N frames to blend +;; weights - list of N floats, one per input (resolved per-frame) +;; mode - blend mode applied when folding each frame in: +;; "alpha" — pure weighted average (default) +;; "multiply" — darken by multiplication +;; "screen" — lighten (inverse multiply) +;; "overlay" — contrast-boosting midtone blend +;; "soft-light" — gentle dodge/burn +;; "hard-light" — strong dodge/burn +;; "color-dodge" — brightens towards white +;; "color-burn" — darkens towards black +;; "difference" — absolute pixel difference +;; "exclusion" — softer difference +;; "add" — additive (clamped) +;; "subtract" — subtractive (clamped) +;; "darken" — per-pixel minimum +;; "lighten" — per-pixel maximum +;; resize_mode - how to match frame dimensions (fit, crop, stretch) +;; +;; Uses a left-fold over inputs[1..N-1]. At each step the running +;; opacity is: w[i] / (w[0] + w[1] + ... + w[i]) +;; which produces the correct normalised weighted result. 
+ +(require-primitives "image" "blending") + +(define-effect blend_multi + :params ( + (inputs :type list :default []) + (weights :type list :default []) + (mode :type string :default "alpha") + (resize_mode :type string :default "fit") + ) + (let [n (len inputs) + ;; Target dimensions from first frame + target-w (image:width (nth inputs 0)) + target-h (image:height (nth inputs 0)) + ;; Fold over indices 1..n-1 + ;; Accumulator is (list blended-frame running-weight-sum) + seed (list (nth inputs 0) (nth weights 0)) + result (reduce (range 1 n) seed + (lambda (pair i) + (let [acc (nth pair 0) + running (nth pair 1) + w (nth weights i) + new-running (+ running w) + opacity (/ w (max new-running 0.001)) + f (image:resize (nth inputs i) target-w target-h "linear") + ;; Apply blend mode then mix with opacity + blended (if (= mode "alpha") + (blending:blend-images acc f opacity) + (blending:blend-images acc (blending:blend-mode acc f mode) opacity))] + (list blended new-running))))] + (nth result 0))) diff --git a/sexp_effects/effects/bloom.sexp b/sexp_effects/effects/bloom.sexp new file mode 100644 index 0000000..3524d01 --- /dev/null +++ b/sexp_effects/effects/bloom.sexp @@ -0,0 +1,16 @@ +;; Bloom effect - glow on bright areas +(require-primitives "image" "blending") + +(define-effect bloom + :params ( + (intensity :type float :default 0.5 :range [0 2]) + (threshold :type int :default 200 :range [0 255]) + (radius :type int :default 15 :range [1 50]) + ) + (let* ((bright (map-pixels frame + (lambda (x y c) + (if (> (luminance c) threshold) + c + (rgb 0 0 0))))) + (blurred (image:blur bright radius))) + (blending:blend-mode frame blurred "add"))) diff --git a/sexp_effects/effects/blur.sexp b/sexp_effects/effects/blur.sexp new file mode 100644 index 0000000..b71a55a --- /dev/null +++ b/sexp_effects/effects/blur.sexp @@ -0,0 +1,8 @@ +;; Blur effect - gaussian blur +(require-primitives "image") + +(define-effect blur + :params ( + (radius :type int :default 5 :range [1 50]) 
+ ) + (image:blur frame (max 1 radius))) diff --git a/sexp_effects/effects/brightness.sexp b/sexp_effects/effects/brightness.sexp new file mode 100644 index 0000000..4af53a7 --- /dev/null +++ b/sexp_effects/effects/brightness.sexp @@ -0,0 +1,9 @@ +;; Brightness effect - adjusts overall brightness +;; Uses vectorized adjust primitive for fast processing +(require-primitives "color_ops") + +(define-effect brightness + :params ( + (amount :type int :default 0 :range [-255 255]) + ) + (color_ops:adjust-brightness frame amount)) diff --git a/sexp_effects/effects/color-adjust.sexp b/sexp_effects/effects/color-adjust.sexp new file mode 100644 index 0000000..5318bdd --- /dev/null +++ b/sexp_effects/effects/color-adjust.sexp @@ -0,0 +1,13 @@ +;; Color adjustment effect - replaces TRANSFORM node +(require-primitives "color_ops") + +(define-effect color-adjust + :params ( + (brightness :type int :default 0 :range [-255 255] :desc "Brightness adjustment") + (contrast :type float :default 1 :range [0 3] :desc "Contrast multiplier") + (saturation :type float :default 1 :range [0 2] :desc "Saturation multiplier") + ) + (-> frame + (color_ops:adjust-brightness brightness) + (color_ops:adjust-contrast contrast) + (color_ops:adjust-saturation saturation))) diff --git a/sexp_effects/effects/color_cycle.sexp b/sexp_effects/effects/color_cycle.sexp new file mode 100644 index 0000000..e08dbb6 --- /dev/null +++ b/sexp_effects/effects/color_cycle.sexp @@ -0,0 +1,13 @@ +;; Color Cycle effect - animated hue rotation +(require-primitives "color_ops") + +(define-effect color_cycle + :params ( + (speed :type int :default 1 :range [0 10]) + ) + (let ((shift (* t speed 360))) + (map-pixels frame + (lambda (x y c) + (let* ((hsv (rgb->hsv c)) + (new-h (mod (+ (first hsv) shift) 360))) + (hsv->rgb (list new-h (nth hsv 1) (nth hsv 2)))))))) diff --git a/sexp_effects/effects/contrast.sexp b/sexp_effects/effects/contrast.sexp new file mode 100644 index 0000000..660661d --- /dev/null +++ 
b/sexp_effects/effects/contrast.sexp @@ -0,0 +1,9 @@ +;; Contrast effect - adjusts image contrast +;; Uses vectorized adjust primitive for fast processing +(require-primitives "color_ops") + +(define-effect contrast + :params ( + (amount :type float :default 1 :range [0.5 3]) + ) + (color_ops:adjust-contrast frame amount)) diff --git a/sexp_effects/effects/crt.sexp b/sexp_effects/effects/crt.sexp new file mode 100644 index 0000000..097eaf9 --- /dev/null +++ b/sexp_effects/effects/crt.sexp @@ -0,0 +1,30 @@ +;; CRT effect - old monitor simulation +(require-primitives "image") + +(define-effect crt + :params ( + (line_spacing :type int :default 2 :range [1 10]) + (line_opacity :type float :default 0.3 :range [0 1]) + (vignette_amount :type float :default 0.2) + ) + (let* ((w (image:width frame)) + (h (image:height frame)) + (cx (/ w 2)) + (cy (/ h 2)) + (max-dist (sqrt (+ (* cx cx) (* cy cy))))) + (map-pixels frame + (lambda (x y c) + (let* (;; Scanline darkening + (scanline-factor (if (= 0 (mod y line_spacing)) + (- 1 line_opacity) + 1)) + ;; Vignette + (dx (- x cx)) + (dy (- y cy)) + (dist (sqrt (+ (* dx dx) (* dy dy)))) + (vignette-factor (- 1 (* (/ dist max-dist) vignette_amount))) + ;; Combined + (factor (* scanline-factor vignette-factor))) + (rgb (* (red c) factor) + (* (green c) factor) + (* (blue c) factor))))))) diff --git a/sexp_effects/effects/datamosh.sexp b/sexp_effects/effects/datamosh.sexp new file mode 100644 index 0000000..60cec66 --- /dev/null +++ b/sexp_effects/effects/datamosh.sexp @@ -0,0 +1,14 @@ +;; Datamosh effect - glitch block corruption + +(define-effect datamosh + :params ( + (block_size :type int :default 32 :range [8 128]) + (corruption :type float :default 0.3 :range [0 1]) + (max_offset :type int :default 50 :range [0 200]) + (color_corrupt :type bool :default true) + ) + ;; Get previous frame from state, or use current frame if none + (let ((prev (state-get "prev_frame" frame))) + (begin + (state-set "prev_frame" (copy frame)) + 
(datamosh frame prev block_size corruption max_offset color_corrupt)))) diff --git a/sexp_effects/effects/echo.sexp b/sexp_effects/effects/echo.sexp new file mode 100644 index 0000000..2aa2287 --- /dev/null +++ b/sexp_effects/effects/echo.sexp @@ -0,0 +1,19 @@ +;; Echo effect - motion trails using frame buffer +(require-primitives "blending") + +(define-effect echo + :params ( + (num_echoes :type int :default 4 :range [1 20]) + (decay :type float :default 0.5 :range [0 1]) + ) + (let* ((buffer (state-get 'buffer (list))) + (new-buffer (take (cons frame buffer) (+ num_echoes 1)))) + (begin + (state-set 'buffer new-buffer) + ;; Blend frames with decay + (if (< (length new-buffer) 2) + frame + (let ((result (copy frame))) + ;; Simple blend of first two frames for now + ;; Full version would fold over all frames + (blending:blend-images frame (nth new-buffer 1) (* decay 0.5))))))) diff --git a/sexp_effects/effects/edge_detect.sexp b/sexp_effects/effects/edge_detect.sexp new file mode 100644 index 0000000..170befb --- /dev/null +++ b/sexp_effects/effects/edge_detect.sexp @@ -0,0 +1,9 @@ +;; Edge detection effect - highlights edges +(require-primitives "image") + +(define-effect edge_detect + :params ( + (low :type int :default 50 :range [10 100]) + (high :type int :default 150 :range [50 300]) + ) + (image:edge-detect frame low high)) diff --git a/sexp_effects/effects/emboss.sexp b/sexp_effects/effects/emboss.sexp new file mode 100644 index 0000000..1eac3ce --- /dev/null +++ b/sexp_effects/effects/emboss.sexp @@ -0,0 +1,13 @@ +;; Emboss effect - creates raised/3D appearance +(require-primitives "blending") + +(define-effect emboss + :params ( + (strength :type int :default 1 :range [0.5 3]) + (blend :type float :default 0.3 :range [0 1]) + ) + (let* ((kernel (list (list (- strength) (- strength) 0) + (list (- strength) 1 strength) + (list 0 strength strength))) + (embossed (convolve frame kernel))) + (blending:blend-images embossed frame blend))) diff --git 
a/sexp_effects/effects/film_grain.sexp b/sexp_effects/effects/film_grain.sexp new file mode 100644 index 0000000..29bdd75 --- /dev/null +++ b/sexp_effects/effects/film_grain.sexp @@ -0,0 +1,19 @@ +;; Film Grain effect - adds film grain texture +(require-primitives "core") + +(define-effect film_grain + :params ( + (intensity :type float :default 0.2 :range [0 1]) + (colored :type bool :default false) + ) + (let ((grain-amount (* intensity 50))) + (map-pixels frame + (lambda (x y c) + (if colored + (rgb (clamp (+ (red c) (gaussian 0 grain-amount)) 0 255) + (clamp (+ (green c) (gaussian 0 grain-amount)) 0 255) + (clamp (+ (blue c) (gaussian 0 grain-amount)) 0 255)) + (let ((n (gaussian 0 grain-amount))) + (rgb (clamp (+ (red c) n) 0 255) + (clamp (+ (green c) n) 0 255) + (clamp (+ (blue c) n) 0 255)))))))) diff --git a/sexp_effects/effects/fisheye.sexp b/sexp_effects/effects/fisheye.sexp new file mode 100644 index 0000000..37750a7 --- /dev/null +++ b/sexp_effects/effects/fisheye.sexp @@ -0,0 +1,16 @@ +;; Fisheye effect - barrel/pincushion lens distortion +(require-primitives "geometry" "image") + +(define-effect fisheye + :params ( + (strength :type float :default 0.3 :range [-1 1]) + (center_x :type float :default 0.5 :range [0 1]) + (center_y :type float :default 0.5 :range [0 1]) + (zoom_correct :type bool :default true) + ) + (let* ((w (image:width frame)) + (h (image:height frame)) + (cx (* w center_x)) + (cy (* h center_y)) + (coords (geometry:fisheye-coords w h strength cx cy zoom_correct))) + (geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords)))) diff --git a/sexp_effects/effects/flip.sexp b/sexp_effects/effects/flip.sexp new file mode 100644 index 0000000..977e1e1 --- /dev/null +++ b/sexp_effects/effects/flip.sexp @@ -0,0 +1,16 @@ +;; Flip effect - flips image horizontally or vertically +(require-primitives "geometry") + +(define-effect flip + :params ( + (horizontal :type bool :default true) + (vertical :type bool :default false) + ) 
+ (let ((result frame)) + (if horizontal + (set! result (geometry:flip-img result "horizontal")) + nil) + (if vertical + (set! result (geometry:flip-img result "vertical")) + nil) + result)) diff --git a/sexp_effects/effects/grayscale.sexp b/sexp_effects/effects/grayscale.sexp new file mode 100644 index 0000000..848f8a7 --- /dev/null +++ b/sexp_effects/effects/grayscale.sexp @@ -0,0 +1,7 @@ +;; Grayscale effect - converts to grayscale +;; Uses vectorized mix-gray primitive for fast processing +(require-primitives "image") + +(define-effect grayscale + :params () + (image:grayscale frame)) diff --git a/sexp_effects/effects/hue_shift.sexp b/sexp_effects/effects/hue_shift.sexp new file mode 100644 index 0000000..ab61bd6 --- /dev/null +++ b/sexp_effects/effects/hue_shift.sexp @@ -0,0 +1,12 @@ +;; Hue shift effect - rotates hue values +;; Uses vectorized shift-hsv primitive for fast processing + +(require-primitives "color_ops") + +(define-effect hue_shift + :params ( + (degrees :type int :default 0 :range [0 360]) + (speed :type int :default 0 :desc "rotation per second") + ) + (let ((shift (+ degrees (* speed t)))) + (color_ops:shift-hsv frame shift 1 1))) diff --git a/sexp_effects/effects/invert.sexp b/sexp_effects/effects/invert.sexp new file mode 100644 index 0000000..34936da --- /dev/null +++ b/sexp_effects/effects/invert.sexp @@ -0,0 +1,9 @@ +;; Invert effect - inverts all colors +;; Uses vectorized invert-img primitive for fast processing +;; amount param: 0 = no invert, 1 = full invert (threshold at 0.5) + +(require-primitives "color_ops") + +(define-effect invert + :params ((amount :type float :default 1 :range [0 1])) + (if (> amount 0.5) (color_ops:invert-img frame) frame)) diff --git a/sexp_effects/effects/kaleidoscope.sexp b/sexp_effects/effects/kaleidoscope.sexp new file mode 100644 index 0000000..9487ae2 --- /dev/null +++ b/sexp_effects/effects/kaleidoscope.sexp @@ -0,0 +1,20 @@ +;; Kaleidoscope effect - mandala-like symmetry patterns 
+(require-primitives "geometry" "image") + +(define-effect kaleidoscope + :params ( + (segments :type int :default 6 :range [3 16]) + (rotation :type int :default 0 :range [0 360]) + (rotation_speed :type int :default 0 :range [-180 180]) + (center_x :type float :default 0.5 :range [0 1]) + (center_y :type float :default 0.5 :range [0 1]) + (zoom :type float :default 1.0 :range [0.5 3]) + ) + (let* ((w (image:width frame)) + (h (image:height frame)) + (cx (* w center_x)) + (cy (* h center_y)) + ;; Total rotation including time-based animation + (total_rot (+ rotation (* rotation_speed (or _time 0)))) + (coords (geometry:kaleidoscope-coords w h segments total_rot cx cy zoom))) + (geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords)))) diff --git a/sexp_effects/effects/layer.sexp b/sexp_effects/effects/layer.sexp new file mode 100644 index 0000000..e57d627 --- /dev/null +++ b/sexp_effects/effects/layer.sexp @@ -0,0 +1,36 @@ +;; Layer effect - composite overlay over background at position +;; Streaming-compatible: frame is background, overlay is foreground +;; Usage: (layer background overlay :x 10 :y 20 :opacity 0.8) +;; +;; Params: +;; overlay - frame to composite on top +;; x, y - position to place overlay +;; opacity - blend amount (0-1) +;; mode - blend mode (alpha, multiply, screen, etc.)
+ +(require-primitives "image" "blending" "core") + +(define-effect layer + :params ( + (overlay :type frame :default nil) + (x :type int :default 0) + (y :type int :default 0) + (opacity :type float :default 1.0) + (mode :type string :default "alpha") + ) + (if (core:is-nil overlay) + frame + (let [bg (copy frame) + fg overlay + fg-w (image:width fg) + fg-h (image:height fg)] + (if (= opacity 1.0) + ;; Simple paste + (paste bg fg x y) + ;; Blend with opacity + (let [blended (if (= mode "alpha") + (blending:blend-images (image:crop bg x y fg-w fg-h) fg opacity) + (blending:blend-images (image:crop bg x y fg-w fg-h) + (blending:blend-mode (image:crop bg x y fg-w fg-h) fg mode) + opacity))] + (paste bg blended x y)))))) diff --git a/sexp_effects/effects/mirror.sexp b/sexp_effects/effects/mirror.sexp new file mode 100644 index 0000000..a450cb6 --- /dev/null +++ b/sexp_effects/effects/mirror.sexp @@ -0,0 +1,33 @@ +;; Mirror effect - mirrors half of image +(require-primitives "geometry" "image") + +(define-effect mirror + :params ( + (mode :type string :default "left_right") + ) + (let* ((w (image:width frame)) + (h (image:height frame)) + (hw (floor (/ w 2))) + (hh (floor (/ h 2)))) + (cond + ((= mode "left_right") + (let ((left (image:crop frame 0 0 hw h)) + (result (copy frame))) + (paste result (geometry:flip-img left "horizontal") hw 0))) + + ((= mode "right_left") + (let ((right (image:crop frame hw 0 hw h)) + (result (copy frame))) + (paste result (geometry:flip-img right "horizontal") 0 0))) + + ((= mode "top_bottom") + (let ((top (image:crop frame 0 0 w hh)) + (result (copy frame))) + (paste result (geometry:flip-img top "vertical") 0 hh))) + + ((= mode "bottom_top") + (let ((bottom (image:crop frame 0 hh w hh)) + (result (copy frame))) + (paste result (geometry:flip-img bottom "vertical") 0 0))) + + (else frame)))) diff --git a/sexp_effects/effects/neon_glow.sexp b/sexp_effects/effects/neon_glow.sexp new file mode 100644 index 0000000..39245ab --- /dev/null 
+++ b/sexp_effects/effects/neon_glow.sexp @@ -0,0 +1,23 @@ +;; Neon Glow effect - glowing edge effect +(require-primitives "image" "blending") + +(define-effect neon_glow + :params ( + (edge_low :type int :default 50 :range [10 200]) + (edge_high :type int :default 150 :range [50 300]) + (glow_radius :type int :default 15 :range [1 50]) + (glow_intensity :type float :default 2.0 :range [0.5 5]) + (background :type float :default 0.3 :range [0 1]) + ) + (let* ((edge-img (image:edge-detect frame edge_low edge_high)) + (glow (image:blur edge-img glow_radius)) + ;; Intensify the glow + (bright-glow (map-pixels glow + (lambda (x y c) + (rgb (clamp (* (red c) glow_intensity) 0 255) + (clamp (* (green c) glow_intensity) 0 255) + (clamp (* (blue c) glow_intensity) 0 255)))))) + (blending:blend-mode (blending:blend-images frame (make-image (image:width frame) (image:height frame) (list 0 0 0)) + (- 1 background)) + bright-glow + "screen"))) diff --git a/sexp_effects/effects/noise.sexp b/sexp_effects/effects/noise.sexp new file mode 100644 index 0000000..4da8298 --- /dev/null +++ b/sexp_effects/effects/noise.sexp @@ -0,0 +1,8 @@ +;; Noise effect - adds random noise +;; Uses vectorized add-noise primitive for fast processing + +(define-effect noise + :params ( + (amount :type int :default 20 :range [0 100]) + ) + (add-noise frame amount)) diff --git a/sexp_effects/effects/outline.sexp b/sexp_effects/effects/outline.sexp new file mode 100644 index 0000000..276f891 --- /dev/null +++ b/sexp_effects/effects/outline.sexp @@ -0,0 +1,24 @@ +;; Outline effect - shows only edges +(require-primitives "image") + +(define-effect outline + :params ( + (thickness :type int :default 2 :range [1 10]) + (threshold :type int :default 100 :range [20 300]) + (color :type list :default (list 0 0 0)) + (fill_mode :type string :default "original") + ) + (let* ((edge-img (image:edge-detect frame (/ threshold 2) threshold)) + (dilated (if (> thickness 1) + (dilate edge-img thickness) + edge-img)) + (base (cond + ((= fill_mode
"original") (copy frame)) + ((= fill_mode "white") (make-image (image:width frame) (image:height frame) (list 255 255 255))) + (else (make-image (image:width frame) (image:height frame) (list 0 0 0)))))) + (map-pixels base + (lambda (x y c) + (let ((edge-val (luminance (pixel dilated x y)))) + (if (> edge-val 128) + color + c)))))) diff --git a/sexp_effects/effects/pixelate.sexp b/sexp_effects/effects/pixelate.sexp new file mode 100644 index 0000000..3d28ce1 --- /dev/null +++ b/sexp_effects/effects/pixelate.sexp @@ -0,0 +1,13 @@ +;; Pixelate effect - creates blocky pixels +(require-primitives "image") + +(define-effect pixelate + :params ( + (block_size :type int :default 8 :range [2 64]) + ) + (let* ((w (image:width frame)) + (h (image:height frame)) + (small-w (max 1 (floor (/ w block_size)))) + (small-h (max 1 (floor (/ h block_size)))) + (small (image:resize frame small-w small-h "area"))) + (image:resize small w h "nearest"))) diff --git a/sexp_effects/effects/pixelsort.sexp b/sexp_effects/effects/pixelsort.sexp new file mode 100644 index 0000000..155ac13 --- /dev/null +++ b/sexp_effects/effects/pixelsort.sexp @@ -0,0 +1,11 @@ +;; Pixelsort effect - glitch art pixel sorting + +(define-effect pixelsort + :params ( + (sort_by :type string :default "lightness") + (threshold_low :type int :default 50 :range [0 255]) + (threshold_high :type int :default 200 :range [0 255]) + (angle :type int :default 0 :range [0 180]) + (reverse :type bool :default false) + ) + (pixelsort frame sort_by threshold_low threshold_high angle reverse)) diff --git a/sexp_effects/effects/posterize.sexp b/sexp_effects/effects/posterize.sexp new file mode 100644 index 0000000..7052ed3 --- /dev/null +++ b/sexp_effects/effects/posterize.sexp @@ -0,0 +1,8 @@ +;; Posterize effect - reduces color levels +(require-primitives "color_ops") + +(define-effect posterize + :params ( + (levels :type int :default 8 :range [2 32]) + ) + (color_ops:posterize frame levels)) diff --git 
a/sexp_effects/effects/resize-frame.sexp b/sexp_effects/effects/resize-frame.sexp new file mode 100644 index 0000000..a1cce27 --- /dev/null +++ b/sexp_effects/effects/resize-frame.sexp @@ -0,0 +1,11 @@ +;; Resize effect - replaces RESIZE node +;; Note: uses target-w/target-h to avoid conflict with width/height primitives +(require-primitives "image") + +(define-effect resize-frame + :params ( + (target-w :type int :default 640 :desc "Target width in pixels") + (target-h :type int :default 480 :desc "Target height in pixels") + (mode :type string :default "linear" :choices [linear nearest area] :desc "Interpolation mode") + ) + (image:resize frame target-w target-h mode)) diff --git a/sexp_effects/effects/rgb_split.sexp b/sexp_effects/effects/rgb_split.sexp new file mode 100644 index 0000000..4582701 --- /dev/null +++ b/sexp_effects/effects/rgb_split.sexp @@ -0,0 +1,13 @@ +;; RGB Split effect - chromatic aberration + +(define-effect rgb_split + :params ( + (offset_x :type int :default 10 :range [-50 50]) + (offset_y :type int :default 0 :range [-50 50]) + ) + (let* ((r (channel frame 0)) + (g (channel frame 1)) + (b (channel frame 2)) + (r-shifted (translate (merge-channels r r r) offset_x offset_y)) + (b-shifted (translate (merge-channels b b b) (- offset_x) (- offset_y)))) + (merge-channels (channel r-shifted 0) g (channel b-shifted 0)))) diff --git a/sexp_effects/effects/ripple.sexp b/sexp_effects/effects/ripple.sexp new file mode 100644 index 0000000..0bb7a8d --- /dev/null +++ b/sexp_effects/effects/ripple.sexp @@ -0,0 +1,19 @@ +;; Ripple effect - radial wave distortion from center +(require-primitives "geometry" "image" "math") + +(define-effect ripple + :params ( + (frequency :type int :default 5 :range [1 20]) + (amplitude :type int :default 10 :range [0 50]) + (center_x :type float :default 0.5 :range [0 1]) + (center_y :type float :default 0.5 :range [0 1]) + (decay :type int :default 1 :range [0 5]) + (speed :type int :default 1 :range [0 10]) + ) + (let* 
((w (image:width frame)) + (h (image:height frame)) + (cx (* w center_x)) + (cy (* h center_y)) + (phase (* (or t 0) speed 2 pi)) + (coords (geometry:ripple-displace w h frequency amplitude cx cy decay phase))) + (geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords)))) diff --git a/sexp_effects/effects/rotate.sexp b/sexp_effects/effects/rotate.sexp new file mode 100644 index 0000000..d06c2f7 --- /dev/null +++ b/sexp_effects/effects/rotate.sexp @@ -0,0 +1,11 @@ +;; Rotate effect - rotates image + +(require-primitives "geometry") + +(define-effect rotate + :params ( + (angle :type int :default 0 :range [-360 360]) + (speed :type int :default 0 :desc "rotation per second") + ) + (let ((total-angle (+ angle (* speed t)))) + (geometry:rotate-img frame total-angle))) diff --git a/sexp_effects/effects/saturation.sexp b/sexp_effects/effects/saturation.sexp new file mode 100644 index 0000000..9852dc7 --- /dev/null +++ b/sexp_effects/effects/saturation.sexp @@ -0,0 +1,9 @@ +;; Saturation effect - adjusts color saturation +;; Uses vectorized shift-hsv primitive for fast processing +(require-primitives "color_ops") + +(define-effect saturation + :params ( + (amount :type int :default 1 :range [0 3]) + ) + (color_ops:adjust-saturation frame amount)) diff --git a/sexp_effects/effects/scanlines.sexp b/sexp_effects/effects/scanlines.sexp new file mode 100644 index 0000000..ddfcf44 --- /dev/null +++ b/sexp_effects/effects/scanlines.sexp @@ -0,0 +1,15 @@ +;; Scanlines effect - VHS-style horizontal line shifting +(require-primitives "core") + +(define-effect scanlines + :params ( + (amplitude :type int :default 10 :range [0 100]) + (frequency :type int :default 10 :range [1 100]) + (randomness :type float :default 0.5 :range [0 1]) + ) + (map-rows frame + (lambda (y row) + (let* ((sine-shift (* amplitude (sin (/ (* y 6.28) (max 1 frequency))))) + (rand-shift (core:rand-range (- amplitude) amplitude)) + (shift (floor (lerp sine-shift rand-shift randomness)))) + 
(roll row shift 0))))) diff --git a/sexp_effects/effects/sepia.sexp b/sexp_effects/effects/sepia.sexp new file mode 100644 index 0000000..e3a5875 --- /dev/null +++ b/sexp_effects/effects/sepia.sexp @@ -0,0 +1,7 @@ +;; Sepia effect - applies sepia tone +;; Classic warm vintage look +(require-primitives "color_ops") + +(define-effect sepia + :params () + (color_ops:sepia frame)) diff --git a/sexp_effects/effects/sharpen.sexp b/sexp_effects/effects/sharpen.sexp new file mode 100644 index 0000000..538bd7f --- /dev/null +++ b/sexp_effects/effects/sharpen.sexp @@ -0,0 +1,8 @@ +;; Sharpen effect - sharpens edges +(require-primitives "image") + +(define-effect sharpen + :params ( + (amount :type int :default 1 :range [0 5]) + ) + (image:sharpen frame amount)) diff --git a/sexp_effects/effects/strobe.sexp b/sexp_effects/effects/strobe.sexp new file mode 100644 index 0000000..e51ba30 --- /dev/null +++ b/sexp_effects/effects/strobe.sexp @@ -0,0 +1,16 @@ +;; Strobe effect - holds frames for choppy look +(require-primitives "core") + +(define-effect strobe + :params ( + (frame_rate :type int :default 12 :range [1 60]) + ) + (let* ((held (state-get 'held nil)) + (held-until (state-get 'held-until 0)) + (frame-duration (/ 1 frame_rate))) + (if (or (core:is-nil held) (>= t held-until)) + (begin + (state-set 'held (copy frame)) + (state-set 'held-until (+ t frame-duration)) + frame) + held))) diff --git a/sexp_effects/effects/swirl.sexp b/sexp_effects/effects/swirl.sexp new file mode 100644 index 0000000..ba9cf57 --- /dev/null +++ b/sexp_effects/effects/swirl.sexp @@ -0,0 +1,17 @@ +;; Swirl effect - spiral vortex distortion +(require-primitives "geometry" "image") + +(define-effect swirl + :params ( + (strength :type int :default 1 :range [-10 10]) + (radius :type float :default 0.5 :range [0.1 2]) + (center_x :type float :default 0.5 :range [0 1]) + (center_y :type float :default 0.5 :range [0 1]) + (falloff :type string :default "quadratic") + ) + (let* ((w (image:width frame)) + 
(h (image:height frame)) + (cx (* w center_x)) + (cy (* h center_y)) + (coords (geometry:swirl-coords w h strength radius cx cy falloff))) + (geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords)))) diff --git a/sexp_effects/effects/threshold.sexp b/sexp_effects/effects/threshold.sexp new file mode 100644 index 0000000..50d3bc5 --- /dev/null +++ b/sexp_effects/effects/threshold.sexp @@ -0,0 +1,9 @@ +;; Threshold effect - converts to black and white +(require-primitives "color_ops") + +(define-effect threshold + :params ( + (level :type int :default 128 :range [0 255]) + (invert :type bool :default false) + ) + (color_ops:threshold frame level invert)) diff --git a/sexp_effects/effects/tile_grid.sexp b/sexp_effects/effects/tile_grid.sexp new file mode 100644 index 0000000..44487a9 --- /dev/null +++ b/sexp_effects/effects/tile_grid.sexp @@ -0,0 +1,29 @@ +;; Tile Grid effect - tiles image in grid +(require-primitives "geometry" "image") + +(define-effect tile_grid + :params ( + (rows :type int :default 2 :range [1 10]) + (cols :type int :default 2 :range [1 10]) + (gap :type int :default 0 :range [0 50]) + ) + (let* ((w (image:width frame)) + (h (image:height frame)) + (tile-w (floor (/ (- w (* gap (- cols 1))) cols))) + (tile-h (floor (/ (- h (* gap (- rows 1))) rows))) + (tile (image:resize frame tile-w tile-h "area")) + (result (make-image w h (list 0 0 0)))) + (begin + ;; Manually place tiles using nested iteration + ;; This is a simplified version - full version would loop + (paste result tile 0 0) + (if (> cols 1) + (paste result tile (+ tile-w gap) 0) + nil) + (if (> rows 1) + (paste result tile 0 (+ tile-h gap)) + nil) + (if (and (> cols 1) (> rows 1)) + (paste result tile (+ tile-w gap) (+ tile-h gap)) + nil) + result))) diff --git a/sexp_effects/effects/trails.sexp b/sexp_effects/effects/trails.sexp new file mode 100644 index 0000000..f16c302 --- /dev/null +++ b/sexp_effects/effects/trails.sexp @@ -0,0 +1,20 @@ +;; Trails effect - 
persistent motion trails +(require-primitives "image" "blending") + +(define-effect trails + :params ( + (persistence :type float :default 0.8 :range [0 0.99]) + ) + (let* ((buffer (state-get 'buffer nil)) + (current frame)) + (if (= buffer nil) + (begin + (state-set 'buffer (copy frame)) + frame) + (let* ((faded (blending:blend-images buffer + (make-image (image:width frame) (image:height frame) (list 0 0 0)) + (- 1 persistence))) + (result (blending:blend-mode faded current "lighten"))) + (begin + (state-set 'buffer result) + result))))) diff --git a/sexp_effects/effects/vignette.sexp b/sexp_effects/effects/vignette.sexp new file mode 100644 index 0000000..46e63ee --- /dev/null +++ b/sexp_effects/effects/vignette.sexp @@ -0,0 +1,23 @@ +;; Vignette effect - darkens corners +(require-primitives "image") + +(define-effect vignette + :params ( + (strength :type float :default 0.5 :range [0 1]) + (radius :type float :default 1.0 :range [0.5 2]) + ) + (let* ((w (image:width frame)) + (h (image:height frame)) + (cx (/ w 2)) + (cy (/ h 2)) + (max-dist (* (sqrt (+ (* cx cx) (* cy cy))) radius))) + (map-pixels frame + (lambda (x y c) + (let* ((dx (- x cx)) + (dy (- y cy)) + (dist (sqrt (+ (* dx dx) (* dy dy)))) + (factor (- 1 (* (/ dist max-dist) strength))) + (factor (clamp factor 0 1))) + (rgb (* (red c) factor) + (* (green c) factor) + (* (blue c) factor))))))) diff --git a/sexp_effects/effects/wave.sexp b/sexp_effects/effects/wave.sexp new file mode 100644 index 0000000..98b03c2 --- /dev/null +++ b/sexp_effects/effects/wave.sexp @@ -0,0 +1,22 @@ +;; Wave effect - sine wave displacement distortion +(require-primitives "geometry" "image") + +(define-effect wave + :params ( + (amplitude :type int :default 10 :range [0 100]) + (wavelength :type int :default 50 :range [10 500]) + (speed :type int :default 1 :range [0 10]) + (direction :type string :default "horizontal") + ) + (let* ((w (image:width frame)) + (h (image:height frame)) + ;; Use _time for animation phase + (phase
(* (or _time 0) speed 2 pi) + ;; Calculate frequency: waves per dimension + (freq (/ (if (= direction "vertical") w h) wavelength)) + (axis (cond + ((= direction "horizontal") "x") + ((= direction "vertical") "y") + (else "both"))) + (coords (geometry:wave-coords w h axis freq amplitude phase))) + (geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords)))) diff --git a/sexp_effects/effects/zoom.sexp b/sexp_effects/effects/zoom.sexp new file mode 100644 index 0000000..6e4b9ff --- /dev/null +++ b/sexp_effects/effects/zoom.sexp @@ -0,0 +1,8 @@ +;; Zoom effect - zooms in/out from center +(require-primitives "geometry") + +(define-effect zoom + :params ( + (amount :type float :default 1.0 :range [0.1 5]) + ) + (geometry:scale-img frame amount amount)) diff --git a/sexp_effects/interpreter.py b/sexp_effects/interpreter.py new file mode 100644 index 0000000..830904a --- /dev/null +++ b/sexp_effects/interpreter.py @@ -0,0 +1,1016 @@ +""" +S-Expression Effect Interpreter + +Interprets effect definitions written in S-expressions. +Only allows safe primitives - no arbitrary code execution.
+""" + +import numpy as np +from typing import Any, Dict, List, Optional, Callable +from pathlib import Path + +from .parser import Symbol, Keyword, parse, parse_file +from .primitives import PRIMITIVES, reset_rng + + +def _is_symbol(x) -> bool: + """Check if x is a Symbol (duck typing to support multiple Symbol classes).""" + return hasattr(x, 'name') and type(x).__name__ == 'Symbol' + + +def _is_keyword(x) -> bool: + """Check if x is a Keyword (duck typing to support multiple Keyword classes).""" + return hasattr(x, 'name') and type(x).__name__ == 'Keyword' + + +def _symbol_name(x) -> str: + """Get the name from a Symbol.""" + return x.name if hasattr(x, 'name') else str(x) + + +class Environment: + """Lexical environment for variable bindings.""" + + def __init__(self, parent: 'Environment' = None): + self.bindings: Dict[str, Any] = {} + self.parent = parent + + def get(self, name: str) -> Any: + if name in self.bindings: + return self.bindings[name] + if self.parent: + return self.parent.get(name) + raise NameError(f"Undefined variable: {name}") + + def set(self, name: str, value: Any): + self.bindings[name] = value + + def has(self, name: str) -> bool: + if name in self.bindings: + return True + if self.parent: + return self.parent.has(name) + return False + + +class Lambda: + """A user-defined function (lambda).""" + + def __init__(self, params: List[str], body: Any, env: Environment): + self.params = params + self.body = body + self.env = env # Closure environment + + def __repr__(self): + return f"" + + +class EffectDefinition: + """A parsed effect definition.""" + + def __init__(self, name: str, params: Dict[str, Any], body: Any): + self.name = name + self.params = params # {name: (type, default)} + self.body = body + + def __repr__(self): + return f"" + + +class Interpreter: + """ + S-Expression interpreter for effects. + + Provides a safe execution environment where only + whitelisted primitives can be called. 
+ + Args: + minimal_primitives: If True, only load core primitives (arithmetic, comparison, + basic data access). Additional primitives must be loaded with + (require-primitives) or (with-primitives). + If False (default), load all legacy primitives for backward compatibility. + """ + + def __init__(self, minimal_primitives: bool = False): + # Base environment with primitives + self.global_env = Environment() + self.minimal_primitives = minimal_primitives + + if minimal_primitives: + # Load only core primitives + from .primitive_libs.core import PRIMITIVES as CORE_PRIMITIVES + for name, fn in CORE_PRIMITIVES.items(): + self.global_env.set(name, fn) + else: + # Load all legacy primitives for backward compatibility + for name, fn in PRIMITIVES.items(): + self.global_env.set(name, fn) + + # Special values + self.global_env.set('true', True) + self.global_env.set('false', False) + self.global_env.set('nil', None) + + # Loaded effect definitions + self.effects: Dict[str, EffectDefinition] = {} + + def eval(self, expr: Any, env: Environment = None) -> Any: + """Evaluate an S-expression.""" + if env is None: + env = self.global_env + + # Atoms + if isinstance(expr, (int, float, str, bool)): + return expr + + if expr is None: + return None + + # Handle Symbol (duck typing to support both sexp_effects.parser.Symbol and artdag.sexp.parser.Symbol) + if _is_symbol(expr): + return env.get(expr.name) + + # Handle Keyword (duck typing) + if _is_keyword(expr): + return expr # Keywords evaluate to themselves + + if isinstance(expr, np.ndarray): + return expr # Images pass through + + # Lists (function calls / special forms) + if isinstance(expr, list): + if not expr: + return [] + + head = expr[0] + + # Special forms + if _is_symbol(head): + form = head.name + + # Quote + if form == 'quote': + return expr[1] + + # Define + if form == 'define': + name = expr[1] + if _is_symbol(name): + value = self.eval(expr[2], env) + self.global_env.set(name.name, value) + return value + else: + 
raise SyntaxError(f"define requires symbol, got {name}") + + # Define-effect + if form == 'define-effect': + return self._define_effect(expr, env) + + # Lambda + if form == 'lambda' or form == 'λ': + params = [p.name if _is_symbol(p) else p for p in expr[1]] + body = expr[2] + return Lambda(params, body, env) + + # Let + if form == 'let': + return self._eval_let(expr, env) + + # Let* + if form == 'let*': + return self._eval_let_star(expr, env) + + # If + if form == 'if': + cond = self.eval(expr[1], env) + if cond: + return self.eval(expr[2], env) + elif len(expr) > 3: + return self.eval(expr[3], env) + return None + + # Cond + if form == 'cond': + return self._eval_cond(expr, env) + + # And + if form == 'and': + result = True + for e in expr[1:]: + result = self.eval(e, env) + if not result: + return False + return result + + # Or + if form == 'or': + for e in expr[1:]: + result = self.eval(e, env) + if result: + return result + return False + + # Not + if form == 'not': + return not self.eval(expr[1], env) + + # Begin (sequence) + if form == 'begin': + result = None + for e in expr[1:]: + result = self.eval(e, env) + return result + + # Thread-first macro: (-> x (f a) (g b)) => (g (f x a) b) + if form == '->': + result = self.eval(expr[1], env) + for form_expr in expr[2:]: + if isinstance(form_expr, list): + # Insert result as first arg: (f a b) => (f result a b) + result = self.eval([form_expr[0], result] + form_expr[1:], env) + else: + # Just a symbol: f => (f result) + result = self.eval([form_expr, result], env) + return result + + # Set! 
(mutation) + if form == 'set!': + name = expr[1].name if _is_symbol(expr[1]) else expr[1] + value = self.eval(expr[2], env) + # Find and update in appropriate scope + scope = env + while scope: + if name in scope.bindings: + scope.bindings[name] = value + return value + scope = scope.parent + raise NameError(f"Cannot set undefined variable: {name}") + + # State-get / state-set (for effect state) + if form == 'state-get': + state = env.get('__state__') + key = self.eval(expr[1], env) + if _is_symbol(key): + key = key.name + default = self.eval(expr[2], env) if len(expr) > 2 else None + return state.get(key, default) + + if form == 'state-set': + state = env.get('__state__') + key = self.eval(expr[1], env) + if _is_symbol(key): + key = key.name + value = self.eval(expr[2], env) + state[key] = value + return value + + # ascii-fx-zone special form - delays evaluation of expression parameters + if form == 'ascii-fx-zone': + return self._eval_ascii_fx_zone(expr, env) + + # with-primitives - load primitive library and scope to body + if form == 'with-primitives': + return self._eval_with_primitives(expr, env) + + # require-primitives - load primitive library into current scope + if form == 'require-primitives': + return self._eval_require_primitives(expr, env) + + # Function call + fn = self.eval(head, env) + args = [self.eval(arg, env) for arg in expr[1:]] + + # Handle keyword arguments + pos_args = [] + kw_args = {} + i = 0 + while i < len(args): + if _is_keyword(args[i]): + kw_args[args[i].name] = args[i + 1] if i + 1 < len(args) else None + i += 2 + else: + pos_args.append(args[i]) + i += 1 + + return self._apply(fn, pos_args, kw_args, env) + + raise TypeError(f"Cannot evaluate: {expr}") + + def _wrap_lambda(self, lam: 'Lambda') -> Callable: + """Wrap a Lambda in a Python callable for use by primitives.""" + def wrapper(*args): + new_env = Environment(lam.env) + for i, param in enumerate(lam.params): + if i < len(args): + new_env.set(param, args[i]) + else: + 
new_env.set(param, None) + return self.eval(lam.body, new_env) + return wrapper + + def _apply(self, fn: Any, args: List[Any], kwargs: Dict[str, Any], env: Environment) -> Any: + """Apply a function to arguments.""" + if isinstance(fn, Lambda): + # User-defined function + new_env = Environment(fn.env) + for i, param in enumerate(fn.params): + if i < len(args): + new_env.set(param, args[i]) + else: + new_env.set(param, None) + return self.eval(fn.body, new_env) + + elif callable(fn): + # Wrap any Lambda arguments so primitives can call them + wrapped_args = [] + for arg in args: + if isinstance(arg, Lambda): + wrapped_args.append(self._wrap_lambda(arg)) + else: + wrapped_args.append(arg) + + # Inject _interp and _env for primitives that need them + import inspect + try: + sig = inspect.signature(fn) + params = sig.parameters + if '_interp' in params and '_interp' not in kwargs: + kwargs['_interp'] = self + if '_env' in params and '_env' not in kwargs: + kwargs['_env'] = env + except (ValueError, TypeError): + # Some built-in functions don't have inspectable signatures + pass + + # Primitive function + if kwargs: + return fn(*wrapped_args, **kwargs) + return fn(*wrapped_args) + + else: + raise TypeError(f"Cannot call: {fn}") + + def _parse_bindings(self, bindings: list) -> list: + """Parse bindings in either Scheme or Clojure style. 
+ + Scheme: ((x 1) (y 2)) -> [(x, 1), (y, 2)] + Clojure: [x 1 y 2] -> [(x, 1), (y, 2)] + """ + if not bindings: + return [] + + # Check if Clojure style (flat list with symbols and values alternating) + if _is_symbol(bindings[0]): + # Clojure style: [x 1 y 2] + pairs = [] + i = 0 + while i < len(bindings) - 1: + name = bindings[i].name if _is_symbol(bindings[i]) else bindings[i] + value = bindings[i + 1] + pairs.append((name, value)) + i += 2 + return pairs + else: + # Scheme style: ((x 1) (y 2)) + pairs = [] + for binding in bindings: + name = binding[0].name if _is_symbol(binding[0]) else binding[0] + value = binding[1] + pairs.append((name, value)) + return pairs + + def _eval_let(self, expr: Any, env: Environment) -> Any: + """Evaluate let expression: (let ((x 1) (y 2)) body) or (let [x 1 y 2] body) + + Note: Uses sequential binding (like Clojure let / Scheme let*) so each + binding can reference previous bindings. + """ + bindings = expr[1] + body = expr[2] + + new_env = Environment(env) + for name, value_expr in self._parse_bindings(bindings): + value = self.eval(value_expr, new_env) # Sequential: can see previous bindings + new_env.set(name, value) + + return self.eval(body, new_env) + + def _eval_let_star(self, expr: Any, env: Environment) -> Any: + """Evaluate let* expression: sequential bindings.""" + bindings = expr[1] + body = expr[2] + + new_env = Environment(env) + for name, value_expr in self._parse_bindings(bindings): + value = self.eval(value_expr, new_env) # Evaluate in current env + new_env.set(name, value) + + return self.eval(body, new_env) + + def _eval_cond(self, expr: Any, env: Environment) -> Any: + """Evaluate cond expression.""" + for clause in expr[1:]: + test = clause[0] + if _is_symbol(test) and test.name == 'else': + return self.eval(clause[1], env) + if self.eval(test, env): + return self.eval(clause[1], env) + return None + + def _eval_with_primitives(self, expr: Any, env: Environment) -> Any: + """ + Evaluate with-primitives: 
    # NOTE(review): the def line of this method lies before the visible chunk;
    # signature reconstructed to match the sibling _eval_require_primitives — confirm.
    def _eval_with_primitives(self, expr: Any, env: Environment) -> Any:
        """
        Evaluate with-primitives: scoped primitive library loading.

        Syntax:
          (with-primitives "math"
            (sin (* x pi)))

          (with-primitives "math" :path "custom/math.py"
            body)

        The primitives from the library are only available within the body.
        """
        # Parse library name and optional path
        lib_name = expr[1]
        if _is_symbol(lib_name):
            lib_name = lib_name.name

        path = None
        body_start = 2

        # Check for :path keyword
        if len(expr) > 2 and _is_keyword(expr[2]) and expr[2].name == 'path':
            path = expr[3]
            body_start = 4

        # Load the primitive library
        primitives = self.load_primitive_library(lib_name, path)

        # Create new environment with primitives so they go out of scope afterwards
        new_env = Environment(env)
        for name, fn in primitives.items():
            new_env.set(name, fn)

        # Evaluate body in new environment; value of last form is returned
        result = None
        for e in expr[body_start:]:
            result = self.eval(e, new_env)
        return result

    def _eval_require_primitives(self, expr: Any, env: Environment) -> Any:
        """
        Evaluate require-primitives: load primitives into current scope.

        Syntax:
          (require-primitives "math" "color" "filters")

        Unlike with-primitives, this loads into the current environment
        (typically used at top-level to set up an effect's dependencies).
        Always returns None.
        """
        for lib_expr in expr[1:]:
            # Library names may be given as symbols or strings
            if _is_symbol(lib_expr):
                lib_name = lib_expr.name
            else:
                lib_name = lib_expr

            primitives = self.load_primitive_library(lib_name)
            for name, fn in primitives.items():
                env.set(name, fn)

        return None

    def load_primitive_library(self, name: str, path: str = None) -> dict:
        """
        Load a primitive library by name or path.

        Returns dict of {name: function}.  Thin delegate to the
        primitive_libs package loader (which caches by name/path).
        """
        from .primitive_libs import load_primitive_library
        return load_primitive_library(name, path)

    def _eval_ascii_fx_zone(self, expr: Any, env: Environment) -> Any:
        """
        Evaluate ascii-fx-zone special form.

        Syntax:
          (ascii-fx-zone frame
            :cols 80
            :alphabet "standard"
            :color_mode "color"
            :background "black"
            :contrast 1.5
            :char_hue <expr>        ;; NOT evaluated - passed to primitive
            :char_saturation <expr>
            :char_brightness <expr>
            :char_scale <expr>
            :char_rotation <expr>
            :char_jitter <expr>)

        The expression parameters (:char_hue, etc.) are NOT pre-evaluated.
        They are passed as raw S-expressions to the primitive which
        evaluates them per-zone with zone context variables injected.

        Requires: (require-primitives "ascii")
        """
        # Look up ascii-fx-zone primitive from environment.
        # It must be loaded via (require-primitives "ascii").
        try:
            prim_ascii_fx_zone = env.get('ascii-fx-zone')
        except NameError:
            raise NameError(
                "ascii-fx-zone primitive not found. "
                "Add (require-primitives \"ascii\") to your effect file."
            )

        # Expression parameter names that should NOT be evaluated here
        expr_params = {'char_hue', 'char_saturation', 'char_brightness',
                       'char_scale', 'char_rotation', 'char_jitter', 'cell_effect'}

        # Parse arguments
        frame = self.eval(expr[1], env)  # First arg is always the frame

        # Defaults
        cols = 80
        char_size = None  # If set, overrides cols
        alphabet = "standard"
        color_mode = "color"
        background = "black"
        contrast = 1.5
        char_hue = None
        char_saturation = None
        char_brightness = None
        char_scale = None
        char_rotation = None
        char_jitter = None
        cell_effect = None  # Lambda for arbitrary per-cell effects
        # Convenience params for staged recipes
        energy = None
        rotation_scale = 0
        # Extra params to pass to zone dict for lambdas
        extra_params = {}

        # Parse keyword arguments (keyword/value pairs; stray non-keywords skipped)
        i = 2
        while i < len(expr):
            item = expr[i]
            if _is_keyword(item):
                if i + 1 >= len(expr):
                    break  # trailing keyword with no value: ignore
                value_expr = expr[i + 1]
                kw_name = item.name

                if kw_name in expr_params:
                    # Resolve symbol references but don't evaluate expressions.
                    # This handles the case where an effect definition passes a
                    # param through, like :char_hue char_hue
                    resolved = value_expr
                    if _is_symbol(value_expr):
                        try:
                            resolved = env.get(value_expr.name)
                        except NameError:
                            resolved = value_expr  # Keep as symbol if not found

                    if kw_name == 'char_hue':
                        char_hue = resolved
                    elif kw_name == 'char_saturation':
                        char_saturation = resolved
                    elif kw_name == 'char_brightness':
                        char_brightness = resolved
                    elif kw_name == 'char_scale':
                        char_scale = resolved
                    elif kw_name == 'char_rotation':
                        char_rotation = resolved
                    elif kw_name == 'char_jitter':
                        char_jitter = resolved
                    elif kw_name == 'cell_effect':
                        cell_effect = resolved
                else:
                    # Evaluate normally
                    value = self.eval(value_expr, env)
                    if kw_name == 'cols':
                        cols = int(value)
                    elif kw_name == 'char_size':
                        # Handle nil/None values
                        if value is None or (_is_symbol(value) and value.name == 'nil'):
                            char_size = None
                        else:
                            char_size = int(value)
                    elif kw_name == 'alphabet':
                        alphabet = str(value)
                    elif kw_name == 'color_mode':
                        color_mode = str(value)
                    elif kw_name == 'background':
                        background = str(value)
                    elif kw_name == 'contrast':
                        contrast = float(value)
                    elif kw_name == 'energy':
                        if value is None or (_is_symbol(value) and value.name == 'nil'):
                            energy = None
                        else:
                            energy = float(value)
                    elif kw_name == 'rotation_scale':
                        rotation_scale = float(value)
                    else:
                        # Store any other params for lambdas to access
                        extra_params[kw_name] = value
                i += 2
            else:
                i += 1

        # If energy and rotation_scale provided, build rotation expression:
        #   rotation = energy * rotation_scale * position_factor
        # position_factor: bottom-left=0, top-right=3
        # Formula: 1.5 * (zone-col-norm + (1 - zone-row-norm))
        if energy is not None and rotation_scale > 0:
            # Build expression as S-expression list that will be evaluated per-zone:
            # (* (* energy rotation_scale) (* 1.5 (+ zone-col-norm (- 1 zone-row-norm))))
            energy_times_scale = energy * rotation_scale
            # The position part uses zone variables, so we build it as an expression
            char_rotation = [
                Symbol('*'),
                energy_times_scale,
                [Symbol('*'), 1.5,
                 [Symbol('+'), Symbol('zone-col-norm'),
                  [Symbol('-'), 1, Symbol('zone-row-norm')]]]
            ]

        # Pull any extra params from environment that aren't standard params.
        # These are typically passed from recipes for use in cell_effect lambdas.
        standard_params = {
            'cols', 'char_size', 'alphabet', 'color_mode', 'background', 'contrast',
            'char_hue', 'char_saturation', 'char_brightness', 'char_scale',
            'char_rotation', 'char_jitter', 'cell_effect', 'energy', 'rotation_scale',
            'frame', 't', '_time', '__state__', '__interp__', 'true', 'false', 'nil'
        }
        # Walk the whole environment chain; explicit extra_params win over bindings
        current_env = env
        while current_env is not None:
            for k, v in current_env.bindings.items():
                if k not in standard_params and k not in extra_params and not callable(v):
                    # Add non-standard, non-callable scalar bindings to extra_params
                    if isinstance(v, (int, float, str, bool)) or v is None:
                        extra_params[k] = v
            current_env = current_env.parent

        # Call the primitive with interpreter and env for expression evaluation
        return prim_ascii_fx_zone(
            frame,
            cols=cols,
            char_size=char_size,
            alphabet=alphabet,
            color_mode=color_mode,
            background=background,
            contrast=contrast,
            char_hue=char_hue,
            char_saturation=char_saturation,
            char_brightness=char_brightness,
            char_scale=char_scale,
            char_rotation=char_rotation,
            char_jitter=char_jitter,
            cell_effect=cell_effect,
            energy=energy,
            rotation_scale=rotation_scale,
            _interp=self,
            _env=env,
            **extra_params
        )

    def _define_effect(self, expr: Any, env: Environment) -> EffectDefinition:
        """
        Parse effect definition.

        Required syntax:
          (define-effect name
            :params (
              (param1 :type int :default 8 :desc "description")
            )
            body)

        Effects MUST use :params syntax. Legacy ((param default) ...) is not supported.

        Registers the effect in self.effects and returns the EffectDefinition.
        """
        name = expr[1].name if _is_symbol(expr[1]) else expr[1]

        params = {}
        body = None
        found_params = False

        # Parse :params and body
        i = 2
        while i < len(expr):
            item = expr[i]
            if _is_keyword(item) and item.name == "params":
                # :params syntax
                if i + 1 >= len(expr):
                    raise SyntaxError(f"Effect '{name}': Missing params list after :params keyword")
                params_list = expr[i + 1]
                params = self._parse_params_block(params_list)
                found_params = True
                i += 2
            elif _is_keyword(item):
                # Skip other keywords (like :desc) and their values
                i += 2
            elif body is None:
                # First non-keyword item is the body
                if isinstance(item, list) and item:
                    first_elem = item[0]
                    # Check for legacy syntax and reject it
                    if isinstance(first_elem, list) and len(first_elem) >= 2:
                        raise SyntaxError(
                            f"Effect '{name}': Legacy parameter syntax ((name default) ...) is not supported. "
                            f"Use :params block instead."
                        )
                body = item
                i += 1
            else:
                i += 1

        if body is None:
            raise SyntaxError(f"Effect '{name}': No body found")

        if not found_params:
            raise SyntaxError(
                f"Effect '{name}': Missing :params block. "
                f"For effects with no parameters, use empty :params ()"
            )

        effect = EffectDefinition(name, params, body)
        self.effects[name] = effect
        return effect

    def _parse_params_block(self, params_list: list) -> Dict[str, Any]:
        """
        Parse :params block syntax:
          (
            (param_name :type int :default 8 :range [4 32] :desc "description")
          )

        Only :default is extracted here; :type/:range/:desc are skipped.
        Returns {param_name: default_or_None}.
        """
        params = {}
        for param_def in params_list:
            if not isinstance(param_def, list) or len(param_def) < 1:
                continue

            # First element is the parameter name (symbol or string)
            first = param_def[0]
            if _is_symbol(first):
                param_name = first.name
            elif isinstance(first, str):
                param_name = first
            else:
                continue

            # Parse keyword arguments
            default = None
            i = 1
            while i < len(param_def):
                item = param_def[i]
                if _is_keyword(item):
                    if i + 1 >= len(param_def):
                        break
                    kw_value = param_def[i + 1]

                    if item.name == "default":
                        default = kw_value
                    i += 2
                else:
                    i += 1

            params[param_name] = default

        return params

    def load_effect(self, path: str) -> EffectDefinition:
        """Load an effect definition from a .sexp file.

        Returns the most recently defined effect, or None if the file
        defined nothing.
        """
        expr = parse_file(path)

        # Handle multiple top-level expressions (parser returns a list of lists)
        if isinstance(expr, list) and expr and isinstance(expr[0], list):
            for e in expr:
                self.eval(e)
        else:
            self.eval(expr)

        # Return the last defined effect
        if self.effects:
            return list(self.effects.values())[-1]
        return None
    # NOTE(review): def line visible at end of the previous chunk line; reproduced here.
    def load_effect_from_string(self, sexp_content: str, effect_name: str = None) -> EffectDefinition:
        """Load an effect definition from an S-expression string.

        Args:
            sexp_content: The S-expression content as a string
            effect_name: Optional name hint (used if effect doesn't define its own name)

        Returns:
            The loaded EffectDefinition, or None if nothing was defined.
        """
        expr = parse(sexp_content)

        # Handle multiple top-level expressions
        if isinstance(expr, list) and expr and isinstance(expr[0], list):
            for e in expr:
                self.eval(e)
        else:
            self.eval(expr)

        # Return the effect if we can find it by name
        if effect_name and effect_name in self.effects:
            return self.effects[effect_name]

        # Return the most recently loaded effect
        if self.effects:
            return list(self.effects.values())[-1]

        return None

    def run_effect(self, name: str, frame, params: Dict[str, Any],
                   state: Dict[str, Any]) -> tuple:
        """
        Run an effect on frame(s).

        Args:
            name: Effect name
            frame: Input frame (H, W, 3) RGB uint8, or list of frames for multi-input
            params: Effect parameters (overrides defaults)
            state: Persistent state dict (may be None; replaced by {})

        Returns:
            (output_frame, new_state)

        Raises:
            ValueError: if the effect name has not been loaded.
        """
        if name not in self.effects:
            raise ValueError(f"Unknown effect: {name}")

        effect = self.effects[name]

        # Create environment for this run
        env = Environment(self.global_env)

        # Bind frame(s) - support both single frame and list of frames
        if isinstance(frame, list):
            # Multi-input effect
            frames = frame
            env.set('frame', frames[0] if frames else None)  # Backwards compat
            env.set('inputs', frames)
            # Named frame bindings: frame-a, frame-b, etc.
            for i, f in enumerate(frames):
                env.set(f'frame-{chr(ord("a") + i)}', f)
        else:
            # Single-input effect
            env.set('frame', frame)

        # Bind state
        if state is None:
            state = {}
        env.set('__state__', state)

        # Validate that all provided params are known (except internal params).
        # Extra params are allowed and will be passed through to cell_effect lambdas.
        known_params = set(effect.params.keys())
        internal_params = {'_time', 'seed', '_binding', 'effect', 'cid', 'hash', 'effect_path'}
        extra_effect_params = {}  # Unknown params passed through for cell_effect lambdas
        for k in params.keys():
            if k not in known_params and k not in internal_params:
                # Allow unknown params - they'll be passed to cell_effect lambdas via zone dict
                extra_effect_params[k] = params[k]

        # Bind parameters (defaults + overrides).
        # NOTE(review): an explicit param value of None falls back to the default.
        for pname, pdefault in effect.params.items():
            value = params.get(pname)
            if value is None:
                # Evaluate default if it's an expression (list)
                if isinstance(pdefault, list):
                    value = self.eval(pdefault, env)
                else:
                    value = pdefault
            env.set(pname, value)

        # Bind extra params (unknown params passed through for cell_effect lambdas)
        for k, v in extra_effect_params.items():
            env.set(k, v)

        # Reset RNG with seed if provided (default 42 for determinism)
        seed = params.get('seed', 42)
        reset_rng(int(seed))

        # Bind time if provided (both 't' and '_time' aliases)
        time_val = params.get('_time', 0)
        env.set('t', time_val)
        env.set('_time', time_val)

        # Evaluate body
        result = self.eval(effect.body, env)

        # Ensure result is an image; otherwise fall back to the input
        # NOTE(review): for multi-input calls this returns the input *list* — confirm callers handle it.
        if not isinstance(result, np.ndarray):
            result = frame

        return result, state

    def eval_with_zone(self, expr, env: Environment, zone) -> Any:
        """
        Evaluate expression with zone-* variables injected.

        Args:
            expr: Expression to evaluate (S-expression)
            env: Parent environment with bound values
            zone: ZoneContext object with cell data

        Zone variables injected:
            zone-row, zone-col: Grid position (integers)
            zone-row-norm, zone-col-norm: Normalized position (0-1)
            zone-lum: Cell luminance (0-1)
            zone-sat: Cell saturation (0-1)
            zone-hue: Cell hue (0-360)
            zone-r, zone-g, zone-b: RGB components (0-1)

        Returns:
            Evaluated result (typically a number)
        """
        # Create child environment with zone variables
        zone_env = Environment(env)
        zone_env.set('zone-row', zone.row)
        zone_env.set('zone-col', zone.col)
        zone_env.set('zone-row-norm', zone.row_norm)
        zone_env.set('zone-col-norm', zone.col_norm)
        zone_env.set('zone-lum', zone.luminance)
        zone_env.set('zone-sat', zone.saturation)
        zone_env.set('zone-hue', zone.hue)
        zone_env.set('zone-r', zone.r)
        zone_env.set('zone-g', zone.g)
        zone_env.set('zone-b', zone.b)

        return self.eval(expr, zone_env)


# =============================================================================
# Convenience Functions
# =============================================================================

# Module-level singletons: one full interpreter, one with core-only primitives.
_interpreter = None
_interpreter_minimal = None


def get_interpreter(minimal_primitives: bool = False) -> Interpreter:
    """Get or create the global interpreter.

    Args:
        minimal_primitives: If True, return interpreter with only core primitives.
            Additional primitives must be loaded with require-primitives or
            with-primitives.
    """
    global _interpreter, _interpreter_minimal

    if minimal_primitives:
        if _interpreter_minimal is None:
            _interpreter_minimal = Interpreter(minimal_primitives=True)
        return _interpreter_minimal
    else:
        if _interpreter is None:
            _interpreter = Interpreter(minimal_primitives=False)
        return _interpreter


def load_effect(path: str) -> EffectDefinition:
    """Load an effect from a .sexp file (module-level convenience wrapper)."""
    return get_interpreter().load_effect(path)


def load_effects_dir(directory: str):
    """Load all .sexp effects from a directory; failures warn but do not abort."""
    interp = get_interpreter()
    dir_path = Path(directory)
    for path in dir_path.glob('*.sexp'):
        try:
            interp.load_effect(str(path))
        except Exception as e:
            print(f"Warning: Failed to load {path}: {e}")


def run_effect(name: str, frame: np.ndarray, params: Dict[str, Any],
               state: Dict[str, Any] = None) -> tuple:
    """Run an effect on the global interpreter. Returns (frame, state)."""
    return get_interpreter().run_effect(name, frame, params, state or {})


def list_effects() -> List[str]:
    """List loaded effect names."""
    return list(get_interpreter().effects.keys())
+ """ + interp = get_interpreter() + interp.load_effect(effect_path) + effect_name = Path(effect_path).stem + + def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: + return interp.run_effect(effect_name, frame, params, state) + + return process_frame diff --git a/sexp_effects/parser.py b/sexp_effects/parser.py new file mode 100644 index 0000000..12bedfd --- /dev/null +++ b/sexp_effects/parser.py @@ -0,0 +1,168 @@ +""" +S-Expression Parser + +Parses S-expressions into Python data structures: +- Lists become Python lists +- Symbols become Symbol objects +- Numbers become int/float +- Strings become str +- Keywords (:foo) become Keyword objects +""" + +import re +from dataclasses import dataclass +from typing import Any, List, Union + + +@dataclass(frozen=True) +class Symbol: + """A symbol (identifier) in the S-expression.""" + name: str + + def __repr__(self): + return self.name + + +@dataclass(frozen=True) +class Keyword: + """A keyword like :foo in the S-expression.""" + name: str + + def __repr__(self): + return f":{self.name}" + + +# Token patterns +TOKEN_PATTERNS = [ + (r'\s+', None), # Whitespace (skip) + (r';[^\n]*', None), # Comments (skip) + (r'\(', 'LPAREN'), + (r'\)', 'RPAREN'), + (r'\[', 'LBRACKET'), + (r'\]', 'RBRACKET'), + (r"'", 'QUOTE'), + (r'"([^"\\]|\\.)*"', 'STRING'), + (r':[a-zA-Z_][a-zA-Z0-9_\-]*', 'KEYWORD'), + (r'-?[0-9]+\.[0-9]+', 'FLOAT'), + (r'-?[0-9]+', 'INT'), + (r'#t|#f|true|false', 'BOOL'), + (r'[a-zA-Z_+\-*/<>=!?][a-zA-Z0-9_+\-*/<>=!?]*', 'SYMBOL'), +] + +TOKEN_REGEX = '|'.join(f'(?P<{name}>{pattern})' if name else f'(?:{pattern})' + for pattern, name in TOKEN_PATTERNS) + + +def tokenize(source: str) -> List[tuple]: + """Tokenize S-expression source code.""" + tokens = [] + for match in re.finditer(TOKEN_REGEX, source): + kind = match.lastgroup + value = match.group() + if kind: + tokens.append((kind, value)) + return tokens + + +def parse(source: str) -> Any: + """Parse S-expression source into Python data 
structures.""" + tokens = tokenize(source) + pos = [0] # Use list for mutability in nested function + + def parse_expr(): + if pos[0] >= len(tokens): + raise SyntaxError("Unexpected end of input") + + kind, value = tokens[pos[0]] + + if kind == 'LPAREN': + pos[0] += 1 + items = [] + while pos[0] < len(tokens) and tokens[pos[0]][0] != 'RPAREN': + items.append(parse_expr()) + if pos[0] >= len(tokens): + raise SyntaxError("Missing closing parenthesis") + pos[0] += 1 # Skip RPAREN + return items + + if kind == 'LBRACKET': + pos[0] += 1 + items = [] + while pos[0] < len(tokens) and tokens[pos[0]][0] != 'RBRACKET': + items.append(parse_expr()) + if pos[0] >= len(tokens): + raise SyntaxError("Missing closing bracket") + pos[0] += 1 # Skip RBRACKET + return items + + elif kind == 'RPAREN': + raise SyntaxError("Unexpected closing parenthesis") + + elif kind == 'QUOTE': + pos[0] += 1 + return [Symbol('quote'), parse_expr()] + + elif kind == 'STRING': + pos[0] += 1 + # Remove quotes and unescape + return value[1:-1].replace('\\"', '"').replace('\\n', '\n') + + elif kind == 'INT': + pos[0] += 1 + return int(value) + + elif kind == 'FLOAT': + pos[0] += 1 + return float(value) + + elif kind == 'BOOL': + pos[0] += 1 + return value in ('#t', 'true') + + elif kind == 'KEYWORD': + pos[0] += 1 + return Keyword(value[1:]) # Remove leading : + + elif kind == 'SYMBOL': + pos[0] += 1 + return Symbol(value) + + else: + raise SyntaxError(f"Unknown token: {kind} {value}") + + result = parse_expr() + + # Check for multiple top-level expressions + if pos[0] < len(tokens): + # Allow multiple top-level expressions, return as list + results = [result] + while pos[0] < len(tokens): + results.append(parse_expr()) + return results + + return result + + +def parse_file(path: str) -> Any: + """Parse an S-expression file.""" + with open(path, 'r') as f: + return parse(f.read()) + + +# Convenience for pretty-printing +def to_sexp(obj: Any) -> str: + """Convert Python object back to S-expression 
string.""" + if isinstance(obj, list): + return '(' + ' '.join(to_sexp(x) for x in obj) + ')' + elif isinstance(obj, Symbol): + return obj.name + elif isinstance(obj, Keyword): + return f':{obj.name}' + elif isinstance(obj, str): + return f'"{obj}"' + elif isinstance(obj, bool): + return '#t' if obj else '#f' + elif isinstance(obj, (int, float)): + return str(obj) + else: + return repr(obj) diff --git a/sexp_effects/primitive_libs/__init__.py b/sexp_effects/primitive_libs/__init__.py new file mode 100644 index 0000000..47ee174 --- /dev/null +++ b/sexp_effects/primitive_libs/__init__.py @@ -0,0 +1,102 @@ +""" +Primitive Libraries System + +Provides modular loading of primitives. Core primitives are always available, +additional primitive libraries can be loaded on-demand with scoped availability. + +Usage in sexp: + ;; Load at recipe level - available throughout + (primitives math :path "primitive_libs/math.py") + + ;; Or use with-primitives for scoped access + (with-primitives "image" + (blur frame 3)) ;; blur only available inside + + ;; Nested scopes work + (with-primitives "math" + (with-primitives "color" + (hue-shift frame (* (sin t) 30)))) + +Library file format (primitive_libs/math.py): + import math + + def prim_sin(x): return math.sin(x) + def prim_cos(x): return math.cos(x) + + PRIMITIVES = { + 'sin': prim_sin, + 'cos': prim_cos, + } +""" + +import importlib.util +from pathlib import Path +from typing import Dict, Callable, Any, Optional + +# Cache of loaded primitive libraries +_library_cache: Dict[str, Dict[str, Any]] = {} + +# Core primitives - always available, cannot be overridden +CORE_PRIMITIVES: Dict[str, Any] = {} + + +def register_core_primitive(name: str, fn: Callable): + """Register a core primitive that's always available.""" + CORE_PRIMITIVES[name] = fn + + +def load_primitive_library(name: str, path: Optional[str] = None) -> Dict[str, Any]: + """ + Load a primitive library by name or path. 
+ + Args: + name: Library name (e.g., "math", "image", "color") + path: Optional explicit path to library file + + Returns: + Dict of primitive name -> function + """ + # Check cache first + cache_key = path or name + if cache_key in _library_cache: + return _library_cache[cache_key] + + # Find library file + if path: + lib_path = Path(path) + else: + # Look in standard locations + lib_dir = Path(__file__).parent + lib_path = lib_dir / f"{name}.py" + + if not lib_path.exists(): + raise ValueError(f"Primitive library '{name}' not found at {lib_path}") + + if not lib_path.exists(): + raise ValueError(f"Primitive library file not found: {lib_path}") + + # Load the module + spec = importlib.util.spec_from_file_location(f"prim_lib_{name}", lib_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + + # Get PRIMITIVES dict from module + if not hasattr(module, 'PRIMITIVES'): + raise ValueError(f"Primitive library '{name}' missing PRIMITIVES dict") + + primitives = module.PRIMITIVES + + # Cache and return + _library_cache[cache_key] = primitives + return primitives + + +def get_library_names() -> list: + """Get names of available primitive libraries.""" + lib_dir = Path(__file__).parent + return [p.stem for p in lib_dir.glob("*.py") if p.stem != "__init__"] + + +def clear_cache(): + """Clear the library cache (useful for testing).""" + _library_cache.clear() diff --git a/sexp_effects/primitive_libs/arrays.py b/sexp_effects/primitive_libs/arrays.py new file mode 100644 index 0000000..61da196 --- /dev/null +++ b/sexp_effects/primitive_libs/arrays.py @@ -0,0 +1,196 @@ +""" +Array Primitives Library + +Vectorized operations on numpy arrays for coordinate transformations. 
+""" +import numpy as np + + +# Arithmetic +def prim_arr_add(a, b): + return np.add(a, b) + + +def prim_arr_sub(a, b): + return np.subtract(a, b) + + +def prim_arr_mul(a, b): + return np.multiply(a, b) + + +def prim_arr_div(a, b): + return np.divide(a, b) + + +def prim_arr_mod(a, b): + return np.mod(a, b) + + +def prim_arr_neg(a): + return np.negative(a) + + +# Math functions +def prim_arr_sin(a): + return np.sin(a) + + +def prim_arr_cos(a): + return np.cos(a) + + +def prim_arr_tan(a): + return np.tan(a) + + +def prim_arr_sqrt(a): + return np.sqrt(np.maximum(a, 0)) + + +def prim_arr_pow(a, b): + return np.power(a, b) + + +def prim_arr_abs(a): + return np.abs(a) + + +def prim_arr_exp(a): + return np.exp(a) + + +def prim_arr_log(a): + return np.log(np.maximum(a, 1e-10)) + + +def prim_arr_atan2(y, x): + return np.arctan2(y, x) + + +# Comparison / selection +def prim_arr_min(a, b): + return np.minimum(a, b) + + +def prim_arr_max(a, b): + return np.maximum(a, b) + + +def prim_arr_clip(a, lo, hi): + return np.clip(a, lo, hi) + + +def prim_arr_where(cond, a, b): + return np.where(cond, a, b) + + +def prim_arr_floor(a): + return np.floor(a) + + +def prim_arr_ceil(a): + return np.ceil(a) + + +def prim_arr_round(a): + return np.round(a) + + +# Interpolation +def prim_arr_lerp(a, b, t): + return a + (b - a) * t + + +def prim_arr_smoothstep(edge0, edge1, x): + t = prim_arr_clip((x - edge0) / (edge1 - edge0), 0.0, 1.0) + return t * t * (3 - 2 * t) + + +# Creation +def prim_arr_zeros(shape): + return np.zeros(shape, dtype=np.float32) + + +def prim_arr_ones(shape): + return np.ones(shape, dtype=np.float32) + + +def prim_arr_full(shape, value): + return np.full(shape, value, dtype=np.float32) + + +def prim_arr_arange(start, stop, step=1): + return np.arange(start, stop, step, dtype=np.float32) + + +def prim_arr_linspace(start, stop, num): + return np.linspace(start, stop, num, dtype=np.float32) + + +def prim_arr_meshgrid(x, y): + return np.meshgrid(x, y) + + +# Coordinate 
transforms +def prim_polar_from_center(map_x, map_y, cx, cy): + """Convert Cartesian to polar coordinates centered at (cx, cy).""" + dx = map_x - cx + dy = map_y - cy + r = np.sqrt(dx**2 + dy**2) + theta = np.arctan2(dy, dx) + return (r, theta) + + +def prim_cart_from_polar(r, theta, cx, cy): + """Convert polar to Cartesian, adding center offset.""" + x = r * np.cos(theta) + cx + y = r * np.sin(theta) + cy + return (x, y) + + +PRIMITIVES = { + # Arithmetic + 'arr+': prim_arr_add, + 'arr-': prim_arr_sub, + 'arr*': prim_arr_mul, + 'arr/': prim_arr_div, + 'arr-mod': prim_arr_mod, + 'arr-neg': prim_arr_neg, + + # Math + 'arr-sin': prim_arr_sin, + 'arr-cos': prim_arr_cos, + 'arr-tan': prim_arr_tan, + 'arr-sqrt': prim_arr_sqrt, + 'arr-pow': prim_arr_pow, + 'arr-abs': prim_arr_abs, + 'arr-exp': prim_arr_exp, + 'arr-log': prim_arr_log, + 'arr-atan2': prim_arr_atan2, + + # Selection + 'arr-min': prim_arr_min, + 'arr-max': prim_arr_max, + 'arr-clip': prim_arr_clip, + 'arr-where': prim_arr_where, + 'arr-floor': prim_arr_floor, + 'arr-ceil': prim_arr_ceil, + 'arr-round': prim_arr_round, + + # Interpolation + 'arr-lerp': prim_arr_lerp, + 'arr-smoothstep': prim_arr_smoothstep, + + # Creation + 'arr-zeros': prim_arr_zeros, + 'arr-ones': prim_arr_ones, + 'arr-full': prim_arr_full, + 'arr-arange': prim_arr_arange, + 'arr-linspace': prim_arr_linspace, + 'arr-meshgrid': prim_arr_meshgrid, + + # Coordinates + 'polar-from-center': prim_polar_from_center, + 'cart-from-polar': prim_cart_from_polar, +} diff --git a/sexp_effects/primitive_libs/ascii.py b/sexp_effects/primitive_libs/ascii.py new file mode 100644 index 0000000..858f010 --- /dev/null +++ b/sexp_effects/primitive_libs/ascii.py @@ -0,0 +1,388 @@ +""" +ASCII Art Primitives Library + +ASCII art rendering with per-zone expression evaluation and cell effects. 
+""" +import numpy as np +import cv2 +from PIL import Image, ImageDraw, ImageFont +from typing import Any, Dict, List, Optional, Callable +import colorsys + + +# Character sets +CHAR_SETS = { + "standard": " .:-=+*#%@", + "blocks": " ░▒▓█", + "simple": " .:oO@", + "digits": "0123456789", + "binary": "01", + "ascii": " `.-':_,^=;><+!rc*/z?sLTv)J7(|Fi{C}fI31tlu[neoZ5Yxjya]2ESwqkP6h9d4VpOGbUAKXHm8RD#$Bg0MNWQ%&@", +} + +# Default font +_default_font = None + + +def _get_font(size: int): + """Get monospace font at given size.""" + global _default_font + try: + return ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", size) + except: + return ImageFont.load_default() + + +def _parse_color(color_str: str) -> tuple: + """Parse color string to RGB tuple.""" + if color_str.startswith('#'): + hex_color = color_str[1:] + if len(hex_color) == 3: + hex_color = ''.join(c*2 for c in hex_color) + return tuple(int(hex_color[i:i+2], 16) for i in (0, 2, 4)) + + colors = { + 'black': (0, 0, 0), 'white': (255, 255, 255), + 'red': (255, 0, 0), 'green': (0, 255, 0), 'blue': (0, 0, 255), + 'yellow': (255, 255, 0), 'cyan': (0, 255, 255), 'magenta': (255, 0, 255), + 'gray': (128, 128, 128), 'grey': (128, 128, 128), + } + return colors.get(color_str.lower(), (0, 0, 0)) + + +def _cell_sample(frame: np.ndarray, cell_size: int): + """Sample frame into cells, returning colors and luminances. + + Uses cv2.resize with INTER_AREA (pixel-area averaging) which is + ~25x faster than numpy reshape+mean for block downsampling. + """ + h, w = frame.shape[:2] + rows = h // cell_size + cols = w // cell_size + + # Crop to exact grid then block-average via cv2 area interpolation. 
+ cropped = frame[:rows * cell_size, :cols * cell_size] + colors = cv2.resize(cropped, (cols, rows), interpolation=cv2.INTER_AREA) + + luminances = ((0.299 * colors[:, :, 0] + + 0.587 * colors[:, :, 1] + + 0.114 * colors[:, :, 2]) / 255.0).astype(np.float32) + + return colors, luminances + + +def _luminance_to_char(lum: float, alphabet: str, contrast: float) -> str: + """Map luminance to character.""" + chars = CHAR_SETS.get(alphabet, alphabet) + lum = ((lum - 0.5) * contrast + 0.5) + lum = max(0, min(1, lum)) + idx = int(lum * (len(chars) - 1)) + return chars[idx] + + +def _render_char_cell(char: str, cell_size: int, color: tuple, bg_color: tuple) -> np.ndarray: + """Render a single character to a cell image.""" + img = Image.new('RGB', (cell_size, cell_size), bg_color) + draw = ImageDraw.Draw(img) + font = _get_font(cell_size) + + # Center the character + bbox = draw.textbbox((0, 0), char, font=font) + text_w = bbox[2] - bbox[0] + text_h = bbox[3] - bbox[1] + x = (cell_size - text_w) // 2 + y = (cell_size - text_h) // 2 - bbox[1] + + draw.text((x, y), char, fill=color, font=font) + return np.array(img) + + +def prim_ascii_fx_zone( + frame: np.ndarray, + cols: int = 80, + char_size: int = None, + alphabet: str = "standard", + color_mode: str = "color", + background: str = "black", + contrast: float = 1.5, + char_hue = None, + char_saturation = None, + char_brightness = None, + char_scale = None, + char_rotation = None, + char_jitter = None, + cell_effect = None, + energy: float = None, + rotation_scale: float = 0, + _interp = None, + _env = None, + **extra_params +) -> np.ndarray: + """ + Render frame as ASCII art with per-zone effects. 
+ + Args: + frame: Input image + cols: Number of character columns + char_size: Cell size in pixels (overrides cols if set) + alphabet: Character set name or custom string + color_mode: "color", "mono", "invert", or color name + background: Background color name or hex + contrast: Contrast for character selection + char_hue/saturation/brightness/scale/rotation/jitter: Per-zone expressions + cell_effect: Lambda (cell, zone) -> cell for per-cell effects + energy: Energy value from audio analysis + rotation_scale: Max rotation degrees + _interp: Interpreter (auto-injected) + _env: Environment (auto-injected) + **extra_params: Additional params passed to zone dict + """ + h, w = frame.shape[:2] + + # Calculate cell size + if char_size is None or char_size == 0: + cell_size = max(4, w // cols) + else: + cell_size = max(4, int(char_size)) + + # Sample cells + colors, luminances = _cell_sample(frame, cell_size) + rows, cols_actual = luminances.shape + + # Parse background color + bg_color = _parse_color(background) + + # Create output image + out_h = rows * cell_size + out_w = cols_actual * cell_size + output = np.full((out_h, out_w, 3), bg_color, dtype=np.uint8) + + # Check if we have cell_effect + has_cell_effect = cell_effect is not None + + # Process each cell + for r in range(rows): + for c in range(cols_actual): + lum = luminances[r, c] + cell_color = tuple(colors[r, c]) + + # Build zone context + zone = { + 'row': r, + 'col': c, + 'row-norm': r / max(1, rows - 1), + 'col-norm': c / max(1, cols_actual - 1), + 'lum': float(lum), + 'r': cell_color[0] / 255, + 'g': cell_color[1] / 255, + 'b': cell_color[2] / 255, + 'cell_size': cell_size, + } + + # Add HSV + r_f, g_f, b_f = cell_color[0]/255, cell_color[1]/255, cell_color[2]/255 + hsv = colorsys.rgb_to_hsv(r_f, g_f, b_f) + zone['hue'] = hsv[0] * 360 + zone['sat'] = hsv[1] + + # Add energy and rotation_scale + if energy is not None: + zone['energy'] = energy + zone['rotation_scale'] = rotation_scale + + # Add extra 
params + for k, v in extra_params.items(): + if isinstance(v, (int, float, str, bool)) or v is None: + zone[k] = v + + # Get character + char = _luminance_to_char(lum, alphabet, contrast) + zone['char'] = char + + # Determine cell color based on mode + if color_mode == "mono": + render_color = (255, 255, 255) + elif color_mode == "invert": + render_color = tuple(255 - c for c in cell_color) + elif color_mode == "color": + render_color = cell_color + else: + render_color = _parse_color(color_mode) + + zone['color'] = render_color + + # Render character to cell + cell_img = _render_char_cell(char, cell_size, render_color, bg_color) + + # Apply cell_effect if provided + if has_cell_effect and _interp is not None: + cell_img = _apply_cell_effect(cell_img, zone, cell_effect, _interp, _env, extra_params) + + # Paste cell to output + y1, y2 = r * cell_size, (r + 1) * cell_size + x1, x2 = c * cell_size, (c + 1) * cell_size + output[y1:y2, x1:x2] = cell_img + + # Resize to match input dimensions + if output.shape[:2] != frame.shape[:2]: + output = cv2.resize(output, (w, h), interpolation=cv2.INTER_LINEAR) + + return output + + +def _apply_cell_effect(cell_img, zone, cell_effect, interp, env, extra_params): + """Apply cell_effect lambda to a cell image. + + cell_effect is a Lambda object with params and body. + We create a child environment with zone variables and cell, + then evaluate the lambda body. 
+ """ + # Get Environment class from the interpreter's module + Environment = type(env) + + # Create child environment with zone variables + cell_env = Environment(env) + + # Bind zone variables + for k, v in zone.items(): + cell_env.set(k, v) + + # Also bind with zone- prefix for consistency + cell_env.set('zone-row', zone.get('row', 0)) + cell_env.set('zone-col', zone.get('col', 0)) + cell_env.set('zone-row-norm', zone.get('row-norm', 0)) + cell_env.set('zone-col-norm', zone.get('col-norm', 0)) + cell_env.set('zone-lum', zone.get('lum', 0)) + cell_env.set('zone-sat', zone.get('sat', 0)) + cell_env.set('zone-hue', zone.get('hue', 0)) + cell_env.set('zone-r', zone.get('r', 0)) + cell_env.set('zone-g', zone.get('g', 0)) + cell_env.set('zone-b', zone.get('b', 0)) + + # Inject loaded effects as callable functions + if hasattr(interp, 'effects'): + for effect_name in interp.effects: + def make_effect_fn(name): + def effect_fn(frame, *args): + params = {} + if name == 'blur' and len(args) >= 1: + params['radius'] = args[0] + elif name == 'rotate' and len(args) >= 1: + params['angle'] = args[0] + elif name == 'brightness' and len(args) >= 1: + params['amount'] = args[0] + elif name == 'contrast' and len(args) >= 1: + params['amount'] = args[0] + elif name == 'saturation' and len(args) >= 1: + params['amount'] = args[0] + elif name == 'hue_shift' and len(args) >= 1: + params['degrees'] = args[0] + elif name == 'rgb_split' and len(args) >= 2: + params['offset_x'] = args[0] + params['offset_y'] = args[1] + elif name == 'pixelate' and len(args) >= 1: + params['size'] = args[0] + elif name == 'invert': + pass + result, _ = interp.run_effect(name, frame, params, {}) + return result + return effect_fn + cell_env.set(effect_name, make_effect_fn(effect_name)) + + # Bind cell image and zone dict + cell_env.set('cell', cell_img) + cell_env.set('zone', zone) + + # Evaluate the cell_effect lambda + # Lambda has params and body - we need to bind the params then evaluate + if 
hasattr(cell_effect, 'params') and hasattr(cell_effect, 'body'): + # Bind lambda parameters: (lambda [cell zone] body) + if len(cell_effect.params) >= 1: + cell_env.set(cell_effect.params[0], cell_img) + if len(cell_effect.params) >= 2: + cell_env.set(cell_effect.params[1], zone) + + result = interp.eval(cell_effect.body, cell_env) + elif isinstance(cell_effect, list): + # Raw S-expression lambda like (lambda [cell zone] body) or (fn [cell zone] body) + # Check if it's a lambda expression + head = cell_effect[0] if cell_effect else None + head_name = head.name if head and hasattr(head, 'name') else str(head) if head else None + is_lambda = head_name in ('lambda', 'fn') + + if is_lambda: + # (lambda [params...] body) + params = cell_effect[1] if len(cell_effect) > 1 else [] + body = cell_effect[2] if len(cell_effect) > 2 else None + + # Bind lambda parameters + if isinstance(params, list) and len(params) >= 1: + param_name = params[0].name if hasattr(params[0], 'name') else str(params[0]) + cell_env.set(param_name, cell_img) + if isinstance(params, list) and len(params) >= 2: + param_name = params[1].name if hasattr(params[1], 'name') else str(params[1]) + cell_env.set(param_name, zone) + + result = interp.eval(body, cell_env) if body else cell_img + else: + # Some other expression - just evaluate it + result = interp.eval(cell_effect, cell_env) + elif callable(cell_effect): + # It's a callable + result = cell_effect(cell_img, zone) + else: + raise ValueError(f"cell_effect must be a Lambda, list, or callable, got {type(cell_effect)}") + + if isinstance(result, np.ndarray) and result.shape == cell_img.shape: + return result + elif isinstance(result, np.ndarray): + # Shape mismatch - resize to fit + result = cv2.resize(result, (cell_img.shape[1], cell_img.shape[0])) + return result + + raise ValueError(f"cell_effect must return an image array, got {type(result)}") + + +def _get_legacy_ascii_primitives(): + """Import ASCII primitives from legacy primitives module. 
+ + These are loaded lazily to avoid import issues during module loading. + By the time a primitive library is loaded, sexp_effects.primitives + is already in sys.modules (imported by sexp_effects.__init__). + """ + from sexp_effects.primitives import ( + prim_cell_sample, + prim_luminance_to_chars, + prim_render_char_grid, + prim_render_char_grid_fx, + prim_alphabet_char, + prim_alphabet_length, + prim_map_char_grid, + prim_map_colors, + prim_make_char_grid, + prim_set_char, + prim_get_char, + prim_char_grid_dimensions, + cell_sample_extended, + ) + return { + 'cell-sample': prim_cell_sample, + 'cell-sample-extended': cell_sample_extended, + 'luminance-to-chars': prim_luminance_to_chars, + 'render-char-grid': prim_render_char_grid, + 'render-char-grid-fx': prim_render_char_grid_fx, + 'alphabet-char': prim_alphabet_char, + 'alphabet-length': prim_alphabet_length, + 'map-char-grid': prim_map_char_grid, + 'map-colors': prim_map_colors, + 'make-char-grid': prim_make_char_grid, + 'set-char': prim_set_char, + 'get-char': prim_get_char, + 'char-grid-dimensions': prim_char_grid_dimensions, + } + + +PRIMITIVES = { + 'ascii-fx-zone': prim_ascii_fx_zone, + **_get_legacy_ascii_primitives(), +} diff --git a/sexp_effects/primitive_libs/blending.py b/sexp_effects/primitive_libs/blending.py new file mode 100644 index 0000000..0bf345d --- /dev/null +++ b/sexp_effects/primitive_libs/blending.py @@ -0,0 +1,116 @@ +""" +Blending Primitives Library + +Image blending and compositing operations. 
+""" +import numpy as np + + +def prim_blend_images(a, b, alpha): + """Blend two images: a * (1-alpha) + b * alpha.""" + alpha = max(0.0, min(1.0, alpha)) + return (a.astype(float) * (1 - alpha) + b.astype(float) * alpha).astype(np.uint8) + + +def prim_blend_mode(a, b, mode): + """Blend using Photoshop-style blend modes.""" + a = a.astype(float) / 255 + b = b.astype(float) / 255 + + if mode == "multiply": + result = a * b + elif mode == "screen": + result = 1 - (1 - a) * (1 - b) + elif mode == "overlay": + mask = a < 0.5 + result = np.where(mask, 2 * a * b, 1 - 2 * (1 - a) * (1 - b)) + elif mode == "soft-light": + mask = b < 0.5 + result = np.where(mask, + a - (1 - 2 * b) * a * (1 - a), + a + (2 * b - 1) * (np.sqrt(a) - a)) + elif mode == "hard-light": + mask = b < 0.5 + result = np.where(mask, 2 * a * b, 1 - 2 * (1 - a) * (1 - b)) + elif mode == "color-dodge": + result = np.clip(a / (1 - b + 0.001), 0, 1) + elif mode == "color-burn": + result = 1 - np.clip((1 - a) / (b + 0.001), 0, 1) + elif mode == "difference": + result = np.abs(a - b) + elif mode == "exclusion": + result = a + b - 2 * a * b + elif mode == "add": + result = np.clip(a + b, 0, 1) + elif mode == "subtract": + result = np.clip(a - b, 0, 1) + elif mode == "darken": + result = np.minimum(a, b) + elif mode == "lighten": + result = np.maximum(a, b) + else: + # Default to normal (just return b) + result = b + + return (result * 255).astype(np.uint8) + + +def prim_mask(img, mask_img): + """Apply grayscale mask to image (white=opaque, black=transparent).""" + if len(mask_img.shape) == 3: + mask = mask_img[:, :, 0].astype(float) / 255 + else: + mask = mask_img.astype(float) / 255 + + mask = mask[:, :, np.newaxis] + return (img.astype(float) * mask).astype(np.uint8) + + +def prim_alpha_composite(base, overlay, alpha_channel): + """Composite overlay onto base using alpha channel.""" + if len(alpha_channel.shape) == 3: + alpha = alpha_channel[:, :, 0].astype(float) / 255 + else: + alpha = 
alpha_channel.astype(float) / 255 + + alpha = alpha[:, :, np.newaxis] + result = base.astype(float) * (1 - alpha) + overlay.astype(float) * alpha + return result.astype(np.uint8) + + +def prim_overlay(base, overlay, x, y, alpha=1.0): + """Overlay image at position (x, y) with optional alpha.""" + result = base.copy() + x, y = int(x), int(y) + oh, ow = overlay.shape[:2] + bh, bw = base.shape[:2] + + # Clip to bounds + sx1 = max(0, -x) + sy1 = max(0, -y) + dx1 = max(0, x) + dy1 = max(0, y) + sx2 = min(ow, bw - x) + sy2 = min(oh, bh - y) + + if sx2 > sx1 and sy2 > sy1: + src = overlay[sy1:sy2, sx1:sx2] + dst = result[dy1:dy1+(sy2-sy1), dx1:dx1+(sx2-sx1)] + blended = (dst.astype(float) * (1 - alpha) + src.astype(float) * alpha) + result[dy1:dy1+(sy2-sy1), dx1:dx1+(sx2-sx1)] = blended.astype(np.uint8) + + return result + + +PRIMITIVES = { + # Basic blending + 'blend-images': prim_blend_images, + 'blend-mode': prim_blend_mode, + + # Masking + 'mask': prim_mask, + 'alpha-composite': prim_alpha_composite, + + # Overlay + 'overlay': prim_overlay, +} diff --git a/sexp_effects/primitive_libs/color.py b/sexp_effects/primitive_libs/color.py new file mode 100644 index 0000000..0b6854b --- /dev/null +++ b/sexp_effects/primitive_libs/color.py @@ -0,0 +1,137 @@ +""" +Color Primitives Library + +Color manipulation: RGB, HSV, blending, luminance. 
+""" +import numpy as np +import colorsys + + +def prim_rgb(r, g, b): + """Create RGB color as [r, g, b] (0-255).""" + return [int(max(0, min(255, r))), + int(max(0, min(255, g))), + int(max(0, min(255, b)))] + + +def prim_red(c): + return c[0] + + +def prim_green(c): + return c[1] + + +def prim_blue(c): + return c[2] + + +def prim_luminance(c): + """Perceived luminance (0-1) using standard weights.""" + return (0.299 * c[0] + 0.587 * c[1] + 0.114 * c[2]) / 255 + + +def prim_rgb_to_hsv(c): + """Convert RGB [0-255] to HSV [h:0-360, s:0-1, v:0-1].""" + r, g, b = c[0] / 255, c[1] / 255, c[2] / 255 + h, s, v = colorsys.rgb_to_hsv(r, g, b) + return [h * 360, s, v] + + +def prim_hsv_to_rgb(hsv): + """Convert HSV [h:0-360, s:0-1, v:0-1] to RGB [0-255].""" + h, s, v = hsv[0] / 360, hsv[1], hsv[2] + r, g, b = colorsys.hsv_to_rgb(h, s, v) + return [int(r * 255), int(g * 255), int(b * 255)] + + +def prim_rgb_to_hsl(c): + """Convert RGB [0-255] to HSL [h:0-360, s:0-1, l:0-1].""" + r, g, b = c[0] / 255, c[1] / 255, c[2] / 255 + h, l, s = colorsys.rgb_to_hls(r, g, b) + return [h * 360, s, l] + + +def prim_hsl_to_rgb(hsl): + """Convert HSL [h:0-360, s:0-1, l:0-1] to RGB [0-255].""" + h, s, l = hsl[0] / 360, hsl[1], hsl[2] + r, g, b = colorsys.hls_to_rgb(h, l, s) + return [int(r * 255), int(g * 255), int(b * 255)] + + +def prim_blend_color(c1, c2, alpha): + """Blend two colors: c1 * (1-alpha) + c2 * alpha.""" + return [int(c1[i] * (1 - alpha) + c2[i] * alpha) for i in range(3)] + + +def prim_average_color(img): + """Get average color of an image.""" + mean = np.mean(img, axis=(0, 1)) + return [int(mean[0]), int(mean[1]), int(mean[2])] + + +def prim_dominant_color(img, k=1): + """Get dominant color using k-means (simplified: just average for now).""" + return prim_average_color(img) + + +def prim_invert_color(c): + """Invert a color.""" + return [255 - c[0], 255 - c[1], 255 - c[2]] + + +def prim_grayscale_color(c): + """Convert color to grayscale.""" + gray = int(0.299 * c[0] + 
0.587 * c[1] + 0.114 * c[2]) + return [gray, gray, gray] + + +def prim_saturate(c, amount): + """Adjust saturation of color. amount=0 is grayscale, 1 is unchanged, >1 is more saturated.""" + hsv = prim_rgb_to_hsv(c) + hsv[1] = max(0, min(1, hsv[1] * amount)) + return prim_hsv_to_rgb(hsv) + + +def prim_brighten(c, amount): + """Adjust brightness. amount=0 is black, 1 is unchanged, >1 is brighter.""" + return [int(max(0, min(255, c[i] * amount))) for i in range(3)] + + +def prim_shift_hue(c, degrees): + """Shift hue by degrees.""" + hsv = prim_rgb_to_hsv(c) + hsv[0] = (hsv[0] + degrees) % 360 + return prim_hsv_to_rgb(hsv) + + +PRIMITIVES = { + # Construction + 'rgb': prim_rgb, + + # Component access + 'red': prim_red, + 'green': prim_green, + 'blue': prim_blue, + 'luminance': prim_luminance, + + # Color space conversion + 'rgb->hsv': prim_rgb_to_hsv, + 'hsv->rgb': prim_hsv_to_rgb, + 'rgb->hsl': prim_rgb_to_hsl, + 'hsl->rgb': prim_hsl_to_rgb, + + # Blending + 'blend-color': prim_blend_color, + + # Analysis + 'average-color': prim_average_color, + 'dominant-color': prim_dominant_color, + + # Manipulation + 'invert-color': prim_invert_color, + 'grayscale-color': prim_grayscale_color, + 'saturate': prim_saturate, + 'brighten': prim_brighten, + 'shift-hue': prim_shift_hue, +} diff --git a/sexp_effects/primitive_libs/color_ops.py b/sexp_effects/primitive_libs/color_ops.py new file mode 100644 index 0000000..dd9076c --- /dev/null +++ b/sexp_effects/primitive_libs/color_ops.py @@ -0,0 +1,90 @@ +""" +Color Operations Primitives Library + +Vectorized color adjustments: brightness, contrast, saturation, invert, HSV. +These operate on entire images for fast processing. +""" +import numpy as np +import cv2 + + +def prim_adjust(img, brightness=0, contrast=1): + """Adjust brightness and contrast. 
"""
Color Operations Primitives Library

Vectorized color adjustments: brightness, contrast, saturation, invert, HSV.
These operate on entire images for fast processing.
"""
import numpy as np
import cv2


def prim_adjust(img, brightness=0, contrast=1):
    """Adjust brightness and contrast. Brightness: -255 to 255, Contrast: 0 to 3+."""
    # Contrast pivots around mid gray (128); brightness is a flat offset.
    scaled = (img.astype(np.float32) - 128) * contrast + 128
    return np.clip(scaled + brightness, 0, 255).astype(np.uint8)


def prim_mix_gray(img, amount):
    """Mix image with its grayscale version. 0=original, 1=grayscale."""
    lum = 0.299 * img[:, :, 0] + 0.587 * img[:, :, 1] + 0.114 * img[:, :, 2]
    gray_rgb = np.stack([lum, lum, lum], axis=-1)
    mixed = img.astype(np.float32) * (1 - amount) + gray_rgb * amount
    return np.clip(mixed, 0, 255).astype(np.uint8)


def prim_invert_img(img):
    """Invert all pixel values."""
    return (255 - img).astype(np.uint8)


def prim_shift_hsv(img, h=0, s=1, v=1):
    """Shift HSV: h is a degree offset, s and v are multipliers."""
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.float32)
    # OpenCV stores hue in 0-179 (half-degrees), hence the /2 on the offset.
    hsv[:, :, 0] = (hsv[:, :, 0] + h / 2) % 180
    hsv[:, :, 1] = np.clip(hsv[:, :, 1] * s, 0, 255)
    hsv[:, :, 2] = np.clip(hsv[:, :, 2] * v, 0, 255)
    return cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2RGB)


def prim_add_noise(img, amount):
    """Add gaussian noise with standard deviation `amount`."""
    noise = np.random.normal(0, amount, img.shape)
    return np.clip(img.astype(np.float32) + noise, 0, 255).astype(np.uint8)


def prim_quantize(img, levels):
    """Reduce to N color levels per channel (posterize)."""
    levels = max(2, int(levels))
    step = 256 / levels
    # Snap each value to the middle of its bucket.
    posterized = (img // step) * step + step // 2
    return np.clip(posterized, 0, 255).astype(np.uint8)


def prim_sepia(img, intensity=1.0):
    """Apply sepia tone effect via the classic 3x3 color matrix."""
    sepia_matrix = np.array([
        [0.393, 0.769, 0.189],
        [0.349, 0.686, 0.168],
        [0.272, 0.534, 0.131],
    ])
    toned = np.dot(img, sepia_matrix.T)
    blended = img.astype(np.float32) * (1 - intensity) + toned * intensity
    return np.clip(blended, 0, 255).astype(np.uint8)


def prim_grayscale(img):
    """Convert to grayscale (output stays 3-channel RGB)."""
    lum = 0.299 * img[:, :, 0] + 0.587 * img[:, :, 1] + 0.114 * img[:, :, 2]
    return np.stack([lum, lum, lum], axis=-1).astype(np.uint8)


PRIMITIVES = {
    # Brightness/Contrast
    'adjust': prim_adjust,

    # Saturation
    'mix-gray': prim_mix_gray,
    'grayscale': prim_grayscale,

    # HSV manipulation
    'shift-hsv': prim_shift_hsv,

    # Inversion
    'invert-img': prim_invert_img,

    # Effects
    'add-noise': prim_add_noise,
    'quantize': prim_quantize,
    'sepia': prim_sepia,
}
"""
Core Primitives - Always available, minimal essential set.

These are the primitives that form the foundation of the language.
They cannot be overridden by libraries.
"""
import math
import random


# Arithmetic
def prim_add(*args):
    """(+ ...): sum of all args, 0 when empty. Uses `+`, so strings and
    lists concatenate as well."""
    if not args:
        return 0
    result = args[0]
    for arg in args[1:]:
        result = result + arg
    return result


def prim_sub(a, b=None):
    """(- a) negates; (- a b) subtracts."""
    if b is None:
        return -a
    return a - b


def prim_mul(*args):
    """(* ...): product of all args, 1 when empty."""
    if not args:
        return 1
    result = args[0]
    for arg in args[1:]:
        result = result * arg
    return result


def prim_div(a, b):
    """True division; raises ZeroDivisionError when b == 0."""
    return a / b


def prim_mod(a, b):
    """Python modulo (result's sign follows the divisor)."""
    return a % b


def prim_abs(x):
    """Absolute value."""
    return abs(x)


def prim_min(*args):
    """Minimum of the given args."""
    return min(args)


def prim_max(*args):
    """Maximum of the given args."""
    return max(args)


def prim_round(x):
    """Round to nearest (Python banker's rounding)."""
    return round(x)


def prim_floor(x):
    """Largest integer <= x. (math is now imported once at module level
    instead of on every call.)"""
    return math.floor(x)


def prim_ceil(x):
    """Smallest integer >= x."""
    return math.ceil(x)


# Comparison
def prim_lt(a, b):
    return a < b


def prim_gt(a, b):
    return a > b


def prim_le(a, b):
    return a <= b


def prim_ge(a, b):
    return a >= b


def prim_eq(a, b):
    """Equality; floats compare with a small absolute tolerance."""
    if isinstance(a, float) or isinstance(b, float):
        return abs(a - b) < 1e-9
    return a == b


def prim_ne(a, b):
    """Negation of prim_eq (shares its float tolerance)."""
    return not prim_eq(a, b)


# Logic
def prim_not(x):
    return not x


def prim_and(*args):
    """Logical and over already-evaluated args (no short-circuit possible)."""
    for a in args:
        if not a:
            return False
    return True


def prim_or(*args):
    """Logical or over already-evaluated args (no short-circuit possible)."""
    for a in args:
        if a:
            return True
    return False


# Basic data access
def prim_get(obj, key, default=None):
    """Get value from a dict (by key) or list/tuple (by integer index);
    returns `default` when missing."""
    if isinstance(obj, dict):
        return obj.get(key, default)
    elif isinstance(obj, (list, tuple)):
        try:
            return obj[int(key)]
        except (IndexError, ValueError):
            return default
    return default


def prim_nth(seq, i):
    """Element at index i, or None when out of range (no negative wrap)."""
    i = int(i)
    if 0 <= i < len(seq):
        return seq[i]
    return None


def prim_first(seq):
    """First element, or None for an empty sequence."""
    return seq[0] if seq else None


def prim_length(seq):
    return len(seq)


def prim_list(*args):
    return list(args)


# Type checking
def prim_is_number(x):
    return isinstance(x, (int, float))


def prim_is_string(x):
    return isinstance(x, str)


def prim_is_list(x):
    return isinstance(x, (list, tuple))


def prim_is_dict(x):
    return isinstance(x, dict)


def prim_is_nil(x):
    return x is None


# Higher-order / iteration
def prim_reduce(seq, init, fn):
    """(reduce seq init fn) — fold left: fn(fn(fn(init, s0), s1), s2) ..."""
    acc = init
    for item in seq:
        acc = fn(acc, item)
    return acc


def prim_map(seq, fn):
    """(map seq fn) — apply fn to each element, return new list."""
    return [fn(item) for item in seq]


def prim_range(*args):
    """(range end), (range start end), or (range start end step) — integer range."""
    if len(args) == 1:
        return list(range(int(args[0])))
    elif len(args) == 2:
        return list(range(int(args[0]), int(args[1])))
    elif len(args) >= 3:
        return list(range(int(args[0]), int(args[1]), int(args[2])))
    return []


# Random — dedicated RNG instance so these primitives never disturb the
# global `random` module state.
_rng = random.Random()


def prim_rand():
    """Return random float in [0, 1)."""
    return _rng.random()


def prim_rand_int(lo, hi):
    """Return random integer in [lo, hi] (bounds truncated to int)."""
    return _rng.randint(int(lo), int(hi))


def prim_rand_range(lo, hi):
    """Return random float in [lo, hi)."""
    return lo + _rng.random() * (hi - lo)


def prim_map_range(val, from_lo, from_hi, to_lo, to_hi):
    """Map value from one range to another; a degenerate source range
    maps everything to to_lo."""
    if from_hi == from_lo:
        return to_lo
    t = (val - from_lo) / (from_hi - from_lo)
    return to_lo + t * (to_hi - to_lo)


# Core primitives dict
PRIMITIVES = {
    # Arithmetic
    '+': prim_add,
    '-': prim_sub,
    '*': prim_mul,
    '/': prim_div,
    'mod': prim_mod,
    'abs': prim_abs,
    'min': prim_min,
    'max': prim_max,
    'round': prim_round,
    'floor': prim_floor,
    'ceil': prim_ceil,

    # Comparison
    '<': prim_lt,
    '>': prim_gt,
    '<=': prim_le,
    '>=': prim_ge,
    '=': prim_eq,
    '!=': prim_ne,

    # Logic
    'not': prim_not,
    'and': prim_and,
    'or': prim_or,

    # Data access
    'get': prim_get,
    'nth': prim_nth,
    'first': prim_first,
    'length': prim_length,
    'len': prim_length,
    'list': prim_list,

    # Type predicates
    'number?': prim_is_number,
    'string?': prim_is_string,
    'list?': prim_is_list,
    'dict?': prim_is_dict,
    'nil?': prim_is_nil,
    'is-nil': prim_is_nil,

    # Higher-order / iteration
    'reduce': prim_reduce,
    'fold': prim_reduce,
    'map': prim_map,
    'range': prim_range,

    # Random
    'rand': prim_rand,
    'rand-int': prim_rand_int,
    'rand-range': prim_rand_range,
    'map-range': prim_map_range,
}
+""" +import numpy as np +import cv2 +from PIL import Image, ImageDraw, ImageFont + + +# Default font (will be loaded lazily) +_default_font = None + + +def _get_default_font(size=16): + """Get default font, creating if needed.""" + global _default_font + if _default_font is None or _default_font.size != size: + try: + _default_font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", size) + except: + _default_font = ImageFont.load_default() + return _default_font + + +def prim_draw_char(img, char, x, y, font_size=16, color=None): + """Draw a single character at (x, y).""" + if color is None: + color = [255, 255, 255] + + pil_img = Image.fromarray(img) + draw = ImageDraw.Draw(pil_img) + font = _get_default_font(font_size) + draw.text((x, y), char, fill=tuple(color), font=font) + return np.array(pil_img) + + +def prim_draw_text(img, text, x, y, font_size=16, color=None): + """Draw text string at (x, y).""" + if color is None: + color = [255, 255, 255] + + pil_img = Image.fromarray(img) + draw = ImageDraw.Draw(pil_img) + font = _get_default_font(font_size) + draw.text((x, y), text, fill=tuple(color), font=font) + return np.array(pil_img) + + +def prim_fill_rect(img, x, y, w, h, color=None): + """Fill a rectangle with color.""" + if color is None: + color = [255, 255, 255] + + result = img.copy() + x, y, w, h = int(x), int(y), int(w), int(h) + result[y:y+h, x:x+w] = color + return result + + +def prim_draw_rect(img, x, y, w, h, color=None, thickness=1): + """Draw rectangle outline.""" + if color is None: + color = [255, 255, 255] + + result = img.copy() + cv2.rectangle(result, (int(x), int(y)), (int(x+w), int(y+h)), + tuple(color), thickness) + return result + + +def prim_draw_line(img, x1, y1, x2, y2, color=None, thickness=1): + """Draw a line from (x1, y1) to (x2, y2).""" + if color is None: + color = [255, 255, 255] + + result = img.copy() + cv2.line(result, (int(x1), int(y1)), (int(x2), int(y2)), + tuple(color), thickness) + return result + 
+ +def prim_draw_circle(img, cx, cy, radius, color=None, thickness=1, fill=False): + """Draw a circle.""" + if color is None: + color = [255, 255, 255] + + result = img.copy() + t = -1 if fill else thickness + cv2.circle(result, (int(cx), int(cy)), int(radius), tuple(color), t) + return result + + +def prim_draw_ellipse(img, cx, cy, rx, ry, angle=0, color=None, thickness=1, fill=False): + """Draw an ellipse.""" + if color is None: + color = [255, 255, 255] + + result = img.copy() + t = -1 if fill else thickness + cv2.ellipse(result, (int(cx), int(cy)), (int(rx), int(ry)), + angle, 0, 360, tuple(color), t) + return result + + +def prim_draw_polygon(img, points, color=None, thickness=1, fill=False): + """Draw a polygon from list of [x, y] points.""" + if color is None: + color = [255, 255, 255] + + result = img.copy() + pts = np.array(points, dtype=np.int32).reshape((-1, 1, 2)) + + if fill: + cv2.fillPoly(result, [pts], tuple(color)) + else: + cv2.polylines(result, [pts], True, tuple(color), thickness) + + return result + + +PRIMITIVES = { + # Text + 'draw-char': prim_draw_char, + 'draw-text': prim_draw_text, + + # Rectangles + 'fill-rect': prim_fill_rect, + 'draw-rect': prim_draw_rect, + + # Lines and shapes + 'draw-line': prim_draw_line, + 'draw-circle': prim_draw_circle, + 'draw-ellipse': prim_draw_ellipse, + 'draw-polygon': prim_draw_polygon, +} diff --git a/sexp_effects/primitive_libs/filters.py b/sexp_effects/primitive_libs/filters.py new file mode 100644 index 0000000..a66f107 --- /dev/null +++ b/sexp_effects/primitive_libs/filters.py @@ -0,0 +1,119 @@ +""" +Filters Primitives Library + +Image filters: blur, sharpen, edges, convolution. 
+""" +import numpy as np +import cv2 + + +def prim_blur(img, radius): + """Gaussian blur with given radius.""" + radius = max(1, int(radius)) + ksize = radius * 2 + 1 + return cv2.GaussianBlur(img, (ksize, ksize), 0) + + +def prim_box_blur(img, radius): + """Box blur with given radius.""" + radius = max(1, int(radius)) + ksize = radius * 2 + 1 + return cv2.blur(img, (ksize, ksize)) + + +def prim_median_blur(img, radius): + """Median blur (good for noise removal).""" + radius = max(1, int(radius)) + ksize = radius * 2 + 1 + return cv2.medianBlur(img, ksize) + + +def prim_bilateral(img, d=9, sigma_color=75, sigma_space=75): + """Bilateral filter (edge-preserving blur).""" + return cv2.bilateralFilter(img, d, sigma_color, sigma_space) + + +def prim_sharpen(img, amount=1.0): + """Sharpen image using unsharp mask.""" + blurred = cv2.GaussianBlur(img, (0, 0), 3) + return cv2.addWeighted(img, 1.0 + amount, blurred, -amount, 0) + + +def prim_edges(img, low=50, high=150): + """Canny edge detection.""" + gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) + edges = cv2.Canny(gray, low, high) + return cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB) + + +def prim_sobel(img, ksize=3): + """Sobel edge detection.""" + gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) + sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=ksize) + sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=ksize) + mag = np.sqrt(sobelx**2 + sobely**2) + mag = np.clip(mag, 0, 255).astype(np.uint8) + return cv2.cvtColor(mag, cv2.COLOR_GRAY2RGB) + + +def prim_laplacian(img, ksize=3): + """Laplacian edge detection.""" + gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) + lap = cv2.Laplacian(gray, cv2.CV_64F, ksize=ksize) + lap = np.abs(lap) + lap = np.clip(lap, 0, 255).astype(np.uint8) + return cv2.cvtColor(lap, cv2.COLOR_GRAY2RGB) + + +def prim_emboss(img): + """Emboss effect.""" + kernel = np.array([[-2, -1, 0], + [-1, 1, 1], + [ 0, 1, 2]]) + result = cv2.filter2D(img, -1, kernel) + return np.clip(result + 128, 0, 255).astype(np.uint8) + 
+ +def prim_dilate(img, size=1): + """Morphological dilation.""" + kernel = np.ones((size * 2 + 1, size * 2 + 1), np.uint8) + return cv2.dilate(img, kernel) + + +def prim_erode(img, size=1): + """Morphological erosion.""" + kernel = np.ones((size * 2 + 1, size * 2 + 1), np.uint8) + return cv2.erode(img, kernel) + + +def prim_convolve(img, kernel): + """Apply custom convolution kernel.""" + kernel = np.array(kernel, dtype=np.float32) + return cv2.filter2D(img, -1, kernel) + + +PRIMITIVES = { + # Blur + 'blur': prim_blur, + 'box-blur': prim_box_blur, + 'median-blur': prim_median_blur, + 'bilateral': prim_bilateral, + + # Sharpen + 'sharpen': prim_sharpen, + + # Edges + 'edges': prim_edges, + 'sobel': prim_sobel, + 'laplacian': prim_laplacian, + + # Effects + 'emboss': prim_emboss, + + # Morphology + 'dilate': prim_dilate, + 'erode': prim_erode, + + # Custom + 'convolve': prim_convolve, +} diff --git a/sexp_effects/primitive_libs/geometry.py b/sexp_effects/primitive_libs/geometry.py new file mode 100644 index 0000000..5b385a4 --- /dev/null +++ b/sexp_effects/primitive_libs/geometry.py @@ -0,0 +1,143 @@ +""" +Geometry Primitives Library + +Geometric transforms: rotate, scale, flip, translate, remap. 
+""" +import numpy as np +import cv2 + + +def prim_translate(img, dx, dy): + """Translate image by (dx, dy) pixels.""" + h, w = img.shape[:2] + M = np.float32([[1, 0, dx], [0, 1, dy]]) + return cv2.warpAffine(img, M, (w, h)) + + +def prim_rotate(img, angle, cx=None, cy=None): + """Rotate image by angle degrees around center (cx, cy).""" + h, w = img.shape[:2] + if cx is None: + cx = w / 2 + if cy is None: + cy = h / 2 + M = cv2.getRotationMatrix2D((cx, cy), angle, 1.0) + return cv2.warpAffine(img, M, (w, h)) + + +def prim_scale(img, sx, sy, cx=None, cy=None): + """Scale image by (sx, sy) around center (cx, cy).""" + h, w = img.shape[:2] + if cx is None: + cx = w / 2 + if cy is None: + cy = h / 2 + + # Build transform matrix + M = np.float32([ + [sx, 0, cx * (1 - sx)], + [0, sy, cy * (1 - sy)] + ]) + return cv2.warpAffine(img, M, (w, h)) + + +def prim_flip_h(img): + """Flip image horizontally.""" + return cv2.flip(img, 1) + + +def prim_flip_v(img): + """Flip image vertically.""" + return cv2.flip(img, 0) + + +def prim_flip(img, direction="horizontal"): + """Flip image in given direction.""" + if direction in ("horizontal", "h"): + return prim_flip_h(img) + elif direction in ("vertical", "v"): + return prim_flip_v(img) + elif direction in ("both", "hv", "vh"): + return cv2.flip(img, -1) + return img + + +def prim_transpose(img): + """Transpose image (swap x and y).""" + return np.transpose(img, (1, 0, 2)) + + +def prim_remap(img, map_x, map_y): + """Remap image using coordinate maps.""" + return cv2.remap(img, map_x.astype(np.float32), + map_y.astype(np.float32), + cv2.INTER_LINEAR) + + +def prim_make_coords(w, h): + """Create coordinate grids for remapping.""" + x = np.arange(w, dtype=np.float32) + y = np.arange(h, dtype=np.float32) + map_x, map_y = np.meshgrid(x, y) + return (map_x, map_y) + + +def prim_perspective(img, src_pts, dst_pts): + """Apply perspective transform.""" + src = np.float32(src_pts) + dst = np.float32(dst_pts) + M = 
cv2.getPerspectiveTransform(src, dst) + h, w = img.shape[:2] + return cv2.warpPerspective(img, M, (w, h)) + + +def prim_affine(img, src_pts, dst_pts): + """Apply affine transform using 3 point pairs.""" + src = np.float32(src_pts) + dst = np.float32(dst_pts) + M = cv2.getAffineTransform(src, dst) + h, w = img.shape[:2] + return cv2.warpAffine(img, M, (w, h)) + + +def _get_legacy_geometry_primitives(): + """Import geometry primitives from legacy primitives module.""" + from sexp_effects.primitives import ( + prim_coords_x, + prim_coords_y, + prim_ripple_displace, + prim_fisheye_displace, + prim_kaleidoscope_displace, + ) + return { + 'coords-x': prim_coords_x, + 'coords-y': prim_coords_y, + 'ripple-displace': prim_ripple_displace, + 'fisheye-displace': prim_fisheye_displace, + 'kaleidoscope-displace': prim_kaleidoscope_displace, + } + + +PRIMITIVES = { + # Basic transforms + 'translate': prim_translate, + 'rotate-img': prim_rotate, + 'scale-img': prim_scale, + + # Flips + 'flip-h': prim_flip_h, + 'flip-v': prim_flip_v, + 'flip': prim_flip, + 'transpose': prim_transpose, + + # Remapping + 'remap': prim_remap, + 'make-coords': prim_make_coords, + + # Advanced transforms + 'perspective': prim_perspective, + 'affine': prim_affine, + + # Displace / coordinate ops (from legacy primitives) + **_get_legacy_geometry_primitives(), +} diff --git a/sexp_effects/primitive_libs/image.py b/sexp_effects/primitive_libs/image.py new file mode 100644 index 0000000..beae3ce --- /dev/null +++ b/sexp_effects/primitive_libs/image.py @@ -0,0 +1,144 @@ +""" +Image Primitives Library + +Basic image operations: dimensions, pixels, resize, crop, paste. 
+""" +import numpy as np +import cv2 + + +def prim_width(img): + return img.shape[1] + + +def prim_height(img): + return img.shape[0] + + +def prim_make_image(w, h, color=None): + """Create a new image filled with color (default black).""" + if color is None: + color = [0, 0, 0] + img = np.zeros((h, w, 3), dtype=np.uint8) + img[:] = color + return img + + +def prim_copy(img): + return img.copy() + + +def prim_pixel(img, x, y): + """Get pixel color at (x, y) as [r, g, b].""" + h, w = img.shape[:2] + if 0 <= x < w and 0 <= y < h: + return list(img[int(y), int(x)]) + return [0, 0, 0] + + +def prim_set_pixel(img, x, y, color): + """Set pixel at (x, y) to color, returns modified image.""" + result = img.copy() + h, w = result.shape[:2] + if 0 <= x < w and 0 <= y < h: + result[int(y), int(x)] = color + return result + + +def prim_sample(img, x, y): + """Bilinear sample at float coordinates, returns [r, g, b] as floats.""" + h, w = img.shape[:2] + x = max(0, min(w - 1.001, x)) + y = max(0, min(h - 1.001, y)) + + x0, y0 = int(x), int(y) + x1, y1 = min(x0 + 1, w - 1), min(y0 + 1, h - 1) + fx, fy = x - x0, y - y0 + + c00 = img[y0, x0].astype(float) + c10 = img[y0, x1].astype(float) + c01 = img[y1, x0].astype(float) + c11 = img[y1, x1].astype(float) + + top = c00 * (1 - fx) + c10 * fx + bottom = c01 * (1 - fx) + c11 * fx + return list(top * (1 - fy) + bottom * fy) + + +def prim_channel(img, c): + """Extract single channel (0=R, 1=G, 2=B).""" + return img[:, :, c] + + +def prim_merge_channels(r, g, b): + """Merge three single-channel arrays into RGB image.""" + return np.stack([r, g, b], axis=2).astype(np.uint8) + + +def prim_resize(img, w, h, mode="linear"): + """Resize image to w x h.""" + interp = cv2.INTER_LINEAR + if mode == "nearest": + interp = cv2.INTER_NEAREST + elif mode == "cubic": + interp = cv2.INTER_CUBIC + elif mode == "area": + interp = cv2.INTER_AREA + return cv2.resize(img, (int(w), int(h)), interpolation=interp) + + +def prim_crop(img, x, y, w, h): + """Crop 
rectangle from image.""" + x, y, w, h = int(x), int(y), int(w), int(h) + ih, iw = img.shape[:2] + x = max(0, min(x, iw - 1)) + y = max(0, min(y, ih - 1)) + w = min(w, iw - x) + h = min(h, ih - y) + return img[y:y+h, x:x+w].copy() + + +def prim_paste(dst, src, x, y): + """Paste src onto dst at position (x, y).""" + result = dst.copy() + x, y = int(x), int(y) + sh, sw = src.shape[:2] + dh, dw = dst.shape[:2] + + # Clip to bounds + sx1 = max(0, -x) + sy1 = max(0, -y) + dx1 = max(0, x) + dy1 = max(0, y) + sx2 = min(sw, dw - x) + sy2 = min(sh, dh - y) + + if sx2 > sx1 and sy2 > sy1: + result[dy1:dy1+(sy2-sy1), dx1:dx1+(sx2-sx1)] = src[sy1:sy2, sx1:sx2] + + return result + + +PRIMITIVES = { + # Dimensions + 'width': prim_width, + 'height': prim_height, + + # Creation + 'make-image': prim_make_image, + 'copy': prim_copy, + + # Pixel access + 'pixel': prim_pixel, + 'set-pixel': prim_set_pixel, + 'sample': prim_sample, + + # Channels + 'channel': prim_channel, + 'merge-channels': prim_merge_channels, + + # Geometry + 'resize': prim_resize, + 'crop': prim_crop, + 'paste': prim_paste, +} diff --git a/sexp_effects/primitive_libs/math.py b/sexp_effects/primitive_libs/math.py new file mode 100644 index 0000000..140ad3e --- /dev/null +++ b/sexp_effects/primitive_libs/math.py @@ -0,0 +1,164 @@ +""" +Math Primitives Library + +Trigonometry, rounding, clamping, random numbers, etc. 
"""
Math Primitives Library

Trigonometry, rounding, clamping, random numbers, etc.
"""
import math
import random as rand_module


def prim_sin(x):
    """Sine of x (radians)."""
    return math.sin(x)


def prim_cos(x):
    """Cosine of x (radians)."""
    return math.cos(x)


def prim_tan(x):
    """Tangent of x (radians)."""
    return math.tan(x)


def prim_asin(x):
    """Arc sine, in radians."""
    return math.asin(x)


def prim_acos(x):
    """Arc cosine, in radians."""
    return math.acos(x)


def prim_atan(x):
    """Arc tangent, in radians."""
    return math.atan(x)


def prim_atan2(y, x):
    """Arc tangent of y/x, honouring the quadrant."""
    return math.atan2(y, x)


def prim_sqrt(x):
    """Square root of x."""
    return math.sqrt(x)


def prim_pow(x, y):
    """x raised to the power y."""
    return math.pow(x, y)


def prim_exp(x):
    """e ** x."""
    return math.exp(x)


def prim_log(x, base=None):
    """Natural log of x, or log in the given base."""
    if base is None:
        return math.log(x)
    return math.log(x, base)


def prim_abs(x):
    """Absolute value of x."""
    return abs(x)


def prim_floor(x):
    """Largest integer <= x."""
    return math.floor(x)


def prim_ceil(x):
    """Smallest integer >= x."""
    return math.ceil(x)


def prim_round(x):
    """Round to nearest integer (Python's banker's rounding)."""
    return round(x)


def prim_min(*args):
    """Minimum of the arguments; a single iterable argument is unpacked."""
    if len(args) == 1 and hasattr(args[0], '__iter__'):
        return min(args[0])
    return min(args)


def prim_max(*args):
    """Maximum of the arguments; a single iterable argument is unpacked."""
    if len(args) == 1 and hasattr(args[0], '__iter__'):
        return max(args[0])
    return max(args)


def prim_clamp(x, lo, hi):
    """Clamp x into [lo, hi]."""
    return max(lo, min(hi, x))


def prim_lerp(a, b, t):
    """Linear interpolation: a + (b - a) * t"""
    return a + (b - a) * t


def prim_smoothstep(edge0, edge1, x):
    """Smooth Hermite interpolation between 0 and 1.

    Fix: degenerate edges (edge0 == edge1) now behave as a hard step
    instead of raising ZeroDivisionError.
    """
    if edge0 == edge1:
        return 0.0 if x < edge0 else 1.0
    t = prim_clamp((x - edge0) / (edge1 - edge0), 0.0, 1.0)
    return t * t * (3 - 2 * t)


def prim_random(lo=0.0, hi=1.0):
    """Uniform random float in [lo, hi]."""
    return rand_module.uniform(lo, hi)


def prim_randint(lo, hi):
    """Random integer in [lo, hi] inclusive."""
    return rand_module.randint(lo, hi)


def prim_gaussian(mean=0.0, std=1.0):
    """Normally distributed random float."""
    return rand_module.gauss(mean, std)


def prim_sign(x):
    """-1, 0 or 1 depending on the sign of x."""
    if x > 0:
        return 1
    elif x < 0:
        return -1
    return 0


def prim_fract(x):
    """Fractional part of x (x - floor(x))."""
    return x - math.floor(x)


PRIMITIVES = {
    # Trigonometry
    'sin': prim_sin,
    'cos': prim_cos,
    'tan': prim_tan,
    'asin': prim_asin,
    'acos': prim_acos,
    'atan': prim_atan,
    'atan2': prim_atan2,

    # Powers and roots
    'sqrt': prim_sqrt,
    'pow': prim_pow,
    'exp': prim_exp,
    'log': prim_log,

    # Rounding
    'abs': prim_abs,
    'floor': prim_floor,
    'ceil': prim_ceil,
    'round': prim_round,
    'sign': prim_sign,
    'fract': prim_fract,

    # Min/max/clamp
    'min': prim_min,
    'max': prim_max,
    'clamp': prim_clamp,
    'lerp': prim_lerp,
    'smoothstep': prim_smoothstep,

    # Random
    'random': prim_random,
    'randint': prim_randint,
    'gaussian': prim_gaussian,

    # Constants
    'pi': math.pi,
    'tau': math.tau,
    'e': math.e,
}
# --- next file in patch: sexp_effects/primitive_libs/streaming.py ---
"""
Streaming primitives for video/audio processing.

These primitives handle video source reading and audio analysis,
keeping the interpreter completely generic.
"""

import json
import os
import subprocess
import sys
from pathlib import Path

import numpy as np

# Debug tracing of seeks / dropped frames is opt-in now: the original code
# unconditionally printed SEEK / NULL FRAME / READ lines to stderr on every
# render. Set SEXP_STREAM_DEBUG=1 to re-enable the traces.
_DEBUG = bool(os.environ.get("SEXP_STREAM_DEBUG"))


def _debug(msg: str) -> None:
    """Emit a trace line to stderr when SEXP_STREAM_DEBUG is set."""
    if _DEBUG:
        print(msg, file=sys.stderr)


class VideoSource:
    """Video source with persistent streaming pipe for fast sequential reads."""

    def __init__(self, path: str, fps: float = 30):
        self.path = Path(path)
        self.fps = fps                      # Output fps for the stream
        self._frame_size = None             # (width, height), probed below
        self._duration = None               # Seconds, if ffprobe reports it
        self._proc = None                   # Persistent ffmpeg process
        self._stream_time = 0.0             # Current position in stream
        self._frame_time = 1.0 / fps        # Time per frame at output fps
        self._last_read_time = -1
        self._cached_frame = None

        # Probe size/duration via ffprobe.
        cmd = ["ffprobe", "-v", "quiet", "-print_format", "json",
               "-show_streams", str(self.path)]
        result = subprocess.run(cmd, capture_output=True, text=True)
        info = json.loads(result.stdout)

        for stream in info.get("streams", []):
            if stream.get("codec_type") == "video":
                self._frame_size = (stream.get("width", 720), stream.get("height", 720))
                # Try direct duration field first
                if "duration" in stream:
                    self._duration = float(stream["duration"])
                # Fall back to tags.DURATION (webm format: "00:01:00.124000000")
                elif "tags" in stream and "DURATION" in stream["tags"]:
                    dur_str = stream["tags"]["DURATION"]
                    parts = dur_str.split(":")
                    if len(parts) == 3:
                        h, m, s = parts
                        self._duration = int(h) * 3600 + int(m) * 60 + float(s)
                break

        if not self._frame_size:
            self._frame_size = (720, 720)

    def _start_stream(self, seek_time: float = 0):
        """Start or restart the ffmpeg streaming process."""
        if self._proc:
            self._proc.kill()
            self._proc = None

        w, h = self._frame_size
        cmd = [
            "ffmpeg", "-v", "quiet",
            "-ss", f"{seek_time:.3f}",
            "-i", str(self.path),
            "-f", "rawvideo", "-pix_fmt", "rgb24",
            "-s", f"{w}x{h}",
            "-r", str(self.fps),  # Output at specified fps
            "-"
        ]
        self._proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
        self._stream_time = seek_time

    def _read_frame_from_stream(self) -> np.ndarray:
        """Read one raw RGB frame from the pipe, or None on EOF/dead process."""
        w, h = self._frame_size
        frame_size = w * h * 3

        if not self._proc or self._proc.poll() is not None:
            return None

        data = self._proc.stdout.read(frame_size)
        if len(data) < frame_size:
            return None

        return np.frombuffer(data, dtype=np.uint8).reshape((h, w, 3)).copy()

    def read(self) -> np.ndarray:
        """Read frame (uses last cached or t=0)."""
        if self._cached_frame is not None:
            return self._cached_frame
        return self.read_at(0)

    def read_at(self, t: float) -> np.ndarray:
        """Read frame at specific time using streaming with smart seeking."""
        # Cache check - return same frame for same time
        if t == self._last_read_time and self._cached_frame is not None:
            return self._cached_frame

        w, h = self._frame_size

        # Loop time if video is shorter
        seek_time = t
        if self._duration and self._duration > 0:
            seek_time = t % self._duration

        # Decide whether to seek or continue streaming.
        # Seek if: no stream, going backwards (more than 1 frame), or jumping
        # more than 2 seconds ahead. The one-frame backward tolerance handles
        # floating point and timing jitter.
        need_seek = (
            self._proc is None or
            self._proc.poll() is not None or
            seek_time < self._stream_time - self._frame_time or
            seek_time > self._stream_time + 2.0
        )

        if need_seek:
            if _DEBUG:
                if self._proc is None:
                    reason = "no proc"
                elif self._proc.poll() is not None:
                    reason = "proc dead"
                elif seek_time < self._stream_time:
                    reason = "backward"
                else:
                    reason = "jump"
                _debug(f"SEEK {self.path.name}: t={t:.4f} seek={seek_time:.4f} "
                       f"stream={self._stream_time:.4f} ({reason})")
            self._start_stream(seek_time)

        # Skip frames to reach target time
        while self._stream_time + self._frame_time <= seek_time:
            frame = self._read_frame_from_stream()
            if frame is None:
                # Stream ended, restart from seek point
                self._start_stream(seek_time)
                break
            self._stream_time += self._frame_time

        # Read the target frame
        frame = self._read_frame_from_stream()
        if frame is None:
            _debug(f"NULL FRAME {self.path.name}: t={t:.2f} seek={seek_time:.2f}")
            frame = np.zeros((h, w, 3), dtype=np.uint8)
        else:
            self._stream_time += self._frame_time

        self._last_read_time = t
        self._cached_frame = frame
        return frame

    def skip(self):
        """No-op for seek-based reading."""
        pass

    @property
    def size(self):
        return self._frame_size

    def close(self):
        if self._proc:
            self._proc.kill()
            self._proc = None


class AudioAnalyzer:
    """Audio analyzer for energy and beat detection."""

    def __init__(self, path: str, sample_rate: int = 22050):
        self.path = Path(path)
        self.sample_rate = sample_rate

        # Load audio via ffmpeg (mono float32 PCM at sample_rate)
        cmd = ["ffmpeg", "-v", "quiet", "-i", str(self.path),
               "-f", "f32le", "-ac", "1", "-ar", str(sample_rate), "-"]
        result = subprocess.run(cmd, capture_output=True)
        self._audio = np.frombuffer(result.stdout, dtype=np.float32)

        # Get duration
        cmd = ["ffprobe", "-v", "quiet", "-print_format", "json",
               "-show_format", str(self.path)]
        info = json.loads(subprocess.run(cmd, capture_output=True, text=True).stdout)
        self.duration = float(info.get("format", {}).get("duration", 60))

        # Beat detection state
        self._flux_history = []
        self._last_beat_time = -1
        self._beat_count = 0
        self._last_beat_check_time = -1
        # Cache beat result for current time (so multiple scans see same result)
        self._beat_cache_time = -1
        self._beat_cache_result = False

    def get_energy(self, t: float) -> float:
        """Get energy level at time t (0-1)."""
        idx = int(t * self.sample_rate)
        start = max(0, idx - 512)
        end = min(len(self._audio), idx + 512)
        if start >= end:
            return 0.0
        return min(1.0, np.sqrt(np.mean(self._audio[start:end] ** 2)) * 3.0)

    def get_beat(self, t: float) -> bool:
        """Check if there's a beat at time t."""
        # Return cached result if same time (multiple scans query same frame)
        if t == self._beat_cache_time:
            return self._beat_cache_result

        idx = int(t * self.sample_rate)
        size = 2048

        start, end = max(0, idx - size//2), min(len(self._audio), idx + size//2)
        if end - start < size/2:
            self._beat_cache_time = t
            self._beat_cache_result = False
            return False
        curr = self._audio[start:end]

        pstart, pend = max(0, start - 512), max(0, end - 512)
        if pend <= pstart:
            self._beat_cache_time = t
            self._beat_cache_result = False
            return False
        prev = self._audio[pstart:pend]

        # Spectral flux between the current window and one shifted 512 samples back
        curr_spec = np.abs(np.fft.rfft(curr * np.hanning(len(curr))))
        prev_spec = np.abs(np.fft.rfft(prev * np.hanning(len(prev))))

        n = min(len(curr_spec), len(prev_spec))
        flux = np.sum(np.maximum(0, curr_spec[:n] - prev_spec[:n])) / (n + 1)

        self._flux_history.append((t, flux))
        if len(self._flux_history) > 50:
            self._flux_history = self._flux_history[-50:]

        if len(self._flux_history) < 5:
            self._beat_cache_time = t
            self._beat_cache_result = False
            return False

        recent = [f for _, f in self._flux_history[-20:]]
        threshold = np.mean(recent) + 1.5 * np.std(recent)

        # 0.1 s refractory period between beats
        is_beat = flux > threshold and (t - self._last_beat_time) > 0.1
        if is_beat:
            self._last_beat_time = t
            if t > self._last_beat_check_time:
                self._beat_count += 1
                self._last_beat_check_time = t

        # Cache result for this time
        self._beat_cache_time = t
        self._beat_cache_result = is_beat
        return is_beat

    def get_beat_count(self, t: float) -> int:
        """Get cumulative beat count up to time t."""
        # Ensure beat detection has run up to this time
        self.get_beat(t)
        return self._beat_count


# === Primitives ===

def prim_make_video_source(path: str, fps: float = 30):
    """Create a video source from a file path."""
    return VideoSource(path, fps)


def prim_source_read(source: VideoSource, t: float = None):
    """Read a frame from a video source (at time t when given)."""
    if t is not None:
        return source.read_at(t)
    return source.read()


def prim_source_skip(source: VideoSource):
    """Skip a frame (keep pipe in sync)."""
    source.skip()


def prim_source_size(source: VideoSource):
    """Get (width, height) of source."""
    return source.size


def prim_make_audio_analyzer(path: str):
    """Create an audio analyzer from a file path."""
    return AudioAnalyzer(path)


def prim_audio_energy(analyzer: AudioAnalyzer, t: float) -> float:
    """Get energy level (0-1) at time t."""
    return analyzer.get_energy(t)


def prim_audio_beat(analyzer: AudioAnalyzer, t: float) -> bool:
    """Check if there's a beat at time t."""
    return analyzer.get_beat(t)


def prim_audio_beat_count(analyzer: AudioAnalyzer, t: float) -> int:
    """Get cumulative beat count up to time t."""
    return analyzer.get_beat_count(t)


def prim_audio_duration(analyzer: AudioAnalyzer) -> float:
    """Get audio duration in seconds."""
    return analyzer.duration
# --- next file in patch: sexp_effects/primitives.py ---
"""
Safe Primitives for S-Expression Effects

These are the building blocks that user-defined effects can use.
All primitives operate only on image data - no filesystem, network, etc.
"""

import numpy as np
import cv2
from typing import Any, Callable, Dict, List, Tuple, Optional
from dataclasses import dataclass
import math


@dataclass
class ZoneContext:
    """Per-cell context handed to zone callbacks in the ASCII-art grid."""
    row: int
    col: int
    row_norm: float   # Normalized row position 0-1
    col_norm: float   # Normalized col position 0-1
    luminance: float  # Cell luminance 0-1
    saturation: float  # Cell saturation 0-1
    hue: float        # Cell hue 0-360
    r: float          # Red component 0-1
    g: float          # Green component 0-1
    b: float          # Blue component 0-1


class DeterministicRNG:
    """Seeded random generator so effects render reproducibly."""

    def __init__(self, seed: int = 42):
        self._state = np.random.RandomState(seed)

    def random(self, low: float = 0, high: float = 1) -> float:
        """Uniform float in [low, high)."""
        return self._state.uniform(low, high)

    def randint(self, low: int, high: int) -> int:
        """Integer in [low, high] inclusive (RandomState's high is exclusive)."""
        return self._state.randint(low, high + 1)

    def gaussian(self, mean: float = 0, std: float = 1) -> float:
        """Normally distributed float."""
        return self._state.normal(mean, std)


# Global RNG instance (reset per frame with seed param)
_rng = DeterministicRNG()


def reset_rng(seed: int):
    """Reset the global RNG with a new seed."""
    global _rng
    _rng = DeterministicRNG(seed)


# =============================================================================
# Color Names (FFmpeg/X11 compatible)
# =============================================================================

NAMED_COLORS = {
    # Basic colors
    "black": (0, 0, 0),
    "white": (255, 255, 255),
    "red": (255, 0, 0),
    "green": (0, 128, 0),
    "blue": (0, 0, 255),
    "yellow": (255, 255, 0),
    "cyan": (0, 255, 255),
    "magenta": (255, 0, 255),

    # Grays
    "gray": (128, 128, 128),
    "grey": (128, 128, 128),
    "darkgray": (169, 169, 169),
    "darkgrey": (169, 169, 169),
    "lightgray": (211, 211, 211),
    "lightgrey": (211, 211, 211),
    "dimgray": (105, 105, 105),
    "dimgrey": (105, 105, 105),
    "silver": (192, 192, 192),

    # Reds
    "darkred": (139, 0, 0),
    "firebrick": (178, 34, 34),
    "crimson": (220, 20, 60),
    "indianred": (205, 92, 92),
    "lightcoral": (240, 128, 128),
    "salmon": (250, 128, 114),
    "darksalmon": (233, 150, 122),
    "lightsalmon": (255, 160, 122),
    "tomato": (255, 99, 71),
    "orangered": (255, 69, 0),
    "coral": (255, 127, 80),

    # Oranges
    "orange": (255, 165, 0),
    "darkorange": (255, 140, 0),

    # Yellows
    "gold": (255, 215, 0),
    "lightyellow": (255, 255, 224),
    "lemonchiffon": (255, 250, 205),
    "papayawhip": (255, 239, 213),
    "moccasin": (255, 228, 181),
    "peachpuff": (255, 218, 185),
    "palegoldenrod": (238, 232, 170),
    "khaki": (240, 230, 140),
    "darkkhaki": (189, 183, 107),

    # Greens
    "lime": (0, 255, 0),
    "limegreen": (50, 205, 50),
    "forestgreen": (34, 139, 34),
    "darkgreen": (0, 100, 0),
    "seagreen": (46, 139, 87),
    "mediumseagreen": (60, 179, 113),
    "springgreen": (0, 255, 127),
    "mediumspringgreen": (0, 250, 154),
    "lightgreen": (144, 238, 144),
    "palegreen": (152, 251, 152),
    "darkseagreen": (143, 188, 143),
    "greenyellow": (173, 255, 47),
    "chartreuse": (127, 255, 0),
    "lawngreen": (124, 252, 0),
    "olivedrab": (107, 142, 35),
    "olive": (128, 128, 0),
    "darkolivegreen": (85, 107, 47),
    "yellowgreen": (154, 205, 50),

    # Cyans/Teals
    "aqua": (0, 255, 255),
    "teal": (0, 128, 128),
    "darkcyan": (0, 139, 139),
    "lightcyan": (224, 255, 255),
    "aquamarine": (127, 255, 212),
    "mediumaquamarine": (102, 205, 170),
    "paleturquoise": (175, 238, 238),
    "turquoise": (64, 224, 208),
    "mediumturquoise": (72, 209, 204),
    "darkturquoise": (0, 206, 209),
    "cadetblue": (95, 158, 160),

    # Blues
    "navy": (0, 0, 128),
    "darkblue": (0, 0, 139),
    "mediumblue": (0, 0, 205),
    "royalblue": (65, 105, 225),
    "cornflowerblue": (100, 149, 237),
    "steelblue": (70, 130, 180),
    "dodgerblue": (30, 144, 255),
    "deepskyblue": (0, 191, 255),
    "lightskyblue": (135, 206, 250),
    "skyblue": (135, 206, 235),
    "lightsteelblue": (176, 196, 222),
    "lightblue": (173, 216, 230),
    "powderblue": (176, 224, 230),
    "slateblue": (106, 90, 205),
    "mediumslateblue": (123, 104, 238),
    "darkslateblue": (72, 61, 139),
    "midnightblue": (25, 25, 112),

    # Purples/Violets
    "purple": (128, 0, 128),
    "darkmagenta": (139, 0, 139),
    "darkviolet": (148, 0, 211),
    "blueviolet": (138, 43, 226),
    "darkorchid": (153, 50, 204),
    "mediumorchid": (186, 85, 211),
    "orchid": (218, 112, 214),
    "violet": (238, 130, 238),
    "plum": (221, 160, 221),
    "thistle": (216, 191, 216),
    "lavender": (230, 230, 250),
    "indigo": (75, 0, 130),
    "mediumpurple": (147, 112, 219),
    "fuchsia": (255, 0, 255),
    "hotpink": (255, 105, 180),
    "deeppink": (255, 20, 147),
    "mediumvioletred": (199, 21, 133),
    "palevioletred": (219, 112, 147),

    # Pinks
    "pink": (255, 192, 203),
    "lightpink": (255, 182, 193),
    "mistyrose": (255, 228, 225),

    # Browns
    "brown": (165, 42, 42),
    "maroon": (128, 0, 0),
    "saddlebrown": (139, 69, 19),
    "sienna": (160, 82, 45),
    "chocolate": (210, 105, 30),
    "peru": (205, 133, 63),
    "sandybrown": (244, 164, 96),
    "burlywood": (222, 184, 135),
    "tan": (210, 180, 140),
    "rosybrown": (188, 143, 143),
    "goldenrod": (218, 165, 32),
    "darkgoldenrod": (184, 134, 11),

    # Whites
    "snow": (255, 250, 250),
    "honeydew": (240, 255, 240),
    "mintcream": (245, 255, 250),
    "azure": (240, 255, 255),
    "aliceblue": (240, 248, 255),
    "ghostwhite": (248, 248, 255),
    "whitesmoke": (245, 245, 245),
    "seashell": (255, 245, 238),
    "beige": (245, 245, 220),
    "oldlace": (253, 245, 230),
    "floralwhite": (255, 250, 240),
    "ivory": (255, 255, 240),
    "antiquewhite": (250, 235, 215),
    "linen": (250, 240, 230),
    "lavenderblush": (255, 240, 245),
    "wheat": (245, 222, 179),
    "cornsilk": (255, 248, 220),
    "blanchedalmond": (255, 235, 205),
    "bisque": (255, 228, 196),
    "navajowhite": (255, 222, 173),

    # Special
    "transparent": (0, 0, 0),  # Note: no alpha support, just black
}


def _hex_to_rgb(hex_str: str) -> Optional[Tuple[int, int, int]]:
    """Decode 'rgb' or 'rrggbb' hex digits into an RGB tuple, or None."""
    try:
        if len(hex_str) == 3:
            # Short form: #RGB -> #RRGGBB
            return tuple(int(ch * 2, 16) for ch in hex_str)
        if len(hex_str) == 6:
            return (int(hex_str[0:2], 16),
                    int(hex_str[2:4], 16),
                    int(hex_str[4:6], 16))
    except ValueError:
        pass
    return None


def parse_color(color_spec: str) -> Optional[Tuple[int, int, int]]:
    """
    Parse a color specification into RGB tuple.

    Supports:
    - Named colors: "red", "green", "lime", "navy", etc.
    - Hex colors: "#FF0000", "#f00", "0xFF0000"
    - Special modes: "color", "mono", "invert" return None (handled separately)

    Returns:
        RGB tuple (r, g, b) or None for special modes / unknown colors.
    """
    if color_spec is None:
        return None

    spec = str(color_spec).strip().lower()

    # Special modes handled elsewhere
    if spec in ("color", "mono", "invert"):
        return None

    if spec in NAMED_COLORS:
        return NAMED_COLORS[spec]

    if spec.startswith("#"):
        return _hex_to_rgb(spec[1:])
    if spec.startswith("0x"):
        return _hex_to_rgb(spec[2:])
    if len(spec) in (3, 6) and all(c in "0123456789abcdef" for c in spec):
        return _hex_to_rgb(spec)

    # Unknown color - caller falls back to the original colors
    return None


# =============================================================================
# Image Primitives
# =============================================================================

def prim_width(img: np.ndarray) -> int:
    """Get image width."""
    return img.shape[1]


def prim_height(img: np.ndarray) -> int:
    """Get image height."""
    return img.shape[0]
def prim_make_image(w: int, h: int, color: Optional[List[int]] = None) -> np.ndarray:
    """Create a new h x w RGB image filled with color (default black).

    `color` now defaults to None (black), matching the
    primitive_libs/pixels.py version; passing a color is unchanged.
    """
    img = np.zeros((int(h), int(w), 3), dtype=np.uint8)
    if color:
        img[:, :] = color[:3]
    return img


def prim_copy(img: np.ndarray) -> np.ndarray:
    """Copy an image."""
    return img.copy()


def prim_pixel(img: np.ndarray, x: int, y: int) -> List[int]:
    """Get pixel at (x, y) as [r, g, b]; out-of-bounds reads are black."""
    h, w = img.shape[:2]
    x, y = int(x), int(y)
    if 0 <= x < w and 0 <= y < h:
        return list(img[y, x])
    return [0, 0, 0]


def prim_set_pixel(img: np.ndarray, x: int, y: int, color: List[int]) -> np.ndarray:
    """Set pixel at (x, y) in place. Returns the (same) modified image."""
    h, w = img.shape[:2]
    x, y = int(x), int(y)
    if 0 <= x < w and 0 <= y < h:
        img[y, x] = color[:3]
    return img


def prim_sample(img: np.ndarray, x: float, y: float) -> List[float]:
    """Bilinear sample at float coordinates (clamped to the image)."""
    h, w = img.shape[:2]
    x = np.clip(x, 0, w - 1)
    y = np.clip(y, 0, h - 1)

    x0, y0 = int(x), int(y)
    x1, y1 = min(x0 + 1, w - 1), min(y0 + 1, h - 1)
    fx, fy = x - x0, y - y0

    c00 = img[y0, x0].astype(float)
    c10 = img[y0, x1].astype(float)
    c01 = img[y1, x0].astype(float)
    c11 = img[y1, x1].astype(float)

    c = (c00 * (1 - fx) * (1 - fy) +
         c10 * fx * (1 - fy) +
         c01 * (1 - fx) * fy +
         c11 * fx * fy)

    return list(c)


def prim_channel(img: np.ndarray, c: int) -> np.ndarray:
    """Extract a single channel as 2D array."""
    return img[:, :, int(c)].copy()


def prim_merge_channels(r: np.ndarray, g: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Merge three channels into RGB image."""
    return np.stack([r, g, b], axis=-1).astype(np.uint8)


def prim_resize(img: np.ndarray, w: int, h: int, mode: str = "linear") -> np.ndarray:
    """Resize image. Mode: linear, nearest, cubic, area.

    Fix: "cubic" was supported by the primitive_libs/pixels.py resize but
    silently fell back to linear here; the two now agree.
    """
    w, h = int(w), int(h)
    if w < 1 or h < 1:
        return img
    interp = {
        "linear": cv2.INTER_LINEAR,
        "nearest": cv2.INTER_NEAREST,
        "cubic": cv2.INTER_CUBIC,
        "area": cv2.INTER_AREA,
    }.get(mode, cv2.INTER_LINEAR)
    return cv2.resize(img, (w, h), interpolation=interp)


def prim_crop(img: np.ndarray, x: int, y: int, w: int, h: int) -> np.ndarray:
    """Crop a region from image (clipped to bounds)."""
    ih, iw = img.shape[:2]
    x, y, w, h = int(x), int(y), int(w), int(h)
    x = max(0, min(x, iw))
    y = max(0, min(y, ih))
    w = max(0, min(w, iw - x))
    h = max(0, min(h, ih - y))
    return img[y:y + h, x:x + w].copy()


def prim_paste(dst: np.ndarray, src: np.ndarray, x: int, y: int) -> np.ndarray:
    """Paste src onto dst at position (x, y), modifying dst in place."""
    dh, dw = dst.shape[:2]
    sh, sw = src.shape[:2]
    x, y = int(x), int(y)

    # Calculate valid regions (x/y may be negative or overflow)
    sx1 = max(0, -x)
    sy1 = max(0, -y)
    sx2 = min(sw, dw - x)
    sy2 = min(sh, dh - y)

    dx1 = max(0, x)
    dy1 = max(0, y)
    dx2 = dx1 + (sx2 - sx1)
    dy2 = dy1 + (sy2 - sy1)

    if dx2 > dx1 and dy2 > dy1:
        dst[dy1:dy2, dx1:dx2] = src[sy1:sy2, sx1:sx2]

    return dst


# =============================================================================
# Color Primitives
# =============================================================================

def prim_rgb(r: float, g: float, b: float) -> List[int]:
    """Create RGB color (components clamped to 0-255)."""
    return [int(np.clip(r, 0, 255)),
            int(np.clip(g, 0, 255)),
            int(np.clip(b, 0, 255))]


def prim_red(c: List[int]) -> int:
    """Red component, 0 if missing."""
    return c[0] if c else 0


def prim_green(c: List[int]) -> int:
    """Green component, 0 if missing."""
    return c[1] if len(c) > 1 else 0


def prim_blue(c: List[int]) -> int:
    """Blue component, 0 if missing."""
    return c[2] if len(c) > 2 else 0


def prim_luminance(c: List[int]) -> float:
    """Calculate luminance (Rec. 601 grayscale value)."""
    if not c:
        return 0
    return 0.299 * c[0] + 0.587 * c[1] + 0.114 * c[2]


def prim_rgb_to_hsv(c: List[int]) -> List[float]:
    """Convert RGB to HSV (h 0-360, s/v 0-100)."""
    r, g, b = c[0] / 255, c[1] / 255, c[2] / 255
    mx, mn = max(r, g, b), min(r, g, b)
    diff = mx - mn

    if diff == 0:
        h = 0
    elif mx == r:
        h = (60 * ((g - b) / diff) + 360) % 360
    elif mx == g:
        h = (60 * ((b - r) / diff) + 120) % 360
    else:
        h = (60 * ((r - g) / diff) + 240) % 360

    s = 0 if mx == 0 else diff / mx
    v = mx

    return [h, s * 100, v * 100]


def prim_hsv_to_rgb(hsv: List[float]) -> List[int]:
    """Convert HSV (h 0-360, s/v 0-100) to RGB."""
    h, s, v = hsv[0], hsv[1] / 100, hsv[2] / 100
    c = v * s
    x = c * (1 - abs((h / 60) % 2 - 1))
    m = v - c

    if h < 60:
        r, g, b = c, x, 0
    elif h < 120:
        r, g, b = x, c, 0
    elif h < 180:
        r, g, b = 0, c, x
    elif h < 240:
        r, g, b = 0, x, c
    elif h < 300:
        r, g, b = x, 0, c
    else:
        r, g, b = c, 0, x

    return [int((r + m) * 255), int((g + m) * 255), int((b + m) * 255)]


def prim_blend_color(c1: List[int], c2: List[int], alpha: float) -> List[int]:
    """Blend two colors (alpha clamped to 0-1)."""
    alpha = np.clip(alpha, 0, 1)
    return [int(c1[i] * (1 - alpha) + c2[i] * alpha) for i in range(3)]


def prim_average_color(img: np.ndarray) -> List[int]:
    """Get average color of image/region."""
    return [int(x) for x in img.mean(axis=(0, 1))]


# =============================================================================
# Image Operations (Bulk)
# =============================================================================

def prim_map_pixels(img: np.ndarray, fn: Callable) -> np.ndarray:
    """Apply function to each pixel: fn(x, y, [r,g,b]) -> [r,g,b]."""
    result = img.copy()
    h, w = img.shape[:2]
    for y in range(h):
        for x in range(w):
            color = list(img[y, x])
            new_color = fn(x, y, color)
            if new_color is not None:
                result[y, x] = new_color[:3]
    return result


def prim_map_rows(img: np.ndarray, fn: Callable) -> np.ndarray:
    """Apply function to each row: fn(y, row) -> row."""
    result = img.copy()
    h = img.shape[0]
    for y in range(h):
        row = img[y].copy()
        new_row = fn(y, row)
        if new_row is not None:
            result[y] = new_row
    return result


def prim_for_grid(img: np.ndarray, cell_size: int, fn: Callable) -> np.ndarray:
    """Iterate over grid cells: fn(gx, gy, cell_img) for side effects."""
    cell_size = max(1, int(cell_size))
    h, w = img.shape[:2]
    rows = h // cell_size
    cols = w // cell_size

    for gy in range(rows):
        for gx in range(cols):
            y, x = gy * cell_size, gx * cell_size
            cell = img[y:y + cell_size, x:x + cell_size]
            fn(gx, gy, cell)

    return img


def prim_fold_pixels(img: np.ndarray, init: Any, fn: Callable) -> Any:
    """Fold over pixels: fn(acc, x, y, color) -> acc."""
    acc = init
    h, w = img.shape[:2]
    for y in range(h):
        for x in range(w):
            color = list(img[y, x])
            acc = fn(acc, x, y, color)
    return acc


# =============================================================================
# Convolution / Filters
# =============================================================================

def prim_convolve(img: np.ndarray, kernel: List[List[float]]) -> np.ndarray:
    """Apply convolution kernel."""
    k = np.array(kernel, dtype=np.float32)
    return cv2.filter2D(img, -1, k)


def prim_blur(img: np.ndarray, radius: int) -> np.ndarray:
    """Gaussian blur (radius clamped to >= 1)."""
    radius = max(1, int(radius))
    ksize = radius * 2 + 1
    return cv2.GaussianBlur(img, (ksize, ksize), 0)


def prim_box_blur(img: np.ndarray, radius: int) -> np.ndarray:
    """Box blur (faster than Gaussian)."""
    radius = max(1, int(radius))
    ksize = radius * 2 + 1
    return cv2.blur(img, (ksize, ksize))


def prim_edges(img: np.ndarray, low: int = 50, high: int = 150) -> np.ndarray:
    """Canny edge detection, returns grayscale edges as RGB."""
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    edges = cv2.Canny(gray, int(low), int(high))
    return cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)


def prim_sobel(img: np.ndarray) -> np.ndarray:
    """Sobel edge detection (gradient magnitude as RGB)."""
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY).astype(np.float32)
    sx = cv2.Sobel(gray, cv2.CV_32F, 1, 0)
    sy = cv2.Sobel(gray, cv2.CV_32F, 0, 1)
    magnitude = np.sqrt(sx ** 2 + sy ** 2)
    magnitude = np.clip(magnitude, 0, 255).astype(np.uint8)
    return cv2.cvtColor(magnitude, cv2.COLOR_GRAY2RGB)
def prim_dilate(img: np.ndarray, size: int = 1) -> np.ndarray:
    """Morphological dilation.

    Fix: size is clamped to >= 1 (like prim_blur/prim_box_blur) so a
    zero/negative size no longer builds an empty kernel.
    """
    size = max(1, int(size))
    kernel = np.ones((size, size), np.uint8)
    return cv2.dilate(img, kernel, iterations=1)


def prim_erode(img: np.ndarray, size: int = 1) -> np.ndarray:
    """Morphological erosion (size clamped to >= 1, as prim_dilate)."""
    size = max(1, int(size))
    kernel = np.ones((size, size), np.uint8)
    return cv2.erode(img, kernel, iterations=1)


# =============================================================================
# Geometric Transforms
# =============================================================================

def prim_translate(img: np.ndarray, dx: float, dy: float) -> np.ndarray:
    """Translate image (edges reflect)."""
    h, w = img.shape[:2]
    M = np.float32([[1, 0, dx], [0, 1, dy]])
    return cv2.warpAffine(img, M, (w, h), borderMode=cv2.BORDER_REFLECT)


def prim_rotate(img: np.ndarray, angle: float, cx: float = None, cy: float = None) -> np.ndarray:
    """Rotate image around (cx, cy), defaulting to the center."""
    h, w = img.shape[:2]
    if cx is None:
        cx = w / 2
    if cy is None:
        cy = h / 2
    M = cv2.getRotationMatrix2D((cx, cy), angle, 1.0)
    return cv2.warpAffine(img, M, (w, h), borderMode=cv2.BORDER_REFLECT)


def prim_scale(img: np.ndarray, sx: float, sy: float, cx: float = None, cy: float = None) -> np.ndarray:
    """Scale image around (cx, cy), defaulting to the center."""
    h, w = img.shape[:2]
    if cx is None:
        cx = w / 2
    if cy is None:
        cy = h / 2

    M = np.float32([
        [sx, 0, cx * (1 - sx)],
        [0, sy, cy * (1 - sy)]
    ])
    return cv2.warpAffine(img, M, (w, h), borderMode=cv2.BORDER_REFLECT)


def prim_flip_h(img: np.ndarray) -> np.ndarray:
    """Flip horizontally."""
    return cv2.flip(img, 1)


def prim_flip_v(img: np.ndarray) -> np.ndarray:
    """Flip vertically."""
    return cv2.flip(img, 0)


def prim_remap(img: np.ndarray, map_x: np.ndarray, map_y: np.ndarray) -> np.ndarray:
    """Remap using coordinate maps."""
    return cv2.remap(img, map_x.astype(np.float32), map_y.astype(np.float32),
                     cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)


def prim_make_coords(w: int, h: int) -> Tuple[np.ndarray, np.ndarray]:
    """Create coordinate grid (map_x, map_y) for prim_remap."""
    map_x = np.tile(np.arange(w, dtype=np.float32), (h, 1))
    map_y = np.tile(np.arange(h, dtype=np.float32).reshape(-1, 1), (1, w))
    return map_x, map_y


# =============================================================================
# Blending
# =============================================================================

def prim_blend_images(a: np.ndarray, b: np.ndarray, alpha: float) -> np.ndarray:
    """Blend two images. Auto-resizes b to match a if sizes differ."""
    alpha = np.clip(alpha, 0, 1)
    # Auto-resize b to match a if different sizes
    if a.shape[:2] != b.shape[:2]:
        b = cv2.resize(b, (a.shape[1], a.shape[0]), interpolation=cv2.INTER_LINEAR)
    return (a.astype(float) * (1 - alpha) + b.astype(float) * alpha).astype(np.uint8)


def prim_blend_mode(a: np.ndarray, b: np.ndarray, mode: str) -> np.ndarray:
    """Blend with various modes: add, multiply, screen, overlay, difference.
    Auto-resizes b to match a if sizes differ."""
    # Auto-resize b to match a if different sizes
    if a.shape[:2] != b.shape[:2]:
        b = cv2.resize(b, (a.shape[1], a.shape[0]), interpolation=cv2.INTER_LINEAR)
    af = a.astype(float) / 255
    bf = b.astype(float) / 255

    if mode == "add":
        result = af + bf
    elif mode == "multiply":
        result = af * bf
    elif mode == "screen":
        result = 1 - (1 - af) * (1 - bf)
    elif mode == "overlay":
        mask = af < 0.5
        result = np.where(mask, 2 * af * bf, 1 - 2 * (1 - af) * (1 - bf))
    elif mode == "difference":
        result = np.abs(af - bf)
    elif mode == "lighten":
        result = np.maximum(af, bf)
    elif mode == "darken":
        result = np.minimum(af, bf)
    else:
        # Unknown mode: return a unchanged
        result = af

    return (np.clip(result, 0, 1) * 255).astype(np.uint8)


def prim_mask(img: np.ndarray, mask_img: np.ndarray) -> np.ndarray:
    """Apply grayscale mask to image (RGB masks are converted)."""
    if len(mask_img.shape) == 3:
        mask = cv2.cvtColor(mask_img, cv2.COLOR_RGB2GRAY)
    else:
        mask = mask_img
    mask_f = mask.astype(float) / 255
    result = img.astype(float) * mask_f[:, :, np.newaxis]
    return result.astype(np.uint8)


# =============================================================================
# Drawing
# =============================================================================

# Simple font (5x7 bitmap characters); rows are 5-bit patterns
FONT_5X7 = {
    ' ': [0, 0, 0, 0, 0, 0, 0],
    '.': [0, 0, 0, 0, 0, 0, 4],
    ':': [0, 0, 4, 0, 4, 0, 0],
    '-': [0, 0, 0, 14, 0, 0, 0],
    '=': [0, 0, 14, 0, 14, 0, 0],
    '+': [0, 4, 4, 31, 4, 4, 0],
    '*': [0, 4, 21, 14, 21, 4, 0],
    '#': [10, 31, 10, 10, 31, 10, 0],
    '%': [19, 19, 4, 8, 25, 25, 0],
    '@': [14, 17, 23, 21, 23, 16, 14],
    '0': [14, 17, 19, 21, 25, 17, 14],
    '1': [4, 12, 4, 4, 4, 4, 14],
    '2': [14, 17, 1, 2, 4, 8, 31],
    '3': [31, 2, 4, 2, 1, 17, 14],
    '4': [2, 6, 10, 18, 31, 2, 2],
    '5': [31, 16, 30, 1, 1, 17, 14],
    '6': [6, 8, 16, 30, 17, 17, 14],
    '7': [31, 1, 2, 4, 8, 8, 8],
    '8': [14, 17, 17, 14, 17, 17, 14],
    '9': [14, 17, 17, 15, 1, 2, 12],
}

# Add uppercase letters (placeholder glyphs; text drawing uses OpenCV fonts)
for c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
    FONT_5X7[c] = [0] * 7


def prim_draw_char(img: np.ndarray, char: str, x: int, y: int,
                   size: int, color: List[int]) -> np.ndarray:
    """Draw a character at position (in place)."""
    # Use OpenCV's built-in font for simplicity
    font = cv2.FONT_HERSHEY_SIMPLEX
    scale = size / 20.0
    thickness = max(1, int(size / 10))
    cv2.putText(img, char, (int(x), int(y + size)), font, scale, tuple(color[:3]), thickness)
    return img


def prim_draw_text(img: np.ndarray, text: str, x: int, y: int,
                   size: int, color: List[int]) -> np.ndarray:
    """Draw text at position (in place)."""
    font = cv2.FONT_HERSHEY_SIMPLEX
    scale = size / 20.0
    thickness = max(1, int(size / 10))
    cv2.putText(img, text, (int(x), int(y + size)), font, scale, tuple(color[:3]), thickness)
    return img


def prim_fill_rect(img: np.ndarray, x: int, y: int, w: int, h: int,
                   color: List[int]) -> np.ndarray:
    """Fill rectangle (in place)."""
    x, y, w, h = int(x), int(y), int(w), int(h)
    img[y:y + h, x:x + w] = color[:3]
    return img


def prim_draw_line(img: np.ndarray, x1: int, y1: int, x2: int, y2: int,
                   color: List[int], thickness: int = 1) -> np.ndarray:
    """Draw line (in place)."""
    cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)), tuple(color[:3]), int(thickness))
    return img


# =============================================================================
# Math Primitives
# =============================================================================

def prim_sin(x: float) -> float:
    """Sine of x (radians)."""
    return math.sin(x)


def prim_cos(x: float) -> float:
    """Cosine of x (radians)."""
    return math.cos(x)


def prim_tan(x: float) -> float:
    """Tangent of x (radians)."""
    return math.tan(x)


def prim_atan2(y: float, x: float) -> float:
    """Arc tangent of y/x, honouring the quadrant."""
    return math.atan2(y, x)


def prim_sqrt(x: float) -> float:
    """Square root; negative inputs are clamped to 0."""
    return math.sqrt(max(0, x))


def prim_pow(x: float, y: float) -> float:
    """x raised to the power y."""
    return math.pow(x, y)


def prim_abs(x: float) -> float:
    """Absolute value."""
    return abs(x)
prim_floor(x: float) -> int: + return int(math.floor(x)) + + +def prim_ceil(x: float) -> int: + return int(math.ceil(x)) + + +def prim_round(x: float) -> int: + return int(round(x)) + + +def prim_min(*args) -> float: + return min(args) + + +def prim_max(*args) -> float: + return max(args) + + +def prim_clamp(x: float, lo: float, hi: float) -> float: + return max(lo, min(hi, x)) + + +def prim_lerp(a: float, b: float, t: float) -> float: + """Linear interpolation.""" + return a + (b - a) * t + + +def prim_mod(a: float, b: float) -> float: + return a % b + + +def prim_random(lo: float = 0, hi: float = 1) -> float: + """Random number from global RNG.""" + return _rng.random(lo, hi) + + +def prim_randint(lo: int, hi: int) -> int: + """Random integer from global RNG.""" + return _rng.randint(lo, hi) + + +def prim_gaussian(mean: float = 0, std: float = 1) -> float: + """Gaussian random from global RNG.""" + return _rng.gaussian(mean, std) + + +def prim_assert(condition, message: str = "Assertion failed"): + """Assert that condition is true, raise error with message if false.""" + if not condition: + raise RuntimeError(f"Assertion error: {message}") + return True + + +# ============================================================================= +# Array/List Primitives +# ============================================================================= + +def prim_length(seq) -> int: + return len(seq) + + +def prim_nth(seq, i: int): + i = int(i) + if 0 <= i < len(seq): + return seq[i] + return None + + +def prim_first(seq): + return seq[0] if seq else None + + +def prim_rest(seq): + return seq[1:] if seq else [] + + +def prim_take(seq, n: int): + return seq[:int(n)] + + +def prim_drop(seq, n: int): + return seq[int(n):] + + +def prim_cons(x, seq): + return [x] + list(seq) + + +def prim_append(*seqs): + result = [] + for s in seqs: + result.extend(s) + return result + + +def prim_reverse(seq): + return list(reversed(seq)) + + +def prim_range(start: int, end: int, step: int = 
1) -> List[int]: + return list(range(int(start), int(end), int(step))) + + +def prim_roll(arr: np.ndarray, shift: int, axis: int = 0) -> np.ndarray: + """Circular roll of array.""" + return np.roll(arr, int(shift), axis=int(axis)) + + +def prim_list(*args) -> list: + """Create a list.""" + return list(args) + + +# ============================================================================= +# Primitive Registry +# ============================================================================= + +def prim_add(*args): + return sum(args) + +def prim_sub(a, b=None): + if b is None: + return -a # Unary negation + return a - b + +def prim_mul(*args): + result = 1 + for x in args: + result *= x + return result + +def prim_div(a, b): + return a / b if b != 0 else 0 + +def prim_lt(a, b): + return a < b + +def prim_gt(a, b): + return a > b + +def prim_le(a, b): + return a <= b + +def prim_ge(a, b): + return a >= b + +def prim_eq(a, b): + # Handle None/nil comparisons with numpy arrays + if a is None: + return b is None + if b is None: + return a is None + if isinstance(a, np.ndarray) or isinstance(b, np.ndarray): + if isinstance(a, np.ndarray) and isinstance(b, np.ndarray): + return np.array_equal(a, b) + return False # array vs non-array + return a == b + +def prim_ne(a, b): + return not prim_eq(a, b) + + +# ============================================================================= +# Vectorized Bulk Operations (true primitives for composing effects) +# ============================================================================= + +def prim_color_matrix(img: np.ndarray, matrix: List[List[float]]) -> np.ndarray: + """Apply a 3x3 color transformation matrix to all pixels.""" + m = np.array(matrix, dtype=np.float32) + result = img.astype(np.float32) @ m.T + return np.clip(result, 0, 255).astype(np.uint8) + + +def prim_adjust(img: np.ndarray, brightness: float = 0, contrast: float = 1) -> np.ndarray: + """Adjust brightness and contrast. 
Brightness: -255 to 255, Contrast: 0 to 3+.""" + result = (img.astype(np.float32) - 128) * contrast + 128 + brightness + return np.clip(result, 0, 255).astype(np.uint8) + + +def prim_mix_gray(img: np.ndarray, amount: float) -> np.ndarray: + """Mix image with its grayscale version. 0=original, 1=grayscale.""" + gray = 0.299 * img[:, :, 0] + 0.587 * img[:, :, 1] + 0.114 * img[:, :, 2] + gray_rgb = np.stack([gray, gray, gray], axis=-1) + result = img.astype(np.float32) * (1 - amount) + gray_rgb * amount + return np.clip(result, 0, 255).astype(np.uint8) + + +def prim_invert_img(img: np.ndarray) -> np.ndarray: + """Invert all pixel values.""" + return (255 - img).astype(np.uint8) + + +def prim_add_noise(img: np.ndarray, amount: float) -> np.ndarray: + """Add gaussian noise to image.""" + noise = _rng._rng.normal(0, amount, img.shape) + result = img.astype(np.float32) + noise + return np.clip(result, 0, 255).astype(np.uint8) + + +def prim_quantize(img: np.ndarray, levels: int) -> np.ndarray: + """Reduce to N color levels per channel.""" + levels = max(2, int(levels)) + factor = 256 / levels + result = (img // factor) * factor + factor // 2 + return np.clip(result, 0, 255).astype(np.uint8) + + +def prim_shift_hsv(img: np.ndarray, h: float = 0, s: float = 1, v: float = 1) -> np.ndarray: + """Shift HSV: h=degrees offset, s/v=multipliers.""" + hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.float32) + hsv[:, :, 0] = (hsv[:, :, 0] + h / 2) % 180 + hsv[:, :, 1] = np.clip(hsv[:, :, 1] * s, 0, 255) + hsv[:, :, 2] = np.clip(hsv[:, :, 2] * v, 0, 255) + return cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2RGB) + + +# ============================================================================= +# Array Math Primitives (vectorized operations on coordinate arrays) +# ============================================================================= + +def prim_arr_add(a: np.ndarray, b) -> np.ndarray: + """Element-wise addition. 
b can be array or scalar.""" + return (np.asarray(a) + np.asarray(b)).astype(np.float32) + + +def prim_arr_sub(a: np.ndarray, b) -> np.ndarray: + """Element-wise subtraction. b can be array or scalar.""" + return (np.asarray(a) - np.asarray(b)).astype(np.float32) + + +def prim_arr_mul(a: np.ndarray, b) -> np.ndarray: + """Element-wise multiplication. b can be array or scalar.""" + return (np.asarray(a) * np.asarray(b)).astype(np.float32) + + +def prim_arr_div(a: np.ndarray, b) -> np.ndarray: + """Element-wise division. b can be array or scalar.""" + b = np.asarray(b) + # Avoid division by zero + with np.errstate(divide='ignore', invalid='ignore'): + result = np.asarray(a) / np.where(b == 0, 1e-10, b) + return result.astype(np.float32) + + +def prim_arr_mod(a: np.ndarray, b) -> np.ndarray: + """Element-wise modulo.""" + return (np.asarray(a) % np.asarray(b)).astype(np.float32) + + +def prim_arr_sin(a: np.ndarray) -> np.ndarray: + """Element-wise sine.""" + return np.sin(np.asarray(a)).astype(np.float32) + + +def prim_arr_cos(a: np.ndarray) -> np.ndarray: + """Element-wise cosine.""" + return np.cos(np.asarray(a)).astype(np.float32) + + +def prim_arr_tan(a: np.ndarray) -> np.ndarray: + """Element-wise tangent.""" + return np.tan(np.asarray(a)).astype(np.float32) + + +def prim_arr_sqrt(a: np.ndarray) -> np.ndarray: + """Element-wise square root.""" + return np.sqrt(np.maximum(0, np.asarray(a))).astype(np.float32) + + +def prim_arr_pow(a: np.ndarray, b) -> np.ndarray: + """Element-wise power.""" + return np.power(np.asarray(a), np.asarray(b)).astype(np.float32) + + +def prim_arr_abs(a: np.ndarray) -> np.ndarray: + """Element-wise absolute value.""" + return np.abs(np.asarray(a)).astype(np.float32) + + +def prim_arr_neg(a: np.ndarray) -> np.ndarray: + """Element-wise negation.""" + return (-np.asarray(a)).astype(np.float32) + + +def prim_arr_exp(a: np.ndarray) -> np.ndarray: + """Element-wise exponential.""" + return np.exp(np.asarray(a)).astype(np.float32) + + +def 
prim_arr_atan2(y: np.ndarray, x: np.ndarray) -> np.ndarray: + """Element-wise atan2(y, x).""" + return np.arctan2(np.asarray(y), np.asarray(x)).astype(np.float32) + + +def prim_arr_min(a: np.ndarray, b) -> np.ndarray: + """Element-wise minimum.""" + return np.minimum(np.asarray(a), np.asarray(b)).astype(np.float32) + + +def prim_arr_max(a: np.ndarray, b) -> np.ndarray: + """Element-wise maximum.""" + return np.maximum(np.asarray(a), np.asarray(b)).astype(np.float32) + + +def prim_arr_clip(a: np.ndarray, lo, hi) -> np.ndarray: + """Element-wise clip to range.""" + return np.clip(np.asarray(a), lo, hi).astype(np.float32) + + +def prim_arr_where(cond: np.ndarray, a, b) -> np.ndarray: + """Element-wise conditional: where cond is true, use a, else b.""" + return np.where(np.asarray(cond), np.asarray(a), np.asarray(b)).astype(np.float32) + + +def prim_arr_floor(a: np.ndarray) -> np.ndarray: + """Element-wise floor.""" + return np.floor(np.asarray(a)).astype(np.float32) + + +def prim_arr_lerp(a: np.ndarray, b: np.ndarray, t) -> np.ndarray: + """Element-wise linear interpolation.""" + a, b = np.asarray(a), np.asarray(b) + return (a + (b - a) * t).astype(np.float32) + + +# ============================================================================= +# Coordinate Transformation Primitives +# ============================================================================= + +def prim_polar_from_center(img_or_w, h_or_cx=None, cx=None, cy=None) -> Tuple[np.ndarray, np.ndarray]: + """ + Create polar coordinates (r, theta) from image center. 
+ + Usage: + (polar-from-center img) ; center of image + (polar-from-center img cx cy) ; custom center + (polar-from-center w h cx cy) ; explicit dimensions + + Returns: (r, theta) tuple of arrays + """ + if isinstance(img_or_w, np.ndarray): + h, w = img_or_w.shape[:2] + if h_or_cx is None: + cx, cy = w / 2, h / 2 + else: + cx, cy = h_or_cx, cx if cx is not None else h / 2 + else: + w = int(img_or_w) + h = int(h_or_cx) + cx = cx if cx is not None else w / 2 + cy = cy if cy is not None else h / 2 + + y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) + dx = x_coords - cx + dy = y_coords - cy + r = np.sqrt(dx**2 + dy**2) + theta = np.arctan2(dy, dx) + + return (r, theta) + + +def prim_cart_from_polar(r: np.ndarray, theta: np.ndarray, cx: float, cy: float) -> Tuple[np.ndarray, np.ndarray]: + """ + Convert polar coordinates back to Cartesian. + + Args: + r: radius array + theta: angle array + cx, cy: center point + + Returns: (x, y) tuple of coordinate arrays + """ + x = (cx + r * np.cos(theta)).astype(np.float32) + y = (cy + r * np.sin(theta)).astype(np.float32) + return (x, y) + + +def prim_normalize_coords(img_or_w, h_or_cx=None, cx=None, cy=None) -> Tuple[np.ndarray, np.ndarray]: + """ + Create normalized coordinates (-1 to 1) from center. 
+ + Returns: (x_norm, y_norm) tuple of arrays where center is (0,0) + """ + if isinstance(img_or_w, np.ndarray): + h, w = img_or_w.shape[:2] + if h_or_cx is None: + cx, cy = w / 2, h / 2 + else: + cx, cy = h_or_cx, cx if cx is not None else h / 2 + else: + w = int(img_or_w) + h = int(h_or_cx) + cx = cx if cx is not None else w / 2 + cy = cy if cy is not None else h / 2 + + y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) + x_norm = (x_coords - cx) / (w / 2) + y_norm = (y_coords - cy) / (h / 2) + + return (x_norm, y_norm) + + +def prim_coords_x(coords: Tuple[np.ndarray, np.ndarray]) -> np.ndarray: + """Get x/first component from coordinate tuple.""" + return coords[0] + + +def prim_coords_y(coords: Tuple[np.ndarray, np.ndarray]) -> np.ndarray: + """Get y/second component from coordinate tuple.""" + return coords[1] + + +def prim_make_coords_centered(w: int, h: int, cx: float = None, cy: float = None) -> Tuple[np.ndarray, np.ndarray]: + """ + Create coordinate grids centered at (cx, cy). + Like make-coords but returns coordinates relative to center. + """ + w, h = int(w), int(h) + if cx is None: + cx = w / 2 + if cy is None: + cy = h / 2 + + y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) + return (x_coords - cx, y_coords - cy) + + +# ============================================================================= +# Specialized Distortion Primitives +# ============================================================================= + +def prim_wave_displace(w: int, h: int, axis: str, freq: float, amp: float, phase: float = 0) -> Tuple[np.ndarray, np.ndarray]: + """ + Create wave displacement maps. 
+ + Args: + w, h: dimensions + axis: "x" (horizontal waves) or "y" (vertical waves) + freq: wave frequency (waves per image width/height) + amp: wave amplitude in pixels + phase: phase offset in radians + + Returns: (map_x, map_y) for use with remap + """ + w, h = int(w), int(h) + map_x = np.tile(np.arange(w, dtype=np.float32), (h, 1)) + map_y = np.tile(np.arange(h, dtype=np.float32).reshape(-1, 1), (1, w)) + + if axis == "x" or axis == "horizontal": + # Horizontal waves: displace x based on y + wave = np.sin(2 * np.pi * freq * map_y / h + phase) * amp + map_x = map_x + wave + elif axis == "y" or axis == "vertical": + # Vertical waves: displace y based on x + wave = np.sin(2 * np.pi * freq * map_x / w + phase) * amp + map_y = map_y + wave + elif axis == "both": + wave_x = np.sin(2 * np.pi * freq * map_y / h + phase) * amp + wave_y = np.sin(2 * np.pi * freq * map_x / w + phase) * amp + map_x = map_x + wave_x + map_y = map_y + wave_y + + return (map_x, map_y) + + +def prim_swirl_displace(w: int, h: int, strength: float, radius: float = 0.5, + cx: float = None, cy: float = None, falloff: str = "quadratic") -> Tuple[np.ndarray, np.ndarray]: + """ + Create swirl displacement maps. 
+ + Args: + w, h: dimensions + strength: swirl strength in radians + radius: effect radius as fraction of max dimension + cx, cy: center (defaults to image center) + falloff: "linear", "quadratic", or "gaussian" + + Returns: (map_x, map_y) for use with remap + """ + w, h = int(w), int(h) + if cx is None: + cx = w / 2 + if cy is None: + cy = h / 2 + + radius_px = max(w, h) * radius + + y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) + dx = x_coords - cx + dy = y_coords - cy + dist = np.sqrt(dx**2 + dy**2) + angle = np.arctan2(dy, dx) + + # Normalized distance for falloff + norm_dist = dist / radius_px + + # Calculate falloff factor + if falloff == "linear": + factor = np.maximum(0, 1 - norm_dist) + elif falloff == "gaussian": + factor = np.exp(-norm_dist**2 * 2) + else: # quadratic + factor = np.maximum(0, 1 - norm_dist**2) + + # Apply swirl rotation + new_angle = angle + strength * factor + + # Calculate new coordinates + map_x = (cx + dist * np.cos(new_angle)).astype(np.float32) + map_y = (cy + dist * np.sin(new_angle)).astype(np.float32) + + return (map_x, map_y) + + +def prim_fisheye_displace(w: int, h: int, strength: float, cx: float = None, cy: float = None, + zoom_correct: bool = True) -> Tuple[np.ndarray, np.ndarray]: + """ + Create fisheye/barrel distortion displacement maps. 
+ + Args: + w, h: dimensions + strength: distortion strength (-1 to 1, positive=bulge, negative=pinch) + cx, cy: center (defaults to image center) + zoom_correct: auto-zoom to hide black edges + + Returns: (map_x, map_y) for use with remap + """ + w, h = int(w), int(h) + if cx is None: + cx = w / 2 + if cy is None: + cy = h / 2 + + y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) + + # Normalize coordinates + x_norm = (x_coords - cx) / (w / 2) + y_norm = (y_coords - cy) / (h / 2) + r = np.sqrt(x_norm**2 + y_norm**2) + + # Apply barrel/pincushion distortion + if strength > 0: + r_distorted = r * (1 + strength * r**2) + else: + r_distorted = r / (1 - strength * r**2 + 0.001) + + # Calculate scale factor + with np.errstate(divide='ignore', invalid='ignore'): + scale = np.where(r > 0, r_distorted / r, 1) + + # Apply zoom correction + if zoom_correct and strength > 0: + zoom = 1 + strength * 0.5 + scale = scale / zoom + + # Calculate new coordinates + map_x = (x_norm * scale * (w / 2) + cx).astype(np.float32) + map_y = (y_norm * scale * (h / 2) + cy).astype(np.float32) + + return (map_x, map_y) + + +def prim_kaleidoscope_displace(w: int, h: int, segments: int, rotation: float = 0, + cx: float = None, cy: float = None, zoom: float = 1.0) -> Tuple[np.ndarray, np.ndarray]: + """ + Create kaleidoscope displacement maps. 
+ + Args: + w, h: dimensions + segments: number of symmetry segments (3-16) + rotation: rotation angle in degrees + cx, cy: center (defaults to image center) + zoom: zoom factor + + Returns: (map_x, map_y) for use with remap + """ + w, h = int(w), int(h) + segments = max(3, min(int(segments), 16)) + if cx is None: + cx = w / 2 + if cy is None: + cy = h / 2 + + segment_angle = 2 * np.pi / segments + + y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) + + # Translate to center + x_centered = x_coords - cx + y_centered = y_coords - cy + + # Convert to polar + r = np.sqrt(x_centered**2 + y_centered**2) + theta = np.arctan2(y_centered, x_centered) + + # Apply rotation + theta = theta - np.deg2rad(rotation) + + # Fold angle into first segment and mirror + theta_normalized = theta % (2 * np.pi) + segment_idx = (theta_normalized / segment_angle).astype(int) + theta_in_segment = theta_normalized - segment_idx * segment_angle + + # Mirror alternating segments + mirror_mask = (segment_idx % 2) == 1 + theta_in_segment = np.where(mirror_mask, segment_angle - theta_in_segment, theta_in_segment) + + # Apply zoom + r = r / zoom + + # Convert back to Cartesian + map_x = (r * np.cos(theta_in_segment) + cx).astype(np.float32) + map_y = (r * np.sin(theta_in_segment) + cy).astype(np.float32) + + return (map_x, map_y) + + +# ============================================================================= +# Character/ASCII Art Primitives +# ============================================================================= + +# Character sets ordered by visual density (light to dark) +CHAR_ALPHABETS = { + "standard": " .`'^\",:;Il!i><~+_-?][}{1)(|/tfjrxnuvczXYUJCLQ0OZmwqpdbkhao*#MW&8%B@$", + "blocks": " ░▒▓█", + "simple": " .-:=+*#%@", + "digits": " 0123456789", +} + +# Global atlas cache: keyed on (frozenset(chars), cell_size) -> +# (atlas_array, char_to_idx) where atlas_array is (N, cell_size, cell_size) uint8. 
+_char_atlas_cache = {} +_CHAR_ATLAS_CACHE_MAX = 32 + + +def _get_char_atlas(alphabet: str, cell_size: int) -> dict: + """Get or create character atlas for alphabet (legacy dict version).""" + atlas_arr, char_to_idx = _get_render_atlas(alphabet, cell_size) + # Build legacy dict from array + idx_to_char = {v: k for k, v in char_to_idx.items()} + return {idx_to_char[i]: atlas_arr[i] for i in range(len(atlas_arr))} + + +def _get_render_atlas(unique_chars_or_alphabet, cell_size: int): + """Get or build a stacked numpy atlas for vectorised rendering. + + Args: + unique_chars_or_alphabet: Either an alphabet name (str looked up in + CHAR_ALPHABETS), a literal character string, or a set/frozenset + of characters. + cell_size: Pixel size of each cell. + + Returns: + (atlas_array, char_to_idx) where + atlas_array: (num_chars, cell_size, cell_size) uint8 masks + char_to_idx: dict mapping character -> index in atlas_array + """ + if isinstance(unique_chars_or_alphabet, (set, frozenset)): + chars_tuple = tuple(sorted(unique_chars_or_alphabet)) + else: + resolved = CHAR_ALPHABETS.get(unique_chars_or_alphabet, unique_chars_or_alphabet) + chars_tuple = tuple(resolved) + + cache_key = (chars_tuple, cell_size) + cached = _char_atlas_cache.get(cache_key) + if cached is not None: + return cached + + font = cv2.FONT_HERSHEY_SIMPLEX + font_scale = cell_size / 20.0 + thickness = max(1, int(cell_size / 10)) + + n = len(chars_tuple) + atlas = np.zeros((n, cell_size, cell_size), dtype=np.uint8) + char_to_idx = {} + + for i, char in enumerate(chars_tuple): + char_to_idx[char] = i + if char and char != ' ': + try: + (text_w, text_h), _ = cv2.getTextSize(char, font, font_scale, thickness) + text_x = max(0, (cell_size - text_w) // 2) + text_y = (cell_size + text_h) // 2 + cv2.putText(atlas[i], char, (text_x, text_y), + font, font_scale, 255, thickness, cv2.LINE_AA) + except Exception: + pass + + # Evict oldest entry if cache is full + if len(_char_atlas_cache) >= _CHAR_ATLAS_CACHE_MAX: + 
_char_atlas_cache.pop(next(iter(_char_atlas_cache))) + + _char_atlas_cache[cache_key] = (atlas, char_to_idx) + return atlas, char_to_idx + + +def prim_cell_sample(img: np.ndarray, cell_size: int) -> Tuple[np.ndarray, np.ndarray]: + """ + Sample image into cell grid, returning average colors and luminances. + + Uses cv2.resize with INTER_AREA (pixel-area averaging) which is + ~25x faster than numpy reshape+mean for block downsampling. + + Args: + img: source image + cell_size: size of each cell in pixels + + Returns: (colors, luminances) tuple + - colors: (rows, cols, 3) array of average RGB per cell + - luminances: (rows, cols) array of average brightness 0-255 + """ + cell_size = max(1, int(cell_size)) + h, w = img.shape[:2] + rows = h // cell_size + cols = w // cell_size + + if rows < 1 or cols < 1: + return (np.zeros((1, 1, 3), dtype=np.uint8), + np.zeros((1, 1), dtype=np.float32)) + + # Crop to exact grid then block-average via cv2 area interpolation. + grid_h, grid_w = rows * cell_size, cols * cell_size + cropped = img[:grid_h, :grid_w] + colors = cv2.resize(cropped, (cols, rows), interpolation=cv2.INTER_AREA) + + # Compute luminance + luminances = (0.299 * colors[:, :, 0] + + 0.587 * colors[:, :, 1] + + 0.114 * colors[:, :, 2]).astype(np.float32) + + return (colors, luminances) + + +def cell_sample_extended(img: np.ndarray, cell_size: int) -> Tuple[np.ndarray, np.ndarray, List[List[ZoneContext]]]: + """ + Sample image into cell grid, returning colors, luminances, and full zone contexts. 
+ + Args: + img: source image (RGB) + cell_size: size of each cell in pixels + + Returns: (colors, luminances, zone_contexts) tuple + - colors: (rows, cols, 3) array of average RGB per cell + - luminances: (rows, cols) array of average brightness 0-255 + - zone_contexts: 2D list of ZoneContext objects with full cell data + """ + cell_size = max(1, int(cell_size)) + h, w = img.shape[:2] + rows = h // cell_size + cols = w // cell_size + + if rows < 1 or cols < 1: + return (np.zeros((1, 1, 3), dtype=np.uint8), + np.zeros((1, 1), dtype=np.float32), + [[ZoneContext(0, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)]]) + + # Crop to grid + grid_h, grid_w = rows * cell_size, cols * cell_size + cropped = img[:grid_h, :grid_w] + + # Reshape and average + reshaped = cropped.reshape(rows, cell_size, cols, cell_size, 3) + colors = reshaped.mean(axis=(1, 3)).astype(np.uint8) + + # Compute luminance (0-255) + luminances = (0.299 * colors[:, :, 0] + + 0.587 * colors[:, :, 1] + + 0.114 * colors[:, :, 2]).astype(np.float32) + + # Normalize colors to 0-1 for HSV/saturation calculations + colors_float = colors.astype(np.float32) / 255.0 + + # Compute HSV values for each cell + max_c = colors_float.max(axis=2) + min_c = colors_float.min(axis=2) + diff = max_c - min_c + + # Saturation + saturation = np.where(max_c > 0, diff / max_c, 0) + + # Hue (0-360) + hue = np.zeros((rows, cols), dtype=np.float32) + # Avoid division by zero + mask = diff > 0 + r, g, b = colors_float[:, :, 0], colors_float[:, :, 1], colors_float[:, :, 2] + + # Red is max + red_max = mask & (max_c == r) + hue[red_max] = 60 * (((g[red_max] - b[red_max]) / diff[red_max]) % 6) + + # Green is max + green_max = mask & (max_c == g) + hue[green_max] = 60 * ((b[green_max] - r[green_max]) / diff[green_max] + 2) + + # Blue is max + blue_max = mask & (max_c == b) + hue[blue_max] = 60 * ((r[blue_max] - g[blue_max]) / diff[blue_max] + 4) + + # Ensure hue is in 0-360 range + hue = hue % 360 + + # Build zone contexts + zone_contexts = 
[] + for row in range(rows): + row_contexts = [] + for col in range(cols): + ctx = ZoneContext( + row=row, + col=col, + row_norm=row / max(1, rows - 1) if rows > 1 else 0.5, + col_norm=col / max(1, cols - 1) if cols > 1 else 0.5, + luminance=luminances[row, col] / 255.0, # Normalize to 0-1 + saturation=float(saturation[row, col]), + hue=float(hue[row, col]), + r=float(colors_float[row, col, 0]), + g=float(colors_float[row, col, 1]), + b=float(colors_float[row, col, 2]), + ) + row_contexts.append(ctx) + zone_contexts.append(row_contexts) + + return (colors, luminances, zone_contexts) + + +def prim_luminance_to_chars(luminances: np.ndarray, alphabet: str, contrast: float = 1.0) -> List[List[str]]: + """ + Map luminance values to characters from alphabet. + + Args: + luminances: (rows, cols) array of brightness values 0-255 + alphabet: character set name or literal string (light to dark) + contrast: contrast boost factor + + Returns: 2D list of single-character strings + """ + chars = CHAR_ALPHABETS.get(alphabet, alphabet) + num_chars = len(chars) + + # Apply contrast + lum = luminances.astype(np.float32) + if contrast != 1.0: + lum = (lum - 128) * contrast + 128 + lum = np.clip(lum, 0, 255) + + # Map to indices + indices = ((lum / 255) * (num_chars - 1)).astype(np.int32) + indices = np.clip(indices, 0, num_chars - 1) + + # Vectorised conversion via numpy char array lookup + chars_arr = np.array(list(chars)) + char_grid = chars_arr[indices.ravel()].reshape(indices.shape) + + return char_grid.tolist() + + +def prim_render_char_grid(img: np.ndarray, chars: List[List[str]], colors: np.ndarray, + cell_size: int, color_mode: str = "color", + background_color: str = "black", + invert_colors: bool = False) -> np.ndarray: + """ + Render a grid of characters onto an image. + + Uses vectorised numpy operations instead of per-cell Python loops: + the character atlas is looked up via fancy indexing and the full + mask + colour image are assembled in bulk. 
+ + Args: + img: source image (for dimensions) + chars: 2D list of single characters + colors: (rows, cols, 3) array of colors per cell + cell_size: size of each cell + color_mode: "color" (original colors), "mono" (white), "invert", + or any color name/hex value ("green", "lime", "#00ff00") + background_color: background color name/hex ("black", "navy", "#001100") + invert_colors: if True, swap foreground and background colors + + Returns: rendered image + """ + # Parse color_mode - may be a named color or hex value + fg_color = parse_color(color_mode) + + # Parse background_color + if isinstance(background_color, (list, tuple)): + bg_color = tuple(int(c) for c in background_color[:3]) + else: + bg_color = parse_color(background_color) + if bg_color is None: + bg_color = (0, 0, 0) + + # Handle invert_colors - swap fg and bg + if invert_colors and fg_color is not None: + fg_color, bg_color = bg_color, fg_color + + cell_size = max(1, int(cell_size)) + + if not chars or not chars[0]: + return img.copy() + + rows = len(chars) + cols = len(chars[0]) + h, w = rows * cell_size, cols * cell_size + + bg = list(bg_color) + + # --- Build atlas & index grid --- + unique_chars = set() + for row in chars: + for ch in row: + unique_chars.add(ch) + + atlas, char_to_idx = _get_render_atlas(unique_chars, cell_size) + + # Convert 2D char list to index array using ordinal lookup table + # (avoids per-cell Python dict lookup). 
+ space_idx = char_to_idx.get(' ', 0) + max_ord = max(ord(ch) for ch in char_to_idx) + 1 + ord_lookup = np.full(max_ord, space_idx, dtype=np.int32) + for ch, idx in char_to_idx.items(): + if ch: + ord_lookup[ord(ch)] = idx + + flat = [ch for row in chars for ch in row] + ords = np.frombuffer(np.array(flat, dtype='U1'), dtype=np.uint32) + char_indices = ord_lookup[ords].reshape(rows, cols) + + # --- Vectorised mask assembly --- + # atlas[char_indices] -> (rows, cols, cell_size, cell_size) + # Transpose to (rows, cell_size, cols, cell_size) then reshape to full image. + all_masks = atlas[char_indices] + full_mask = all_masks.transpose(0, 2, 1, 3).reshape(h, w) + + # Expand per-cell colours to per-pixel (only when needed). + need_color_full = (color_mode in ("color", "invert") + or (fg_color is None and color_mode != "mono")) + + if need_color_full: + color_full = np.repeat( + np.repeat(colors[:rows, :cols], cell_size, axis=0), + cell_size, axis=1) + + # --- Vectorised colour composite --- + # Use element-wise multiply/np.where instead of boolean-indexed scatter + # for much better memory access patterns. + mask_u8 = (full_mask > 0).astype(np.uint8)[:, :, np.newaxis] + + if color_mode == "invert": + # Background is source colour; characters are black. + # result = color_full * (1 - mask) + result = color_full * (1 - mask_u8) + elif fg_color is not None: + # Fixed foreground colour on background. + fg = np.array(fg_color, dtype=np.uint8) + bg_arr = np.array(bg, dtype=np.uint8) + result = np.where(mask_u8, fg, bg_arr).astype(np.uint8) + elif color_mode == "mono": + bg_arr = np.array(bg, dtype=np.uint8) + result = np.where(mask_u8, np.uint8(255), bg_arr).astype(np.uint8) + else: + # "color" mode – each cell uses its source colour on bg. 
def prim_render_char_grid_fx(img: np.ndarray, chars: List[List[str]], colors: np.ndarray,
                             luminances: np.ndarray, cell_size: int,
                             color_mode: str = "color",
                             background_color: str = "black",
                             invert_colors: bool = False,
                             char_jitter: float = 0.0,
                             char_scale: float = 1.0,
                             char_rotation: float = 0.0,
                             char_hue_shift: float = 0.0,
                             jitter_source: str = "none",
                             scale_source: str = "none",
                             rotation_source: str = "none",
                             hue_source: str = "none") -> np.ndarray:
    """
    Render a grid of characters with per-character effects.

    Args:
        img: source image (used only for the output dimensions)
        chars: 2D list of single characters
        colors: (rows, cols, 3) array of colors per cell
        luminances: (rows, cols) array of luminance values (0-255)
        cell_size: size of each cell in pixels
        color_mode: "color", "mono", "invert", or any color name/hex
        background_color: background color name/hex (or an RGB sequence)
        invert_colors: if True, swap foreground and background colors
        char_jitter: base jitter amount in pixels
        char_scale: base scale factor (1.0 = normal)
        char_rotation: base rotation in degrees
        char_hue_shift: base hue shift in degrees (0-360)
        jitter_source / scale_source / rotation_source / hue_source:
            per-cell modulation source, one of:
            "none"          - use the base value only
            "luminance"     - modulate by cell luminance (0-1)
            "inv_luminance" - modulate by inverse luminance (dark = high)
            "saturation"    - modulate by cell color saturation
            "position_x"    - modulate by horizontal position (0-1)
            "position_y"    - modulate by vertical position (0-1)
            "position_diag" - modulate by diagonal position
            "random"        - deterministic per-cell pseudo-random value
            "center_dist"   - distance from center (0=center, 1=corner)

    Returns: rendered RGB uint8 image with the same size as `img`.
    """
    fg_color = parse_color(color_mode)

    if isinstance(background_color, (list, tuple)):
        bg_color = tuple(int(c) for c in background_color[:3])
    else:
        bg_color = parse_color(background_color)
        if bg_color is None:
            bg_color = (0, 0, 0)

    if invert_colors and fg_color is not None:
        fg_color, bg_color = bg_color, fg_color

    cell_size = max(1, int(cell_size))

    if not chars or not chars[0]:
        return img.copy()

    rows = len(chars)
    cols = len(chars[0])
    h, w = rows * cell_size, cols * cell_size

    bg = list(bg_color)
    result = np.full((h, w, 3), bg, dtype=np.uint8)

    # Normalize luminances to 0-1.
    lum_normalized = luminances.astype(np.float32) / 255.0

    # Per-cell saturation. Clamp the denominator so black cells (max_c == 0)
    # don't emit divide-by-zero warnings before np.where selects 0.
    colors_float = colors.astype(np.float32) / 255.0
    max_c = colors_float.max(axis=2)
    min_c = colors_float.min(axis=2)
    saturation = np.where(max_c > 0, (max_c - min_c) / np.maximum(max_c, 1e-6), 0)

    def get_mod_value(source: str, r: int, c: int) -> float:
        # Map a modulation-source name to a 0..1 value for cell (r, c).
        if source == "luminance":
            return lum_normalized[r, c]
        if source == "inv_luminance":
            return 1.0 - lum_normalized[r, c]
        if source == "saturation":
            return saturation[r, c]
        if source == "position_x":
            return c / max(1, cols - 1) if cols > 1 else 0.5
        if source == "position_y":
            return r / max(1, rows - 1) if rows > 1 else 0.5
        if source == "position_diag":
            px = c / max(1, cols - 1) if cols > 1 else 0.5
            py = r / max(1, rows - 1) if rows > 1 else 0.5
            return (px + py) / 2.0
        if source == "random":
            # Deterministic pseudo-random value derived from position.
            seed = (r * 1000 + c) % 10000
            return ((seed * 9301 + 49297) % 233280) / 233280.0
        if source == "center_dist":
            cx, cy = (cols - 1) / 2.0, (rows - 1) / 2.0
            dx = (c - cx) / max(1, cx) if cx > 0 else 0
            dy = (r - cy) / max(1, cy) if cy > 0 else 0
            return min(1.0, math.sqrt(dx * dx + dy * dy))
        return 1.0  # "none" and unknown sources

    # Build a grayscale glyph atlas, oversized so rotation/scaling has headroom.
    font = cv2.FONT_HERSHEY_SIMPLEX
    base_font_scale = cell_size / 20.0
    thickness = max(1, int(cell_size / 10))

    unique_chars = {ch for row in chars for ch in row}

    max_scale = max(1.0, char_scale * 1.5)  # headroom for scaling
    atlas_size = int(cell_size * max_scale * 1.5)

    atlas = {}
    for char in unique_chars:
        glyph = None
        if char and char != ' ':
            try:
                glyph = np.zeros((atlas_size, atlas_size), dtype=np.uint8)
                scaled_font = base_font_scale * max_scale
                (text_w, text_h), _ = cv2.getTextSize(char, font, scaled_font, thickness)
                text_x = max(0, (atlas_size - text_w) // 2)
                text_y = (atlas_size + text_h) // 2
                cv2.putText(glyph, char, (text_x, text_y), font, scaled_font, 255,
                            thickness, cv2.LINE_AA)
            except Exception:
                # Unrenderable glyph (e.g. cv2 rejects the codepoint): skip it.
                glyph = None
        atlas[char] = glyph

    # Render characters with per-cell effects.
    for r in range(rows):
        for c in range(cols):
            char = chars[r][c]
            if not char or char == ' ':
                continue
            char_img = atlas.get(char)
            if char_img is None:
                continue

            jitter_mod = get_mod_value(jitter_source, r, c)
            scale_mod = get_mod_value(scale_source, r, c)
            rot_mod = get_mod_value(rotation_source, r, c)
            hue_mod = get_mod_value(hue_source, r, c)

            eff_jitter = char_jitter * jitter_mod
            eff_scale = char_scale * (0.5 + 0.5 * scale_mod) if scale_source != "none" else char_scale
            eff_rotation = char_rotation * (rot_mod * 2 - 1)  # remap 0..1 -> -1..1
            eff_hue_shift = char_hue_shift * hue_mod

            transformed = char_img.copy()

            # Rotation.
            if abs(eff_rotation) > 0.5:
                center = (atlas_size // 2, atlas_size // 2)
                rot_matrix = cv2.getRotationMatrix2D(center, eff_rotation, 1.0)
                transformed = cv2.warpAffine(transformed, rot_matrix, (atlas_size, atlas_size))

            # Scale - resize to target size.
            target_size = max(1, int(cell_size * eff_scale))
            if target_size != atlas_size:
                transformed = cv2.resize(transformed, (target_size, target_size),
                                         interpolation=cv2.INTER_LINEAR)

            base_y = r * cell_size
            base_x = c * cell_size
            if eff_jitter > 0:
                # Deterministic jitter derived from grid position.
                jx = ((r * 7 + c * 13) % 100) / 100.0 - 0.5
                jy = ((r * 11 + c * 17) % 100) / 100.0 - 0.5
                base_x += int(jx * eff_jitter * 2)
                base_y += int(jy * eff_jitter * 2)

            # Center the (possibly rescaled) glyph on its cell.
            offset = (target_size - cell_size) // 2
            y1 = base_y - offset
            x1 = base_x - offset

            # Determine color.
            if fg_color is not None:
                color = np.array(fg_color, dtype=np.uint8)
            elif color_mode == "mono":
                color = np.array([255, 255, 255], dtype=np.uint8)
            elif color_mode == "invert":
                # Fill the cell with the sampled color, draw the glyph in black.
                cy1 = max(0, r * cell_size)
                cy2 = min(h, (r + 1) * cell_size)
                cx1 = max(0, c * cell_size)
                cx2 = min(w, (c + 1) * cell_size)
                result[cy1:cy2, cx1:cx2] = colors[r, c]
                color = np.array([0, 0, 0], dtype=np.uint8)
            else:  # color mode
                color = colors[r, c].copy()

            # Apply hue shift.
            if abs(eff_hue_shift) > 0.5 and color_mode not in ("mono", "invert") and fg_color is None:
                color_hsv = cv2.cvtColor(color.reshape(1, 1, 3), cv2.COLOR_RGB2HSV)
                # int arithmetic avoids uint8 overflow; OpenCV hue range is 0-179.
                new_hue = (int(color_hsv[0, 0, 0]) + int(eff_hue_shift * 180 / 360)) % 180
                color_hsv[0, 0, 0] = np.uint8(new_hue)
                color = cv2.cvtColor(color_hsv, cv2.COLOR_HSV2RGB).flatten()

            # Vectorized blit: clip the glyph to the canvas, then write the
            # color through the glyph mask in one operation (was a per-pixel
            # Python double loop).
            th, tw = transformed.shape[:2]
            ry1, rx1 = max(0, y1), max(0, x1)
            ry2, rx2 = min(h, y1 + th), min(w, x1 + tw)
            if ry2 > ry1 and rx2 > rx1:
                sub_mask = transformed[ry1 - y1:ry2 - y1, rx1 - x1:rx2 - x1] > 0
                result[ry1:ry2, rx1:rx2][sub_mask] = color

    # Pad/crop to the source image's dimensions if the grid doesn't match.
    orig_h, orig_w = img.shape[:2]
    if result.shape[0] != orig_h or result.shape[1] != orig_w:
        padded = np.full((orig_h, orig_w, 3), bg, dtype=np.uint8)
        copy_h = min(h, orig_h)
        copy_w = min(w, orig_w)
        padded[:copy_h, :copy_w] = result[:copy_h, :copy_w]
        result = padded

    return result
def _render_with_cell_effect(
    frame: np.ndarray,
    chars: List[List[str]],
    colors: np.ndarray,
    luminances: np.ndarray,
    zone_contexts: List[List['ZoneContext']],
    cell_size: int,
    bg_color: tuple,
    fg_color: tuple,
    color_mode: str,
    cell_effect,  # Lambda or callable: (cell_image, zone_dict) -> cell_image
    extra_params: dict,
    interp,
    env,
    result: np.ndarray,
) -> np.ndarray:
    """
    Render ASCII art using a cell_effect lambda for arbitrary per-cell transforms.

    Each character is rendered to a cell image, the cell_effect is called with
    (cell_image, zone_dict), and the returned cell is composited into result.
    This allows arbitrary effects (rotate, blur, etc.) to be applied
    per-character.
    """
    grid_rows = len(chars)
    grid_cols = len(chars[0]) if chars else 0
    out_h, out_w = result.shape[:2]

    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = cell_size / 20.0
    thickness = max(1, int(cell_size / 10))

    # Positional-argument names for the per-effect wrappers injected below,
    # e.g. (blur cell 5) -> {"radius": 5}. Replaces a long if/elif chain.
    pos_param_names = {
        'blur': ('radius',),
        'rotate': ('angle',),
        'brightness': ('factor',),
        'contrast': ('factor',),
        'saturation': ('factor',),
        'hue_shift': ('degrees',),
        'rgb_split': ('offset_x', 'offset_y'),
        'pixelate': ('block_size',),
        'wave': ('amplitude', 'frequency'),
        'noise': ('amount',),
        'posterize': ('levels',),
        'threshold': ('level',),
        'sharpen': ('amount',),
    }

    def render_char_cell(char: str, color: np.ndarray) -> np.ndarray:
        """Render one character onto a cell-sized RGB image."""
        cell = np.full((cell_size, cell_size, 3), bg_color, dtype=np.uint8)
        if not char or char == ' ':
            return cell
        try:
            (text_w, text_h), _ = cv2.getTextSize(char, font, font_scale, thickness)
            text_x = max(0, (cell_size - text_w) // 2)
            text_y = (cell_size + text_h) // 2
            # Render the glyph to a mask, then write the color through it
            # in one masked assignment (was a per-channel np.where loop).
            glyph_mask = np.zeros((cell_size, cell_size), dtype=np.uint8)
            cv2.putText(glyph_mask, char, (text_x, text_y), font, font_scale, 255,
                        thickness, cv2.LINE_AA)
            cell[glyph_mask > 0] = color
        except Exception:
            pass  # unrenderable glyph: leave the cell as background
        return cell

    def eval_cell_effect(cell_img: np.ndarray, zone_dict: dict) -> np.ndarray:
        """Call cell_effect with (cell_image, zone_dict); supports artdag Lambdas."""
        if callable(cell_effect):
            return cell_effect(cell_img, zone_dict)

        try:
            from artdag.sexp.parser import Lambda as ArtdagLambda
            from artdag.sexp.evaluator import evaluate as artdag_evaluate
        except ImportError:
            return cell_img  # artdag not available: leave the cell unchanged

        if not isinstance(cell_effect, ArtdagLambda):
            return cell_img

        # Build the evaluation environment from the lambda's closure.
        eval_env = dict(cell_effect.closure) if cell_effect.closure else {}
        if len(cell_effect.params) >= 2:
            eval_env[cell_effect.params[0]] = cell_img
            eval_env[cell_effect.params[1]] = zone_dict
        elif len(cell_effect.params) == 1:
            # Single param receives the zone dict with the cell under 'cell'.
            zone_dict['cell'] = cell_img
            eval_env[cell_effect.params[0]] = zone_dict

        eval_env.update(PRIMITIVES)

        # (apply-effect "name" cell {params}) runs any loaded sexp effect.
        def apply_effect_fn(effect_name, frame_img, params=None):
            """Run a loaded sexp effect on a frame (cell)."""
            if interp and hasattr(interp, 'run_effect'):
                out, _ = interp.run_effect(effect_name, frame_img, params or {}, {})
                return out
            return frame_img
        eval_env['apply-effect'] = apply_effect_fn

        # Inject every loaded effect as a directly callable function, e.g.
        # (blur cell 5) or (rotate cell 45). Positional args are mapped to
        # named params via pos_param_names; a single dict argument supplies
        # params directly for effects without a positional mapping.
        if interp and hasattr(interp, 'effects'):
            for effect_name in interp.effects:
                def make_effect_fn(name):
                    def effect_fn(frame_img, *args):
                        names = pos_param_names.get(name)
                        if names and args:
                            params = dict(zip(names, args))
                        elif len(args) == 1 and isinstance(args[0], dict):
                            params = args[0]
                        else:
                            params = {}
                        out, _ = interp.run_effect(name, frame_img, params, {})
                        return out
                    return effect_fn
                eval_env[effect_name] = make_effect_fn(effect_name)

        out = artdag_evaluate(cell_effect.body, eval_env)
        return out if isinstance(out, np.ndarray) else cell_img

    # Render each cell.
    for r in range(grid_rows):
        for c in range(grid_cols):
            char = chars[r][c]
            zone = zone_contexts[r][c]

            # Determine character color.
            if fg_color is not None:
                color = np.array(fg_color, dtype=np.uint8)
            elif color_mode == "mono":
                color = np.array([255, 255, 255], dtype=np.uint8)
            elif color_mode == "invert":
                color = np.array([0, 0, 0], dtype=np.uint8)
            else:
                color = colors[r, c].copy()

            cell_img = render_char_cell(char, color)

            # Zone context exposed to the lambda.
            zone_dict = {
                'row': zone.row,
                'col': zone.col,
                'row-norm': zone.row_norm,
                'col-norm': zone.col_norm,
                'lum': zone.luminance,
                'sat': zone.saturation,
                'hue': zone.hue,
                'r': zone.r,
                'g': zone.g,
                'b': zone.b,
                'char': char,
                'color': color.tolist(),
                'cell_size': cell_size,
            }
            if extra_params:
                zone_dict.update(extra_params)

            modified_cell = eval_cell_effect(cell_img, zone_dict)

            # Defensive normalization of whatever the lambda returned.
            if modified_cell is None or not isinstance(modified_cell, np.ndarray):
                modified_cell = cell_img
            if modified_cell.shape[:2] != (cell_size, cell_size):
                modified_cell = cv2.resize(modified_cell, (cell_size, cell_size))
            if len(modified_cell.shape) == 2:
                modified_cell = cv2.cvtColor(modified_cell, cv2.COLOR_GRAY2RGB)

            # Composite into result (clipped at the canvas edges).
            y1 = r * cell_size
            x1 = c * cell_size
            y2 = min(y1 + cell_size, out_h)
            x2 = min(x1 + cell_size, out_w)
            result[y1:y2, x1:x2] = modified_cell[:y2 - y1, :x2 - x1]

    # Pad/crop to the original frame's dimensions if the grid doesn't match.
    orig_h, orig_w = frame.shape[:2]
    if result.shape[0] != orig_h or result.shape[1] != orig_w:
        bg = list(bg_color)
        padded = np.full((orig_h, orig_w, 3), bg, dtype=np.uint8)
        copy_h = min(out_h, orig_h)
        copy_w = min(out_w, orig_w)
        padded[:copy_h, :copy_w] = result[:copy_h, :copy_w]
        result = padded

    return result
def prim_ascii_fx_zone(
    frame: np.ndarray,
    cols: int,
    char_size_override: int,  # If set, overrides cols-based calculation
    alphabet: str,
    color_mode: str,
    background: str,
    contrast: float,
    char_hue_expr,       # Expression, literal, or None
    char_sat_expr,       # Expression, literal, or None
    char_bright_expr,    # Expression, literal, or None
    char_scale_expr,     # Expression, literal, or None
    char_rotation_expr,  # Expression, literal, or None
    char_jitter_expr,    # Expression, literal, or None
    interp,              # Interpreter for expression evaluation
    env,                 # Environment with bound values
    extra_params=None,   # Extra params exposed in the zone dict for lambdas
    cell_effect=None,    # Lambda (cell_image, zone_dict) -> cell_image
) -> np.ndarray:
    """
    Render ASCII art with per-zone expression-driven transforms.

    Each grid cell gets six optional transform values (hue shift, saturation,
    brightness, scale, rotation, jitter). Each may be a numeric literal, a
    Python callable, an artdag Lambda, or a sexp expression evaluated with
    the zone's context.

    Zone variables available in expressions:
        zone-row, zone-col: grid position (integers)
        zone-row-norm, zone-col-norm: normalized position (0-1)
        zone-lum: cell luminance (0-1)
        zone-sat: cell saturation (0-1)
        zone-hue: cell hue (0-360)
        zone-r, zone-g, zone-b: RGB components (0-1)

    When cell_effect is provided, each character is rendered to a cell image,
    passed to the lambda together with its zone dict, and the returned cell
    is composited — allowing arbitrary per-character effects.

    Returns: rendered RGB uint8 image, same size as `frame`.
    """
    h, w = frame.shape[:2]
    # Explicit char size wins over the cols-derived size.
    if char_size_override is not None:
        cell_size = max(4, int(char_size_override))
    else:
        cell_size = max(4, w // cols)

    # Zone data via extended sampling, then luminance -> characters.
    colors, luminances, zone_contexts = cell_sample_extended(frame, cell_size)
    chars = prim_luminance_to_chars(luminances, alphabet, contrast)

    grid_rows = len(chars)
    grid_cols = len(chars[0]) if chars else 0

    # Parse colors.
    fg_color = parse_color(color_mode)
    if isinstance(background, (list, tuple)):
        bg_color = tuple(int(c) for c in background[:3])
    else:
        bg_color = parse_color(background)
        if bg_color is None:
            bg_color = (0, 0, 0)

    # Per-zone transform values, filled by evaluating the expressions below.
    hue_shifts = np.zeros((grid_rows, grid_cols), dtype=np.float32)
    saturations = np.ones((grid_rows, grid_cols), dtype=np.float32)
    brightness = np.ones((grid_rows, grid_cols), dtype=np.float32)
    scales = np.ones((grid_rows, grid_cols), dtype=np.float32)
    rotations = np.zeros((grid_rows, grid_cols), dtype=np.float32)
    jitters = np.zeros((grid_rows, grid_cols), dtype=np.float32)

    def eval_expr(expr, zone, char):
        """Evaluate a literal / callable / artdag Lambda / sexp expression for one zone."""
        if expr is None:
            return None
        if isinstance(expr, (int, float)):
            return expr

        # Zone dict for lambda-style callers.
        zone_dict = {
            'row': zone.row,
            'col': zone.col,
            'row-norm': zone.row_norm,
            'col-norm': zone.col_norm,
            'lum': zone.luminance,
            'sat': zone.saturation,
            'hue': zone.hue,
            'r': zone.r,
            'g': zone.g,
            'b': zone.b,
            'char': char,
        }
        if extra_params:
            zone_dict.update(extra_params)

        if callable(expr):
            return expr(zone_dict)

        # artdag Lambda: bind its single parameter to the zone dict.
        try:
            from artdag.sexp.parser import Lambda as ArtdagLambda
            from artdag.sexp.evaluator import evaluate as artdag_evaluate
            if isinstance(expr, ArtdagLambda):
                eval_env = dict(expr.closure) if expr.closure else {}
                if expr.params:
                    eval_env[expr.params[0]] = zone_dict
                return artdag_evaluate(expr.body, eval_env)
        except ImportError:
            pass

        # Plain sexp expression: evaluate with zone context.
        return interp.eval_with_zone(expr, env, zone)

    # Evaluate all six expressions for each zone (order preserved:
    # hue, saturation, brightness, scale, rotation, jitter).
    exprs_and_targets = (
        (char_hue_expr, hue_shifts),
        (char_sat_expr, saturations),
        (char_bright_expr, brightness),
        (char_scale_expr, scales),
        (char_rotation_expr, rotations),
        (char_jitter_expr, jitters),
    )
    for r in range(grid_rows):
        for c in range(grid_cols):
            zone = zone_contexts[r][c]
            char = chars[r][c]
            for expr, target in exprs_and_targets:
                val = eval_expr(expr, zone, char)
                if val is not None:
                    target[r, c] = float(val)

    out_h, out_w = grid_rows * cell_size, grid_cols * cell_size
    bg = list(bg_color)
    result = np.full((out_h, out_w, 3), bg, dtype=np.uint8)

    # Arbitrary per-cell effects go through the cell-mapper rendering path.
    if cell_effect is not None:
        return _render_with_cell_effect(
            frame, chars, colors, luminances, zone_contexts,
            cell_size, bg_color, fg_color, color_mode,
            cell_effect, extra_params, interp, env, result
        )

    # Glyph atlas, oversized so rotation/scaling has headroom.
    font = cv2.FONT_HERSHEY_SIMPLEX
    base_font_scale = cell_size / 20.0
    thickness = max(1, int(cell_size / 10))

    unique_chars = {ch for row in chars for ch in row}

    max_scale = max(1.0, np.max(scales) * 1.5)
    atlas_size = int(cell_size * max_scale * 1.5)

    atlas = {}
    for char in unique_chars:
        glyph = None
        if char and char != ' ':
            try:
                glyph = np.zeros((atlas_size, atlas_size), dtype=np.uint8)
                scaled_font = base_font_scale * max_scale
                (text_w, text_h), _ = cv2.getTextSize(char, font, scaled_font, thickness)
                text_x = max(0, (atlas_size - text_w) // 2)
                text_y = (atlas_size + text_h) // 2
                cv2.putText(glyph, char, (text_x, text_y), font, scaled_font, 255,
                            thickness, cv2.LINE_AA)
            except Exception:
                glyph = None  # unrenderable glyph: skip it
        atlas[char] = glyph

    # Render characters with per-zone effects.
    for r in range(grid_rows):
        for c in range(grid_cols):
            char = chars[r][c]
            if not char or char == ' ':
                continue
            char_img = atlas.get(char)
            if char_img is None:
                continue

            eff_scale = scales[r, c]
            eff_rotation = rotations[r, c]
            eff_jitter = jitters[r, c]
            eff_hue_shift = hue_shifts[r, c]
            eff_brightness = brightness[r, c]
            eff_saturation = saturations[r, c]

            transformed = char_img.copy()

            # Rotation.
            if abs(eff_rotation) > 0.5:
                center = (atlas_size // 2, atlas_size // 2)
                rot_matrix = cv2.getRotationMatrix2D(center, eff_rotation, 1.0)
                transformed = cv2.warpAffine(transformed, rot_matrix, (atlas_size, atlas_size))

            # Scale - resize to target size.
            target_size = max(1, int(cell_size * eff_scale))
            if target_size != atlas_size:
                transformed = cv2.resize(transformed, (target_size, target_size),
                                         interpolation=cv2.INTER_LINEAR)

            base_y = r * cell_size
            base_x = c * cell_size
            if eff_jitter > 0:
                # Deterministic jitter derived from grid position.
                jx = ((r * 7 + c * 13) % 100) / 100.0 - 0.5
                jy = ((r * 11 + c * 17) % 100) / 100.0 - 0.5
                base_x += int(jx * eff_jitter * 2)
                base_y += int(jy * eff_jitter * 2)

            # Center the (possibly rescaled) glyph on its cell.
            offset = (target_size - cell_size) // 2
            y1 = base_y - offset
            x1 = base_x - offset

            # Determine color.
            if fg_color is not None:
                color = np.array(fg_color, dtype=np.uint8)
            elif color_mode == "mono":
                color = np.array([255, 255, 255], dtype=np.uint8)
            elif color_mode == "invert":
                # Fill the cell with the sampled color, draw the glyph in black.
                cy1 = max(0, r * cell_size)
                cy2 = min(out_h, (r + 1) * cell_size)
                cx1 = max(0, c * cell_size)
                cx2 = min(out_w, (c + 1) * cell_size)
                result[cy1:cy2, cx1:cx2] = colors[r, c]
                color = np.array([0, 0, 0], dtype=np.uint8)
            else:  # color mode - use source colors
                color = colors[r, c].copy()

            # Hue/saturation recoloring only applies when using source colors.
            can_recolor = color_mode not in ("mono", "invert") and fg_color is None

            if abs(eff_hue_shift) > 0.5 and can_recolor:
                color_hsv = cv2.cvtColor(color.reshape(1, 1, 3), cv2.COLOR_RGB2HSV)
                # int arithmetic avoids uint8 overflow; OpenCV hue range is 0-179.
                new_hue = (int(color_hsv[0, 0, 0]) + int(eff_hue_shift * 180 / 360)) % 180
                color_hsv[0, 0, 0] = np.uint8(new_hue)
                color = cv2.cvtColor(color_hsv, cv2.COLOR_HSV2RGB).flatten()

            if abs(eff_saturation - 1.0) > 0.01 and can_recolor:
                color_hsv = cv2.cvtColor(color.reshape(1, 1, 3), cv2.COLOR_RGB2HSV)
                new_sat = np.clip(int(color_hsv[0, 0, 1] * eff_saturation), 0, 255)
                color_hsv[0, 0, 1] = np.uint8(new_sat)
                color = cv2.cvtColor(color_hsv, cv2.COLOR_HSV2RGB).flatten()

            if abs(eff_brightness - 1.0) > 0.01:
                color = np.clip(color.astype(np.float32) * eff_brightness, 0, 255).astype(np.uint8)

            # Vectorized blit (was a per-pixel Python double loop): clip the
            # glyph to the canvas and write the color through the mask at once.
            th, tw = transformed.shape[:2]
            ry1, rx1 = max(0, y1), max(0, x1)
            ry2, rx2 = min(out_h, y1 + th), min(out_w, x1 + tw)
            if ry2 > ry1 and rx2 > rx1:
                sub_mask = transformed[ry1 - y1:ry2 - y1, rx1 - x1:rx2 - x1] > 0
                result[ry1:ry2, rx1:rx2][sub_mask] = color

    # Pad/crop to the original frame's dimensions if the grid doesn't match.
    orig_h, orig_w = frame.shape[:2]
    if result.shape[0] != orig_h or result.shape[1] != orig_w:
        padded = np.full((orig_h, orig_w, 3), bg, dtype=np.uint8)
        copy_h = min(out_h, orig_h)
        copy_w = min(out_w, orig_w)
        padded[:copy_h, :copy_w] = result[:copy_h, :copy_w]
        result = padded

    return result
def prim_make_char_grid(rows: int, cols: int, fill_char: str = " ") -> List[List[str]]:
    """Build a rows x cols character grid with every cell set to fill_char."""
    return [[fill_char] * cols for _ in range(rows)]


def prim_set_char(chars: List[List[str]], row: int, col: int, char: str) -> List[List[str]]:
    """Return a copy of the grid with (row, col) set to char; out of range is a no-op."""
    updated = [list(grid_row) for grid_row in chars]
    if 0 <= row < len(updated) and 0 <= col < len(updated[0]):
        updated[row][col] = char
    return updated


def prim_get_char(chars: List[List[str]], row: int, col: int) -> str:
    """Return the character at (row, col), or a space when out of range."""
    in_bounds = 0 <= row < len(chars) and 0 <= col < len(chars[0])
    return chars[row][col] if in_bounds else " "


def prim_char_grid_dimensions(chars: List[List[str]]) -> Tuple[int, int]:
    """Return (rows, cols) of a character grid; (0, 0) for an empty grid."""
    if not chars:
        return (0, 0)
    first_row = chars[0]
    return (len(chars), len(first_row) if first_row else 0)


def prim_alphabet_char(alphabet: str, index: int) -> str:
    """Get the character at index from a named or literal alphabet (wraps around)."""
    charset = CHAR_ALPHABETS.get(alphabet, alphabet)
    if not charset:
        return " "
    return charset[int(index) % len(charset)]


def prim_alphabet_length(alphabet: str) -> int:
    """Return the number of characters in a named or literal alphabet."""
    return len(CHAR_ALPHABETS.get(alphabet, alphabet))


def prim_map_char_grid(chars: List[List[str]], luminances: np.ndarray, fn: Callable) -> List[List[str]]:
    """
    Map a function over a character grid, producing a new grid.

    fn receives (row, col, char, luminance) and returns the replacement
    character; a falsy return becomes a space. Cells outside the luminance
    array get a luminance of 0. This allows per-cell character selection
    based on position, brightness, etc.

    Example:
        (map-char-grid chars luminances
          (lambda (r c ch lum)
            (if (> lum 128)
                (alphabet-char "blocks" (floor (/ lum 50)))
                ch)))
    """
    if not chars or not chars[0]:
        return chars

    n_rows = len(chars)
    n_cols = len(chars[0])
    mapped = []
    for r in range(n_rows):
        new_row = []
        for c in range(n_cols):
            current = chars[r][c]
            lum = 0
            if r < luminances.shape[0] and c < luminances.shape[1]:
                lum = float(luminances[r, c])
            replacement = fn(r, c, current, lum)
            new_row.append(str(replacement) if replacement else " ")
        mapped.append(new_row)
    return mapped
def prim_map_colors(colors: np.ndarray, fn: Callable) -> np.ndarray:
    """
    Map a function over a (rows, cols, 3) color grid.

    fn receives (row, col, color) where color is a list [r, g, b]; returning
    None keeps the original color, anything else replaces it (first 3 values).

    Returns: a new array; the input is not modified.
    """
    if colors.size == 0:
        return colors

    rows, cols = colors.shape[:2]
    result = colors.copy()
    for r in range(rows):
        for c in range(cols):
            new_color = fn(r, c, list(colors[r, c]))
            if new_color is not None:
                result[r, c] = new_color[:3]
    return result


# =============================================================================
# Glitch Art Primitives
# =============================================================================

def prim_pixelsort(img: np.ndarray, sort_by: str = "lightness",
                   threshold_low: float = 50, threshold_high: float = 200,
                   angle: float = 0, reverse: bool = False) -> np.ndarray:
    """
    Pixel sorting glitch effect.

    Contiguous runs of pixels whose sort key lies in
    [threshold_low, threshold_high] are sorted along each row.

    Args:
        img: source RGB uint8 image
        sort_by: "lightness", "hue", "saturation", "red", "green", "blue"
            (anything else falls back to lightness)
        threshold_low: pixels below this aren't sorted
        threshold_high: pixels above this aren't sorted
        angle: 0 = horizontal; 45-135 (mod 180) sorts vertically
        reverse: reverse sort order

    Returns: new contiguous image; the input is not modified.
    """
    # Transpose for vertical sorting so that rows are always the sort axis.
    if 45 <= (angle % 180) <= 135:
        frame = np.transpose(img, (1, 0, 2))
        rotated = True
    else:
        frame = img
        rotated = False
    h, w = frame.shape[:2]

    result = frame.copy()

    # Per-pixel sort key.
    if sort_by == "hue":
        sort_values = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)[:, :, 0].astype(np.float32)
    elif sort_by == "saturation":
        sort_values = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)[:, :, 1].astype(np.float32)
    elif sort_by == "red":
        sort_values = frame[:, :, 0].astype(np.float32)
    elif sort_by == "green":
        sort_values = frame[:, :, 1].astype(np.float32)
    elif sort_by == "blue":
        sort_values = frame[:, :, 2].astype(np.float32)
    else:  # "lightness" and unknown modes
        sort_values = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY).astype(np.float32)

    # Only pixels inside the threshold band participate.
    mask = (sort_values >= threshold_low) & (sort_values <= threshold_high)

    for y in range(h):
        row = result[y].copy()
        row_values = sort_values[y]

        # Vectorized run detection: transitions of the padded boolean mask
        # give the [start, end) bounds of each contiguous sortable segment
        # (replaces the original O(w) Python scan per row).
        padded = np.concatenate(([False], mask[y], [False]))
        edges = np.flatnonzero(padded[1:] != padded[:-1])

        for seg_start, seg_end in zip(edges[0::2], edges[1::2]):
            if seg_end - seg_start > 1:
                order = np.argsort(row_values[seg_start:seg_end])
                if reverse:
                    order = order[::-1]
                row[seg_start:seg_end] = row[seg_start:seg_end][order]

        result[y] = row

    # Rotate back.
    if rotated:
        result = np.transpose(result, (1, 0, 2))

    return np.ascontiguousarray(result)
def prim_datamosh(img: np.ndarray, prev_frame: np.ndarray,
                  block_size: int = 32, corruption: float = 0.3,
                  max_offset: int = 50, color_corrupt: bool = True) -> np.ndarray:
    """
    Datamosh/glitch block corruption effect.

    Walks the image in block_size tiles; each tile is corrupted with
    probability `corruption` by one of four randomly chosen strategies:
    shifted copy, previous-frame duplication, color-channel roll, or a
    swap with another random tile.

    Args:
        img: current frame
        prev_frame: previous frame (or None)
        block_size: size of corruption blocks (clamped to 8..128)
        corruption: probability 0-1 of corrupting each block
        max_offset: maximum pixel shift for the "shift" strategy
        color_corrupt: also apply color channel shifts

    Returns: new corrupted image; the input is not modified.
    """
    if corruption <= 0:
        return img.copy()

    tile = max(8, min(int(block_size), 128))
    h, w = img.shape[:2]
    out = img.copy()

    for ty in range(0, h, tile):
        for tx in range(0, w, tile):
            th = min(tile, h - ty)
            tw = min(tile, w - tx)

            if _rng.random() >= corruption:
                continue
            mode = _rng.randint(0, 3)

            if mode == 0 and max_offset > 0:
                # Shifted copy from a displaced source region.
                dx = _rng.randint(-max_offset, max_offset)
                dy = _rng.randint(-max_offset, max_offset)
                sx = max(0, min(tx + dx, w - tw))
                sy = max(0, min(ty + dy, h - th))
                out[ty:ty + th, tx:tx + tw] = img[sy:sy + th, sx:sx + tw]
            elif mode == 1 and prev_frame is not None:
                # Ghosting: duplicate the tile from the previous frame.
                if prev_frame.shape == img.shape:
                    out[ty:ty + th, tx:tx + tw] = prev_frame[ty:ty + th, tx:tx + tw]
            elif mode == 2 and color_corrupt:
                # Roll one color channel vertically inside the tile.
                patch = out[ty:ty + th, tx:tx + tw].copy()
                roll_by = _rng.randint(1, 3)
                chan = _rng.randint(0, 2)
                patch[:, :, chan] = np.roll(patch[:, :, chan], roll_by, axis=0)
                out[ty:ty + th, tx:tx + tw] = patch
            else:
                # Swap with another randomly chosen tile.
                ox = _rng.randint(0, max(0, w - tw))
                oy = _rng.randint(0, max(0, h - th))
                held = out[ty:ty + th, tx:tx + tw].copy()
                out[ty:ty + th, tx:tx + tw] = img[oy:oy + th, ox:ox + tw]
                out[oy:oy + th, ox:ox + tw] = held

    return out
def prim_ripple_displace(w: int, h: int, freq: float, amp: float, cx: float = None, cy: float = None,
                         decay: float = 0, phase: float = 0) -> Tuple[np.ndarray, np.ndarray]:
    """
    Build radial ripple displacement maps for use with remap.

    Args:
        w, h: map dimensions
        freq: ripple frequency
        amp: ripple amplitude in pixels
        cx, cy: ripple center (defaults to the image center)
        decay: how fast ripples decay with distance (0 = no decay)
        phase: phase offset of the sine wave

    Returns: (map_x, map_y) float32 arrays of shape (h, w)
    """
    w, h = int(w), int(h)
    center_x = w / 2 if cx is None else cx
    center_y = h / 2 if cy is None else cy

    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    dx = xs - center_x
    dy = ys - center_y
    dist = np.sqrt(dx * dx + dy * dy)
    extent = max(w, h)

    # Radial sine wave, optionally damped with distance.
    wave = np.sin(2 * np.pi * freq * dist / extent + phase) * amp
    if decay > 0:
        wave = wave * np.exp(-dist * decay / extent)

    # Unit radial direction; the center pixel (dist == 0) stays fixed.
    with np.errstate(divide='ignore', invalid='ignore'):
        ux = np.where(dist > 0, dx / dist, 0)
        uy = np.where(dist > 0, dy / dist, 0)

    map_x = (xs + wave * ux).astype(np.float32)
    map_y = (ys + wave * uy).astype(np.float32)
    return (map_x, map_y)
prim_rgb_to_hsv, + 'hsv->rgb': prim_hsv_to_rgb, + 'blend-color': prim_blend_color, + 'average-color': prim_average_color, + + # Vectorized bulk operations + 'color-matrix': prim_color_matrix, + 'adjust': prim_adjust, + 'mix-gray': prim_mix_gray, + 'invert-img': prim_invert_img, + 'add-noise': prim_add_noise, + 'quantize': prim_quantize, + 'shift-hsv': prim_shift_hsv, + + # Bulk operations + 'map-pixels': prim_map_pixels, + 'map-rows': prim_map_rows, + 'for-grid': prim_for_grid, + 'fold-pixels': prim_fold_pixels, + + # Filters + 'convolve': prim_convolve, + 'blur': prim_blur, + 'box-blur': prim_box_blur, + 'edges': prim_edges, + 'sobel': prim_sobel, + 'dilate': prim_dilate, + 'erode': prim_erode, + + # Geometry + 'translate': prim_translate, + 'rotate-img': prim_rotate, + 'scale-img': prim_scale, + 'flip-h': prim_flip_h, + 'flip-v': prim_flip_v, + 'remap': prim_remap, + 'make-coords': prim_make_coords, + + # Blending + 'blend-images': prim_blend_images, + 'blend-mode': prim_blend_mode, + 'mask': prim_mask, + + # Drawing + 'draw-char': prim_draw_char, + 'draw-text': prim_draw_text, + 'fill-rect': prim_fill_rect, + 'draw-line': prim_draw_line, + + # Math + 'sin': prim_sin, + 'cos': prim_cos, + 'tan': prim_tan, + 'atan2': prim_atan2, + 'sqrt': prim_sqrt, + 'pow': prim_pow, + 'abs': prim_abs, + 'floor': prim_floor, + 'ceil': prim_ceil, + 'round': prim_round, + 'min': prim_min, + 'max': prim_max, + 'clamp': prim_clamp, + 'lerp': prim_lerp, + 'mod': prim_mod, + 'random': prim_random, + 'randint': prim_randint, + 'gaussian': prim_gaussian, + 'assert': prim_assert, + 'pi': math.pi, + 'tau': math.tau, + + # Array + 'length': prim_length, + 'len': prim_length, # alias + 'nth': prim_nth, + 'first': prim_first, + 'rest': prim_rest, + 'take': prim_take, + 'drop': prim_drop, + 'cons': prim_cons, + 'append': prim_append, + 'reverse': prim_reverse, + 'range': prim_range, + 'roll': prim_roll, + 'list': prim_list, + + # Array math (vectorized operations on coordinate arrays) + 
'arr+': prim_arr_add, + 'arr-': prim_arr_sub, + 'arr*': prim_arr_mul, + 'arr/': prim_arr_div, + 'arr-mod': prim_arr_mod, + 'arr-sin': prim_arr_sin, + 'arr-cos': prim_arr_cos, + 'arr-tan': prim_arr_tan, + 'arr-sqrt': prim_arr_sqrt, + 'arr-pow': prim_arr_pow, + 'arr-abs': prim_arr_abs, + 'arr-neg': prim_arr_neg, + 'arr-exp': prim_arr_exp, + 'arr-atan2': prim_arr_atan2, + 'arr-min': prim_arr_min, + 'arr-max': prim_arr_max, + 'arr-clip': prim_arr_clip, + 'arr-where': prim_arr_where, + 'arr-floor': prim_arr_floor, + 'arr-lerp': prim_arr_lerp, + + # Coordinate transformations + 'polar-from-center': prim_polar_from_center, + 'cart-from-polar': prim_cart_from_polar, + 'normalize-coords': prim_normalize_coords, + 'coords-x': prim_coords_x, + 'coords-y': prim_coords_y, + 'make-coords-centered': prim_make_coords_centered, + + # Specialized distortion maps + 'wave-displace': prim_wave_displace, + 'swirl-displace': prim_swirl_displace, + 'fisheye-displace': prim_fisheye_displace, + 'kaleidoscope-displace': prim_kaleidoscope_displace, + 'ripple-displace': prim_ripple_displace, + + # Character/ASCII art + 'cell-sample': prim_cell_sample, + 'cell-sample-extended': cell_sample_extended, + 'luminance-to-chars': prim_luminance_to_chars, + 'render-char-grid': prim_render_char_grid, + 'render-char-grid-fx': prim_render_char_grid_fx, + 'ascii-fx-zone': prim_ascii_fx_zone, + 'make-char-grid': prim_make_char_grid, + 'set-char': prim_set_char, + 'get-char': prim_get_char, + 'char-grid-dimensions': prim_char_grid_dimensions, + 'alphabet-char': prim_alphabet_char, + 'alphabet-length': prim_alphabet_length, + 'map-char-grid': prim_map_char_grid, + 'map-colors': prim_map_colors, + + # Glitch art + 'pixelsort': prim_pixelsort, + 'datamosh': prim_datamosh, + +} diff --git a/sexp_effects/test_interpreter.py b/sexp_effects/test_interpreter.py new file mode 100644 index 0000000..550b21a --- /dev/null +++ b/sexp_effects/test_interpreter.py @@ -0,0 +1,236 @@ +#!/usr/bin/env python3 +""" +Test the 
S-expression effect interpreter. +""" + +import numpy as np +import sys +from pathlib import Path + +# Add parent to path +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from sexp_effects import ( + get_interpreter, + load_effects_dir, + run_effect, + list_effects, + parse, +) + + +def test_parser(): + """Test S-expression parser.""" + print("Testing parser...") + + # Simple expressions + assert parse("42") == 42 + assert parse("3.14") == 3.14 + assert parse('"hello"') == "hello" + assert parse("true") == True + + # Lists + assert parse("(+ 1 2)")[0].name == "+" + assert parse("(+ 1 2)")[1] == 1 + + # Nested + expr = parse("(define x (+ 1 2))") + assert expr[0].name == "define" + + print(" Parser OK") + + +def test_interpreter_basics(): + """Test basic interpreter operations.""" + print("Testing interpreter basics...") + + interp = get_interpreter() + + # Math + assert interp.eval(parse("(+ 1 2)")) == 3 + assert interp.eval(parse("(* 3 4)")) == 12 + assert interp.eval(parse("(- 10 3)")) == 7 + + # Comparison + assert interp.eval(parse("(< 1 2)")) == True + assert interp.eval(parse("(> 1 2)")) == False + + # Let binding + assert interp.eval(parse("(let ((x 5)) x)")) == 5 + assert interp.eval(parse("(let ((x 5) (y 3)) (+ x y))")) == 8 + + # Lambda + result = interp.eval(parse("((lambda (x) (* x 2)) 5)")) + assert result == 10 + + # If + assert interp.eval(parse("(if true 1 2)")) == 1 + assert interp.eval(parse("(if false 1 2)")) == 2 + + print(" Interpreter basics OK") + + +def test_primitives(): + """Test image primitives.""" + print("Testing primitives...") + + interp = get_interpreter() + + # Create test image + img = np.zeros((100, 100, 3), dtype=np.uint8) + img[50, 50] = [255, 128, 64] + + interp.global_env.set('test_img', img) + + # Width/height + assert interp.eval(parse("(width test_img)")) == 100 + assert interp.eval(parse("(height test_img)")) == 100 + + # Pixel + pixel = interp.eval(parse("(pixel test_img 50 50)")) + assert pixel == [255, 128, 
64] + + # RGB + color = interp.eval(parse("(rgb 100 150 200)")) + assert color == [100, 150, 200] + + # Luminance + lum = interp.eval(parse("(luminance (rgb 100 100 100))")) + assert abs(lum - 100) < 1 + + print(" Primitives OK") + + +def test_effect_loading(): + """Test loading effects from .sexp files.""" + print("Testing effect loading...") + + # Load all effects + effects_dir = Path(__file__).parent / "effects" + load_effects_dir(str(effects_dir)) + + effects = list_effects() + print(f" Loaded {len(effects)} effects: {', '.join(sorted(effects))}") + + assert len(effects) > 0 + print(" Effect loading OK") + + +def test_effect_execution(): + """Test running effects on images.""" + print("Testing effect execution...") + + # Create test image + img = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8) + + # Load effects + effects_dir = Path(__file__).parent / "effects" + load_effects_dir(str(effects_dir)) + + # Test each effect + effects = list_effects() + passed = 0 + failed = [] + + for name in sorted(effects): + try: + result, state = run_effect(name, img.copy(), {'_time': 0.5}, {}) + assert isinstance(result, np.ndarray) + assert result.shape == img.shape + passed += 1 + print(f" {name}: OK") + except Exception as e: + failed.append((name, str(e))) + print(f" {name}: FAILED - {e}") + + print(f" Passed: {passed}/{len(effects)}") + if failed: + print(f" Failed: {[f[0] for f in failed]}") + + return passed, failed + + +def test_ascii_fx_zone(): + """Test ascii_fx_zone effect with zone expressions.""" + print("Testing ascii_fx_zone...") + + interp = get_interpreter() + + # Load the effect + effects_dir = Path(__file__).parent / "effects" + load_effects_dir(str(effects_dir)) + + # Create gradient test frame + frame = np.zeros((120, 160, 3), dtype=np.uint8) + for x in range(160): + frame[:, x] = int(x / 160 * 255) + frame = np.stack([frame[:,:,0]]*3, axis=2) + + # Test 1: Basic without expressions + result, _ = run_effect('ascii_fx_zone', frame, {'cols': 20}, 
{}) + assert result.shape == frame.shape + print(" Basic run: OK") + + # Test 2: With zone-lum expression + expr = parse('(* zone-lum 180)') + result, _ = run_effect('ascii_fx_zone', frame, { + 'cols': 20, + 'char_hue': expr + }, {}) + assert result.shape == frame.shape + print(" Zone-lum expression: OK") + + # Test 3: With multiple expressions + scale_expr = parse('(+ 0.5 (* zone-lum 0.5))') + rot_expr = parse('(* zone-row-norm 30)') + result, _ = run_effect('ascii_fx_zone', frame, { + 'cols': 20, + 'char_scale': scale_expr, + 'char_rotation': rot_expr + }, {}) + assert result.shape == frame.shape + print(" Multiple expressions: OK") + + # Test 4: With numeric literals + result, _ = run_effect('ascii_fx_zone', frame, { + 'cols': 20, + 'char_hue': 90, + 'char_scale': 1.2 + }, {}) + assert result.shape == frame.shape + print(" Numeric literals: OK") + + # Test 5: Zone position expressions + col_expr = parse('(* zone-col-norm 360)') + result, _ = run_effect('ascii_fx_zone', frame, { + 'cols': 20, + 'char_hue': col_expr + }, {}) + assert result.shape == frame.shape + print(" Zone position expression: OK") + + print(" ascii_fx_zone OK") + + +def main(): + print("=" * 60) + print("S-Expression Effect Interpreter Tests") + print("=" * 60) + + test_parser() + test_interpreter_basics() + test_primitives() + test_effect_loading() + test_ascii_fx_zone() + passed, failed = test_effect_execution() + + print("=" * 60) + if not failed: + print("All tests passed!") + else: + print(f"Tests completed with {len(failed)} failures") + print("=" * 60) + + +if __name__ == "__main__": + main() diff --git a/streaming/__init__.py b/streaming/__init__.py new file mode 100644 index 0000000..2c007cc --- /dev/null +++ b/streaming/__init__.py @@ -0,0 +1,44 @@ +""" +Streaming video compositor for real-time effect processing. 
+ +This module provides a frame-by-frame streaming architecture that: +- Reads from multiple video sources with automatic looping +- Applies effects inline (no intermediate files) +- Composites layers with time-varying weights +- Outputs to display, file, or stream + +Usage: + from streaming import StreamingCompositor, VideoSource, AudioAnalyzer + + compositor = StreamingCompositor( + sources=["video1.mp4", "video2.mp4"], + effects_per_source=[...], + compositor_config={...}, + ) + + # With live audio + audio = AudioAnalyzer(device=0) + compositor.run(output="output.mp4", duration=60, audio=audio) + + # With preview window + compositor.run(output="preview", duration=60) + +Backends: + - numpy: Works everywhere, ~3-5 fps (default) + - glsl: Requires GPU, 30+ fps real-time (future) +""" + +from .sources import VideoSource, ImageSource +from .compositor import StreamingCompositor +from .backends import NumpyBackend, get_backend +from .output import DisplayOutput, FileOutput + +__all__ = [ + "StreamingCompositor", + "VideoSource", + "ImageSource", + "NumpyBackend", + "get_backend", + "DisplayOutput", + "FileOutput", +] diff --git a/streaming/audio.py b/streaming/audio.py new file mode 100644 index 0000000..9d20937 --- /dev/null +++ b/streaming/audio.py @@ -0,0 +1,486 @@ +""" +Live audio analysis for reactive effects. + +Provides real-time audio features: +- Energy (RMS amplitude) +- Beat detection +- Frequency bands (bass, mid, high) +""" + +import numpy as np +from typing import Optional +import threading +import time + + +class AudioAnalyzer: + """ + Real-time audio analyzer using sounddevice. + + Captures audio from microphone/line-in and computes + features in real-time for effect parameter bindings. 
+ + Example: + analyzer = AudioAnalyzer(device=0) + analyzer.start() + + # In compositor loop: + energy = analyzer.get_energy() + beat = analyzer.get_beat() + + analyzer.stop() + """ + + def __init__( + self, + device: int = None, + sample_rate: int = 44100, + block_size: int = 1024, + buffer_seconds: float = 0.5, + ): + """ + Initialize audio analyzer. + + Args: + device: Audio input device index (None = default) + sample_rate: Audio sample rate + block_size: Samples per block + buffer_seconds: Ring buffer duration + """ + self.sample_rate = sample_rate + self.block_size = block_size + self.device = device + + # Ring buffer for recent audio + buffer_size = int(sample_rate * buffer_seconds) + self._buffer = np.zeros(buffer_size, dtype=np.float32) + self._buffer_pos = 0 + self._lock = threading.Lock() + + # Beat detection state + self._last_energy = 0 + self._energy_history = [] + self._last_beat_time = 0 + self._beat_threshold = 1.5 # Energy ratio for beat detection + self._min_beat_interval = 0.1 # Min seconds between beats + + # Stream state + self._stream = None + self._running = False + + def _audio_callback(self, indata, frames, time_info, status): + """Called by sounddevice for each audio block.""" + with self._lock: + # Add to ring buffer + data = indata[:, 0] if len(indata.shape) > 1 else indata + n = len(data) + if self._buffer_pos + n <= len(self._buffer): + self._buffer[self._buffer_pos:self._buffer_pos + n] = data + else: + # Wrap around + first = len(self._buffer) - self._buffer_pos + self._buffer[self._buffer_pos:] = data[:first] + self._buffer[:n - first] = data[first:] + self._buffer_pos = (self._buffer_pos + n) % len(self._buffer) + + def start(self): + """Start audio capture.""" + try: + import sounddevice as sd + except ImportError: + print("Warning: sounddevice not installed. 
Audio analysis disabled.") + print("Install with: pip install sounddevice") + return + + self._stream = sd.InputStream( + device=self.device, + channels=1, + samplerate=self.sample_rate, + blocksize=self.block_size, + callback=self._audio_callback, + ) + self._stream.start() + self._running = True + + def stop(self): + """Stop audio capture.""" + if self._stream: + self._stream.stop() + self._stream.close() + self._stream = None + self._running = False + + def get_energy(self) -> float: + """ + Get current audio energy (RMS amplitude). + + Returns: + Energy value normalized to 0-1 range (approximately) + """ + with self._lock: + # Use recent samples + recent = 2048 + if self._buffer_pos >= recent: + data = self._buffer[self._buffer_pos - recent:self._buffer_pos] + else: + data = np.concatenate([ + self._buffer[-(recent - self._buffer_pos):], + self._buffer[:self._buffer_pos] + ]) + + # RMS energy + rms = np.sqrt(np.mean(data ** 2)) + + # Normalize (typical mic input is quite low) + normalized = min(1.0, rms * 10) + + return normalized + + def get_beat(self) -> bool: + """ + Detect if current moment is a beat. + + Simple onset detection based on energy spikes. 
+ + Returns: + True if beat detected, False otherwise + """ + current_energy = self.get_energy() + now = time.time() + + # Update energy history + self._energy_history.append(current_energy) + if len(self._energy_history) > 20: + self._energy_history.pop(0) + + # Need enough history + if len(self._energy_history) < 5: + self._last_energy = current_energy + return False + + # Average recent energy + avg_energy = np.mean(self._energy_history[:-1]) + + # Beat if current energy is significantly above average + is_beat = ( + current_energy > avg_energy * self._beat_threshold and + now - self._last_beat_time > self._min_beat_interval and + current_energy > self._last_energy # Rising edge + ) + + if is_beat: + self._last_beat_time = now + + self._last_energy = current_energy + return is_beat + + def get_spectrum(self, bands: int = 3) -> np.ndarray: + """ + Get frequency spectrum divided into bands. + + Args: + bands: Number of frequency bands (default 3: bass, mid, high) + + Returns: + Array of band energies, normalized to 0-1 + """ + with self._lock: + # Use recent samples for FFT + n = 2048 + if self._buffer_pos >= n: + data = self._buffer[self._buffer_pos - n:self._buffer_pos] + else: + data = np.concatenate([ + self._buffer[-(n - self._buffer_pos):], + self._buffer[:self._buffer_pos] + ]) + + # FFT + fft = np.abs(np.fft.rfft(data * np.hanning(len(data)))) + + # Divide into bands + band_size = len(fft) // bands + result = np.zeros(bands) + for i in range(bands): + start = i * band_size + end = start + band_size + result[i] = np.mean(fft[start:end]) + + # Normalize + max_val = np.max(result) + if max_val > 0: + result = result / max_val + + return result + + @property + def is_running(self) -> bool: + return self._running + + def __enter__(self): + self.start() + return self + + def __exit__(self, *args): + self.stop() + + +class FileAudioAnalyzer: + """ + Audio analyzer that reads from a file (for testing/development). 
+ + Pre-computes analysis and plays back in sync with video. + """ + + def __init__(self, path: str, analysis_data: dict = None): + """ + Initialize from audio file. + + Args: + path: Path to audio file + analysis_data: Pre-computed analysis (times, values, etc.) + """ + self.path = path + self.analysis_data = analysis_data or {} + self._current_time = 0 + + def set_time(self, t: float): + """Set current playback time.""" + self._current_time = t + + def get_energy(self) -> float: + """Get energy at current time from pre-computed data.""" + track = self.analysis_data.get("energy", {}) + return self._interpolate(track, self._current_time) + + def get_beat(self) -> bool: + """Check if current time is near a beat.""" + track = self.analysis_data.get("beats", {}) + times = track.get("times", []) + + # Check if we're within 50ms of a beat + for beat_time in times: + if abs(beat_time - self._current_time) < 0.05: + return True + return False + + def _interpolate(self, track: dict, t: float) -> float: + """Interpolate value at time t.""" + times = track.get("times", []) + values = track.get("values", []) + + if not times or not values: + return 0.0 + + if t <= times[0]: + return values[0] + if t >= times[-1]: + return values[-1] + + # Find bracket and interpolate + for i in range(len(times) - 1): + if times[i] <= t <= times[i + 1]: + alpha = (t - times[i]) / (times[i + 1] - times[i]) + return values[i] * (1 - alpha) + values[i + 1] * alpha + + return values[-1] + + @property + def is_running(self) -> bool: + return True + + +class StreamingAudioAnalyzer: + """ + Real-time audio analyzer that streams from a file. + + Reads audio in sync with video time and computes features on-the-fly. + No pre-computation needed - analysis happens as frames are processed. + """ + + def __init__(self, path: str, sample_rate: int = 22050, hop_length: int = 512): + """ + Initialize streaming audio analyzer. 
+ + Args: + path: Path to audio file + sample_rate: Sample rate for analysis + hop_length: Hop length for feature extraction + """ + import subprocess + import json + + self.path = path + self.sample_rate = sample_rate + self.hop_length = hop_length + self._current_time = 0.0 + + # Get audio duration + cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", + "-show_format", str(path)] + result = subprocess.run(cmd, capture_output=True, text=True) + info = json.loads(result.stdout) + self.duration = float(info["format"]["duration"]) + + # Audio buffer and state + self._audio_data = None + self._energy_history = [] + self._last_energy = 0 + self._last_beat_time = -1 + self._beat_threshold = 1.5 + self._min_beat_interval = 0.15 + + # Load audio lazily + self._loaded = False + + def _load_audio(self): + """Load audio data on first use.""" + if self._loaded: + return + + import subprocess + + # Use ffmpeg to decode audio to raw PCM + cmd = [ + "ffmpeg", "-v", "quiet", + "-i", str(self.path), + "-f", "f32le", # 32-bit float, little-endian + "-ac", "1", # mono + "-ar", str(self.sample_rate), + "-" + ] + result = subprocess.run(cmd, capture_output=True) + self._audio_data = np.frombuffer(result.stdout, dtype=np.float32) + self._loaded = True + + def set_time(self, t: float): + """Set current playback time.""" + self._current_time = t + + def get_energy(self) -> float: + """Compute energy at current time.""" + self._load_audio() + + if self._audio_data is None or len(self._audio_data) == 0: + return 0.0 + + # Get sample index for current time + sample_idx = int(self._current_time * self.sample_rate) + window_size = self.hop_length * 2 + + start = max(0, sample_idx - window_size // 2) + end = min(len(self._audio_data), sample_idx + window_size // 2) + + if start >= end: + return 0.0 + + # RMS energy + chunk = self._audio_data[start:end] + rms = np.sqrt(np.mean(chunk ** 2)) + + # Normalize to 0-1 range (approximate) + energy = min(1.0, rms * 3.0) + + self._last_energy = 
energy + return energy + + def get_beat(self) -> bool: + """Detect beat using spectral flux (change in frequency content).""" + self._load_audio() + + if self._audio_data is None or len(self._audio_data) == 0: + return False + + # Get audio chunks for current and previous frame + sample_idx = int(self._current_time * self.sample_rate) + chunk_size = self.hop_length * 2 + + # Current chunk + start = max(0, sample_idx - chunk_size // 2) + end = min(len(self._audio_data), sample_idx + chunk_size // 2) + if end - start < chunk_size // 2: + return False + current_chunk = self._audio_data[start:end] + + # Previous chunk (one hop back) + prev_start = max(0, start - self.hop_length) + prev_end = max(0, end - self.hop_length) + if prev_end <= prev_start: + return False + prev_chunk = self._audio_data[prev_start:prev_end] + + # Compute spectra + current_spec = np.abs(np.fft.rfft(current_chunk * np.hanning(len(current_chunk)))) + prev_spec = np.abs(np.fft.rfft(prev_chunk * np.hanning(len(prev_chunk)))) + + # Spectral flux: sum of positive differences (onset = new frequencies appearing) + min_len = min(len(current_spec), len(prev_spec)) + diff = current_spec[:min_len] - prev_spec[:min_len] + flux = np.sum(np.maximum(0, diff)) # Only count increases + + # Normalize by spectrum size + flux = flux / (min_len + 1) + + # Update flux history + self._energy_history.append((self._current_time, flux)) + while self._energy_history and self._energy_history[0][0] < self._current_time - 1.5: + self._energy_history.pop(0) + + if len(self._energy_history) < 3: + return False + + # Adaptive threshold based on recent flux values + flux_values = [f for t, f in self._energy_history] + mean_flux = np.mean(flux_values) + std_flux = np.std(flux_values) + 0.001 # Avoid division by zero + + # Beat if flux is above mean (more sensitive threshold) + threshold = mean_flux + std_flux * 0.3 # Lower = more sensitive + min_interval = 0.1 # Allow up to 600 BPM + time_ok = self._current_time - 
self._last_beat_time > min_interval + + is_beat = flux > threshold and time_ok + + if is_beat: + self._last_beat_time = self._current_time + + return is_beat + + def get_spectrum(self, bands: int = 3) -> np.ndarray: + """Get frequency spectrum at current time.""" + self._load_audio() + + if self._audio_data is None or len(self._audio_data) == 0: + return np.zeros(bands) + + sample_idx = int(self._current_time * self.sample_rate) + n = 2048 + + start = max(0, sample_idx - n // 2) + end = min(len(self._audio_data), sample_idx + n // 2) + + if end - start < n // 2: + return np.zeros(bands) + + chunk = self._audio_data[start:end] + + # FFT + fft = np.abs(np.fft.rfft(chunk * np.hanning(len(chunk)))) + + # Divide into bands + band_size = len(fft) // bands + result = np.zeros(bands) + for i in range(bands): + s, e = i * band_size, (i + 1) * band_size + result[i] = np.mean(fft[s:e]) + + # Normalize + max_val = np.max(result) + if max_val > 0: + result = result / max_val + + return result + + @property + def is_running(self) -> bool: + return True diff --git a/streaming/backends.py b/streaming/backends.py new file mode 100644 index 0000000..bc695d6 --- /dev/null +++ b/streaming/backends.py @@ -0,0 +1,308 @@ +""" +Effect processing backends. + +Provides abstraction over different rendering backends: +- numpy: CPU-based, works everywhere, ~3-5 fps +- glsl: GPU-based, requires OpenGL, 30+ fps (future) +""" + +import numpy as np +from abc import ABC, abstractmethod +from typing import List, Dict, Any, Optional +from pathlib import Path + + +class Backend(ABC): + """Abstract base class for effect processing backends.""" + + @abstractmethod + def process_frame( + self, + frames: List[np.ndarray], + effects_per_frame: List[List[Dict]], + compositor_config: Dict, + t: float, + analysis_data: Dict, + ) -> np.ndarray: + """ + Process multiple input frames through effects and composite. 
+ + Args: + frames: List of input frames (one per source) + effects_per_frame: List of effect chains (one per source) + compositor_config: How to blend the layers + t: Current time in seconds + analysis_data: Analysis data for binding resolution + + Returns: + Composited output frame + """ + pass + + @abstractmethod + def load_effect(self, effect_path: Path) -> Any: + """Load an effect definition.""" + pass + + +class NumpyBackend(Backend): + """ + CPU-based effect processing using NumPy. + + Uses existing sexp_effects interpreter for effect execution. + Works on any system, but limited to ~3-5 fps for complex effects. + """ + + def __init__(self, recipe_dir: Path = None, minimal_primitives: bool = True): + self.recipe_dir = recipe_dir or Path(".") + self.minimal_primitives = minimal_primitives + self._interpreter = None + self._loaded_effects = {} + + def _get_interpreter(self): + """Lazy-load the sexp interpreter.""" + if self._interpreter is None: + from sexp_effects import get_interpreter + self._interpreter = get_interpreter(minimal_primitives=self.minimal_primitives) + return self._interpreter + + def load_effect(self, effect_path: Path) -> Any: + """Load an effect from sexp file.""" + effect_key = str(effect_path) + if effect_key not in self._loaded_effects: + interp = self._get_interpreter() + interp.load_effect(str(effect_path)) + self._loaded_effects[effect_key] = effect_path.stem + return self._loaded_effects[effect_key] + + def _resolve_binding(self, value: Any, t: float, analysis_data: Dict) -> Any: + """Resolve a parameter binding to its value at time t.""" + if not isinstance(value, dict): + return value + + if "_binding" in value or "_bind" in value: + source = value.get("source") or value.get("_bind") + feature = value.get("feature", "values") + range_map = value.get("range") + + track = analysis_data.get(source, {}) + times = track.get("times", []) + values = track.get("values", []) + + if not times or not values: + return 0.0 + + # Find value at 
time t (linear interpolation) + if t <= times[0]: + val = values[0] + elif t >= times[-1]: + val = values[-1] + else: + # Binary search for bracket + for i in range(len(times) - 1): + if times[i] <= t <= times[i + 1]: + alpha = (t - times[i]) / (times[i + 1] - times[i]) + val = values[i] * (1 - alpha) + values[i + 1] * alpha + break + else: + val = values[-1] + + # Apply range mapping + if range_map and len(range_map) == 2: + val = range_map[0] + val * (range_map[1] - range_map[0]) + + return val + + return value + + def _apply_effect( + self, + frame: np.ndarray, + effect_name: str, + params: Dict, + t: float, + analysis_data: Dict, + ) -> np.ndarray: + """Apply a single effect to a frame.""" + # Resolve bindings in params + resolved_params = {"_time": t} + for key, value in params.items(): + if key in ("effect", "effect_path", "cid", "analysis_refs"): + continue + resolved_params[key] = self._resolve_binding(value, t, analysis_data) + + # Try fast native effects first + result = self._apply_native_effect(frame, effect_name, resolved_params) + if result is not None: + return result + + # Fall back to sexp interpreter for complex effects + interp = self._get_interpreter() + if effect_name in interp.effects: + result, _ = interp.run_effect(effect_name, frame, resolved_params, {}) + return result + + # Unknown effect - pass through + return frame + + def _apply_native_effect( + self, + frame: np.ndarray, + effect_name: str, + params: Dict, + ) -> Optional[np.ndarray]: + """Fast native numpy effects for real-time streaming.""" + import cv2 + + if effect_name == "zoom": + amount = float(params.get("amount", 1.0)) + if abs(amount - 1.0) < 0.01: + return frame + h, w = frame.shape[:2] + # Crop center and resize + new_w, new_h = int(w / amount), int(h / amount) + x1, y1 = (w - new_w) // 2, (h - new_h) // 2 + cropped = frame[y1:y1+new_h, x1:x1+new_w] + return cv2.resize(cropped, (w, h)) + + elif effect_name == "rotate": + angle = float(params.get("angle", 0)) + if 
abs(angle) < 0.5: + return frame + h, w = frame.shape[:2] + center = (w // 2, h // 2) + matrix = cv2.getRotationMatrix2D(center, angle, 1.0) + return cv2.warpAffine(frame, matrix, (w, h)) + + elif effect_name == "brightness": + amount = float(params.get("amount", 1.0)) + return np.clip(frame * amount, 0, 255).astype(np.uint8) + + elif effect_name == "invert": + amount = float(params.get("amount", 1.0)) + if amount < 0.5: + return frame + return 255 - frame + + # Not a native effect + return None + + def process_frame( + self, + frames: List[np.ndarray], + effects_per_frame: List[List[Dict]], + compositor_config: Dict, + t: float, + analysis_data: Dict, + ) -> np.ndarray: + """ + Process frames through effects and composite. + """ + if not frames: + return np.zeros((720, 1280, 3), dtype=np.uint8) + + processed = [] + + # Apply effects to each input frame + for i, (frame, effects) in enumerate(zip(frames, effects_per_frame)): + result = frame.copy() + for effect_config in effects: + effect_name = effect_config.get("effect", "") + if effect_name: + result = self._apply_effect( + result, effect_name, effect_config, t, analysis_data + ) + processed.append(result) + + # Composite layers + if len(processed) == 1: + return processed[0] + + return self._composite(processed, compositor_config, t, analysis_data) + + def _composite( + self, + frames: List[np.ndarray], + config: Dict, + t: float, + analysis_data: Dict, + ) -> np.ndarray: + """Composite multiple frames into one.""" + mode = config.get("mode", "alpha") + weights = config.get("weights", [1.0 / len(frames)] * len(frames)) + + # Resolve weight bindings + resolved_weights = [] + for w in weights: + resolved_weights.append(self._resolve_binding(w, t, analysis_data)) + + # Normalize weights + total = sum(resolved_weights) + if total > 0: + resolved_weights = [w / total for w in resolved_weights] + else: + resolved_weights = [1.0 / len(frames)] * len(frames) + + # Resize frames to match first frame + target_h, target_w 
= frames[0].shape[:2] + resized = [] + for frame in frames: + if frame.shape[:2] != (target_h, target_w): + import cv2 + frame = cv2.resize(frame, (target_w, target_h)) + resized.append(frame.astype(np.float32)) + + # Weighted blend + result = np.zeros_like(resized[0]) + for frame, weight in zip(resized, resolved_weights): + result += frame * weight + + return np.clip(result, 0, 255).astype(np.uint8) + + +class GLSLBackend(Backend): + """ + GPU-based effect processing using OpenGL/GLSL. + + Requires GPU with OpenGL 3.3+ support (or Mesa software renderer). + Achieves 30+ fps real-time processing. + + TODO: Implement when ready for GPU acceleration. + """ + + def __init__(self): + raise NotImplementedError( + "GLSL backend not yet implemented. Use NumpyBackend for now." + ) + + def load_effect(self, effect_path: Path) -> Any: + pass + + def process_frame( + self, + frames: List[np.ndarray], + effects_per_frame: List[List[Dict]], + compositor_config: Dict, + t: float, + analysis_data: Dict, + ) -> np.ndarray: + pass + + +def get_backend(name: str = "numpy", **kwargs) -> Backend: + """ + Get a backend by name. + + Args: + name: "numpy" or "glsl" + **kwargs: Backend-specific options + + Returns: + Backend instance + """ + if name == "numpy": + return NumpyBackend(**kwargs) + elif name == "glsl": + return GLSLBackend(**kwargs) + else: + raise ValueError(f"Unknown backend: {name}") diff --git a/streaming/compositor.py b/streaming/compositor.py new file mode 100644 index 0000000..477128f --- /dev/null +++ b/streaming/compositor.py @@ -0,0 +1,595 @@ +""" +Streaming video compositor. + +Main entry point for the streaming pipeline. 
Combines: +- Multiple video sources (with looping) +- Per-source effect chains +- Layer compositing +- Optional live audio analysis +- Output to display/file/stream +""" + +import time +import sys +import numpy as np +from typing import List, Dict, Any, Optional, Union +from pathlib import Path + +from .sources import Source, VideoSource +from .backends import Backend, NumpyBackend, get_backend +from .output import Output, DisplayOutput, FileOutput, MultiOutput + + +class StreamingCompositor: + """ + Real-time streaming video compositor. + + Reads frames from multiple sources, applies effects, composites layers, + and outputs the result - all frame-by-frame without intermediate files. + + Example: + compositor = StreamingCompositor( + sources=["video1.mp4", "video2.mp4"], + effects_per_source=[ + [{"effect": "rotate", "angle": 45}], + [{"effect": "zoom", "amount": 1.5}], + ], + compositor_config={"mode": "alpha", "weights": [0.5, 0.5]}, + ) + compositor.run(output="preview", duration=60) + """ + + def __init__( + self, + sources: List[Union[str, Source]], + effects_per_source: List[List[Dict]] = None, + compositor_config: Dict = None, + analysis_data: Dict = None, + backend: str = "numpy", + recipe_dir: Path = None, + fps: float = 30, + audio_source: str = None, + ): + """ + Initialize the streaming compositor. 
+ + Args: + sources: List of video paths or Source objects + effects_per_source: List of effect chains, one per source + compositor_config: How to blend layers (mode, weights) + analysis_data: Pre-computed analysis data for bindings + backend: "numpy" or "glsl" + recipe_dir: Directory for resolving relative effect paths + fps: Output frame rate + audio_source: Path to audio file for streaming analysis + """ + self.fps = fps + self.recipe_dir = recipe_dir or Path(".") + self.analysis_data = analysis_data or {} + + # Initialize streaming audio analyzer if audio source provided + self._audio_analyzer = None + self._audio_source = audio_source + if audio_source: + from .audio import StreamingAudioAnalyzer + self._audio_analyzer = StreamingAudioAnalyzer(audio_source) + print(f"Streaming audio: {audio_source}", file=sys.stderr) + + # Initialize sources + self.sources: List[Source] = [] + for src in sources: + if isinstance(src, Source): + self.sources.append(src) + elif isinstance(src, (str, Path)): + self.sources.append(VideoSource(str(src), target_fps=fps)) + else: + raise ValueError(f"Unknown source type: {type(src)}") + + # Effect chains (default: no effects) + self.effects_per_source = effects_per_source or [[] for _ in self.sources] + if len(self.effects_per_source) != len(self.sources): + raise ValueError( + f"effects_per_source length ({len(self.effects_per_source)}) " + f"must match sources length ({len(self.sources)})" + ) + + # Compositor config (default: equal blend) + self.compositor_config = compositor_config or { + "mode": "alpha", + "weights": [1.0 / len(self.sources)] * len(self.sources), + } + + # Initialize backend + self.backend: Backend = get_backend( + backend, + recipe_dir=self.recipe_dir, + ) + + # Load effects + self._load_effects() + + def _load_effects(self): + """Pre-load all effect definitions.""" + for effects in self.effects_per_source: + for effect_config in effects: + effect_path = effect_config.get("effect_path") + if effect_path: + 
full_path = self.recipe_dir / effect_path + if full_path.exists(): + self.backend.load_effect(full_path) + + def _create_output( + self, + output: Union[str, Output], + size: tuple, + ) -> Output: + """Create output target from string or Output object.""" + if isinstance(output, Output): + return output + + if output == "preview": + return DisplayOutput("Streaming Preview", size, + audio_source=self._audio_source, fps=self.fps) + elif output == "null": + from .output import NullOutput + return NullOutput() + elif isinstance(output, str): + return FileOutput(output, size, fps=self.fps, audio_source=self._audio_source) + else: + raise ValueError(f"Unknown output type: {output}") + + def run( + self, + output: Union[str, Output] = "preview", + duration: float = None, + audio_analyzer=None, + show_fps: bool = True, + recipe_executor=None, + ): + """ + Run the streaming compositor. + + Args: + output: Output target - "preview", filename, or Output object + duration: Duration in seconds (None = run until quit) + audio_analyzer: Optional AudioAnalyzer for live audio reactivity + show_fps: Show FPS counter in console + recipe_executor: Optional StreamingRecipeExecutor for full recipe logic + """ + # Determine output size from first source + output_size = self.sources[0].size + + # Create output + out = self._create_output(output, output_size) + + # Determine duration + if duration is None: + # Run until stopped (or min source duration if not looping) + duration = min(s.duration for s in self.sources) + if duration == float('inf'): + duration = 3600 # 1 hour max for live sources + + total_frames = int(duration * self.fps) + frame_time = 1.0 / self.fps + + print(f"Streaming: {len(self.sources)} sources -> {output}", file=sys.stderr) + print(f"Duration: {duration:.1f}s, {total_frames} frames @ {self.fps}fps", file=sys.stderr) + print(f"Output size: {output_size[0]}x{output_size[1]}", file=sys.stderr) + print(f"Press 'q' to quit (if preview)", file=sys.stderr) + + # Frame loop 
+ start_time = time.time() + frame_count = 0 + fps_update_interval = 30 # Update FPS display every N frames + last_fps_time = start_time + last_fps_count = 0 + + try: + for frame_num in range(total_frames): + if not out.is_open: + print(f"\nOutput closed at frame {frame_num}", file=sys.stderr) + break + + t = frame_num * frame_time + + try: + # Update analysis data from streaming audio (file-based) + energy = 0.0 + is_beat = False + if self._audio_analyzer: + self._update_from_audio(self._audio_analyzer, t) + energy = self.analysis_data.get("live_energy", {}).get("values", [0])[0] + is_beat = self.analysis_data.get("live_beat", {}).get("values", [0])[0] > 0.5 + elif audio_analyzer: + self._update_from_audio(audio_analyzer, t) + energy = self.analysis_data.get("live_energy", {}).get("values", [0])[0] + is_beat = self.analysis_data.get("live_beat", {}).get("values", [0])[0] > 0.5 + + # Read frames from all sources + frames = [src.read_frame(t) for src in self.sources] + + # Process through recipe executor if provided + if recipe_executor: + result = self._process_with_executor( + frames, recipe_executor, energy, is_beat, t + ) + else: + # Simple backend processing + result = self.backend.process_frame( + frames, + self.effects_per_source, + self.compositor_config, + t, + self.analysis_data, + ) + + # Output + out.write(result, t) + frame_count += 1 + + # FPS display + if show_fps and frame_count % fps_update_interval == 0: + now = time.time() + elapsed = now - last_fps_time + if elapsed > 0: + current_fps = (frame_count - last_fps_count) / elapsed + progress = frame_num / total_frames * 100 + print( + f"\r {progress:5.1f}% | {current_fps:5.1f} fps | " + f"frame {frame_num}/{total_frames}", + end="", file=sys.stderr + ) + last_fps_time = now + last_fps_count = frame_count + + except Exception as e: + print(f"\nError at frame {frame_num}, t={t:.1f}s: {e}", file=sys.stderr) + import traceback + traceback.print_exc() + break + + except KeyboardInterrupt: + 
print("\nInterrupted", file=sys.stderr) + finally: + out.close() + for src in self.sources: + if hasattr(src, 'close'): + src.close() + + # Final stats + elapsed = time.time() - start_time + avg_fps = frame_count / elapsed if elapsed > 0 else 0 + print(f"\nCompleted: {frame_count} frames in {elapsed:.1f}s ({avg_fps:.1f} fps avg)", file=sys.stderr) + + def _process_with_executor( + self, + frames: List[np.ndarray], + executor, + energy: float, + is_beat: bool, + t: float, + ) -> np.ndarray: + """ + Process frames using the recipe executor for full pipeline. + + Implements: + 1. process-pair: two clips per source with effects, blended + 2. cycle-crossfade: dynamic composition with zoom and weights + 3. Final effects: whole-spin, ripple + """ + import cv2 + + # Target size from first source + target_h, target_w = frames[0].shape[:2] + + # Resize all frames to target size (letterbox to preserve aspect ratio) + resized_frames = [] + for frame in frames: + fh, fw = frame.shape[:2] + if (fh, fw) != (target_h, target_w): + # Calculate scale to fit while preserving aspect ratio + scale = min(target_w / fw, target_h / fh) + new_w, new_h = int(fw * scale), int(fh * scale) + resized = cv2.resize(frame, (new_w, new_h)) + # Center on black canvas + canvas = np.zeros((target_h, target_w, 3), dtype=np.uint8) + x_off = (target_w - new_w) // 2 + y_off = (target_h - new_h) // 2 + canvas[y_off:y_off+new_h, x_off:x_off+new_w] = resized + resized_frames.append(canvas) + else: + resized_frames.append(frame) + frames = resized_frames + + # Update executor state + executor.on_frame(energy, is_beat, t) + + # Get weights to know which sources are active + weights = executor.get_cycle_weights() + + # Process each source as a "pair" (clip A and B with different effects) + processed_pairs = [] + + for i, frame in enumerate(frames): + # Skip sources with zero weight (but still need placeholder) + if i < len(weights) and weights[i] < 0.001: + processed_pairs.append(None) + continue + # Get effect 
params for clip A and B + params_a = executor.get_effect_params(i, "a", energy) + params_b = executor.get_effect_params(i, "b", energy) + pair_params = executor.get_pair_params(i) + + # Process clip A + clip_a = self._apply_clip_effects(frame.copy(), params_a, t) + + # Process clip B + clip_b = self._apply_clip_effects(frame.copy(), params_b, t) + + # Blend A and B using pair_mix opacity + opacity = pair_params["blend_opacity"] + blended = cv2.addWeighted( + clip_a, 1 - opacity, + clip_b, opacity, + 0 + ) + + # Apply pair rotation + h, w = blended.shape[:2] + center = (w // 2, h // 2) + angle = pair_params["pair_rotation"] + if abs(angle) > 0.5: + matrix = cv2.getRotationMatrix2D(center, angle, 1.0) + blended = cv2.warpAffine(blended, matrix, (w, h)) + + processed_pairs.append(blended) + + # Cycle-crossfade composition + weights = executor.get_cycle_weights() + zooms = executor.get_cycle_zooms() + + # Apply zoom per pair and composite + h, w = target_h, target_w + result = np.zeros((h, w, 3), dtype=np.float32) + + for idx, (pair, weight, zoom) in enumerate(zip(processed_pairs, weights, zooms)): + # Skip zero-weight sources + if pair is None or weight < 0.001: + continue + + orig_shape = pair.shape + + # Apply zoom + if zoom > 1.01: + # Zoom in: crop center and resize up + new_w, new_h = int(w / zoom), int(h / zoom) + if new_w > 0 and new_h > 0: + x1, y1 = (w - new_w) // 2, (h - new_h) // 2 + cropped = pair[y1:y1+new_h, x1:x1+new_w] + pair = cv2.resize(cropped, (w, h)) + elif zoom < 0.99: + # Zoom out: shrink video and center on black + scaled_w, scaled_h = int(w * zoom), int(h * zoom) + if scaled_w > 0 and scaled_h > 0: + shrunk = cv2.resize(pair, (scaled_w, scaled_h)) + canvas = np.zeros((h, w, 3), dtype=np.uint8) + x_off, y_off = (w - scaled_w) // 2, (h - scaled_h) // 2 + canvas[y_off:y_off+scaled_h, x_off:x_off+scaled_w] = shrunk + pair = canvas.copy() + + # Draw colored border - size indicates zoom level + border_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), 
(255, 255, 0)] + color = border_colors[idx % 4] + thickness = max(3, int(10 * weight)) # Thicker border = higher weight + pair = np.ascontiguousarray(pair) + pair[:thickness, :] = color + pair[-thickness:, :] = color + pair[:, :thickness] = color + pair[:, -thickness:] = color + + result += pair.astype(np.float32) * weight + + result = np.clip(result, 0, 255).astype(np.uint8) + + # Apply final effects (whole-spin, ripple) + final_params = executor.get_final_effects(energy) + + # Whole spin + spin_angle = final_params["whole_spin_angle"] + if abs(spin_angle) > 0.5: + center = (w // 2, h // 2) + matrix = cv2.getRotationMatrix2D(center, spin_angle, 1.0) + result = cv2.warpAffine(result, matrix, (w, h)) + + # Ripple effect + amp = final_params["ripple_amplitude"] + if amp > 1: + result = self._apply_ripple(result, amp, + final_params["ripple_cx"], + final_params["ripple_cy"], + t) + + return result + + def _apply_clip_effects(self, frame: np.ndarray, params: dict, t: float) -> np.ndarray: + """Apply per-clip effects: rotate, zoom, invert, hue_shift, ascii.""" + import cv2 + + h, w = frame.shape[:2] + + # Rotate + angle = params["rotate_angle"] + if abs(angle) > 0.5: + center = (w // 2, h // 2) + matrix = cv2.getRotationMatrix2D(center, angle, 1.0) + frame = cv2.warpAffine(frame, matrix, (w, h)) + + # Zoom + zoom = params["zoom_amount"] + if abs(zoom - 1.0) > 0.01: + new_w, new_h = int(w / zoom), int(h / zoom) + if new_w > 0 and new_h > 0: + x1, y1 = (w - new_w) // 2, (h - new_h) // 2 + x1, y1 = max(0, x1), max(0, y1) + x2, y2 = min(w, x1 + new_w), min(h, y1 + new_h) + if x2 > x1 and y2 > y1: + cropped = frame[y1:y2, x1:x2] + frame = cv2.resize(cropped, (w, h)) + + # Invert + if params["invert_amount"] > 0.5: + frame = 255 - frame + + # Hue shift + hue_deg = params["hue_degrees"] + if abs(hue_deg) > 1: + hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV) + hsv[:, :, 0] = (hsv[:, :, 0].astype(np.int32) + int(hue_deg / 2)) % 180 + frame = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB) + 
+ # ASCII art + if params["ascii_mix"] > 0.5: + char_size = max(4, int(params["ascii_char_size"])) + frame = self._apply_ascii(frame, char_size) + + return frame + + def _apply_ascii(self, frame: np.ndarray, char_size: int) -> np.ndarray: + """Apply ASCII art effect.""" + import cv2 + from PIL import Image, ImageDraw, ImageFont + + h, w = frame.shape[:2] + chars = " .:-=+*#%@" + + # Get font + try: + font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", char_size) + except: + font = ImageFont.load_default() + + # Sample cells using area interpolation (fast block average) + rows = h // char_size + cols = w // char_size + if rows < 1 or cols < 1: + return frame + + # Crop to exact grid and downsample + cropped = frame[:rows * char_size, :cols * char_size] + cell_colors = cv2.resize(cropped, (cols, rows), interpolation=cv2.INTER_AREA) + + # Compute luminance + luminances = (0.299 * cell_colors[:, :, 0] + + 0.587 * cell_colors[:, :, 1] + + 0.114 * cell_colors[:, :, 2]) / 255.0 + + # Create output image + out_h = rows * char_size + out_w = cols * char_size + output = Image.new('RGB', (out_w, out_h), (0, 0, 0)) + draw = ImageDraw.Draw(output) + + # Draw characters + for r in range(rows): + for c in range(cols): + lum = luminances[r, c] + color = tuple(cell_colors[r, c]) + + # Map luminance to character + idx = int(lum * (len(chars) - 1)) + char = chars[idx] + + # Draw character + x = c * char_size + y = r * char_size + draw.text((x, y), char, fill=color, font=font) + + # Convert back to numpy and resize to original + result = np.array(output) + if result.shape[:2] != (h, w): + result = cv2.resize(result, (w, h), interpolation=cv2.INTER_LINEAR) + + return result + + def _apply_ripple(self, frame: np.ndarray, amplitude: float, + cx: float, cy: float, t: float = 0) -> np.ndarray: + """Apply ripple distortion effect.""" + import cv2 + + h, w = frame.shape[:2] + center_x, center_y = cx * w, cy * h + max_dim = max(w, h) + + # Create coordinate grids 
+ y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) + + # Distance from center + dx = x_coords - center_x + dy = y_coords - center_y + dist = np.sqrt(dx*dx + dy*dy) + + # Ripple parameters (matching recipe: frequency=8, decay=2, speed=5) + freq = 8 + decay = 2 + speed = 5 + phase = t * speed * 2 * np.pi + + # Ripple displacement (matching original formula) + ripple = np.sin(2 * np.pi * freq * dist / max_dim + phase) * amplitude + + # Apply decay + if decay > 0: + ripple = ripple * np.exp(-dist * decay / max_dim) + + # Displace along radial direction + with np.errstate(divide='ignore', invalid='ignore'): + norm_dx = np.where(dist > 0, dx / dist, 0) + norm_dy = np.where(dist > 0, dy / dist, 0) + + map_x = (x_coords + ripple * norm_dx).astype(np.float32) + map_y = (y_coords + ripple * norm_dy).astype(np.float32) + + return cv2.remap(frame, map_x, map_y, cv2.INTER_LINEAR, + borderMode=cv2.BORDER_REFLECT) + + def _update_from_audio(self, analyzer, t: float): + """Update analysis data from audio analyzer (streaming or live).""" + # Set time for file-based streaming analyzers + if hasattr(analyzer, 'set_time'): + analyzer.set_time(t) + + # Get current audio features + energy = analyzer.get_energy() if hasattr(analyzer, 'get_energy') else 0 + beat = analyzer.get_beat() if hasattr(analyzer, 'get_beat') else False + + # Update analysis tracks - these can be referenced by effect bindings + self.analysis_data["live_energy"] = { + "times": [t], + "values": [energy], + "duration": float('inf'), + } + self.analysis_data["live_beat"] = { + "times": [t], + "values": [1.0 if beat else 0.0], + "duration": float('inf'), + } + + +def quick_preview( + sources: List[str], + effects: List[List[Dict]] = None, + duration: float = 10, + fps: float = 30, +): + """ + Quick preview helper - show sources with optional effects. 
+ + Example: + quick_preview(["video1.mp4", "video2.mp4"], duration=30) + """ + compositor = StreamingCompositor( + sources=sources, + effects_per_source=effects, + fps=fps, + ) + compositor.run(output="preview", duration=duration) diff --git a/streaming/demo.py b/streaming/demo.py new file mode 100644 index 0000000..0b1899f --- /dev/null +++ b/streaming/demo.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python3 +""" +Demo script for streaming compositor. + +Usage: + # Preview two videos blended + python -m streaming.demo preview video1.mp4 video2.mp4 + + # Record output to file + python -m streaming.demo record video1.mp4 video2.mp4 -o output.mp4 + + # Benchmark (no output) + python -m streaming.demo benchmark video1.mp4 --duration 10 +""" + +import argparse +import sys +from pathlib import Path + +# Add parent to path for imports +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from streaming import StreamingCompositor, VideoSource +from streaming.output import NullOutput + + +def demo_preview(sources: list, duration: float, effects: bool = False): + """Preview sources with optional simple effects.""" + effects_config = None + if effects: + effects_config = [ + [{"effect": "rotate", "angle": 15}], + [{"effect": "zoom", "amount": 1.2}], + ][:len(sources)] + + compositor = StreamingCompositor( + sources=sources, + effects_per_source=effects_config, + recipe_dir=Path(__file__).parent.parent, + ) + compositor.run(output="preview", duration=duration) + + +def demo_record(sources: list, output_path: str, duration: float): + """Record blended output to file.""" + compositor = StreamingCompositor( + sources=sources, + recipe_dir=Path(__file__).parent.parent, + ) + compositor.run(output=output_path, duration=duration) + + +def demo_benchmark(sources: list, duration: float): + """Benchmark processing speed (no output).""" + compositor = StreamingCompositor( + sources=sources, + recipe_dir=Path(__file__).parent.parent, + ) + compositor.run(output="null", duration=duration) 
+ + +def demo_audio_reactive(sources: list, duration: float): + """Preview with live audio reactivity.""" + from streaming.audio import AudioAnalyzer + + # Create compositor with energy-reactive effects + effects_config = [ + [{ + "effect": "zoom", + "amount": {"_binding": True, "source": "live_energy", "feature": "values", "range": [1.0, 1.5]}, + }] + for _ in sources + ] + + compositor = StreamingCompositor( + sources=sources, + effects_per_source=effects_config, + recipe_dir=Path(__file__).parent.parent, + ) + + # Start audio analyzer + try: + with AudioAnalyzer() as audio: + print("Audio analyzer started. Make some noise!", file=sys.stderr) + compositor.run(output="preview", duration=duration, audio_analyzer=audio) + except Exception as e: + print(f"Audio not available: {e}", file=sys.stderr) + print("Running without audio...", file=sys.stderr) + compositor.run(output="preview", duration=duration) + + +def main(): + parser = argparse.ArgumentParser(description="Streaming compositor demo") + parser.add_argument("mode", choices=["preview", "record", "benchmark", "audio"], + help="Demo mode") + parser.add_argument("sources", nargs="+", help="Video source files") + parser.add_argument("-o", "--output", help="Output file (for record mode)") + parser.add_argument("-d", "--duration", type=float, default=30, + help="Duration in seconds") + parser.add_argument("--effects", action="store_true", + help="Apply simple effects (for preview)") + + args = parser.parse_args() + + # Verify sources exist + for src in args.sources: + if not Path(src).exists(): + print(f"Error: Source not found: {src}", file=sys.stderr) + sys.exit(1) + + if args.mode == "preview": + demo_preview(args.sources, args.duration, args.effects) + elif args.mode == "record": + if not args.output: + print("Error: --output required for record mode", file=sys.stderr) + sys.exit(1) + demo_record(args.sources, args.output, args.duration) + elif args.mode == "benchmark": + demo_benchmark(args.sources, 
args.duration) + elif args.mode == "audio": + demo_audio_reactive(args.sources, args.duration) + + +if __name__ == "__main__": + main() diff --git a/streaming/output.py b/streaming/output.py new file mode 100644 index 0000000..c273bd1 --- /dev/null +++ b/streaming/output.py @@ -0,0 +1,369 @@ +""" +Output targets for streaming compositor. + +Supports: +- Display window (preview) +- File output (recording) +- Stream output (RTMP, etc.) - future +""" + +import numpy as np +import subprocess +from abc import ABC, abstractmethod +from typing import Tuple, Optional +from pathlib import Path + + +class Output(ABC): + """Abstract base class for output targets.""" + + @abstractmethod + def write(self, frame: np.ndarray, t: float): + """Write a frame to the output.""" + pass + + @abstractmethod + def close(self): + """Close the output and clean up resources.""" + pass + + @property + @abstractmethod + def is_open(self) -> bool: + """Check if output is still open/valid.""" + pass + + +class DisplayOutput(Output): + """ + Display frames using mpv (handles Wayland properly). + + Useful for live preview. Press 'q' to quit. 
+ """ + + def __init__(self, title: str = "Streaming Preview", size: Tuple[int, int] = None, + audio_source: str = None, fps: float = 30): + self.title = title + self.size = size + self.audio_source = audio_source + self.fps = fps + self._is_open = True + self._process = None + self._audio_process = None + + def _start_mpv(self, frame_size: Tuple[int, int]): + """Start mpv process for display.""" + import sys + w, h = frame_size + cmd = [ + "mpv", + "--no-cache", + "--demuxer=rawvideo", + f"--demuxer-rawvideo-w={w}", + f"--demuxer-rawvideo-h={h}", + "--demuxer-rawvideo-mp-format=rgb24", + f"--demuxer-rawvideo-fps={self.fps}", + f"--title={self.title}", + "-", + ] + print(f"Starting mpv: {' '.join(cmd)}", file=sys.stderr) + self._process = subprocess.Popen( + cmd, + stdin=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + + # Start audio playback if we have an audio source + if self.audio_source: + audio_cmd = [ + "ffplay", "-nodisp", "-autoexit", "-loglevel", "quiet", + str(self.audio_source) + ] + print(f"Starting audio: {self.audio_source}", file=sys.stderr) + self._audio_process = subprocess.Popen( + audio_cmd, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + + def write(self, frame: np.ndarray, t: float): + """Display frame.""" + if not self._is_open: + return + + # Ensure frame is correct format + if frame.dtype != np.uint8: + frame = np.clip(frame, 0, 255).astype(np.uint8) + if not frame.flags['C_CONTIGUOUS']: + frame = np.ascontiguousarray(frame) + + # Start mpv on first frame + if self._process is None: + self._start_mpv((frame.shape[1], frame.shape[0])) + + # Check if mpv is still running + if self._process.poll() is not None: + self._is_open = False + return + + try: + self._process.stdin.write(frame.tobytes()) + self._process.stdin.flush() # Prevent buffering + except BrokenPipeError: + self._is_open = False + + def close(self): + """Close the display and audio.""" + if self._process: + try: + self._process.stdin.close() + except: + pass + 
self._process.terminate() + self._process.wait() + if self._audio_process: + self._audio_process.terminate() + self._audio_process.wait() + self._is_open = False + + @property + def is_open(self) -> bool: + if self._process and self._process.poll() is not None: + self._is_open = False + return self._is_open + + +class FileOutput(Output): + """ + Write frames to a video file using ffmpeg. + """ + + def __init__( + self, + path: str, + size: Tuple[int, int], + fps: float = 30, + codec: str = "libx264", + crf: int = 18, + preset: str = "fast", + audio_source: str = None, + ): + self.path = Path(path) + self.size = size + self.fps = fps + self._is_open = True + + # Build ffmpeg command + cmd = [ + "ffmpeg", "-y", + "-f", "rawvideo", + "-vcodec", "rawvideo", + "-pix_fmt", "rgb24", + "-s", f"{size[0]}x{size[1]}", + "-r", str(fps), + "-i", "-", + ] + + # Add audio input if provided + if audio_source: + cmd.extend(["-i", str(audio_source)]) + # Explicitly map: video from input 0 (rawvideo), audio from input 1 + cmd.extend(["-map", "0:v", "-map", "1:a"]) + + cmd.extend([ + "-c:v", codec, + "-preset", preset, + "-crf", str(crf), + "-pix_fmt", "yuv420p", + ]) + + # Add audio codec if we have audio + if audio_source: + cmd.extend(["-c:a", "aac", "-b:a", "192k", "-shortest"]) + + cmd.append(str(self.path)) + + import sys + print(f"FileOutput cmd: {' '.join(cmd)}", file=sys.stderr) + self._process = subprocess.Popen( + cmd, + stdin=subprocess.PIPE, + stderr=None, # Show errors for debugging + ) + + def write(self, frame: np.ndarray, t: float): + """Write frame to video file.""" + if not self._is_open or self._process.poll() is not None: + self._is_open = False + return + + # Resize if needed + if frame.shape[1] != self.size[0] or frame.shape[0] != self.size[1]: + import cv2 + frame = cv2.resize(frame, self.size) + + try: + self._process.stdin.write(frame.tobytes()) + except BrokenPipeError: + self._is_open = False + + def close(self): + """Close the video file.""" + if 
self._process: + self._process.stdin.close() + self._process.wait() + self._is_open = False + + @property + def is_open(self) -> bool: + return self._is_open and self._process.poll() is None + + +class MultiOutput(Output): + """ + Write to multiple outputs simultaneously. + + Useful for recording while showing preview. + """ + + def __init__(self, outputs: list): + self.outputs = outputs + + def write(self, frame: np.ndarray, t: float): + for output in self.outputs: + if output.is_open: + output.write(frame, t) + + def close(self): + for output in self.outputs: + output.close() + + @property + def is_open(self) -> bool: + return any(o.is_open for o in self.outputs) + + +class NullOutput(Output): + """ + Discard frames (for benchmarking). + """ + + def __init__(self): + self._is_open = True + self.frame_count = 0 + + def write(self, frame: np.ndarray, t: float): + self.frame_count += 1 + + def close(self): + self._is_open = False + + @property + def is_open(self) -> bool: + return self._is_open + + +class PipeOutput(Output): + """ + Pipe frames directly to mpv. + + Launches mpv with rawvideo demuxer and writes frames to stdin. 
+ """ + + def __init__(self, size: Tuple[int, int], fps: float = 30, audio_source: str = None): + self.size = size + self.fps = fps + self.audio_source = audio_source + self._is_open = True + self._process = None + self._audio_process = None + self._started = False + + def _start(self): + """Start mpv and audio on first frame.""" + if self._started: + return + self._started = True + + import sys + w, h = self.size + + # Start mpv + cmd = [ + "mpv", "--no-cache", + "--demuxer=rawvideo", + f"--demuxer-rawvideo-w={w}", + f"--demuxer-rawvideo-h={h}", + "--demuxer-rawvideo-mp-format=rgb24", + f"--demuxer-rawvideo-fps={self.fps}", + "--title=Streaming", + "-" + ] + print(f"Starting mpv: {w}x{h} @ {self.fps}fps", file=sys.stderr) + self._process = subprocess.Popen( + cmd, + stdin=subprocess.PIPE, + stderr=subprocess.DEVNULL, + ) + + # Start audio + if self.audio_source: + audio_cmd = [ + "ffplay", "-nodisp", "-autoexit", "-loglevel", "quiet", + str(self.audio_source) + ] + print(f"Starting audio: {self.audio_source}", file=sys.stderr) + self._audio_process = subprocess.Popen( + audio_cmd, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + + def write(self, frame: np.ndarray, t: float): + """Write frame to mpv.""" + if not self._is_open: + return + + self._start() + + # Check mpv still running + if self._process.poll() is not None: + self._is_open = False + return + + # Resize if needed + if frame.shape[1] != self.size[0] or frame.shape[0] != self.size[1]: + import cv2 + frame = cv2.resize(frame, self.size) + + # Ensure correct format + if frame.dtype != np.uint8: + frame = np.clip(frame, 0, 255).astype(np.uint8) + if not frame.flags['C_CONTIGUOUS']: + frame = np.ascontiguousarray(frame) + + try: + self._process.stdin.write(frame.tobytes()) + self._process.stdin.flush() + except BrokenPipeError: + self._is_open = False + + def close(self): + """Close mpv and audio.""" + if self._process: + try: + self._process.stdin.close() + except: + pass + 
self._process.terminate() + self._process.wait() + if self._audio_process: + self._audio_process.terminate() + self._audio_process.wait() + self._is_open = False + + @property + def is_open(self) -> bool: + if self._process and self._process.poll() is not None: + self._is_open = False + return self._is_open diff --git a/streaming/pipeline.py b/streaming/pipeline.py new file mode 100644 index 0000000..29dd7e1 --- /dev/null +++ b/streaming/pipeline.py @@ -0,0 +1,846 @@ +""" +Streaming pipeline executor. + +Directly executes compiled sexp recipes frame-by-frame. +No adapter layer - frames and analysis flow through the DAG. +""" + +import sys +import time +import numpy as np +from pathlib import Path +from typing import Dict, List, Any, Optional +from dataclasses import dataclass, field + +from .sources import VideoSource +from .audio import StreamingAudioAnalyzer +from .output import DisplayOutput, FileOutput +from .sexp_interp import SexpInterpreter + + +@dataclass +class FrameContext: + """Context passed through the pipeline for each frame.""" + t: float # Current time + energy: float = 0.0 + is_beat: bool = False + beat_count: int = 0 + analysis: Dict[str, Any] = field(default_factory=dict) + + +class StreamingPipeline: + """ + Executes a compiled sexp recipe as a streaming pipeline. + + Frames flow through the DAG directly - no adapter needed. + Each node is evaluated lazily when its output is requested. 
+ """ + + def __init__(self, compiled_recipe, recipe_dir: Path = None, fps: float = 30, seed: int = 42, + output_size: tuple = None): + self.recipe = compiled_recipe + self.recipe_dir = recipe_dir or Path(".") + self.fps = fps + self.seed = seed + + # Build node lookup + self.nodes = {n['id']: n for n in compiled_recipe.nodes} + + # Runtime state + self.sources: Dict[str, VideoSource] = {} + self.audio_analyzer: Optional[StreamingAudioAnalyzer] = None + self.audio_source_path: Optional[str] = None + + # Sexp interpreter for expressions + self.interp = SexpInterpreter() + + # Scan state (node_id -> current value) + self.scan_state: Dict[str, Any] = {} + self.scan_emit: Dict[str, Any] = {} + + # SLICE_ON state + self.slice_on_acc: Dict[str, Any] = {} + self.slice_on_result: Dict[str, Any] = {} + + # Frame cache for current timestep (cleared each frame) + self._frame_cache: Dict[str, np.ndarray] = {} + + # Context for current frame + self.ctx = FrameContext(t=0.0) + + # Output size (w, h) - set after sources are initialized + self._output_size = output_size + + # Initialize + self._init_sources() + self._init_scans() + self._init_slice_on() + + # Set output size from first source if not specified + if self._output_size is None and self.sources: + first_source = next(iter(self.sources.values())) + self._output_size = first_source._size + + def _init_sources(self): + """Initialize video and audio sources.""" + for node in self.recipe.nodes: + if node.get('type') == 'SOURCE': + config = node.get('config', {}) + path = config.get('path') + if path: + full_path = (self.recipe_dir / path).resolve() + suffix = full_path.suffix.lower() + + if suffix in ('.mp4', '.webm', '.mov', '.avi', '.mkv'): + if not full_path.exists(): + print(f"Warning: video not found: {full_path}", file=sys.stderr) + continue + self.sources[node['id']] = VideoSource( + str(full_path), + target_fps=self.fps + ) + elif suffix in ('.mp3', '.wav', '.flac', '.ogg', '.m4a', '.aac'): + if not 
full_path.exists(): + print(f"Warning: audio not found: {full_path}", file=sys.stderr) + continue + self.audio_source_path = str(full_path) + self.audio_analyzer = StreamingAudioAnalyzer(str(full_path)) + + def _init_scans(self): + """Initialize scan nodes with their initial state.""" + import random + seed_offset = 0 + + for node in self.recipe.nodes: + if node.get('type') == 'SCAN': + config = node.get('config', {}) + + # Create RNG for this scan + scan_seed = config.get('seed', self.seed + seed_offset) + rng = random.Random(scan_seed) + seed_offset += 1 + + # Evaluate initial value + init_expr = config.get('init', 0) + init_value = self.interp.eval(init_expr, {}) + + self.scan_state[node['id']] = { + 'value': init_value, + 'rng': rng, + 'config': config, + } + + # Compute initial emit + self._update_scan_emit(node['id']) + + def _update_scan_emit(self, node_id: str): + """Update the emit value for a scan.""" + state = self.scan_state[node_id] + config = state['config'] + emit_expr = config.get('emit_expr', config.get('emit', None)) + + if emit_expr is None: + # No emit expression - emit the value directly + self.scan_emit[node_id] = state['value'] + return + + # Build environment from state + env = {} + if isinstance(state['value'], dict): + env.update(state['value']) + else: + env['acc'] = state['value'] + + env['beat_count'] = self.ctx.beat_count + env['time'] = self.ctx.t + + # Set RNG for interpreter + self.interp.rng = state['rng'] + + self.scan_emit[node_id] = self.interp.eval(emit_expr, env) + + def _step_scan(self, node_id: str): + """Step a scan forward on beat.""" + state = self.scan_state[node_id] + config = state['config'] + step_expr = config.get('step_expr', config.get('step', None)) + + if step_expr is None: + return + + # Build environment + env = {} + if isinstance(state['value'], dict): + env.update(state['value']) + else: + env['acc'] = state['value'] + + env['beat_count'] = self.ctx.beat_count + env['time'] = self.ctx.t + + # Set RNG + 
self.interp.rng = state['rng'] + + # Evaluate step + new_value = self.interp.eval(step_expr, env) + state['value'] = new_value + + # Update emit + self._update_scan_emit(node_id) + + def _init_slice_on(self): + """Initialize SLICE_ON nodes.""" + for node in self.recipe.nodes: + if node.get('type') == 'SLICE_ON': + config = node.get('config', {}) + init = config.get('init', {}) + self.slice_on_acc[node['id']] = dict(init) + + # Evaluate initial state + self._eval_slice_on(node['id']) + + def _eval_slice_on(self, node_id: str): + """Evaluate a SLICE_ON node's Lambda.""" + node = self.nodes[node_id] + config = node.get('config', {}) + fn = config.get('fn') + videos = config.get('videos', []) + + if not fn: + return + + acc = self.slice_on_acc[node_id] + n_videos = len(videos) + + # Set up environment + self.interp.globals['videos'] = list(range(n_videos)) + + try: + from .sexp_interp import eval_slice_on_lambda + result = eval_slice_on_lambda( + fn, acc, self.ctx.beat_count, 0, 1, + list(range(n_videos)), self.interp + ) + self.slice_on_result[node_id] = result + + # Update accumulator + if 'acc' in result: + self.slice_on_acc[node_id] = result['acc'] + except Exception as e: + print(f"SLICE_ON eval error: {e}", file=sys.stderr) + + def _on_beat(self): + """Called when a beat is detected.""" + self.ctx.beat_count += 1 + + # Step all scans + for node_id in self.scan_state: + self._step_scan(node_id) + + # Step all SLICE_ON nodes + for node_id in self.slice_on_acc: + self._eval_slice_on(node_id) + + def _get_frame(self, node_id: str) -> Optional[np.ndarray]: + """ + Get the output frame for a node at current time. + + Recursively evaluates inputs as needed. + Results are cached for the current timestep. 
+ """ + if node_id in self._frame_cache: + return self._frame_cache[node_id] + + node = self.nodes.get(node_id) + if not node: + return None + + node_type = node.get('type') + + if node_type == 'SOURCE': + frame = self._eval_source(node) + elif node_type == 'SEGMENT': + frame = self._eval_segment(node) + elif node_type == 'EFFECT': + frame = self._eval_effect(node) + elif node_type == 'SLICE_ON': + frame = self._eval_slice_on_frame(node) + else: + # Unknown node type - try to pass through input + inputs = node.get('inputs', []) + frame = self._get_frame(inputs[0]) if inputs else None + + self._frame_cache[node_id] = frame + return frame + + def _eval_source(self, node: dict) -> Optional[np.ndarray]: + """Evaluate a SOURCE node.""" + source = self.sources.get(node['id']) + if source: + return source.read_frame(self.ctx.t) + return None + + def _eval_segment(self, node: dict) -> Optional[np.ndarray]: + """Evaluate a SEGMENT node (time segment of source).""" + inputs = node.get('inputs', []) + if not inputs: + return None + + config = node.get('config', {}) + start = config.get('start', 0) + duration = config.get('duration') + + # Resolve any bindings + if isinstance(start, dict): + start = self._resolve_binding(start) if start.get('_binding') else 0 + if isinstance(duration, dict): + duration = self._resolve_binding(duration) if duration.get('_binding') else None + + # Adjust time for segment + t_local = self.ctx.t + (start if isinstance(start, (int, float)) else 0) + if duration and isinstance(duration, (int, float)): + t_local = t_local % duration # Loop within segment + + # Get source frame at adjusted time + source_id = inputs[0] + source = self.sources.get(source_id) + if source: + return source.read_frame(t_local) + + return self._get_frame(source_id) + + def _eval_effect(self, node: dict) -> Optional[np.ndarray]: + """Evaluate an EFFECT node.""" + import cv2 + + inputs = node.get('inputs', []) + config = node.get('config', {}) + effect_name = 
config.get('effect') + + # Get input frame(s) + input_frames = [self._get_frame(inp) for inp in inputs] + input_frames = [f for f in input_frames if f is not None] + + if not input_frames: + return None + + frame = input_frames[0] + + # Resolve bindings in config + params = self._resolve_config(config) + + # Apply effect based on name + if effect_name == 'rotate': + angle = params.get('angle', 0) + if abs(angle) > 0.5: + h, w = frame.shape[:2] + center = (w // 2, h // 2) + matrix = cv2.getRotationMatrix2D(center, angle, 1.0) + frame = cv2.warpAffine(frame, matrix, (w, h)) + + elif effect_name == 'zoom': + amount = params.get('amount', 1.0) + if abs(amount - 1.0) > 0.01: + frame = self._apply_zoom(frame, amount) + + elif effect_name == 'invert': + amount = params.get('amount', 0) + if amount > 0.01: + inverted = 255 - frame + frame = cv2.addWeighted(frame, 1 - amount, inverted, amount, 0) + + elif effect_name == 'hue_shift': + degrees = params.get('degrees', 0) + if abs(degrees) > 1: + hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV) + hsv[:, :, 0] = (hsv[:, :, 0].astype(int) + int(degrees / 2)) % 180 + frame = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB) + + elif effect_name == 'blend': + if len(input_frames) >= 2: + opacity = params.get('opacity', 0.5) + frame = cv2.addWeighted(input_frames[0], 1 - opacity, + input_frames[1], opacity, 0) + + elif effect_name == 'blend_multi': + weights = params.get('weights', []) + if len(input_frames) > 1 and weights: + h, w = input_frames[0].shape[:2] + result = np.zeros((h, w, 3), dtype=np.float32) + for f, wt in zip(input_frames, weights): + if f is not None and wt > 0.001: + if f.shape[:2] != (h, w): + f = cv2.resize(f, (w, h)) + result += f.astype(np.float32) * wt + frame = np.clip(result, 0, 255).astype(np.uint8) + + elif effect_name == 'ripple': + amp = params.get('amplitude', 0) + if amp > 1: + frame = self._apply_ripple(frame, amp, + params.get('center_x', 0.5), + params.get('center_y', 0.5), + params.get('frequency', 8), + 
params.get('decay', 2), + params.get('speed', 5)) + + return frame + + def _eval_slice_on_frame(self, node: dict) -> Optional[np.ndarray]: + """Evaluate a SLICE_ON node - returns composited frame.""" + import cv2 + + config = node.get('config', {}) + video_ids = config.get('videos', []) + result = self.slice_on_result.get(node['id'], {}) + + if not result: + # No result yet - return first video + if video_ids: + return self._get_frame(video_ids[0]) + return None + + # Get layers and compose info + layers = result.get('layers', []) + compose = result.get('compose', {}) + weights = compose.get('weights', []) + + if not layers or not weights: + if video_ids: + return self._get_frame(video_ids[0]) + return None + + # Get frames for each layer + frames = [] + for i, layer in enumerate(layers): + video_idx = layer.get('video', i) + if video_idx < len(video_ids): + frame = self._get_frame(video_ids[video_idx]) + + # Apply layer effects (zoom) + effects = layer.get('effects', []) + for eff in effects: + eff_name = eff.get('effect') + if hasattr(eff_name, 'name'): + eff_name = eff_name.name + if eff_name == 'zoom': + zoom_amt = eff.get('amount', 1.0) + if frame is not None: + frame = self._apply_zoom(frame, zoom_amt) + + frames.append(frame) + else: + frames.append(None) + + # Composite with weights - use consistent output size + if self._output_size: + w, h = self._output_size + else: + # Fallback to first non-None frame size + for f in frames: + if f is not None: + h, w = f.shape[:2] + break + else: + return None + + output = np.zeros((h, w, 3), dtype=np.float32) + + for frame, weight in zip(frames, weights): + if frame is None or weight < 0.001: + continue + + # Resize to output size + if frame.shape[1] != w or frame.shape[0] != h: + frame = cv2.resize(frame, (w, h)) + + output += frame.astype(np.float32) * weight + + # Normalize weights + total_weight = sum(wt for wt in weights if wt > 0.001) + if total_weight > 0 and abs(total_weight - 1.0) > 0.01: + output /= 
total_weight + + return np.clip(output, 0, 255).astype(np.uint8) + + def _resolve_config(self, config: dict) -> dict: + """Resolve bindings in effect config to actual values.""" + resolved = {} + + for key, value in config.items(): + if key in ('effect', 'effect_path', 'effect_cid', 'effects_registry', + 'analysis_refs', 'inputs', 'cid'): + continue + + if isinstance(value, dict) and value.get('_binding'): + resolved[key] = self._resolve_binding(value) + elif isinstance(value, dict) and value.get('_expr'): + resolved[key] = self._resolve_expr(value) + else: + resolved[key] = value + + return resolved + + def _resolve_binding(self, binding: dict) -> Any: + """Resolve a binding to its current value.""" + source_id = binding.get('source') + feature = binding.get('feature', 'values') + range_map = binding.get('range') + + # Get raw value from scan or analysis + if source_id in self.scan_emit: + value = self.scan_emit[source_id] + elif source_id in self.ctx.analysis: + data = self.ctx.analysis[source_id] + value = data.get(feature, data.get('values', [0]))[0] if isinstance(data, dict) else data + else: + # Fallback to energy + value = self.ctx.energy + + # Extract feature from dict + if isinstance(value, dict) and feature in value: + value = value[feature] + + # Apply range mapping + if range_map and isinstance(value, (int, float)): + lo, hi = range_map + value = lo + value * (hi - lo) + + return value + + def _resolve_expr(self, expr: dict) -> Any: + """Resolve a compiled expression.""" + env = { + 'energy': self.ctx.energy, + 'beat_count': self.ctx.beat_count, + 't': self.ctx.t, + } + + # Add scan values + for scan_id, value in self.scan_emit.items(): + # Use short form if available + env[scan_id] = value + + # Extract the actual expression from _expr wrapper + actual_expr = expr.get('_expr', expr) + return self.interp.eval(actual_expr, env) + + def _apply_zoom(self, frame: np.ndarray, amount: float) -> np.ndarray: + """Apply zoom to frame.""" + import cv2 + h, w = 
frame.shape[:2] + + if amount > 1.01: + # Zoom in: crop center + new_w, new_h = int(w / amount), int(h / amount) + if new_w > 0 and new_h > 0: + x1, y1 = (w - new_w) // 2, (h - new_h) // 2 + cropped = frame[y1:y1+new_h, x1:x1+new_w] + return cv2.resize(cropped, (w, h)) + elif amount < 0.99: + # Zoom out: shrink and center + scaled_w, scaled_h = int(w * amount), int(h * amount) + if scaled_w > 0 and scaled_h > 0: + shrunk = cv2.resize(frame, (scaled_w, scaled_h)) + canvas = np.zeros((h, w, 3), dtype=np.uint8) + x_off, y_off = (w - scaled_w) // 2, (h - scaled_h) // 2 + canvas[y_off:y_off+scaled_h, x_off:x_off+scaled_w] = shrunk + return canvas + + return frame + + def _apply_ripple(self, frame: np.ndarray, amplitude: float, + cx: float, cy: float, frequency: float, + decay: float, speed: float) -> np.ndarray: + """Apply ripple effect.""" + import cv2 + h, w = frame.shape[:2] + + # Create coordinate grids + y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) + + # Normalize to center + center_x, center_y = w * cx, h * cy + dx = x_coords - center_x + dy = y_coords - center_y + dist = np.sqrt(dx**2 + dy**2) + + # Ripple displacement + phase = self.ctx.t * speed + ripple = amplitude * np.sin(dist / frequency - phase) * np.exp(-dist * decay / max(w, h)) + + # Displace coordinates + angle = np.arctan2(dy, dx) + map_x = (x_coords + ripple * np.cos(angle)).astype(np.float32) + map_y = (y_coords + ripple * np.sin(angle)).astype(np.float32) + + return cv2.remap(frame, map_x, map_y, cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT) + + def _find_output_node(self) -> Optional[str]: + """Find the final output node (MUX or last EFFECT).""" + # Look for MUX node + for node in self.recipe.nodes: + if node.get('type') == 'MUX': + return node['id'] + + # Otherwise find last EFFECT after SLICE_ON + last_effect = None + found_slice_on = False + for node in self.recipe.nodes: + if node.get('type') == 'SLICE_ON': + found_slice_on = True + elif node.get('type') == 'EFFECT' and 
found_slice_on: + last_effect = node['id'] + + return last_effect + + def render_frame(self, t: float) -> Optional[np.ndarray]: + """Render a single frame at time t.""" + # Clear frame cache + self._frame_cache.clear() + + # Update context + self.ctx.t = t + + # Update audio analysis + if self.audio_analyzer: + self.audio_analyzer.set_time(t) + energy = self.audio_analyzer.get_energy() + is_beat = self.audio_analyzer.get_beat() + + # Beat edge detection + was_beat = self.ctx.is_beat + self.ctx.energy = energy + self.ctx.is_beat = is_beat + + if is_beat and not was_beat: + self._on_beat() + + # Store in analysis dict + self.ctx.analysis['live_energy'] = {'values': [energy]} + self.ctx.analysis['live_beat'] = {'values': [1.0 if is_beat else 0.0]} + + # Find output node and render + output_node = self._find_output_node() + if output_node: + frame = self._get_frame(output_node) + # Normalize to output size + if frame is not None and self._output_size: + w, h = self._output_size + if frame.shape[1] != w or frame.shape[0] != h: + import cv2 + frame = cv2.resize(frame, (w, h)) + return frame + + return None + + def run(self, output: str = "preview", duration: float = None): + """ + Run the pipeline. 
+ + Args: + output: "preview", filename, or Output object + duration: Duration in seconds (default: audio duration or 60s) + """ + # Determine duration + if duration is None: + if self.audio_analyzer: + duration = self.audio_analyzer.duration + else: + duration = 60.0 + + # Create output + if output == "preview": + # Get frame size from first source + first_source = next(iter(self.sources.values()), None) + if first_source: + w, h = first_source._size + else: + w, h = 720, 720 + out = DisplayOutput(size=(w, h), fps=self.fps, audio_source=self.audio_source_path) + elif isinstance(output, str): + first_source = next(iter(self.sources.values()), None) + if first_source: + w, h = first_source._size + else: + w, h = 720, 720 + out = FileOutput(output, size=(w, h), fps=self.fps, audio_source=self.audio_source_path) + else: + out = output + + frame_time = 1.0 / self.fps + n_frames = int(duration * self.fps) + + print(f"Streaming: {len(self.sources)} sources -> {output}", file=sys.stderr) + print(f"Duration: {duration:.1f}s, {n_frames} frames @ {self.fps}fps", file=sys.stderr) + + start_time = time.time() + frame_count = 0 + + try: + for frame_num in range(n_frames): + t = frame_num * frame_time + + frame = self.render_frame(t) + + if frame is not None: + out.write(frame, t) + frame_count += 1 + + # Progress + if frame_num % 50 == 0: + elapsed = time.time() - start_time + fps = frame_count / elapsed if elapsed > 0 else 0 + pct = 100 * frame_num / n_frames + print(f"\r{pct:5.1f}% | {fps:5.1f} fps | frame {frame_num}/{n_frames}", + end="", file=sys.stderr) + + except KeyboardInterrupt: + print("\nInterrupted", file=sys.stderr) + finally: + out.close() + for src in self.sources.values(): + src.close() + + elapsed = time.time() - start_time + avg_fps = frame_count / elapsed if elapsed > 0 else 0 + print(f"\nCompleted: {frame_count} frames in {elapsed:.1f}s ({avg_fps:.1f} fps avg)", + file=sys.stderr) + + +def run_pipeline(recipe_path: str, output: str = "preview", + duration: 
float = None, fps: float = None): + """ + Run a recipe through the streaming pipeline. + + No adapter layer - directly executes the compiled recipe. + """ + from pathlib import Path + + # Add artdag to path + import sys + sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag")) + + from artdag.sexp.compiler import compile_string + + recipe_path = Path(recipe_path) + recipe_text = recipe_path.read_text() + compiled = compile_string(recipe_text, {}, recipe_dir=recipe_path.parent) + + pipeline = StreamingPipeline( + compiled, + recipe_dir=recipe_path.parent, + fps=fps or compiled.encoding.get('fps', 30), + ) + + pipeline.run(output=output, duration=duration) + + +def run_pipeline_piped(recipe_path: str, duration: float = None, fps: float = None): + """ + Run pipeline and pipe directly to mpv with audio. + """ + import subprocess + from pathlib import Path + import sys + sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag")) + from artdag.sexp.compiler import compile_string + + recipe_path = Path(recipe_path) + recipe_text = recipe_path.read_text() + compiled = compile_string(recipe_text, {}, recipe_dir=recipe_path.parent) + + pipeline = StreamingPipeline( + compiled, + recipe_dir=recipe_path.parent, + fps=fps or compiled.encoding.get('fps', 30), + ) + + # Get frame info + first_source = next(iter(pipeline.sources.values()), None) + if first_source: + w, h = first_source._size + else: + w, h = 720, 720 + + # Determine duration + if duration is None: + if pipeline.audio_analyzer: + duration = pipeline.audio_analyzer.duration + else: + duration = 60.0 + + actual_fps = fps or compiled.encoding.get('fps', 30) + n_frames = int(duration * actual_fps) + frame_time = 1.0 / actual_fps + + print(f"Streaming {n_frames} frames @ {actual_fps}fps to mpv", file=sys.stderr) + + # Start mpv + mpv_cmd = [ + "mpv", "--no-cache", + "--demuxer=rawvideo", + f"--demuxer-rawvideo-w={w}", + f"--demuxer-rawvideo-h={h}", + "--demuxer-rawvideo-mp-format=rgb24", + 
f"--demuxer-rawvideo-fps={actual_fps}", + "--title=Streaming Pipeline", + "-" + ] + mpv = subprocess.Popen(mpv_cmd, stdin=subprocess.PIPE, stderr=subprocess.DEVNULL) + + # Start audio if available + audio_proc = None + if pipeline.audio_source_path: + audio_cmd = ["ffplay", "-nodisp", "-autoexit", "-loglevel", "quiet", + pipeline.audio_source_path] + audio_proc = subprocess.Popen(audio_cmd, stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL) + + try: + import cv2 + for frame_num in range(n_frames): + if mpv.poll() is not None: + break # mpv closed + + t = frame_num * frame_time + frame = pipeline.render_frame(t) + if frame is not None: + # Ensure consistent frame size + if frame.shape[1] != w or frame.shape[0] != h: + frame = cv2.resize(frame, (w, h)) + if not frame.flags['C_CONTIGUOUS']: + frame = np.ascontiguousarray(frame) + try: + mpv.stdin.write(frame.tobytes()) + mpv.stdin.flush() + except BrokenPipeError: + break + except KeyboardInterrupt: + pass + finally: + if mpv.stdin: + mpv.stdin.close() + mpv.terminate() + if audio_proc: + audio_proc.terminate() + for src in pipeline.sources.values(): + src.close() + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser(description="Run sexp recipe through streaming pipeline") + parser.add_argument("recipe", help="Path to .sexp recipe file") + parser.add_argument("-o", "--output", default="pipe", + help="Output: 'pipe' (mpv), 'preview', or filename (default: pipe)") + parser.add_argument("-d", "--duration", type=float, default=None, + help="Duration in seconds (default: audio duration)") + parser.add_argument("--fps", type=float, default=None, + help="Frame rate (default: from recipe)") + args = parser.parse_args() + + if args.output == "pipe": + run_pipeline_piped(args.recipe, duration=args.duration, fps=args.fps) + else: + run_pipeline(args.recipe, output=args.output, duration=args.duration, fps=args.fps) diff --git a/streaming/recipe_adapter.py b/streaming/recipe_adapter.py new file 
mode 100644 index 0000000..2133919 --- /dev/null +++ b/streaming/recipe_adapter.py @@ -0,0 +1,470 @@ +""" +Adapter to run sexp recipes through the streaming compositor. + +Bridges the gap between: +- Existing recipe format (sexp files with stages, effects, analysis) +- Streaming compositor (sources, effect chains, compositor config) +""" + +import sys +from pathlib import Path +from typing import Dict, List, Any, Optional + +sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag")) + +from .compositor import StreamingCompositor +from .sources import VideoSource +from .audio import FileAudioAnalyzer + + +class RecipeAdapter: + """ + Adapts a compiled sexp recipe to run through the streaming compositor. + + Example: + adapter = RecipeAdapter("effects/quick_test.sexp") + adapter.run(output="preview", duration=60) + """ + + def __init__( + self, + recipe_path: str, + params: Dict[str, Any] = None, + backend: str = "numpy", + ): + """ + Load and prepare a recipe for streaming. + + Args: + recipe_path: Path to .sexp recipe file + params: Parameter overrides + backend: "numpy" or "glsl" + """ + self.recipe_path = Path(recipe_path) + self.recipe_dir = self.recipe_path.parent + self.params = params or {} + self.backend = backend + + # Compile recipe + self._compile() + + def _compile(self): + """Compile the recipe and extract structure.""" + from artdag.sexp.compiler import compile_string + + recipe_text = self.recipe_path.read_text() + self.compiled = compile_string(recipe_text, self.params, recipe_dir=self.recipe_dir) + + # Extract key info + self.sources = {} # name -> path + self.effects_registry = {} # effect_name -> path + self.analyzers = {} # name -> analyzer info + + # Walk nodes to find sources and structure + # nodes is a list in CompiledRecipe + for node in self.compiled.nodes: + node_type = node.get("type", "") + + if node_type == "SOURCE": + config = node.get("config", {}) + path = config.get("path") + if path: + self.sources[node["id"]] = 
self.recipe_dir / path + + elif node_type == "ANALYZE": + config = node.get("config", {}) + self.analyzers[node["id"]] = { + "analyzer": config.get("analyzer"), + "path": config.get("analyzer_path"), + } + + # Get effects registry from compiled recipe + # registry has 'effects' sub-dict + effects_dict = self.compiled.registry.get("effects", {}) + for name, info in effects_dict.items(): + if info.get("path"): + self.effects_registry[name] = Path(info["path"]) + + def run_analysis(self) -> Dict[str, Any]: + """ + Run analysis phase (energy, beats, etc.). + + Returns: + Dict of analysis track name -> {times, values, duration} + """ + print(f"Running analysis...", file=sys.stderr) + + # Use existing planner's analysis execution + from artdag.sexp.planner import create_plan + + analysis_data = {} + + def on_analysis(node_id: str, results: dict): + analysis_data[node_id] = results + print(f" {node_id[:16]}...: {len(results.get('times', []))} samples", file=sys.stderr) + + # Create plan (runs analysis as side effect) + plan = create_plan( + self.compiled, + inputs={}, + recipe_dir=self.recipe_dir, + on_analysis=on_analysis, + ) + + # Also store named analysis tracks + for name, data in plan.analysis.items(): + analysis_data[name] = data + + return analysis_data + + def build_compositor( + self, + analysis_data: Dict[str, Any] = None, + fps: float = None, + ) -> StreamingCompositor: + """ + Build a streaming compositor from the recipe. + + This is a simplified version that handles common patterns. + Complex recipes may need manual configuration. 
+ + Args: + analysis_data: Pre-computed analysis data + + Returns: + Configured StreamingCompositor + """ + # Extract video and audio sources in SLICE_ON input order + video_sources = [] + audio_source = None + + # Find audio source first + for node_id, path in self.sources.items(): + suffix = path.suffix.lower() + if suffix in ('.mp3', '.wav', '.flac', '.ogg', '.m4a', '.aac'): + audio_source = str(path) + break + + # Find SLICE_ON node to get correct video order + slice_on_inputs = None + for node in self.compiled.nodes: + if node.get('type') == 'SLICE_ON': + # Use 'videos' config key which has the correct order + config = node.get('config', {}) + slice_on_inputs = config.get('videos', []) + break + + if slice_on_inputs: + # Trace each SLICE_ON input back to its SOURCE + node_lookup = {n['id']: n for n in self.compiled.nodes} + + def trace_to_source(node_id, visited=None): + """Trace a node back to its SOURCE, return source path.""" + if visited is None: + visited = set() + if node_id in visited: + return None + visited.add(node_id) + + node = node_lookup.get(node_id) + if not node: + return None + if node.get('type') == 'SOURCE': + return self.sources.get(node_id) + # Recurse through inputs + for inp in node.get('inputs', []): + result = trace_to_source(inp, visited) + if result: + return result + return None + + # Build video_sources in SLICE_ON input order + for inp_id in slice_on_inputs: + source_path = trace_to_source(inp_id) + if source_path: + suffix = source_path.suffix.lower() + if suffix in ('.mp4', '.webm', '.mov', '.avi', '.mkv'): + video_sources.append(str(source_path)) + + # Fallback to definition order if no SLICE_ON + if not video_sources: + for node_id, path in self.sources.items(): + suffix = path.suffix.lower() + if suffix in ('.mp4', '.webm', '.mov', '.avi', '.mkv'): + video_sources.append(str(path)) + + if not video_sources: + raise ValueError("No video sources found in recipe") + + # Build effect chains - use live audio bindings (matching 
video_sources count) + effects_per_source = self._build_streaming_effects(n_sources=len(video_sources)) + + # Build compositor config from recipe + compositor_config = self._extract_compositor_config(analysis_data) + + return StreamingCompositor( + sources=video_sources, + effects_per_source=effects_per_source, + compositor_config=compositor_config, + analysis_data=analysis_data or {}, + backend=self.backend, + recipe_dir=self.recipe_dir, + fps=fps or self.compiled.encoding.get("fps", 30), + audio_source=audio_source, + ) + + def _build_streaming_effects(self, n_sources: int = None) -> List[List[Dict]]: + """ + Build effect chains for streaming with live audio bindings. + + Replicates the recipe's effect pipeline: + - Per source: rotate, zoom, invert, hue_shift, ascii_art + - All driven by live_energy and live_beat + """ + if n_sources is None: + n_sources = len([p for p in self.sources.values() + if p.suffix.lower() in ('.mp4', '.webm', '.mov', '.avi', '.mkv')]) + + effects_per_source = [] + + for i in range(n_sources): + # Alternate rotation direction per source + rot_dir = 1 if i % 2 == 0 else -1 + + effects = [ + # Rotate - energy drives angle + { + "effect": "rotate", + "effect_path": str(self.effects_registry.get("rotate", "")), + "angle": { + "_binding": True, + "source": "live_energy", + "feature": "values", + "range": [0, 45 * rot_dir], + }, + }, + # Zoom - energy drives amount + { + "effect": "zoom", + "effect_path": str(self.effects_registry.get("zoom", "")), + "amount": { + "_binding": True, + "source": "live_energy", + "feature": "values", + "range": [1.0, 1.5] if i % 2 == 0 else [1.0, 0.7], + }, + }, + # Invert - beat triggers + { + "effect": "invert", + "effect_path": str(self.effects_registry.get("invert", "")), + "amount": { + "_binding": True, + "source": "live_beat", + "feature": "values", + "range": [0, 1], + }, + }, + # Hue shift - energy drives hue + { + "effect": "hue_shift", + "effect_path": str(self.effects_registry.get("hue_shift", "")), + 
"degrees": { + "_binding": True, + "source": "live_energy", + "feature": "values", + "range": [0, 180], + }, + }, + # ASCII art - energy drives char size, beat triggers mix + { + "effect": "ascii_art", + "effect_path": str(self.effects_registry.get("ascii_art", "")), + "char_size": { + "_binding": True, + "source": "live_energy", + "feature": "values", + "range": [4, 32], + }, + "mix": { + "_binding": True, + "source": "live_beat", + "feature": "values", + "range": [0, 1], + }, + }, + ] + effects_per_source.append(effects) + + return effects_per_source + + def _extract_effects(self) -> List[List[Dict]]: + """Extract effect chains for each source (legacy, pre-computed analysis).""" + # Simplified: find EFFECT nodes and their configs + effects_per_source = [] + + for node_id, path in self.sources.items(): + if path.suffix.lower() not in ('.mp4', '.webm', '.mov', '.avi', '.mkv'): + continue + + # Find effects that depend on this source + # This is simplified - real implementation would trace the DAG + effects = [] + + for node in self.compiled.nodes: + if node.get("type") == "EFFECT": + config = node.get("config", {}) + effect_name = config.get("effect") + if effect_name and effect_name in self.effects_registry: + effect_config = { + "effect": effect_name, + "effect_path": str(self.effects_registry[effect_name]), + } + # Copy only effect params (filter out internal fields) + internal_fields = ( + "effect", "effect_path", "cid", "effect_cid", + "effects_registry", "analysis_refs", "inputs", + ) + for k, v in config.items(): + if k not in internal_fields: + effect_config[k] = v + effects.append(effect_config) + break # One effect per source for now + + effects_per_source.append(effects) + + return effects_per_source + + def _extract_compositor_config(self, analysis_data: Dict) -> Dict: + """Extract compositor configuration.""" + # Look for blend_multi or similar composition nodes + for node in self.compiled.nodes: + if node.get("type") == "EFFECT": + config = 
node.get("config", {}) + if config.get("effect") == "blend_multi": + return { + "mode": config.get("mode", "alpha"), + "weights": config.get("weights", []), + } + + # Default: equal blend + n_sources = len([p for p in self.sources.values() + if p.suffix.lower() in ('.mp4', '.webm', '.mov', '.avi', '.mkv')]) + return { + "mode": "alpha", + "weights": [1.0 / n_sources] * n_sources if n_sources > 0 else [1.0], + } + + def run( + self, + output: str = "preview", + duration: float = None, + fps: float = None, + ): + """ + Run the recipe through streaming compositor. + + Everything streams: video frames read on-demand, audio analyzed in real-time. + No pre-computation. + + Args: + output: "preview", filename, or Output object + duration: Duration in seconds (default: audio duration) + fps: Frame rate (default from recipe, or 30) + """ + # Build compositor with recipe executor for full pipeline + from .recipe_executor import StreamingRecipeExecutor + + compositor = self.build_compositor(analysis_data={}, fps=fps) + + # Use audio duration if not specified + if duration is None: + if compositor._audio_analyzer: + duration = compositor._audio_analyzer.duration + print(f"Using audio duration: {duration:.1f}s", file=sys.stderr) + else: + # Live mode - run until quit + print("Live mode - press 'q' to quit", file=sys.stderr) + + # Create sexp executor that interprets the recipe + from .sexp_executor import SexpStreamingExecutor + executor = SexpStreamingExecutor(self.compiled, seed=42) + + compositor.run(output=output, duration=duration, recipe_executor=executor) + + +def run_recipe( + recipe_path: str, + output: str = "preview", + duration: float = None, + params: Dict = None, + fps: float = None, +): + """ + Run a recipe through streaming compositor. + + Everything streams in real-time: video frames, audio analysis. + No pre-computation - starts immediately. 
+ + Example: + run_recipe("effects/quick_test.sexp", output="preview", duration=30) + run_recipe("effects/quick_test.sexp", output="preview", fps=5) # Lower fps for slow systems + """ + adapter = RecipeAdapter(recipe_path, params=params) + adapter.run(output=output, duration=duration, fps=fps) + + +def run_recipe_piped( + recipe_path: str, + duration: float = None, + params: Dict = None, + fps: float = None, +): + """ + Run recipe and pipe directly to mpv. + """ + from .output import PipeOutput + + adapter = RecipeAdapter(recipe_path, params=params) + compositor = adapter.build_compositor(analysis_data={}, fps=fps) + + # Get frame size + if compositor.sources: + first_source = compositor.sources[0] + w, h = first_source._size + else: + w, h = 720, 720 + + actual_fps = fps or adapter.compiled.encoding.get('fps', 30) + + # Create pipe output + pipe_out = PipeOutput( + size=(w, h), + fps=actual_fps, + audio_source=compositor._audio_source + ) + + # Create executor + from .sexp_executor import SexpStreamingExecutor + executor = SexpStreamingExecutor(adapter.compiled, seed=42) + + # Run with pipe output + compositor.run(output=pipe_out, duration=duration, recipe_executor=executor) + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser(description="Run sexp recipe with streaming compositor") + parser.add_argument("recipe", help="Path to .sexp recipe file") + parser.add_argument("-o", "--output", default="pipe", + help="Output: 'pipe' (mpv), 'preview', or filename (default: pipe)") + parser.add_argument("-d", "--duration", type=float, default=None, + help="Duration in seconds (default: audio duration)") + parser.add_argument("--fps", type=float, default=None, + help="Frame rate (default: from recipe)") + args = parser.parse_args() + + if args.output == "pipe": + run_recipe_piped(args.recipe, duration=args.duration, fps=args.fps) + else: + run_recipe(args.recipe, output=args.output, duration=args.duration, fps=args.fps) diff --git 
"""
Streaming recipe executor.

Implements the full recipe logic for real-time streaming:
- Scans (state machines that evolve on beats)
- Process-pair template (two clips with sporadic effects, blended)
- Cycle-crossfade (dynamic composition cycling through video pairs)
"""

import random
import numpy as np
from typing import Dict, List, Any, Optional
from dataclasses import dataclass, field


@dataclass
class ScanState:
    """State for a scan (beat-driven state machine)."""
    value: Any = 0
    rng: random.Random = field(default_factory=random.Random)


class StreamingScans:
    """
    Real-time scan executor.

    Scans are state machines that evolve on each beat.
    They drive effect parameters like invert triggers, hue shifts, etc.
    """

    def __init__(self, seed: int = 42, n_sources: int = 4):
        self.master_seed = seed
        self.n_sources = n_sources
        self.scans: Dict[str, ScanState] = {}
        self.beat_count = 0
        self.current_time = 0.0
        self.last_beat_time = 0.0
        self._init_scans()

    def _init_scans(self):
        """Initialize all scans, each with its own deterministic RNG seed."""
        scan_names = []

        # Per-pair scans (dynamic based on n_sources)
        for i in range(self.n_sources):
            scan_names.extend([
                f"inv_a_{i}", f"inv_b_{i}", f"hue_a_{i}", f"hue_b_{i}",
                f"ascii_a_{i}", f"ascii_b_{i}", f"pair_mix_{i}", f"pair_rot_{i}",
            ])

        # Global scans
        scan_names.extend(["whole_spin", "ripple_gate", "cycle"])

        for i, name in enumerate(scan_names):
            # Offset the master seed per scan so each evolves independently
            # but reproducibly.
            rng = random.Random(self.master_seed + i)
            self.scans[name] = ScanState(value=self._init_value(name), rng=rng)

    def _init_value(self, name: str) -> Any:
        """Return the initial state value for the named scan."""
        if name.startswith("inv_") or name.startswith("ascii_"):
            return 0  # Counter for remaining beats
        elif name.startswith("hue_"):
            return {"rem": 0, "hue": 0}
        elif name.startswith("pair_mix"):
            return {"rem": 0, "opacity": 0.5}
        elif name.startswith("pair_rot"):
            # Alternate initial rotation direction by pair index.
            pair_idx = int(name.split("_")[-1])
            rot_dir = 1 if pair_idx % 2 == 0 else -1
            return {"beat": 0, "clen": 25, "dir": rot_dir, "angle": 0}
        elif name == "whole_spin":
            return {
                "phase": 0,              # 0 = waiting, 1 = spinning
                "beat": 0,               # beats into current phase
                "plen": 20,              # beats in this phase
                "dir": 1,                # spin direction
                "total_angle": 0.0,      # cumulative angle after all spins
                "spin_start_angle": 0.0, # angle when current spin started
                "spin_start_time": 0.0,  # time when current spin started
                "spin_end_time": 0.0,    # estimated time when spin ends
            }
        elif name == "ripple_gate":
            return {"rem": 0, "cx": 0.5, "cy": 0.5}
        elif name == "cycle":
            return {"cycle": 0, "beat": 0, "clen": 60}
        return 0

    def on_beat(self):
        """Advance every scan's state machine by one beat."""
        self.beat_count += 1
        # Estimate beat interval from last two beats
        beat_interval = self.current_time - self.last_beat_time if self.last_beat_time > 0 else 0.5
        self.last_beat_time = self.current_time

        for name, state in self.scans.items():
            state.value = self._step_scan(name, state.value, state.rng, beat_interval)

    def _step_scan(self, name: str, value: Any, rng: random.Random, beat_interval: float = 0.5) -> Any:
        """Step a scan forward by one beat and return its new state value."""

        # Invert scan: 10% chance, lasts 1-5 beats
        if name.startswith("inv_"):
            if value > 0:
                return value - 1
            elif rng.random() < 0.1:
                return rng.randint(1, 5)
            return 0

        # Hue scan: 10% chance, random hue 30-330, lasts 1-5 beats
        elif name.startswith("hue_"):
            if value["rem"] > 0:
                return {"rem": value["rem"] - 1, "hue": value["hue"]}
            elif rng.random() < 0.1:
                return {"rem": rng.randint(1, 5), "hue": rng.uniform(30, 330)}
            return {"rem": 0, "hue": 0}

        # ASCII scan: 5% chance, lasts 1-3 beats
        elif name.startswith("ascii_"):
            if value > 0:
                return value - 1
            elif rng.random() < 0.05:
                return rng.randint(1, 3)
            return 0

        # Pair mix: changes every 1-11 beats
        elif name.startswith("pair_mix"):
            if value["rem"] > 0:
                return {"rem": value["rem"] - 1, "opacity": value["opacity"]}
            return {"rem": rng.randint(1, 11), "opacity": rng.choice([0, 0.5, 1.0])}

        # Pair rotation: full rotation every 20-30 beats
        elif name.startswith("pair_rot"):
            beat = value["beat"]
            clen = value["clen"]
            dir_ = value["dir"]
            angle = value["angle"]

            if beat + 1 < clen:
                new_angle = angle + dir_ * (360 / clen)
                return {"beat": beat + 1, "clen": clen, "dir": dir_, "angle": new_angle}
            else:
                # Cycle complete: pick a new length and reverse direction.
                return {"beat": 0, "clen": rng.randint(20, 30), "dir": -dir_, "angle": angle}

        # Whole spin: sporadic 720 degree spins (cumulative - stays rotated)
        elif name == "whole_spin":
            phase = value["phase"]
            beat = value["beat"]
            plen = value["plen"]
            dir_ = value["dir"]
            total_angle = value.get("total_angle", 0.0)
            spin_start_angle = value.get("spin_start_angle", 0.0)
            spin_start_time = value.get("spin_start_time", 0.0)
            spin_end_time = value.get("spin_end_time", 0.0)

            if phase == 1:
                # Currently spinning
                if beat + 1 < plen:
                    return {
                        "phase": 1, "beat": beat + 1, "plen": plen, "dir": dir_,
                        "total_angle": total_angle,
                        "spin_start_angle": spin_start_angle,
                        "spin_start_time": spin_start_time,
                        "spin_end_time": spin_end_time,
                    }
                else:
                    # Spin complete - update total_angle with final spin
                    new_total = spin_start_angle + dir_ * 720.0
                    return {
                        "phase": 0, "beat": 0, "plen": rng.randint(20, 40), "dir": dir_,
                        "total_angle": new_total,
                        "spin_start_angle": new_total,
                        "spin_start_time": self.current_time,
                        "spin_end_time": self.current_time,
                    }
            else:
                # Waiting phase
                if beat + 1 < plen:
                    return {
                        "phase": 0, "beat": beat + 1, "plen": plen, "dir": dir_,
                        "total_angle": total_angle,
                        "spin_start_angle": spin_start_angle,
                        "spin_start_time": spin_start_time,
                        "spin_end_time": spin_end_time,
                    }
                else:
                    # Start new spin; estimate its wall-clock end from the
                    # current beat interval so emit() can interpolate smoothly.
                    new_dir = 1 if rng.random() < 0.5 else -1
                    new_plen = rng.randint(10, 25)
                    spin_duration = new_plen * beat_interval
                    return {
                        "phase": 1, "beat": 0, "plen": new_plen, "dir": new_dir,
                        "total_angle": total_angle,
                        "spin_start_angle": total_angle,
                        "spin_start_time": self.current_time,
                        "spin_end_time": self.current_time + spin_duration,
                    }

        # Ripple gate: 5% chance, lasts 1-20 beats
        elif name == "ripple_gate":
            if value["rem"] > 0:
                return {"rem": value["rem"] - 1, "cx": value["cx"], "cy": value["cy"]}
            elif rng.random() < 0.05:
                return {"rem": rng.randint(1, 20),
                        "cx": rng.uniform(0.1, 0.9),
                        "cy": rng.uniform(0.1, 0.9)}
            return {"rem": 0, "cx": 0.5, "cy": 0.5}

        # Cycle: track which video pair is active
        elif name == "cycle":
            beat = value["beat"]
            clen = value["clen"]
            cycle = value["cycle"]

            if beat + 1 < clen:
                return {"cycle": cycle, "beat": beat + 1, "clen": clen}
            else:
                # Move to next pair, vary cycle length
                return {"cycle": (cycle + 1) % 4, "beat": 0,
                        "clen": 40 + (self.beat_count * 7) % 41}

        return value

    def get_emit(self, name: str) -> Any:
        """
        Return the emitted value for a scan.

        Most scans emit a float, but the "cycle" scan emits its raw state
        dict ({"cycle", "beat", "clen"}) which callers index directly —
        the original `-> float` annotation was wrong and is corrected here.
        """
        value = self.scans[name].value

        if name.startswith("inv_") or name.startswith("ascii_"):
            return 1.0 if value > 0 else 0.0

        elif name.startswith("hue_"):
            return value["hue"] if value["rem"] > 0 else 0.0

        elif name.startswith("pair_mix"):
            return value["opacity"]

        elif name.startswith("pair_rot"):
            return value["angle"]

        elif name == "whole_spin":
            # Smooth time-based interpolation during spin
            phase = value.get("phase", 0)
            if phase == 1:
                # Currently spinning - interpolate based on time
                spin_start_time = value.get("spin_start_time", 0.0)
                spin_end_time = value.get("spin_end_time", spin_start_time + 1.0)
                spin_start_angle = value.get("spin_start_angle", 0.0)
                dir_ = value.get("dir", 1)

                duration = spin_end_time - spin_start_time
                if duration > 0:
                    progress = (self.current_time - spin_start_time) / duration
                    progress = max(0.0, min(1.0, progress))  # clamp to 0-1
                else:
                    progress = 1.0

                return spin_start_angle + progress * 720.0 * dir_
            else:
                # Not spinning - return cumulative angle
                return value.get("total_angle", 0.0)

        elif name == "ripple_gate":
            return 1.0 if value["rem"] > 0 else 0.0

        elif name == "cycle":
            return value

        return 0.0
class StreamingRecipeExecutor:
    """
    Executes a recipe in streaming mode.

    Implements:
    - process-pair: two video clips with opposite effects, blended
    - cycle-crossfade: dynamic cycling through video pairs
    - Final effects: whole-spin rotation, ripple
    """

    def __init__(self, n_sources: int = 4, seed: int = 42):
        self.n_sources = n_sources
        self.scans = StreamingScans(seed, n_sources=n_sources)
        self.last_beat_detected = False
        self.current_time = 0.0

    def on_frame(self, energy: float, is_beat: bool, t: float = 0.0):
        """Per-frame hook: keep time in sync and fire scans on beat edges."""
        self.current_time = t
        self.scans.current_time = t
        # Only the rising edge of the beat flag advances the scans.
        fresh_beat = is_beat and not self.last_beat_detected
        if fresh_beat:
            self.scans.on_beat()
        self.last_beat_detected = is_beat

    def get_effect_params(self, source_idx: int, clip: str, energy: float) -> Dict:
        """
        Get effect parameters for a source clip.

        Args:
            source_idx: Which video source (0-3)
            clip: "a" or "b" (each source has two clips)
            energy: Current audio energy (0-1)
        """
        tag = f"_{source_idx}"

        # Even sources treat clip "a" as the positive direction; odd sources
        # flip it. The two clips of a pair always mirror each other.
        positive = (source_idx % 2 == 0) == (clip == "a")
        rot_hi = 45.0 if positive else -45.0
        zoom_hi = 1.5 if positive else 0.5

        return {
            "rotate_angle": energy * rot_hi,
            "zoom_amount": 1.0 + energy * (zoom_hi - 1.0),
            "invert_amount": self.scans.get_emit(f"inv_{clip}{tag}"),
            "hue_degrees": self.scans.get_emit(f"hue_{clip}{tag}"),
            "ascii_mix": 0,  # Disabled - too slow without GPU
            "ascii_char_size": 4 + energy * 28,  # 4-32
        }

    def get_pair_params(self, source_idx: int) -> Dict:
        """Get blend and rotation params for a video pair."""
        tag = f"_{source_idx}"
        return {
            "blend_opacity": self.scans.get_emit(f"pair_mix{tag}"),
            "pair_rotation": self.scans.get_emit(f"pair_rot{tag}"),
        }

    def get_cycle_weights(self) -> List[float]:
        """Get normalized blend weights for the cycle-crossfade composition."""
        state = self.scans.get_emit("cycle")
        active, beat, clen = state["cycle"], state["beat"], state["clen"]
        count = self.n_sources
        tri = beat * 3  # three sub-phases per cycle length

        raw = []
        for idx in range(count):
            before = (idx - 1) % count
            if active == idx:
                # Active pair: full weight, then fade out.
                if tri < clen:
                    weight = 0.9
                elif tri < clen * 2:
                    weight = 0.9 - ((tri - clen) / clen) * 0.85
                else:
                    weight = 0.05
            elif active == before:
                # Successor pair: fades in as the active one fades out.
                if tri < clen:
                    weight = 0.05
                elif tri < clen * 2:
                    weight = 0.05 + ((tri - clen) / clen) * 0.85
                else:
                    weight = 0.9
            else:
                weight = 0.05
            raw.append(weight)

        total = sum(raw)
        return [w / total for w in raw] if total > 0 else raw

    def get_cycle_zooms(self) -> List[float]:
        """Get per-layer zoom amounts for the cycle-crossfade."""
        state = self.scans.get_emit("cycle")
        active, beat, clen = state["cycle"], state["beat"], state["clen"]
        count = self.n_sources
        tri = beat * 3

        zooms = []
        for idx in range(count):
            before = (idx - 1) % count
            if active == idx:
                if tri < clen:
                    z = 1.0
                elif tri < clen * 2:
                    z = 1.0 + ((tri - clen) / clen) * 1.0
                else:
                    z = 0.1
            elif active == before:
                if tri < clen:
                    z = 3.0  # incoming layer starts big
                elif tri < clen * 2:
                    z = 3.0 - ((tri - clen) / clen) * 2.0  # shrink to 1.0
                else:
                    z = 1.0
            else:
                z = 0.1
            zooms.append(z)

        return zooms

    def get_final_effects(self, energy: float) -> Dict:
        """Get final composition effects (whole-spin, ripple)."""
        gate = self.scans.get_emit("ripple_gate")
        center = self.scans.scans["ripple_gate"].value

        return {
            "whole_spin_angle": self.scans.get_emit("whole_spin"),
            "ripple_amplitude": gate * (5 + energy * 45),  # 5-50 when gated on
            "ripple_cx": center["cx"],
            "ripple_cy": center["cy"],
        }
@dataclass
class ScanState:
    """Runtime state for a scan."""
    node_id: str
    name: Optional[str]
    value: Any
    rng: random.Random
    init_expr: dict
    step_expr: dict
    emit_expr: dict


class ExprEvaluator:
    """
    Evaluates compiled expression ASTs.

    Expressions are dicts with:
    - _expr: True (marks as expression)
    - op: operation name
    - args: list of arguments
    - name: for 'var' ops
    - keys: for 'dict' ops

    Anything that is not such a dict evaluates to itself.
    """

    # Strict binary operators: both operands are evaluated, then combined.
    _BINARY = {
        '<': lambda a, b: a < b,
        '>': lambda a, b: a > b,
        '<=': lambda a, b: a <= b,
        '>=': lambda a, b: a >= b,
        '=': lambda a, b: a == b,
        '!=': lambda a, b: a != b,
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
        '/': lambda a, b: a / b,
        'mod': lambda a, b: a % b,
    }

    def __init__(self, rng: random.Random = None):
        self.rng = rng or random.Random()

    def eval(self, expr: Any, env: Dict[str, Any]) -> Any:
        """Evaluate an expression in the given environment."""
        # Literals and plain (non-expression) dicts pass through untouched.
        if not isinstance(expr, dict) or not expr.get('_expr'):
            return expr

        op = expr.get('op')
        args = expr.get('args', [])

        # Strict binary ops share one dispatch table.
        if op in self._BINARY:
            return self._BINARY[op](self.eval(args[0], env),
                                    self.eval(args[1], env))

        if op == 'var':
            name = expr.get('name')
            if name in env:
                return env[name]
            raise KeyError(f"Unknown variable: {name}")

        if op == 'dict':
            keys = expr.get('keys', [])
            return dict(zip(keys, (self.eval(a, env) for a in args)))

        if op == 'get':
            container = self.eval(args[0], env)
            key = args[1]
            return container.get(key) if isinstance(container, dict) else container[key]

        if op == 'if':
            # Lazy: only the taken branch is evaluated.
            if self.eval(args[0], env):
                return self.eval(args[1], env)
            if len(args) > 2:
                return self.eval(args[2], env)
            return None

        # Random ops
        if op == 'rand':
            return self.rng.random()
        if op == 'rand-int':
            return self.rng.randint(self.eval(args[0], env), self.eval(args[1], env))
        if op == 'rand-range':
            return self.rng.uniform(self.eval(args[0], env), self.eval(args[1], env))

        # Logic ops (short-circuit, like the original all()/any() forms)
        if op == 'and':
            return all(self.eval(a, env) for a in args)
        if op == 'or':
            return any(self.eval(a, env) for a in args)
        if op == 'not':
            return not self.eval(args[0], env)

        raise ValueError(f"Unknown operation: {op}")
self.eval(args[1], env) + elif op == 'mod': + return self.eval(args[0], env) % self.eval(args[1], env) + + # Random ops + elif op == 'rand': + return self.rng.random() + elif op == 'rand-int': + lo = self.eval(args[0], env) + hi = self.eval(args[1], env) + return self.rng.randint(lo, hi) + elif op == 'rand-range': + lo = self.eval(args[0], env) + hi = self.eval(args[1], env) + return self.rng.uniform(lo, hi) + + # Logic ops + elif op == 'and': + return all(self.eval(a, env) for a in args) + elif op == 'or': + return any(self.eval(a, env) for a in args) + elif op == 'not': + return not self.eval(args[0], env) + + else: + raise ValueError(f"Unknown operation: {op}") + + +class SexpStreamingExecutor: + """ + Executes a compiled sexp recipe in streaming mode. + + Reads scan definitions, effect chains, and bindings from the + compiled recipe and executes them frame-by-frame. + """ + + def __init__(self, compiled_recipe, seed: int = 42): + self.recipe = compiled_recipe + self.master_seed = seed + + # Build node lookup + self.nodes = {n['id']: n for n in compiled_recipe.nodes} + + # State (must be initialized before _init_scans) + self.beat_count = 0 + self.current_time = 0.0 + self.last_beat_time = 0.0 + self.last_beat_detected = False + self.energy = 0.0 + + # Initialize scans + self.scans: Dict[str, ScanState] = {} + self.scan_outputs: Dict[str, Any] = {} # Current emit values by node_id + self._init_scans() + + # Initialize SLICE_ON interpreter + self.sexp_interp = SexpInterpreter(random.Random(seed)) + self._slice_on_lambda = None + self._slice_on_acc = None + self._slice_on_result = None # Last evaluation result {layers, compose, acc} + self._init_slice_on() + + def _init_slice_on(self): + """Initialize SLICE_ON Lambda for cycle crossfade.""" + for node in self.recipe.nodes: + if node.get('type') == 'SLICE_ON': + config = node.get('config', {}) + self._slice_on_lambda = config.get('fn') + init = config.get('init', {}) + self._slice_on_acc = { + 'cycle': 
init.get('cycle', 0), + 'beat': init.get('beat', 0), + 'clen': init.get('clen', 60), + } + # Evaluate initial state + self._eval_slice_on() + break + + def _eval_slice_on(self): + """Evaluate the SLICE_ON Lambda with current state.""" + if not self._slice_on_lambda: + return + + n = len(self._get_video_sources()) + videos = list(range(n)) # Placeholder video indices + + try: + result = eval_slice_on_lambda( + self._slice_on_lambda, + self._slice_on_acc, + self.beat_count, + 0.0, # start time (not used for weights) + 1.0, # end time (not used for weights) + videos, + self.sexp_interp, + ) + self._slice_on_result = result + # Update accumulator for next beat + if 'acc' in result: + self._slice_on_acc = result['acc'] + except Exception as e: + import sys + print(f"SLICE_ON eval error: {e}", file=sys.stderr) + + def _init_scans(self): + """Initialize all scan nodes from the recipe.""" + seed_offset = 0 + for node in self.recipe.nodes: + if node.get('type') == 'SCAN': + node_id = node['id'] + config = node.get('config', {}) + + # Create RNG with unique seed + scan_seed = config.get('seed', self.master_seed + seed_offset) + rng = random.Random(scan_seed) + seed_offset += 1 + + # Evaluate initial value + init_expr = config.get('init', 0) + evaluator = ExprEvaluator(rng) + init_value = evaluator.eval(init_expr, {}) + + self.scans[node_id] = ScanState( + node_id=node_id, + name=node.get('name'), + value=init_value, + rng=rng, + init_expr=init_expr, + step_expr=config.get('step_expr', {}), + emit_expr=config.get('emit_expr', {}), + ) + + # Compute initial emit + self._update_emit(node_id) + + def _update_emit(self, node_id: str): + """Update the emit value for a scan.""" + scan = self.scans[node_id] + evaluator = ExprEvaluator(scan.rng) + + # Build environment from current state + env = self._build_scan_env(scan) + + # Evaluate emit expression + emit_value = evaluator.eval(scan.emit_expr, env) + self.scan_outputs[node_id] = emit_value + + def _build_scan_env(self, scan: 
ScanState) -> Dict[str, Any]: + """Build environment for scan expression evaluation.""" + env = {} + + # Add state variables + if isinstance(scan.value, dict): + env.update(scan.value) + else: + env['acc'] = scan.value + + # Add beat count + env['beat_count'] = self.beat_count + env['time'] = self.current_time + + return env + + def on_beat(self): + """Update all scans on a beat.""" + self.beat_count += 1 + + # Estimate beat interval + beat_interval = self.current_time - self.last_beat_time if self.last_beat_time > 0 else 0.5 + self.last_beat_time = self.current_time + + # Step each scan + for node_id, scan in self.scans.items(): + evaluator = ExprEvaluator(scan.rng) + env = self._build_scan_env(scan) + + # Evaluate step expression + new_value = evaluator.eval(scan.step_expr, env) + scan.value = new_value + + # Update emit + self._update_emit(node_id) + + # Step the cycle state + self._step_cycle() + + def on_frame(self, energy: float, is_beat: bool, t: float = 0.0): + """Called each frame with audio analysis.""" + self.current_time = t + self.energy = energy + + # Update scans on beat (edge detection) + if is_beat and not self.last_beat_detected: + self.on_beat() + self.last_beat_detected = is_beat + + def resolve_binding(self, binding: dict) -> Any: + """Resolve a binding to get the current value.""" + if not isinstance(binding, dict) or not binding.get('_binding'): + return binding + + source_id = binding.get('source') + feature = binding.get('feature', 'values') + range_map = binding.get('range') + + # Get the raw value + if source_id in self.scan_outputs: + value = self.scan_outputs[source_id] + else: + # Might be an analyzer reference - use energy as fallback + value = self.energy + + # Extract feature if value is a dict + if isinstance(value, dict) and feature in value: + value = value[feature] + + # Apply range mapping + if range_map and isinstance(value, (int, float)): + lo, hi = range_map + value = lo + value * (hi - lo) + + return value + + def 
get_effect_params(self, effect_node: dict) -> Dict[str, Any]: + """Get resolved parameters for an effect node.""" + config = effect_node.get('config', {}) + params = {} + + for key, value in config.items(): + # Skip internal fields + if key in ('effect', 'effect_path', 'effect_cid', 'effects_registry', 'analysis_refs'): + continue + + # Resolve bindings + params[key] = self.resolve_binding(value) + + return params + + def get_scan_value(self, name: str) -> Any: + """Get scan output by name.""" + for node_id, scan in self.scans.items(): + if scan.name == name: + return self.scan_outputs.get(node_id) + return None + + def get_all_scan_values(self) -> Dict[str, Any]: + """Get all named scan outputs.""" + result = {} + for node_id, scan in self.scans.items(): + if scan.name: + result[scan.name] = self.scan_outputs.get(node_id) + return result + + # === Compositor interface methods === + + def _get_video_sources(self) -> List[str]: + """Get list of video source node IDs.""" + sources = [] + for node in self.recipe.nodes: + if node.get('type') == 'SOURCE': + sources.append(node['id']) + # Filter to video only (exclude audio - last one is usually audio) + # Look at file extensions in the paths + return sources[:-1] if len(sources) > 1 else sources + + def _trace_effect_chain(self, start_id: str, stop_at_blend: bool = True) -> List[dict]: + """Trace effect chain from a node, returning effects in order.""" + chain = [] + current_id = start_id + + for _ in range(20): # Max depth + # Find node that uses current as input + next_node = None + for node in self.recipe.nodes: + if current_id in node.get('inputs', []): + if node.get('type') == 'EFFECT': + effect_type = node.get('config', {}).get('effect') + chain.append(node) + if stop_at_blend and effect_type == 'blend': + return chain + next_node = node + break + elif node.get('type') == 'SEGMENT': + next_node = node + break + + if next_node is None: + break + current_id = next_node['id'] + + return chain + + def 
_find_clip_chains(self, source_idx: int) -> tuple: + """Find effect chains for clip A and B from a source.""" + sources = self._get_video_sources() + if source_idx >= len(sources): + return [], [] + + source_id = sources[source_idx] + + # Find SEGMENT node + segment_id = None + for node in self.recipe.nodes: + if node.get('type') == 'SEGMENT' and source_id in node.get('inputs', []): + segment_id = node['id'] + break + + if not segment_id: + return [], [] + + # Find the two effect chains from segment (clip A and clip B) + chains = [] + for node in self.recipe.nodes: + if segment_id in node.get('inputs', []) and node.get('type') == 'EFFECT': + chain = self._trace_effect_chain(segment_id) + # Get chain starting from this specific branch + branch_chain = [node] + current = node['id'] + for _ in range(10): + found = False + for n in self.recipe.nodes: + if current in n.get('inputs', []) and n.get('type') == 'EFFECT': + branch_chain.append(n) + if n.get('config', {}).get('effect') == 'blend': + break + current = n['id'] + found = True + break + if not found: + break + chains.append(branch_chain) + + # Return first two chains as A and B + chain_a = chains[0] if len(chains) > 0 else [] + chain_b = chains[1] if len(chains) > 1 else [] + return chain_a, chain_b + + def get_effect_params(self, source_idx: int, clip: str, energy: float) -> Dict: + """Get effect parameters for a source clip (compositor interface).""" + # Get the correct chain for this clip + chain_a, chain_b = self._find_clip_chains(source_idx) + chain = chain_a if clip == 'a' else chain_b + + # Default params + params = { + "rotate_angle": 0, + "zoom_amount": 1.0, + "invert_amount": 0, + "hue_degrees": 0, + "ascii_mix": 0, + "ascii_char_size": 8, + } + + # Resolve from effects in chain + for eff in chain: + config = eff.get('config', {}) + effect_type = config.get('effect') + + if effect_type == 'rotate': + angle_binding = config.get('angle') + if angle_binding: + if isinstance(angle_binding, dict) and 
angle_binding.get('_binding'): + # Bound to analyzer - use energy with range + range_map = angle_binding.get('range') + if range_map: + lo, hi = range_map + params["rotate_angle"] = lo + energy * (hi - lo) + else: + params["rotate_angle"] = self.resolve_binding(angle_binding) + else: + params["rotate_angle"] = angle_binding if isinstance(angle_binding, (int, float)) else 0 + + elif effect_type == 'zoom': + amount_binding = config.get('amount') + if amount_binding: + if isinstance(amount_binding, dict) and amount_binding.get('_binding'): + range_map = amount_binding.get('range') + if range_map: + lo, hi = range_map + params["zoom_amount"] = lo + energy * (hi - lo) + else: + params["zoom_amount"] = self.resolve_binding(amount_binding) + else: + params["zoom_amount"] = amount_binding if isinstance(amount_binding, (int, float)) else 1.0 + + elif effect_type == 'invert': + amount_binding = config.get('amount') + if amount_binding: + val = self.resolve_binding(amount_binding) + params["invert_amount"] = val if isinstance(val, (int, float)) else 0 + + elif effect_type == 'hue_shift': + deg_binding = config.get('degrees') + if deg_binding: + val = self.resolve_binding(deg_binding) + params["hue_degrees"] = val if isinstance(val, (int, float)) else 0 + + elif effect_type == 'ascii_art': + mix_binding = config.get('mix') + if mix_binding: + val = self.resolve_binding(mix_binding) + params["ascii_mix"] = val if isinstance(val, (int, float)) else 0 + size_binding = config.get('char_size') + if size_binding: + if isinstance(size_binding, dict) and size_binding.get('_binding'): + range_map = size_binding.get('range') + if range_map: + lo, hi = range_map + params["ascii_char_size"] = lo + energy * (hi - lo) + + return params + + def get_pair_params(self, source_idx: int) -> Dict: + """Get blend and rotation params for a video pair (compositor interface).""" + params = { + "blend_opacity": 0.5, + "pair_rotation": 0, + } + + # Find the blend node for this source + chain_a, _ = 
self._find_clip_chains(source_idx) + + # The last effect in chain_a should be the blend + blend_node = None + for eff in reversed(chain_a): + if eff.get('config', {}).get('effect') == 'blend': + blend_node = eff + break + + if blend_node: + config = blend_node.get('config', {}) + opacity_binding = config.get('opacity') + if opacity_binding: + val = self.resolve_binding(opacity_binding) + if isinstance(val, (int, float)): + params["blend_opacity"] = val + + # Find rotate after blend (pair rotation) + blend_id = blend_node['id'] + for node in self.recipe.nodes: + if blend_id in node.get('inputs', []) and node.get('type') == 'EFFECT': + if node.get('config', {}).get('effect') == 'rotate': + angle_binding = node.get('config', {}).get('angle') + if angle_binding: + val = self.resolve_binding(angle_binding) + if isinstance(val, (int, float)): + params["pair_rotation"] = val + break + + return params + + def _get_cycle_state(self) -> dict: + """Get current cycle state from SLICE_ON or internal tracking.""" + if not hasattr(self, '_cycle_state'): + # Initialize from SLICE_ON node + for node in self.recipe.nodes: + if node.get('type') == 'SLICE_ON': + init = node.get('config', {}).get('init', {}) + self._cycle_state = { + 'cycle': init.get('cycle', 0), + 'beat': init.get('beat', 0), + 'clen': init.get('clen', 60), + } + break + else: + self._cycle_state = {'cycle': 0, 'beat': 0, 'clen': 60} + + return self._cycle_state + + def _step_cycle(self): + """Step the cycle state forward on beat by evaluating SLICE_ON Lambda.""" + # Use interpreter to evaluate the Lambda + self._eval_slice_on() + + def get_cycle_weights(self) -> List[float]: + """Get blend weights for cycle-crossfade from SLICE_ON result.""" + n = len(self._get_video_sources()) + if n == 0: + return [1.0] + + # Get weights from interpreted result + if self._slice_on_result: + compose = self._slice_on_result.get('compose', {}) + weights = compose.get('weights', []) + if weights and len(weights) == n: + # Normalize + 
total = sum(weights) + if total > 0: + return [w / total for w in weights] + + # Fallback: equal weights + return [1.0 / n] * n + + def get_cycle_zooms(self) -> List[float]: + """Get zoom amounts for cycle-crossfade from SLICE_ON result.""" + n = len(self._get_video_sources()) + if n == 0: + return [1.0] + + # Get zooms from interpreted result (layers -> effects -> zoom amount) + if self._slice_on_result: + layers = self._slice_on_result.get('layers', []) + if layers and len(layers) == n: + zooms = [] + for layer in layers: + effects = layer.get('effects', []) + zoom_amt = 1.0 + for eff in effects: + if eff.get('effect') == 'zoom' or (hasattr(eff.get('effect'), 'name') and eff.get('effect').name == 'zoom'): + zoom_amt = eff.get('amount', 1.0) + break + zooms.append(zoom_amt) + return zooms + + # Fallback + return [1.0] * n + + def _get_final_rotate_scan_id(self) -> str: + """Find the scan ID that drives the final rotation (after SLICE_ON).""" + if hasattr(self, '_final_rotate_scan_id'): + return self._final_rotate_scan_id + + # Find SLICE_ON node index + slice_on_idx = None + for i, node in enumerate(self.recipe.nodes): + if node.get('type') == 'SLICE_ON': + slice_on_idx = i + break + + # Find rotate effect after SLICE_ON + if slice_on_idx is not None: + for node in self.recipe.nodes[slice_on_idx + 1:]: + if node.get('type') == 'EFFECT': + config = node.get('config', {}) + if config.get('effect') == 'rotate': + angle_binding = config.get('angle', {}) + if isinstance(angle_binding, dict) and angle_binding.get('_binding'): + self._final_rotate_scan_id = angle_binding.get('source') + return self._final_rotate_scan_id + + self._final_rotate_scan_id = None + return None + + def get_final_effects(self, energy: float) -> Dict: + """Get final composition effects (compositor interface).""" + # Get named scans + scan_values = self.get_all_scan_values() + + # Whole spin - get from the specific scan bound to final rotate effect + whole_spin = 0 + final_rotate_scan_id = 
"""
S-expression interpreter for streaming execution.

Evaluates sexp expressions including:
- let bindings
- lambda definitions and calls
- Arithmetic, comparison, logic operators
- dict/list operations
- Random number generation
"""

import random
from typing import Any, Dict, List, Callable
from dataclasses import dataclass


@dataclass
class Lambda:
    """A runtime lambda: parameter names, body expression, and the
    environment it closed over at definition time."""
    params: List[str]
    body: Any
    closure: Dict[str, Any]


class Symbol:
    """A named symbol reference within an s-expression."""

    def __init__(self, name: str):
        self.name = name

    def __repr__(self):
        return f"Symbol({self.name})"
+ """ + + def __init__(self, rng: random.Random = None): + self.rng = rng or random.Random() + self.globals: Dict[str, Any] = {} + + def eval(self, expr: Any, env: Dict[str, Any] = None) -> Any: + """Evaluate an expression in the given environment.""" + if env is None: + env = {} + + # Literals + if isinstance(expr, (int, float, str, bool)) or expr is None: + return expr + + # Symbol lookup + if isinstance(expr, Symbol) or (hasattr(expr, 'name') and hasattr(expr, '__class__') and expr.__class__.__name__ == 'Symbol'): + name = expr.name if hasattr(expr, 'name') else str(expr) + if name in env: + return env[name] + if name in self.globals: + return self.globals[name] + raise NameError(f"Undefined symbol: {name}") + + # Compiled expression dict (from compiler) + if isinstance(expr, dict): + if expr.get('_expr'): + return self._eval_compiled_expr(expr, env) + # Plain dict - evaluate values that might be expressions + result = {} + for k, v in expr.items(): + # Some keys should keep Symbol values as strings (effect names, modes) + if k in ('effect', 'mode') and hasattr(v, 'name'): + result[k] = v.name + else: + result[k] = self.eval(v, env) + return result + + # List expression (sexp) + if isinstance(expr, (list, tuple)) and len(expr) > 0: + return self._eval_list(expr, env) + + # Empty list + if isinstance(expr, (list, tuple)): + return [] + + return expr + + def _eval_compiled_expr(self, expr: dict, env: Dict[str, Any]) -> Any: + """Evaluate a compiled expression dict.""" + op = expr.get('op') + args = expr.get('args', []) + + if op == 'var': + name = expr.get('name') + if name in env: + return env[name] + if name in self.globals: + return self.globals[name] + raise NameError(f"Undefined: {name}") + + elif op == 'dict': + keys = expr.get('keys', []) + values = [self.eval(a, env) for a in args] + return dict(zip(keys, values)) + + elif op == 'get': + obj = self.eval(args[0], env) + key = args[1] + return obj.get(key) if isinstance(obj, dict) else obj[key] + + elif op 
== 'if': + cond = self.eval(args[0], env) + if cond: + return self.eval(args[1], env) + elif len(args) > 2: + return self.eval(args[2], env) + return None + + # Comparison + elif op == '<': + return self.eval(args[0], env) < self.eval(args[1], env) + elif op == '>': + return self.eval(args[0], env) > self.eval(args[1], env) + elif op == '<=': + return self.eval(args[0], env) <= self.eval(args[1], env) + elif op == '>=': + return self.eval(args[0], env) >= self.eval(args[1], env) + elif op == '=': + return self.eval(args[0], env) == self.eval(args[1], env) + elif op == '!=': + return self.eval(args[0], env) != self.eval(args[1], env) + + # Arithmetic + elif op == '+': + return self.eval(args[0], env) + self.eval(args[1], env) + elif op == '-': + return self.eval(args[0], env) - self.eval(args[1], env) + elif op == '*': + return self.eval(args[0], env) * self.eval(args[1], env) + elif op == '/': + return self.eval(args[0], env) / self.eval(args[1], env) + elif op == 'mod': + return self.eval(args[0], env) % self.eval(args[1], env) + + # Random + elif op == 'rand': + return self.rng.random() + elif op == 'rand-int': + return self.rng.randint(self.eval(args[0], env), self.eval(args[1], env)) + elif op == 'rand-range': + return self.rng.uniform(self.eval(args[0], env), self.eval(args[1], env)) + + # Logic + elif op == 'and': + return all(self.eval(a, env) for a in args) + elif op == 'or': + return any(self.eval(a, env) for a in args) + elif op == 'not': + return not self.eval(args[0], env) + + else: + raise ValueError(f"Unknown op: {op}") + + def _eval_list(self, expr: list, env: Dict[str, Any]) -> Any: + """Evaluate a list expression (sexp form).""" + if len(expr) == 0: + return [] + + head = expr[0] + + # Get head name + if isinstance(head, Symbol) or (hasattr(head, 'name') and hasattr(head, '__class__')): + head_name = head.name if hasattr(head, 'name') else str(head) + elif isinstance(head, str): + head_name = head + else: + # Not a symbol - check if it's a data 
list or function call + if isinstance(head, dict): + # List of dicts - evaluate each element as data + return [self.eval(item, env) for item in expr] + # Otherwise evaluate as function call + fn = self.eval(head, env) + args = [self.eval(a, env) for a in expr[1:]] + return self._call(fn, args, env) + + # Special forms + if head_name == 'let': + return self._eval_let(expr, env) + elif head_name in ('lambda', 'fn'): + return self._eval_lambda(expr, env) + elif head_name == 'if': + return self._eval_if(expr, env) + elif head_name == 'dict': + return self._eval_dict(expr, env) + elif head_name == 'get': + obj = self.eval(expr[1], env) + key = self.eval(expr[2], env) if len(expr) > 2 else expr[2] + if isinstance(key, str): + return obj.get(key) if isinstance(obj, dict) else getattr(obj, key, None) + return obj[key] + elif head_name == 'len': + return len(self.eval(expr[1], env)) + elif head_name == 'range': + start = self.eval(expr[1], env) + end = self.eval(expr[2], env) if len(expr) > 2 else start + if len(expr) == 2: + return list(range(end)) + return list(range(start, end)) + elif head_name == 'map': + fn = self.eval(expr[1], env) + lst = self.eval(expr[2], env) + return [self._call(fn, [x], env) for x in lst] + elif head_name == 'mod': + return self.eval(expr[1], env) % self.eval(expr[2], env) + + # Arithmetic + elif head_name == '+': + return self.eval(expr[1], env) + self.eval(expr[2], env) + elif head_name == '-': + if len(expr) == 2: + return -self.eval(expr[1], env) + return self.eval(expr[1], env) - self.eval(expr[2], env) + elif head_name == '*': + return self.eval(expr[1], env) * self.eval(expr[2], env) + elif head_name == '/': + return self.eval(expr[1], env) / self.eval(expr[2], env) + + # Comparison + elif head_name == '<': + return self.eval(expr[1], env) < self.eval(expr[2], env) + elif head_name == '>': + return self.eval(expr[1], env) > self.eval(expr[2], env) + elif head_name == '<=': + return self.eval(expr[1], env) <= self.eval(expr[2], env) + 
elif head_name == '>=': + return self.eval(expr[1], env) >= self.eval(expr[2], env) + elif head_name == '=': + return self.eval(expr[1], env) == self.eval(expr[2], env) + + # Logic + elif head_name == 'and': + return all(self.eval(a, env) for a in expr[1:]) + elif head_name == 'or': + return any(self.eval(a, env) for a in expr[1:]) + elif head_name == 'not': + return not self.eval(expr[1], env) + + # Function call + else: + fn = env.get(head_name) or self.globals.get(head_name) + if fn is None: + raise NameError(f"Undefined function: {head_name}") + args = [self.eval(a, env) for a in expr[1:]] + return self._call(fn, args, env) + + def _eval_let(self, expr: list, env: Dict[str, Any]) -> Any: + """Evaluate (let [bindings...] body).""" + bindings = expr[1] + body = expr[2] + + # Create new environment with bindings + new_env = dict(env) + + # Process bindings in pairs + i = 0 + while i < len(bindings): + name = bindings[i] + if isinstance(name, Symbol) or hasattr(name, 'name'): + name = name.name if hasattr(name, 'name') else str(name) + value = self.eval(bindings[i + 1], new_env) + new_env[name] = value + i += 2 + + return self.eval(body, new_env) + + def _eval_lambda(self, expr: list, env: Dict[str, Any]) -> Lambda: + """Evaluate (lambda [params] body).""" + params_expr = expr[1] + body = expr[2] + + # Extract parameter names + params = [] + for p in params_expr: + if isinstance(p, Symbol) or hasattr(p, 'name'): + params.append(p.name if hasattr(p, 'name') else str(p)) + else: + params.append(str(p)) + + return Lambda(params=params, body=body, closure=dict(env)) + + def _eval_if(self, expr: list, env: Dict[str, Any]) -> Any: + """Evaluate (if cond then else).""" + cond = self.eval(expr[1], env) + if cond: + return self.eval(expr[2], env) + elif len(expr) > 3: + return self.eval(expr[3], env) + return None + + def _eval_dict(self, expr: list, env: Dict[str, Any]) -> dict: + """Evaluate (dict :key val ...).""" + result = {} + i = 1 + while i < len(expr): + key = 
expr[i] + # Handle keyword syntax (:key) and Keyword objects + if hasattr(key, 'name'): + key = key.name + elif hasattr(key, '__class__') and key.__class__.__name__ == 'Keyword': + key = str(key).lstrip(':') + elif isinstance(key, str) and key.startswith(':'): + key = key[1:] + value = self.eval(expr[i + 1], env) + result[key] = value + i += 2 + return result + + def _call(self, fn: Any, args: List[Any], env: Dict[str, Any]) -> Any: + """Call a function with arguments.""" + if isinstance(fn, Lambda): + # Our own Lambda type + call_env = dict(fn.closure) + for param, arg in zip(fn.params, args): + call_env[param] = arg + return self.eval(fn.body, call_env) + elif hasattr(fn, 'params') and hasattr(fn, 'body'): + # Lambda from parser (artdag.sexp.parser.Lambda) + call_env = dict(env) + if hasattr(fn, 'closure') and fn.closure: + call_env.update(fn.closure) + # Get param names + params = [] + for p in fn.params: + if hasattr(p, 'name'): + params.append(p.name) + else: + params.append(str(p)) + for param, arg in zip(params, args): + call_env[param] = arg + return self.eval(fn.body, call_env) + elif callable(fn): + return fn(*args) + else: + raise TypeError(f"Not callable: {type(fn).__name__}") + + +def eval_slice_on_lambda(lambda_obj, acc: dict, i: int, start: float, end: float, + videos: list, interp: SexpInterpreter = None) -> dict: + """ + Evaluate a SLICE_ON lambda function. 
"""
Video and image sources with looping support.
"""

import numpy as np
import subprocess
import json
from pathlib import Path
from typing import Optional, Tuple
from abc import ABC, abstractmethod


class Source(ABC):
    """Abstract base class for frame sources."""

    @abstractmethod
    def read_frame(self, t: float) -> np.ndarray:
        """Read frame at time t (with looping if needed)."""
        pass

    @property
    @abstractmethod
    def duration(self) -> float:
        """Source duration in seconds."""
        pass

    @property
    @abstractmethod
    def size(self) -> Tuple[int, int]:
        """Frame size as (width, height)."""
        pass

    @property
    @abstractmethod
    def fps(self) -> float:
        """Frames per second."""
        pass


class VideoSource(Source):
    """
    Video file source with automatic looping.

    Reads frames on-demand from an ffmpeg subprocess, seeking as needed.
    When time exceeds duration, wraps around (loops).
    """

    def __init__(self, path: str, target_fps: float = 30):
        """Open a video file for on-demand decoding at target_fps.

        Raises:
            FileNotFoundError: if the file does not exist.
            ValueError: if ffprobe fails or the file has no video stream.
        """
        self.path = Path(path)
        self.target_fps = target_fps

        # Initialize decode state first so close()/__del__ are safe even if
        # the existence check or probe below raises.
        self._process: Optional[subprocess.Popen] = None
        self._current_start: Optional[float] = None
        self._frame_buffer: Optional[np.ndarray] = None
        self._buffer_time: Optional[float] = None

        self._duration = None
        self._size = None
        self._fps = None

        if not self.path.exists():
            raise FileNotFoundError(f"Video not found: {path}")

        self._probe()

    def _probe(self):
        """Get video metadata (duration, size, fps) via ffprobe.

        Raises:
            ValueError: if ffprobe output cannot be parsed or no video
                stream exists (previously this silently left _size/_fps as
                None, causing confusing failures later in _start_decode).
        """
        cmd = [
            "ffprobe", "-v", "quiet",
            "-print_format", "json",
            "-show_format", "-show_streams",
            str(self.path)
        ]
        result = subprocess.run(cmd, capture_output=True, text=True)
        try:
            data = json.loads(result.stdout)
        except json.JSONDecodeError as err:
            raise ValueError(f"ffprobe produced no metadata for {self.path}") from err

        # Get duration
        self._duration = float(data["format"]["duration"])

        # Get video stream info
        for stream in data["streams"]:
            if stream["codec_type"] == "video":
                self._size = (int(stream["width"]), int(stream["height"]))
                # r_frame_rate is a fraction string, e.g. "30/1" or "30000/1001"
                fps_parts = stream.get("r_frame_rate", "30/1").split("/")
                self._fps = float(fps_parts[0]) / float(fps_parts[1])
                break
        else:
            raise ValueError(f"No video stream in {self.path}")

    @property
    def duration(self) -> float:
        return self._duration

    @property
    def size(self) -> Tuple[int, int]:
        return self._size

    @property
    def fps(self) -> float:
        return self._fps

    def _start_decode(self, start_time: float):
        """(Re)start the ffmpeg decode process from the given time."""
        if self._process:
            try:
                self._process.stdout.close()
            except OSError:
                # Pipe may already be broken; was a bare `except:` which
                # could also swallow KeyboardInterrupt.
                pass
            self._process.terminate()
            try:
                self._process.wait(timeout=1)
            except subprocess.TimeoutExpired:
                self._process.kill()
                self._process.wait()

        w, h = self._size
        cmd = [
            "ffmpeg", "-v", "quiet",
            "-ss", str(start_time),
            "-i", str(self.path),
            "-f", "rawvideo",
            "-pix_fmt", "rgb24",
            "-r", str(self.target_fps),
            "-"
        ]
        self._process = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
            bufsize=w * h * 3 * 4,  # Buffer a few frames
        )
        self._current_start = start_time
        self._buffer_time = start_time

    def read_frame(self, t: float) -> np.ndarray:
        """
        Read frame at time t.

        If t exceeds duration, wraps around (loops). Seeks if the request is
        not adjacent to the last decoded frame, otherwise reads sequentially.
        Falls back to the last decoded frame (or black) if the decoder
        produces no data.
        """
        # Wrap time for looping
        t_wrapped = t % self._duration

        # Seek when there is no decoder yet, or on a loop point / time jump
        # larger than ~2 frame intervals.
        need_seek = (
            self._process is None or
            self._buffer_time is None or
            abs(t_wrapped - self._buffer_time) > 1.0 / self.target_fps * 2
        )

        if need_seek:
            self._start_decode(t_wrapped)

        w, h = self._size
        frame_size = w * h * 3

        # Retry a few times: a fresh seek may need to settle, and end of
        # stream restarts decoding from the beginning (loop).
        for attempt in range(3):
            raw = self._process.stdout.read(frame_size)
            if len(raw) == frame_size:
                break
            self._start_decode(0)

        if len(raw) < frame_size:
            # Still no data - return last frame or black
            if self._frame_buffer is not None:
                return self._frame_buffer.copy()
            return np.zeros((h, w, 3), dtype=np.uint8)

        frame = np.frombuffer(raw, dtype=np.uint8).reshape((h, w, 3))
        self._frame_buffer = frame  # Cache for fallback
        self._buffer_time = t_wrapped + 1.0 / self.target_fps

        return frame

    def close(self):
        """Terminate the decoder subprocess, if any."""
        # getattr guard: __del__ may run on a partially constructed instance.
        proc = getattr(self, "_process", None)
        if proc:
            proc.terminate()
            proc.wait()
            self._process = None

    def __del__(self):
        self.close()

    def __repr__(self):
        return f"VideoSource({self.path.name}, {self._size[0]}x{self._size[1]}, {self._duration:.1f}s)"
+ """ + + def __init__(self, path: str): + self.path = Path(path) + if not self.path.exists(): + raise FileNotFoundError(f"Image not found: {path}") + + # Load image + import cv2 + self._frame = cv2.imread(str(self.path)) + self._frame = cv2.cvtColor(self._frame, cv2.COLOR_BGR2RGB) + self._size = (self._frame.shape[1], self._frame.shape[0]) + + @property + def duration(self) -> float: + return float('inf') # Images last forever + + @property + def size(self) -> Tuple[int, int]: + return self._size + + @property + def fps(self) -> float: + return 30.0 # Arbitrary + + def read_frame(self, t: float) -> np.ndarray: + return self._frame.copy() + + def __repr__(self): + return f"ImageSource({self.path.name}, {self._size[0]}x{self._size[1]})" + + +class LiveSource(Source): + """ + Live video capture source (webcam, capture card, etc.). + + Time parameter is ignored - always returns latest frame. + """ + + def __init__(self, device: int = 0, size: Tuple[int, int] = (1280, 720), fps: float = 30): + import cv2 + self._cap = cv2.VideoCapture(device) + self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, size[0]) + self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, size[1]) + self._cap.set(cv2.CAP_PROP_FPS, fps) + + # Get actual settings + self._size = ( + int(self._cap.get(cv2.CAP_PROP_FRAME_WIDTH)), + int(self._cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + ) + self._fps = self._cap.get(cv2.CAP_PROP_FPS) + + if not self._cap.isOpened(): + raise RuntimeError(f"Could not open video device {device}") + + @property + def duration(self) -> float: + return float('inf') # Live - no duration + + @property + def size(self) -> Tuple[int, int]: + return self._size + + @property + def fps(self) -> float: + return self._fps + + def read_frame(self, t: float) -> np.ndarray: + """Read latest frame (t is ignored for live sources).""" + import cv2 + ret, frame = self._cap.read() + if not ret: + return np.zeros((self._size[1], self._size[0], 3), dtype=np.uint8) + return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + + def 
close(self): + self._cap.release() + + def __del__(self): + self.close() + + def __repr__(self): + return f"LiveSource({self._size[0]}x{self._size[1]}, {self._fps}fps)" diff --git a/streaming/stream_sexp.py b/streaming/stream_sexp.py new file mode 100644 index 0000000..b36dabf --- /dev/null +++ b/streaming/stream_sexp.py @@ -0,0 +1,1081 @@ +""" +Generic Streaming S-expression Interpreter. + +Executes streaming sexp recipes frame-by-frame. +The sexp defines the pipeline logic - interpreter just provides primitives. + +Primitives: + (read source-name) - read frame from source + (rotate frame :angle N) - rotate frame + (zoom frame :amount N) - zoom frame + (invert frame :amount N) - invert colors + (hue-shift frame :degrees N) - shift hue + (blend frame1 frame2 :opacity N) - blend two frames + (blend-weighted [frames...] [weights...]) - weighted blend + (ripple frame :amplitude N :cx N :cy N ...) - ripple effect + + (bind scan-name :field) - get scan state field + (map value [lo hi]) - map 0-1 value to range + energy - current energy (0-1) + beat - 1 if beat, 0 otherwise + t - current time + beat-count - total beats so far + +Example sexp: + (stream "test" + :fps 30 + (source vid "video.mp4") + (audio aud "music.mp3") + + (scan spin beat + :init {:angle 0 :dir 1} + :step (dict :angle (+ angle (* dir 10)) :dir dir)) + + (frame + (-> (read vid) + (rotate :angle (bind spin :angle)) + (zoom :amount (map energy [1 1.5]))))) +""" + +import sys +import time +import json +import hashlib +import numpy as np +import subprocess +from pathlib import Path +from dataclasses import dataclass, field +from typing import Dict, List, Any, Optional, Tuple, Union + +sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag")) +from artdag.sexp.parser import parse, parse_all, Symbol, Keyword + + +@dataclass +class StreamContext: + """Runtime context for streaming.""" + t: float = 0.0 + frame_num: int = 0 + fps: float = 30.0 + energy: float = 0.0 + is_beat: bool = False + 
class StreamCache:
    """On-disk cache for streaming data: analysis samples and scan-state
    keyframes, bucketed by recipe hash."""

    def __init__(self, cache_dir: Path, recipe_hash: str):
        self.cache_dir = cache_dir / recipe_hash
        self.cache_dir.mkdir(parents=True, exist_ok=True)
        self.analysis_buffer: Dict[str, List] = {}
        self.scan_states: Dict[str, List] = {}
        self.keyframe_interval = 5.0  # seconds between scan-state keyframes

    @staticmethod
    def _to_native(value):
        # numpy scalars expose .item(); plain Python numbers pass through.
        return float(value) if hasattr(value, 'item') else value

    def record_analysis(self, name: str, t: float, value: float):
        """Append one (t, value) analysis sample to the in-memory buffer."""
        samples = self.analysis_buffer.setdefault(name, [])
        samples.append((self._to_native(t), self._to_native(value)))

    def record_scan_state(self, name: str, t: float, state: dict):
        """Record a scan-state keyframe if enough time has passed since the
        previous one for this scan."""
        states = self.scan_states.setdefault(name, [])
        due = not states or t - states[-1][0] >= self.keyframe_interval
        if not due:
            return
        clean = {k: self._to_native(v) for k, v in state.items()}
        states.append((self._to_native(t), clean))

    def flush(self):
        """Merge all buffered data into the on-disk JSON files, then clear
        the in-memory buffers."""
        for name, samples in self.analysis_buffer.items():
            self._append_json(self.cache_dir / f"analysis_{name}.json", samples)
        self.analysis_buffer.clear()

        for name, states in self.scan_states.items():
            self._append_json(self.cache_dir / f"scan_{name}.json", states)
        self.scan_states.clear()

    @staticmethod
    def _append_json(path: Path, items: list):
        # Read-extend-write so repeated flushes accumulate on disk.
        existing = json.loads(path.read_text()) if path.exists() else []
        existing.extend(items)
        path.write_text(json.dumps(existing))
class AudioAnalyzer:
    """Real-time audio analysis over a fully pre-decoded mono PCM buffer."""

    def __init__(self, path: str, sample_rate: int = 22050):
        self.path = Path(path)

        # Decode the whole file up front to mono float32 PCM.
        decode_cmd = ["ffmpeg", "-v", "quiet", "-i", str(self.path),
                      "-f", "f32le", "-ac", "1", "-ar", str(sample_rate), "-"]
        pcm = subprocess.run(decode_cmd, capture_output=True).stdout
        self._audio = np.frombuffer(pcm, dtype=np.float32)
        self.sample_rate = sample_rate

        # Duration comes from ffprobe (default 60 s if missing).
        probe_cmd = ["ffprobe", "-v", "quiet", "-print_format", "json",
                     "-show_format", str(self.path)]
        meta = json.loads(subprocess.run(probe_cmd, capture_output=True, text=True).stdout)
        self.duration = float(meta.get("format", {}).get("duration", 60))

        self._flux_history = []
        self._last_beat_time = -1

    def get_energy(self, t: float) -> float:
        """RMS energy in a ~1024-sample window around t, scaled into [0, 1]."""
        center = int(t * self.sample_rate)
        lo = max(0, center - 512)
        hi = min(len(self._audio), center + 512)
        if lo >= hi:
            return 0.0
        rms = np.sqrt(np.mean(self._audio[lo:hi] ** 2))
        return min(1.0, rms * 3.0)

    def get_beat(self, t: float) -> bool:
        """Spectral-flux onset detection with an adaptive threshold.

        Returns True when the positive spectral flux at t exceeds the mean
        plus a fraction of the std-dev over a 1.5 s sliding history, with a
        100 ms refractory period between reported beats.
        """
        center = int(t * self.sample_rate)
        size = 2048

        lo, hi = max(0, center - size // 2), min(len(self._audio), center + size // 2)
        if hi - lo < size // 2:
            return False
        window = self._audio[lo:hi]

        # Previous window, shifted back 512 samples.
        prev_lo, prev_hi = max(0, lo - 512), max(0, hi - 512)
        if prev_hi <= prev_lo:
            return False
        prev_window = self._audio[prev_lo:prev_hi]

        spec_now = np.abs(np.fft.rfft(window * np.hanning(len(window))))
        spec_prev = np.abs(np.fft.rfft(prev_window * np.hanning(len(prev_window))))

        bins = min(len(spec_now), len(spec_prev))
        flux = np.sum(np.maximum(0, spec_now[:bins] - spec_prev[:bins])) / (bins + 1)

        # Keep only the last 1.5 s of flux samples.
        self._flux_history.append((t, flux))
        while self._flux_history and self._flux_history[0][0] < t - 1.5:
            self._flux_history.pop(0)

        if len(self._flux_history) < 3:
            return False

        flux_vals = [f for _, f in self._flux_history]
        threshold = np.mean(flux_vals) + np.std(flux_vals) * 0.3 + 0.001

        is_beat = flux > threshold and t - self._last_beat_time > 0.1
        if is_beat:
            self._last_beat_time = t
        return is_beat
+ """ + + def __init__(self, sexp_path: str, cache_dir: str = None): + self.sexp_path = Path(sexp_path) + self.sexp_dir = self.sexp_path.parent + + text = self.sexp_path.read_text() + self.ast = parse(text) + + self.config = self._parse_config() + + recipe_hash = hashlib.sha256(text.encode()).hexdigest()[:16] + cache_path = Path(cache_dir) if cache_dir else self.sexp_dir / ".stream_cache" + self.cache = StreamCache(cache_path, recipe_hash) + + self.ctx = StreamContext(fps=self.config.get('fps', 30)) + self.sources: Dict[str, VideoSource] = {} + self.frames: Dict[str, np.ndarray] = {} # Current frame per source + self._sources_read: set = set() # Track which sources read this frame + self.audios: Dict[str, AudioAnalyzer] = {} # Multiple named audio sources + self.audio_paths: Dict[str, str] = {} + self.audio_state: Dict[str, dict] = {} # Per-audio: {energy, is_beat, beat_count, last_beat} + self.scans: Dict[str, dict] = {} + + # Registries for external definitions + self.primitives: Dict[str, Any] = {} # name -> Python function + self.effects: Dict[str, dict] = {} # name -> {params, body} + self.macros: Dict[str, dict] = {} # name -> {params, body} + self.primitive_lib_dir = self.sexp_dir.parent / "sexp_effects" / "primitive_libs" + + self.frame_pipeline = None # The (frame ...) 
expression + + import random + self.rng = random.Random(self.config.get('seed', 42)) + + def _parse_config(self) -> dict: + """Parse config from (stream name :key val ...).""" + config = {'fps': 30, 'seed': 42} + if not self.ast or not isinstance(self.ast[0], Symbol): + return config + if self.ast[0].name != 'stream': + return config + + i = 2 + while i < len(self.ast): + if isinstance(self.ast[i], Keyword): + config[self.ast[i].name] = self.ast[i + 1] if i + 1 < len(self.ast) else None + i += 2 + elif isinstance(self.ast[i], list): + break + else: + i += 1 + return config + + def _load_primitives(self, lib_name: str): + """Load primitives from a Python library file.""" + import importlib.util + + # Try multiple paths + lib_paths = [ + self.primitive_lib_dir / f"{lib_name}.py", + self.sexp_dir / "primitive_libs" / f"{lib_name}.py", + self.sexp_dir.parent / "sexp_effects" / "primitive_libs" / f"{lib_name}.py", + ] + + lib_path = None + for p in lib_paths: + if p.exists(): + lib_path = p + break + + if not lib_path: + print(f"Warning: primitive library '{lib_name}' not found", file=sys.stderr) + return + + spec = importlib.util.spec_from_file_location(lib_name, lib_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + + # Extract all prim_* functions + count = 0 + for name in dir(module): + if name.startswith('prim_'): + func = getattr(module, name) + prim_name = name[5:] # Remove 'prim_' prefix + self.primitives[prim_name] = func + # Also register with dashes instead of underscores + dash_name = prim_name.replace('_', '-') + self.primitives[dash_name] = func + # Also register with -img suffix (sexp convention) + self.primitives[dash_name + '-img'] = func + count += 1 + + # Also check for PRIMITIVES dict (some modules use this for additional exports) + if hasattr(module, 'PRIMITIVES'): + prims = getattr(module, 'PRIMITIVES') + if isinstance(prims, dict): + for name, func in prims.items(): + self.primitives[name] = func + # Also 
register underscore version + underscore_name = name.replace('-', '_') + self.primitives[underscore_name] = func + count += 1 + + print(f"Loaded primitives: {lib_name} ({count} functions)", file=sys.stderr) + + def _load_effect(self, effect_path: Path): + """Load and register an effect from a .sexp file.""" + if not effect_path.exists(): + print(f"Warning: effect file not found: {effect_path}", file=sys.stderr) + return + + text = effect_path.read_text() + ast = parse_all(text) + + for form in ast: + if not isinstance(form, list) or not form: + continue + if not isinstance(form[0], Symbol): + continue + + cmd = form[0].name + + if cmd == 'require-primitives': + lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"') + self._load_primitives(lib_name) + + elif cmd == 'define-effect': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + params = {} + body = None + + i = 2 + while i < len(form): + if isinstance(form[i], Keyword): + if form[i].name == 'params' and i + 1 < len(form): + # Parse params list + params_list = form[i + 1] + for p in params_list: + if isinstance(p, list) and p: + pname = p[0].name if isinstance(p[0], Symbol) else str(p[0]) + pdef = {'default': 0} + j = 1 + while j < len(p): + if isinstance(p[j], Keyword): + pdef[p[j].name] = p[j + 1] if j + 1 < len(p) else None + j += 2 + else: + j += 1 + params[pname] = pdef + i += 2 + else: + i += 2 + else: + # Body expression + body = form[i] + i += 1 + + self.effects[name] = {'params': params, 'body': body, 'path': str(effect_path)} + print(f"Effect: {name}", file=sys.stderr) + + elif cmd == 'defmacro': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + params = [] + body = None + + if len(form) > 2 and isinstance(form[2], list): + params = [p.name if isinstance(p, Symbol) else str(p) for p in form[2]] + if len(form) > 3: + body = form[3] + + self.macros[name] = {'params': params, 'body': body} + print(f"Macro: {name}", file=sys.stderr) + + def 
_init(self): + """Initialize sources, scans, and pipeline from sexp.""" + for form in self.ast: + if not isinstance(form, list) or not form: + continue + if not isinstance(form[0], Symbol): + continue + + cmd = form[0].name + + # === External loading === + + if cmd == 'require-primitives': + lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"') + self._load_primitives(lib_name) + + elif cmd == 'effect': + # (effect name :path "...") + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + i = 2 + while i < len(form): + if isinstance(form[i], Keyword) and form[i].name == 'path': + path = str(form[i + 1]).strip('"') + full = (self.sexp_dir / path).resolve() + self._load_effect(full) + i += 2 + else: + i += 1 + + elif cmd == 'include': + # (include :path "...") + i = 1 + while i < len(form): + if isinstance(form[i], Keyword) and form[i].name == 'path': + path = str(form[i + 1]).strip('"') + full = (self.sexp_dir / path).resolve() + self._load_effect(full) # Reuse effect loader for includes + i += 2 + else: + i += 1 + + # === Sources === + + elif cmd == 'source': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + path = str(form[2]).strip('"') + full = (self.sexp_dir / path).resolve() + if full.exists(): + self.sources[name] = VideoSource(str(full), self.ctx.fps) + print(f"Source: {name} -> {full}", file=sys.stderr) + else: + print(f"Warning: {full} not found", file=sys.stderr) + + elif cmd == 'audio': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + path = str(form[2]).strip('"') + full = (self.sexp_dir / path).resolve() + if full.exists(): + self.audios[name] = AudioAnalyzer(str(full)) + self.audio_paths[name] = str(full) + self.audio_state[name] = {'energy': 0.0, 'is_beat': False, 'beat_count': 0, 'last_beat': False} + print(f"Audio: {name} -> {full}", file=sys.stderr) + + elif cmd == 'scan': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + # Trigger can be: + 
# (beat audio-name) - trigger on beat from specific audio + # beat - legacy: trigger on beat from first audio + trigger_expr = form[2] + if isinstance(trigger_expr, list) and len(trigger_expr) >= 2: + # (beat audio-name) + trigger_type = trigger_expr[0].name if isinstance(trigger_expr[0], Symbol) else str(trigger_expr[0]) + trigger_audio = trigger_expr[1].name if isinstance(trigger_expr[1], Symbol) else str(trigger_expr[1]) + trigger = (trigger_type, trigger_audio) + else: + # Legacy bare symbol + trigger = trigger_expr.name if isinstance(trigger_expr, Symbol) else str(trigger_expr) + + init_val, step_expr = {}, None + i = 3 + while i < len(form): + if isinstance(form[i], Keyword): + if form[i].name == 'init' and i + 1 < len(form): + init_val = self._eval(form[i + 1], {}) + elif form[i].name == 'step' and i + 1 < len(form): + step_expr = form[i + 1] + i += 2 + else: + i += 1 + + self.scans[name] = { + 'state': dict(init_val) if isinstance(init_val, dict) else {'acc': init_val}, + 'init': init_val, + 'step': step_expr, + 'trigger': trigger, + } + trigger_str = f"{trigger[0]} {trigger[1]}" if isinstance(trigger, tuple) else trigger + print(f"Scan: {name} (on {trigger_str})", file=sys.stderr) + + elif cmd == 'frame': + # (frame expr) - the pipeline to evaluate each frame + self.frame_pipeline = form[1] if len(form) > 1 else None + + # Set output size from first source + if self.sources: + first = next(iter(self.sources.values())) + self.ctx.output_size = first.size + + def _eval(self, expr, env: dict) -> Any: + """Evaluate an expression.""" + import cv2 + + # Primitives + if isinstance(expr, (int, float)): + return expr + if isinstance(expr, str): + return expr + if isinstance(expr, Symbol): + name = expr.name + # Built-in values + if name == 't' or name == '_time': + return self.ctx.t + if name == 'pi': + import math + return math.pi + if name == 'true': + return True + if name == 'false': + return False + if name == 'nil': + return None + # Environment lookup + if 
name in env: + return env[name] + # Scan state lookup + if name in self.scans: + return self.scans[name]['state'] + return 0 + + if isinstance(expr, Keyword): + return expr.name + + if not isinstance(expr, list) or not expr: + return expr + + # Dict literal {:key val ...} + if isinstance(expr[0], Keyword): + result = {} + i = 0 + while i < len(expr): + if isinstance(expr[i], Keyword): + result[expr[i].name] = self._eval(expr[i + 1], env) if i + 1 < len(expr) else None + i += 2 + else: + i += 1 + return result + + head = expr[0] + if not isinstance(head, Symbol): + return [self._eval(e, env) for e in expr] + + op = head.name + args = expr[1:] + + # Check if op is a closure in environment + if op in env: + val = env[op] + if isinstance(val, dict) and val.get('_type') == 'closure': + # Invoke closure + closure = val + closure_env = dict(closure['env']) + for i, pname in enumerate(closure['params']): + closure_env[pname] = self._eval(args[i], env) if i < len(args) else None + return self._eval(closure['body'], closure_env) + + # Threading macro + if op == '->': + result = self._eval(args[0], env) + for form in args[1:]: + if isinstance(form, list) and form: + # Insert result as first arg + new_form = [form[0], result] + form[1:] + result = self._eval(new_form, env) + else: + result = self._eval([form, result], env) + return result + + # === Audio analysis (explicit) === + + if op == 'energy': + # (energy audio-name) - get current energy from named audio + audio_name = args[0].name if isinstance(args[0], Symbol) else str(args[0]) + if audio_name in self.audio_state: + return self.audio_state[audio_name]['energy'] + return 0.0 + + if op == 'beat': + # (beat audio-name) - 1 if beat this frame, 0 otherwise + audio_name = args[0].name if isinstance(args[0], Symbol) else str(args[0]) + if audio_name in self.audio_state: + return 1.0 if self.audio_state[audio_name]['is_beat'] else 0.0 + return 0.0 + + if op == 'beat-count': + # (beat-count audio-name) - total beats from named 
audio + audio_name = args[0].name if isinstance(args[0], Symbol) else str(args[0]) + if audio_name in self.audio_state: + return self.audio_state[audio_name]['beat_count'] + return 0 + + # === Frame operations === + + if op == 'read': + # (read source-name) - get current frame from source (lazy read) + name = args[0].name if isinstance(args[0], Symbol) else str(args[0]) + if name not in self.frames: + if name in self.sources: + self.frames[name] = self.sources[name].read() + self._sources_read.add(name) + return self.frames.get(name) + + # === Binding and mapping === + + if op == 'bind': + # (bind scan-name :field) or (bind scan-name) + scan_name = args[0].name if isinstance(args[0], Symbol) else str(args[0]) + field = None + if len(args) > 1 and isinstance(args[1], Keyword): + field = args[1].name + + if scan_name in self.scans: + state = self.scans[scan_name]['state'] + if field: + return state.get(field, 0) + return state + return 0 + + if op == 'map': + # (map value [lo hi]) + val = self._eval(args[0], env) + range_list = self._eval(args[1], env) if len(args) > 1 else [0, 1] + if isinstance(range_list, list) and len(range_list) >= 2: + lo, hi = range_list[0], range_list[1] + return lo + val * (hi - lo) + return val + + # === Arithmetic === + + if op == '+': + return sum(self._eval(a, env) for a in args) + if op == '-': + vals = [self._eval(a, env) for a in args] + return vals[0] - sum(vals[1:]) if len(vals) > 1 else -vals[0] + if op == '*': + result = 1 + for a in args: + result *= self._eval(a, env) + return result + if op == '/': + vals = [self._eval(a, env) for a in args] + return vals[0] / vals[1] if len(vals) > 1 and vals[1] != 0 else 0 + if op == 'mod': + vals = [self._eval(a, env) for a in args] + return vals[0] % vals[1] if len(vals) > 1 and vals[1] != 0 else 0 + + if op == 'map-range': + # (map-range val from-lo from-hi to-lo to-hi) + val = self._eval(args[0], env) + from_lo = self._eval(args[1], env) + from_hi = self._eval(args[2], env) + to_lo = 
self._eval(args[3], env) + to_hi = self._eval(args[4], env) + # Normalize val to 0-1 in source range, then scale to target range + if from_hi == from_lo: + return to_lo + t = (val - from_lo) / (from_hi - from_lo) + return to_lo + t * (to_hi - to_lo) + + # === Comparison === + + if op == '<': + return self._eval(args[0], env) < self._eval(args[1], env) + if op == '>': + return self._eval(args[0], env) > self._eval(args[1], env) + if op == '=': + return self._eval(args[0], env) == self._eval(args[1], env) + if op == '<=': + return self._eval(args[0], env) <= self._eval(args[1], env) + if op == '>=': + return self._eval(args[0], env) >= self._eval(args[1], env) + + if op == 'and': + for arg in args: + if not self._eval(arg, env): + return False + return True + + if op == 'or': + # Lisp-style or: returns first truthy value, or last value if none truthy + result = False + for arg in args: + result = self._eval(arg, env) + if result: + return result + return result + + if op == 'not': + return not self._eval(args[0], env) + + # === Logic === + + if op == 'if': + cond = self._eval(args[0], env) + if cond: + return self._eval(args[1], env) + return self._eval(args[2], env) if len(args) > 2 else None + + if op == 'cond': + # (cond pred1 expr1 pred2 expr2 ... true else-expr) + i = 0 + while i < len(args) - 1: + pred = self._eval(args[i], env) + if pred: + return self._eval(args[i + 1], env) + i += 2 + return None + + if op == 'lambda': + # (lambda (params...) body) - create a closure + params = args[0] + body = args[1] + param_names = [p.name if isinstance(p, Symbol) else str(p) for p in params] + # Return a closure dict that captures the current env + return {'_type': 'closure', 'params': param_names, 'body': body, 'env': dict(env)} + + if op == 'let' or op == 'let*': + # Support both formats: + # (let [name val name val ...] body) - flat vector + # (let ((name val) (name val) ...) 
body) - nested list + # Note: our let already evaluates sequentially like let* + bindings = args[0] + body = args[1] + new_env = dict(env) + + if bindings and isinstance(bindings[0], list): + # Nested format: ((name val) (name val) ...) + for binding in bindings: + if isinstance(binding, list) and len(binding) >= 2: + name = binding[0].name if isinstance(binding[0], Symbol) else str(binding[0]) + val = self._eval(binding[1], new_env) + new_env[name] = val + else: + # Flat format: [name val name val ...] + i = 0 + while i < len(bindings): + name = bindings[i].name if isinstance(bindings[i], Symbol) else str(bindings[i]) + val = self._eval(bindings[i + 1], new_env) + new_env[name] = val + i += 2 + return self._eval(body, new_env) + + # === Random === + + if op == 'rand': + return self.rng.random() + if op == 'rand-int': + lo = int(self._eval(args[0], env)) + hi = int(self._eval(args[1], env)) + return self.rng.randint(lo, hi) + if op == 'rand-range': + lo = self._eval(args[0], env) + hi = self._eval(args[1], env) + return lo + self.rng.random() * (hi - lo) + + # === Dict === + + if op == 'dict': + result = {} + i = 0 + while i < len(args): + if isinstance(args[i], Keyword): + result[args[i].name] = self._eval(args[i + 1], env) if i + 1 < len(args) else None + i += 2 + else: + i += 1 + return result + + if op == 'get': + d = self._eval(args[0], env) + key = args[1].name if isinstance(args[1], Keyword) else self._eval(args[1], env) + if isinstance(d, dict): + return d.get(key, 0) + return 0 + + # === List === + + if op == 'list': + return [self._eval(a, env) for a in args] + + if op == 'nth': + lst = self._eval(args[0], env) + idx = int(self._eval(args[1], env)) + if isinstance(lst, list) and 0 <= idx < len(lst): + return lst[idx] + return None + + if op == 'len': + lst = self._eval(args[0], env) + return len(lst) if isinstance(lst, (list, dict, str)) else 0 + + # === External effects === + if op in self.effects: + effect = self.effects[op] + effect_env = dict(env) + 
effect_env['t'] = self.ctx.t + + # Set defaults for all params + param_names = list(effect['params'].keys()) + for pname, pdef in effect['params'].items(): + effect_env[pname] = pdef.get('default', 0) + + # Parse args: first is frame, then positional params, then kwargs + positional_idx = 0 + i = 0 + while i < len(args): + if isinstance(args[i], Keyword): + # Keyword arg + pname = args[i].name + if pname in effect['params'] and i + 1 < len(args): + effect_env[pname] = self._eval(args[i + 1], env) + i += 2 + else: + # Positional arg + val = self._eval(args[i], env) + if positional_idx == 0: + effect_env['frame'] = val + elif positional_idx - 1 < len(param_names): + effect_env[param_names[positional_idx - 1]] = val + positional_idx += 1 + i += 1 + + return self._eval(effect['body'], effect_env) + + # === External primitives === + if op in self.primitives: + prim_func = self.primitives[op] + # Evaluate all args + evaluated_args = [] + kwargs = {} + i = 0 + while i < len(args): + if isinstance(args[i], Keyword): + k = args[i].name + v = self._eval(args[i + 1], env) if i + 1 < len(args) else None + kwargs[k] = v + i += 2 + else: + evaluated_args.append(self._eval(args[i], env)) + i += 1 + # Call primitive + try: + if kwargs: + return prim_func(*evaluated_args, **kwargs) + return prim_func(*evaluated_args) + except Exception as e: + print(f"Primitive {op} error: {e}", file=sys.stderr) + return None + + # === Macros === + if op in self.macros: + macro = self.macros[op] + # Bind macro params to args (unevaluated) + macro_env = dict(env) + for i, pname in enumerate(macro['params']): + macro_env[pname] = args[i] if i < len(args) else None + # Expand and evaluate + return self._eval(macro['body'], macro_env) + + # === Primitive-style call (name-with-dashes -> prim_name_with_underscores) === + prim_name = op.replace('-', '_') + if prim_name in self.primitives: + prim_func = self.primitives[prim_name] + evaluated_args = [] + kwargs = {} + i = 0 + while i < len(args): + if 
isinstance(args[i], Keyword): + k = args[i].name.replace('-', '_') + v = self._eval(args[i + 1], env) if i + 1 < len(args) else None + kwargs[k] = v + i += 2 + else: + evaluated_args.append(self._eval(args[i], env)) + i += 1 + try: + if kwargs: + return prim_func(*evaluated_args, **kwargs) + return prim_func(*evaluated_args) + except Exception as e: + print(f"Primitive {op} error: {e}", file=sys.stderr) + return None + + # Unknown - return as-is + return expr + + def _step_scans(self): + """Step scans on beat from specific audio.""" + for name, scan in self.scans.items(): + trigger = scan['trigger'] + + # Check if this scan should step + should_step = False + audio_name = None + + if isinstance(trigger, tuple) and trigger[0] == 'beat': + # Explicit: (beat audio-name) + audio_name = trigger[1] + if audio_name in self.audio_state: + should_step = self.audio_state[audio_name]['is_beat'] + elif trigger == 'beat': + # Legacy: use first audio + if self.audio_state: + audio_name = next(iter(self.audio_state)) + should_step = self.audio_state[audio_name]['is_beat'] + + if should_step and audio_name: + state = self.audio_state[audio_name] + env = dict(scan['state']) + env['beat_count'] = state['beat_count'] + env['t'] = self.ctx.t + env['energy'] = state['energy'] + + if scan['step']: + new_state = self._eval(scan['step'], env) + if isinstance(new_state, dict): + scan['state'] = new_state + elif new_state is not None: + scan['state'] = {'acc': new_state} + + self.cache.record_scan_state(name, self.ctx.t, scan['state']) + + def run(self, duration: float = None, output: str = "pipe"): + """Run the streaming pipeline.""" + from .output import PipeOutput, DisplayOutput, FileOutput + + self._init() + + if not self.sources: + print("Error: no sources", file=sys.stderr) + return + + if not self.frame_pipeline: + print("Error: no (frame ...) 
pipeline defined", file=sys.stderr) + return + + w, h = self.ctx.output_size + + # Duration from first audio or default + if duration is None: + if self.audios: + first_audio = next(iter(self.audios.values())) + duration = first_audio.duration + else: + duration = 60.0 + + n_frames = int(duration * self.ctx.fps) + frame_time = 1.0 / self.ctx.fps + + print(f"Streaming {n_frames} frames @ {self.ctx.fps}fps", file=sys.stderr) + + # Use first audio for playback sync + first_audio_path = next(iter(self.audio_paths.values())) if self.audio_paths else None + + # Output + if output == "pipe": + out = PipeOutput(size=(w, h), fps=self.ctx.fps, + audio_source=first_audio_path) + elif output == "preview": + out = DisplayOutput(size=(w, h), fps=self.ctx.fps, + audio_source=first_audio_path) + else: + out = FileOutput(output, size=(w, h), fps=self.ctx.fps, + audio_source=first_audio_path) + + try: + for frame_num in range(n_frames): + if not out.is_open: + print(f"\nOutput closed at {frame_num}", file=sys.stderr) + break + + self.ctx.t = frame_num * frame_time + self.ctx.frame_num = frame_num + + # Update all audio states + for audio_name, analyzer in self.audios.items(): + state = self.audio_state[audio_name] + energy = analyzer.get_energy(self.ctx.t) + is_beat_raw = analyzer.get_beat(self.ctx.t) + is_beat = is_beat_raw and not state['last_beat'] + state['last_beat'] = is_beat_raw + + state['energy'] = energy + state['is_beat'] = is_beat + if is_beat: + state['beat_count'] += 1 + + self.cache.record_analysis(f'{audio_name}_energy', self.ctx.t, energy) + self.cache.record_analysis(f'{audio_name}_beat', self.ctx.t, 1.0 if is_beat else 0.0) + + # Step scans + self._step_scans() + + # Clear frames - will be read lazily + self.frames.clear() + self._sources_read = set() + + # Evaluate pipeline (reads happen on-demand) + result = self._eval(self.frame_pipeline, {}) + + # Skip unread sources to keep pipes in sync + for name, src in self.sources.items(): + if name not in 
self._sources_read: + src.skip() + + # Ensure output size + if result is not None: + import cv2 + if result.shape[:2] != (h, w): + result = cv2.resize(result, (w, h)) + out.write(result, self.ctx.t) + + # Progress + if frame_num % 30 == 0: + pct = 100 * frame_num / n_frames + # Show beats from first audio + total_beats = 0 + if self.audio_state: + first_state = next(iter(self.audio_state.values())) + total_beats = first_state['beat_count'] + print(f"\r{pct:5.1f}% | beats:{total_beats}", + end="", file=sys.stderr) + sys.stderr.flush() + + if frame_num % 300 == 0: + self.cache.flush() + + except KeyboardInterrupt: + print("\nInterrupted", file=sys.stderr) + except Exception as e: + print(f"\nError: {e}", file=sys.stderr) + import traceback + traceback.print_exc() + finally: + out.close() + for src in self.sources.values(): + src.close() + self.cache.flush() + + print("\nDone", file=sys.stderr) + + +def run_stream(sexp_path: str, duration: float = None, output: str = "pipe", fps: float = None): + """Run a streaming sexp.""" + interp = StreamInterpreter(sexp_path) + if fps: + interp.ctx.fps = fps + interp.run(duration=duration, output=output) + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser(description="Run streaming sexp") + parser.add_argument("sexp", help="Path to .sexp file") + parser.add_argument("-d", "--duration", type=float, default=None) + parser.add_argument("-o", "--output", default="pipe") + parser.add_argument("--fps", type=float, default=None, help="Override fps (default: from sexp)") + args = parser.parse_args() + + run_stream(args.sexp, duration=args.duration, output=args.output, fps=args.fps) diff --git a/streaming/stream_sexp_generic.py b/streaming/stream_sexp_generic.py new file mode 100644 index 0000000..4f88614 --- /dev/null +++ b/streaming/stream_sexp_generic.py @@ -0,0 +1,863 @@ +""" +Fully Generic Streaming S-expression Interpreter. + +The interpreter knows NOTHING about video, audio, or any domain. 
+All domain logic comes from primitives loaded via (require-primitives ...). + +Built-in forms: + - Control: if, cond, let, let*, lambda, -> + - Arithmetic: +, -, *, /, mod, map-range + - Comparison: <, >, =, <=, >=, and, or, not + - Data: dict, get, list, nth, len, quote + - Random: rand, rand-int, rand-range + - Scan: bind (access scan state) + +Everything else comes from primitives or effects. + +Context (ctx) is passed explicitly to frame evaluation: + - ctx.t: current time + - ctx.frame-num: current frame number + - ctx.fps: frames per second +""" + +import sys +import time +import json +import hashlib +import math +import numpy as np +from pathlib import Path +from dataclasses import dataclass +from typing import Dict, List, Any, Optional, Tuple + +# Try pip-installed artdag first, fall back to local path +try: + from artdag.sexp.parser import parse, parse_all, Symbol, Keyword +except ImportError: + sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag")) + from artdag.sexp.parser import parse, parse_all, Symbol, Keyword + + +@dataclass +class Context: + """Runtime context passed to frame evaluation.""" + t: float = 0.0 + frame_num: int = 0 + fps: float = 30.0 + + +class StreamInterpreter: + """ + Fully generic streaming sexp interpreter. + + No domain-specific knowledge - just evaluates expressions + and calls primitives. 
+ """ + + def __init__(self, sexp_path: str): + self.sexp_path = Path(sexp_path) + self.sexp_dir = self.sexp_path.parent + + text = self.sexp_path.read_text() + self.ast = parse(text) + + self.config = self._parse_config() + + # Global environment for def bindings + self.globals: Dict[str, Any] = {} + + # Scans + self.scans: Dict[str, dict] = {} + + # Audio playback path (for syncing output) + self.audio_playback: Optional[str] = None + + # Registries for external definitions + self.primitives: Dict[str, Any] = {} + self.effects: Dict[str, dict] = {} + self.macros: Dict[str, dict] = {} + self.primitive_lib_dir = self.sexp_dir.parent / "sexp_effects" / "primitive_libs" + + self.frame_pipeline = None + + # External config files (set before run()) + self.sources_config: Optional[Path] = None + self.audio_config: Optional[Path] = None + + import random + self.rng = random.Random(self.config.get('seed', 42)) + + def _load_config_file(self, config_path): + """Load a config file and process its definitions.""" + config_path = Path(config_path) # Accept str or Path + if not config_path.exists(): + print(f"Warning: config file not found: {config_path}", file=sys.stderr) + return + + text = config_path.read_text() + ast = parse_all(text) + + for form in ast: + if not isinstance(form, list) or not form: + continue + if not isinstance(form[0], Symbol): + continue + + cmd = form[0].name + + if cmd == 'require-primitives': + lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"') + self._load_primitives(lib_name) + + elif cmd == 'def': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + value = self._eval(form[2], self.globals) + self.globals[name] = value + print(f"Config: {name}", file=sys.stderr) + + elif cmd == 'audio-playback': + # Path relative to working directory (consistent with other paths) + path = str(form[1]).strip('"') + self.audio_playback = str(Path(path).resolve()) + print(f"Audio playback: {self.audio_playback}", 
file=sys.stderr) + + def _parse_config(self) -> dict: + """Parse config from (stream name :key val ...).""" + config = {'fps': 30, 'seed': 42, 'width': 720, 'height': 720} + if not self.ast or not isinstance(self.ast[0], Symbol): + return config + if self.ast[0].name != 'stream': + return config + + i = 2 + while i < len(self.ast): + if isinstance(self.ast[i], Keyword): + config[self.ast[i].name] = self.ast[i + 1] if i + 1 < len(self.ast) else None + i += 2 + elif isinstance(self.ast[i], list): + break + else: + i += 1 + return config + + def _load_primitives(self, lib_name: str): + """Load primitives from a Python library file.""" + import importlib.util + + lib_paths = [ + self.primitive_lib_dir / f"{lib_name}.py", + self.sexp_dir / "primitive_libs" / f"{lib_name}.py", + self.sexp_dir.parent / "sexp_effects" / "primitive_libs" / f"{lib_name}.py", + ] + + lib_path = None + for p in lib_paths: + if p.exists(): + lib_path = p + break + + if not lib_path: + print(f"Warning: primitive library '{lib_name}' not found", file=sys.stderr) + return + + spec = importlib.util.spec_from_file_location(lib_name, lib_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + + count = 0 + for name in dir(module): + if name.startswith('prim_'): + func = getattr(module, name) + prim_name = name[5:] + dash_name = prim_name.replace('_', '-') + # Register ONLY with namespace (geometry:ripple-displace) + self.primitives[f"{lib_name}:{dash_name}"] = func + count += 1 + + if hasattr(module, 'PRIMITIVES'): + prims = getattr(module, 'PRIMITIVES') + if isinstance(prims, dict): + for name, func in prims.items(): + # Register ONLY with namespace + dash_name = name.replace('_', '-') + self.primitives[f"{lib_name}:{dash_name}"] = func + count += 1 + + print(f"Loaded primitives: {lib_name} ({count} functions)", file=sys.stderr) + + def _load_effect(self, effect_path: Path): + """Load and register an effect from a .sexp file.""" + if not effect_path.exists(): + 
print(f"Warning: effect file not found: {effect_path}", file=sys.stderr) + return + + text = effect_path.read_text() + ast = parse_all(text) + + for form in ast: + if not isinstance(form, list) or not form: + continue + if not isinstance(form[0], Symbol): + continue + + cmd = form[0].name + + if cmd == 'require-primitives': + lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"') + self._load_primitives(lib_name) + + elif cmd == 'define-effect': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + params = {} + body = None + i = 2 + while i < len(form): + if isinstance(form[i], Keyword): + if form[i].name == 'params' and i + 1 < len(form): + for pdef in form[i + 1]: + if isinstance(pdef, list) and pdef: + pname = pdef[0].name if isinstance(pdef[0], Symbol) else str(pdef[0]) + pinfo = {'default': 0} + j = 1 + while j < len(pdef): + if isinstance(pdef[j], Keyword) and j + 1 < len(pdef): + pinfo[pdef[j].name] = pdef[j + 1] + j += 2 + else: + j += 1 + params[pname] = pinfo + i += 2 + else: + body = form[i] + i += 1 + + self.effects[name] = {'params': params, 'body': body} + print(f"Effect: {name}", file=sys.stderr) + + elif cmd == 'defmacro': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + params = [p.name if isinstance(p, Symbol) else str(p) for p in form[2]] + body = form[3] + self.macros[name] = {'params': params, 'body': body} + + elif cmd == 'effect': + # Handle (effect name :path "...") in included files - recursive + i = 2 + while i < len(form): + if isinstance(form[i], Keyword) and form[i].name == 'path': + path = str(form[i + 1]).strip('"') + # Resolve relative to the file being loaded + full = (effect_path.parent / path).resolve() + self._load_effect(full) + i += 2 + else: + i += 1 + + elif cmd == 'include': + # Handle (include :path "...") in included files - recursive + i = 1 + while i < len(form): + if isinstance(form[i], Keyword) and form[i].name == 'path': + path = str(form[i + 
1]).strip('"') + full = (effect_path.parent / path).resolve() + self._load_effect(full) + i += 2 + else: + i += 1 + + elif cmd == 'scan': + # Handle scans from included files + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + trigger_expr = form[2] + init_val, step_expr = {}, None + i = 3 + while i < len(form): + if isinstance(form[i], Keyword): + if form[i].name == 'init' and i + 1 < len(form): + init_val = self._eval(form[i + 1], self.globals) + elif form[i].name == 'step' and i + 1 < len(form): + step_expr = form[i + 1] + i += 2 + else: + i += 1 + + self.scans[name] = { + 'state': dict(init_val) if isinstance(init_val, dict) else {'acc': init_val}, + 'init': init_val, + 'step': step_expr, + 'trigger': trigger_expr, + } + print(f"Scan: {name}", file=sys.stderr) + + def _init(self): + """Initialize from sexp - load primitives, effects, defs, scans.""" + # Load external config files first (they can override recipe definitions) + if self.sources_config: + self._load_config_file(self.sources_config) + if self.audio_config: + self._load_config_file(self.audio_config) + + for form in self.ast: + if not isinstance(form, list) or not form: + continue + if not isinstance(form[0], Symbol): + continue + + cmd = form[0].name + + if cmd == 'require-primitives': + lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"') + self._load_primitives(lib_name) + + elif cmd == 'effect': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + i = 2 + while i < len(form): + if isinstance(form[i], Keyword) and form[i].name == 'path': + path = str(form[i + 1]).strip('"') + full = (self.sexp_dir / path).resolve() + self._load_effect(full) + i += 2 + else: + i += 1 + + elif cmd == 'include': + i = 1 + while i < len(form): + if isinstance(form[i], Keyword) and form[i].name == 'path': + path = str(form[i + 1]).strip('"') + full = (self.sexp_dir / path).resolve() + self._load_effect(full) + i += 2 + else: + i += 1 + + elif cmd == 
'audio-playback': + # (audio-playback "path") - set audio file for playback sync + # Skip if already set by config file + if self.audio_playback is None: + path = str(form[1]).strip('"') + self.audio_playback = str((self.sexp_dir / path).resolve()) + print(f"Audio playback: {self.audio_playback}", file=sys.stderr) + + elif cmd == 'def': + # (def name expr) - evaluate and store in globals + # Skip if already defined by config file + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + if name in self.globals: + print(f"Def: {name} (from config, skipped)", file=sys.stderr) + continue + value = self._eval(form[2], self.globals) + self.globals[name] = value + print(f"Def: {name}", file=sys.stderr) + + elif cmd == 'defmacro': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + params = [p.name if isinstance(p, Symbol) else str(p) for p in form[2]] + body = form[3] + self.macros[name] = {'params': params, 'body': body} + + elif cmd == 'scan': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + trigger_expr = form[2] + init_val, step_expr = {}, None + i = 3 + while i < len(form): + if isinstance(form[i], Keyword): + if form[i].name == 'init' and i + 1 < len(form): + init_val = self._eval(form[i + 1], self.globals) + elif form[i].name == 'step' and i + 1 < len(form): + step_expr = form[i + 1] + i += 2 + else: + i += 1 + + self.scans[name] = { + 'state': dict(init_val) if isinstance(init_val, dict) else {'acc': init_val}, + 'init': init_val, + 'step': step_expr, + 'trigger': trigger_expr, + } + print(f"Scan: {name}", file=sys.stderr) + + elif cmd == 'frame': + self.frame_pipeline = form[1] if len(form) > 1 else None + + def _eval(self, expr, env: dict) -> Any: + """Evaluate an expression.""" + + # Primitives + if isinstance(expr, (int, float)): + return expr + if isinstance(expr, str): + return expr + if isinstance(expr, bool): + return expr + + if isinstance(expr, Symbol): + name = expr.name + # Built-in constants 
+ if name == 'pi': + return math.pi + if name == 'true': + return True + if name == 'false': + return False + if name == 'nil': + return None + # Environment lookup + if name in env: + return env[name] + # Global lookup + if name in self.globals: + return self.globals[name] + # Scan state lookup + if name in self.scans: + return self.scans[name]['state'] + raise NameError(f"Undefined variable: {name}") + + if isinstance(expr, Keyword): + return expr.name + + if not isinstance(expr, list) or not expr: + return expr + + # Dict literal {:key val ...} + if isinstance(expr[0], Keyword): + result = {} + i = 0 + while i < len(expr): + if isinstance(expr[i], Keyword): + result[expr[i].name] = self._eval(expr[i + 1], env) if i + 1 < len(expr) else None + i += 2 + else: + i += 1 + return result + + head = expr[0] + if not isinstance(head, Symbol): + return [self._eval(e, env) for e in expr] + + op = head.name + args = expr[1:] + + # Check for closure call + if op in env: + val = env[op] + if isinstance(val, dict) and val.get('_type') == 'closure': + closure = val + closure_env = dict(closure['env']) + for i, pname in enumerate(closure['params']): + closure_env[pname] = self._eval(args[i], env) if i < len(args) else None + return self._eval(closure['body'], closure_env) + + if op in self.globals: + val = self.globals[op] + if isinstance(val, dict) and val.get('_type') == 'closure': + closure = val + closure_env = dict(closure['env']) + for i, pname in enumerate(closure['params']): + closure_env[pname] = self._eval(args[i], env) if i < len(args) else None + return self._eval(closure['body'], closure_env) + + # Threading macro + if op == '->': + result = self._eval(args[0], env) + for form in args[1:]: + if isinstance(form, list) and form: + new_form = [form[0], result] + form[1:] + result = self._eval(new_form, env) + else: + result = self._eval([form, result], env) + return result + + # === Binding === + + if op == 'bind': + scan_name = args[0].name if isinstance(args[0], 
Symbol) else str(args[0]) + if scan_name in self.scans: + state = self.scans[scan_name]['state'] + if len(args) > 1: + key = args[1].name if isinstance(args[1], Keyword) else str(args[1]) + return state.get(key, 0) + return state + return 0 + + # === Arithmetic === + + if op == '+': + return sum(self._eval(a, env) for a in args) + if op == '-': + vals = [self._eval(a, env) for a in args] + return vals[0] - sum(vals[1:]) if len(vals) > 1 else -vals[0] + if op == '*': + result = 1 + for a in args: + result *= self._eval(a, env) + return result + if op == '/': + vals = [self._eval(a, env) for a in args] + return vals[0] / vals[1] if len(vals) > 1 and vals[1] != 0 else 0 + if op == 'mod': + vals = [self._eval(a, env) for a in args] + return vals[0] % vals[1] if len(vals) > 1 and vals[1] != 0 else 0 + + # === Comparison === + + if op == '<': + return self._eval(args[0], env) < self._eval(args[1], env) + if op == '>': + return self._eval(args[0], env) > self._eval(args[1], env) + if op == '=': + return self._eval(args[0], env) == self._eval(args[1], env) + if op == '<=': + return self._eval(args[0], env) <= self._eval(args[1], env) + if op == '>=': + return self._eval(args[0], env) >= self._eval(args[1], env) + + if op == 'and': + for arg in args: + if not self._eval(arg, env): + return False + return True + + if op == 'or': + result = False + for arg in args: + result = self._eval(arg, env) + if result: + return result + return result + + if op == 'not': + return not self._eval(args[0], env) + + # === Logic === + + if op == 'if': + cond = self._eval(args[0], env) + if cond: + return self._eval(args[1], env) + return self._eval(args[2], env) if len(args) > 2 else None + + if op == 'cond': + i = 0 + while i < len(args) - 1: + pred = self._eval(args[i], env) + if pred: + return self._eval(args[i + 1], env) + i += 2 + return None + + if op == 'lambda': + params = args[0] + body = args[1] + param_names = [p.name if isinstance(p, Symbol) else str(p) for p in params] + return 
{'_type': 'closure', 'params': param_names, 'body': body, 'env': dict(env)} + + if op == 'let' or op == 'let*': + bindings = args[0] + body = args[1] + new_env = dict(env) + + if bindings and isinstance(bindings[0], list): + for binding in bindings: + if isinstance(binding, list) and len(binding) >= 2: + name = binding[0].name if isinstance(binding[0], Symbol) else str(binding[0]) + val = self._eval(binding[1], new_env) + new_env[name] = val + else: + i = 0 + while i < len(bindings): + name = bindings[i].name if isinstance(bindings[i], Symbol) else str(bindings[i]) + val = self._eval(bindings[i + 1], new_env) + new_env[name] = val + i += 2 + return self._eval(body, new_env) + + # === Dict === + + if op == 'dict': + result = {} + i = 0 + while i < len(args): + if isinstance(args[i], Keyword): + key = args[i].name + val = self._eval(args[i + 1], env) if i + 1 < len(args) else None + result[key] = val + i += 2 + else: + i += 1 + return result + + if op == 'get': + obj = self._eval(args[0], env) + key = args[1].name if isinstance(args[1], Keyword) else self._eval(args[1], env) + if isinstance(obj, dict): + return obj.get(key, 0) + return 0 + + # === List === + + if op == 'list': + return [self._eval(a, env) for a in args] + + if op == 'quote': + return args[0] if args else None + + if op == 'nth': + lst = self._eval(args[0], env) + idx = int(self._eval(args[1], env)) + if isinstance(lst, (list, tuple)) and 0 <= idx < len(lst): + return lst[idx] + return None + + if op == 'len': + val = self._eval(args[0], env) + return len(val) if hasattr(val, '__len__') else 0 + + if op == 'map': + seq = self._eval(args[0], env) + fn = self._eval(args[1], env) + if not isinstance(seq, (list, tuple)): + return [] + # Handle closure (lambda from sexp) + if isinstance(fn, dict) and fn.get('_type') == 'closure': + results = [] + for item in seq: + closure_env = dict(fn['env']) + if fn['params']: + closure_env[fn['params'][0]] = item + results.append(self._eval(fn['body'], closure_env)) + 
return results + # Handle Python callable + if callable(fn): + return [fn(item) for item in seq] + return [] + + # === Effects === + + if op in self.effects: + effect = self.effects[op] + effect_env = dict(env) + + param_names = list(effect['params'].keys()) + for pname, pdef in effect['params'].items(): + effect_env[pname] = pdef.get('default', 0) + + positional_idx = 0 + i = 0 + while i < len(args): + if isinstance(args[i], Keyword): + pname = args[i].name + if pname in effect['params'] and i + 1 < len(args): + effect_env[pname] = self._eval(args[i + 1], env) + i += 2 + else: + val = self._eval(args[i], env) + if positional_idx == 0: + effect_env['frame'] = val + elif positional_idx - 1 < len(param_names): + effect_env[param_names[positional_idx - 1]] = val + positional_idx += 1 + i += 1 + + return self._eval(effect['body'], effect_env) + + # === Primitives === + + if op in self.primitives: + prim_func = self.primitives[op] + evaluated_args = [] + kwargs = {} + i = 0 + while i < len(args): + if isinstance(args[i], Keyword): + k = args[i].name + v = self._eval(args[i + 1], env) if i + 1 < len(args) else None + kwargs[k] = v + i += 2 + else: + evaluated_args.append(self._eval(args[i], env)) + i += 1 + try: + if kwargs: + return prim_func(*evaluated_args, **kwargs) + return prim_func(*evaluated_args) + except Exception as e: + print(f"Primitive {op} error: {e}", file=sys.stderr) + return None + + # === Macros (function-like: args evaluated before binding) === + + if op in self.macros: + macro = self.macros[op] + macro_env = dict(env) + for i, pname in enumerate(macro['params']): + # Evaluate args in calling environment before binding + macro_env[pname] = self._eval(args[i], env) if i < len(args) else None + return self._eval(macro['body'], macro_env) + + # Underscore variant lookup + prim_name = op.replace('-', '_') + if prim_name in self.primitives: + prim_func = self.primitives[prim_name] + evaluated_args = [] + kwargs = {} + i = 0 + while i < len(args): + if 
isinstance(args[i], Keyword): + k = args[i].name.replace('-', '_') + v = self._eval(args[i + 1], env) if i + 1 < len(args) else None + kwargs[k] = v + i += 2 + else: + evaluated_args.append(self._eval(args[i], env)) + i += 1 + + try: + if kwargs: + return prim_func(*evaluated_args, **kwargs) + return prim_func(*evaluated_args) + except Exception as e: + print(f"Primitive {op} error: {e}", file=sys.stderr) + return None + + # Unknown - return as-is + return expr + + def _step_scans(self, ctx: Context, env: dict): + """Step scans based on trigger evaluation.""" + for name, scan in self.scans.items(): + trigger_expr = scan['trigger'] + + # Evaluate trigger in context + should_step = self._eval(trigger_expr, env) + + if should_step: + state = scan['state'] + step_env = dict(state) + step_env.update(env) + + new_state = self._eval(scan['step'], step_env) + if isinstance(new_state, dict): + scan['state'] = new_state + else: + scan['state'] = {'acc': new_state} + + def run(self, duration: float = None, output: str = "pipe"): + """Run the streaming pipeline.""" + # Import output classes - handle both package and direct execution + try: + from .output import PipeOutput, DisplayOutput, FileOutput + except ImportError: + from output import PipeOutput, DisplayOutput, FileOutput + + self._init() + + if not self.frame_pipeline: + print("Error: no (frame ...) 
pipeline defined", file=sys.stderr) + return + + w = self.config.get('width', 720) + h = self.config.get('height', 720) + fps = self.config.get('fps', 30) + + if duration is None: + # Try to get duration from audio if available + for name, val in self.globals.items(): + if hasattr(val, 'duration'): + duration = val.duration + print(f"Using audio duration: {duration:.1f}s", file=sys.stderr) + break + else: + duration = 60.0 + + n_frames = int(duration * fps) + frame_time = 1.0 / fps + + print(f"Streaming {n_frames} frames @ {fps}fps", file=sys.stderr) + + # Create context + ctx = Context(fps=fps) + + # Output (with optional audio sync) + audio = self.audio_playback + if output == "pipe": + out = PipeOutput(size=(w, h), fps=fps, audio_source=audio) + elif output == "preview": + out = DisplayOutput(size=(w, h), fps=fps, audio_source=audio) + else: + out = FileOutput(output, size=(w, h), fps=fps, audio_source=audio) + + try: + frame_times = [] + for frame_num in range(n_frames): + if not out.is_open: + break + + frame_start = time.time() + ctx.t = frame_num * frame_time + ctx.frame_num = frame_num + + # Build frame environment with context + frame_env = { + 'ctx': { + 't': ctx.t, + 'frame-num': ctx.frame_num, + 'fps': ctx.fps, + }, + 't': ctx.t, # Also expose t directly for convenience + 'frame-num': ctx.frame_num, + } + + # Step scans + self._step_scans(ctx, frame_env) + + # Evaluate pipeline + result = self._eval(self.frame_pipeline, frame_env) + + if result is not None and hasattr(result, 'shape'): + out.write(result, ctx.t) + + frame_elapsed = time.time() - frame_start + frame_times.append(frame_elapsed) + + # Progress with timing + if frame_num % 30 == 0: + pct = 100 * frame_num / n_frames + avg_ms = 1000 * sum(frame_times[-30:]) / max(1, len(frame_times[-30:])) + target_ms = 1000 * frame_time + print(f"\r{pct:5.1f}% [{avg_ms:.0f}ms/frame, target {target_ms:.0f}ms]", end="", file=sys.stderr, flush=True) + + finally: + out.close() + print("\nDone", file=sys.stderr) 
+ + +def run_stream(sexp_path: str, duration: float = None, output: str = "pipe", fps: float = None, + sources_config: str = None, audio_config: str = None): + """Run a streaming sexp.""" + interp = StreamInterpreter(sexp_path) + if fps: + interp.config['fps'] = fps + if sources_config: + interp.sources_config = Path(sources_config) + if audio_config: + interp.audio_config = Path(audio_config) + interp.run(duration=duration, output=output) + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser(description="Run streaming sexp (generic interpreter)") + parser.add_argument("sexp", help="Path to .sexp file") + parser.add_argument("-d", "--duration", type=float, default=None) + parser.add_argument("-o", "--output", default="pipe") + parser.add_argument("--fps", type=float, default=None) + parser.add_argument("--sources", dest="sources_config", help="Path to sources config .sexp file") + parser.add_argument("--audio", dest="audio_config", help="Path to audio config .sexp file") + args = parser.parse_args() + + run_stream(args.sexp, duration=args.duration, output=args.output, fps=args.fps, + sources_config=args.sources_config, audio_config=args.audio_config) diff --git a/tasks/__init__.py b/tasks/__init__.py index 0b7543f..bb8f547 100644 --- a/tasks/__init__.py +++ b/tasks/__init__.py @@ -1,26 +1,10 @@ -# art-celery/tasks - Celery tasks for 3-phase execution +# art-celery/tasks - Celery tasks for streaming video rendering # -# Tasks for the Art DAG distributed execution system: -# 1. analyze_input - Extract features from input media -# 2. execute_step - Execute a single step from the plan -# 3. run_plan - Orchestrate execution of a full plan -# -# S-expression tasks: -# 4. execute_step_sexp - Execute step from S-expression -# 5. run_plan_sexp - Run plan from S-expression +# Tasks: +# 1. 
run_stream - Execute a streaming S-expression recipe -from .analyze import analyze_input, analyze_inputs -from .execute import execute_step -from .orchestrate import run_plan, run_recipe -from .execute_sexp import execute_step_sexp, run_plan_sexp +from .streaming import run_stream __all__ = [ - "analyze_input", - "analyze_inputs", - "execute_step", - "run_plan", - "run_recipe", - # S-expression tasks - "execute_step_sexp", - "run_plan_sexp", + "run_stream", ] diff --git a/tasks/analyze.py b/tasks/analyze.py deleted file mode 100644 index 8cdb9a1..0000000 --- a/tasks/analyze.py +++ /dev/null @@ -1,132 +0,0 @@ -""" -Analysis tasks for extracting features from input media. - -Phase 1 of the 3-phase execution model. -""" - -import json -import logging -import os -from pathlib import Path -from typing import Dict, List, Optional - -from celery import current_task - -# Import from the Celery app -import sys -sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -from celery_app import app - -# Import artdag analysis module -try: - from artdag.analysis import Analyzer, AnalysisResult -except ImportError: - # artdag not installed, will fail at runtime - Analyzer = None - AnalysisResult = None - -logger = logging.getLogger(__name__) - -# Cache directory for analysis results -CACHE_DIR = Path(os.environ.get('CACHE_DIR', str(Path.home() / ".artdag" / "cache"))) -ANALYSIS_CACHE_DIR = CACHE_DIR / 'analysis' - - -@app.task(bind=True, name='tasks.analyze_input') -def analyze_input( - self, - input_hash: str, - input_path: str, - features: List[str], -) -> dict: - """ - Analyze a single input file. - - Args: - input_hash: Content hash of the input - input_path: Path to the input file - features: List of features to extract - - Returns: - Dict with analysis results - """ - if Analyzer is None: - raise ImportError("artdag.analysis not available") - - logger.info(f"Analyzing {input_hash[:16]}... 
for features: {features}") - - # Create analyzer with caching - ANALYSIS_CACHE_DIR.mkdir(parents=True, exist_ok=True) - analyzer = Analyzer(cache_dir=ANALYSIS_CACHE_DIR) - - try: - result = analyzer.analyze( - input_hash=input_hash, - features=features, - input_path=Path(input_path), - ) - - return { - "status": "completed", - "input_hash": input_hash, - "cache_id": result.cache_id, - "features": features, - "result": result.to_dict(), - } - - except Exception as e: - logger.error(f"Analysis failed for {input_hash}: {e}") - return { - "status": "failed", - "input_hash": input_hash, - "error": str(e), - } - - -@app.task(bind=True, name='tasks.analyze_inputs') -def analyze_inputs( - self, - inputs: Dict[str, str], - features: List[str], -) -> dict: - """ - Analyze multiple inputs in parallel. - - Args: - inputs: Dict mapping input_hash to file path - features: List of features to extract from all inputs - - Returns: - Dict with all analysis results - """ - if Analyzer is None: - raise ImportError("artdag.analysis not available") - - logger.info(f"Analyzing {len(inputs)} inputs for features: {features}") - - ANALYSIS_CACHE_DIR.mkdir(parents=True, exist_ok=True) - analyzer = Analyzer(cache_dir=ANALYSIS_CACHE_DIR) - - results = {} - errors = [] - - for input_hash, input_path in inputs.items(): - try: - result = analyzer.analyze( - input_hash=input_hash, - features=features, - input_path=Path(input_path), - ) - results[input_hash] = result.to_dict() - - except Exception as e: - logger.error(f"Analysis failed for {input_hash}: {e}") - errors.append({"input_hash": input_hash, "error": str(e)}) - - return { - "status": "completed" if not errors else "partial", - "results": results, - "errors": errors, - "total": len(inputs), - "successful": len(results), - } diff --git a/tasks/execute.py b/tasks/execute.py deleted file mode 100644 index 60dcd67..0000000 --- a/tasks/execute.py +++ /dev/null @@ -1,381 +0,0 @@ -""" -Step execution task. 
- -Phase 3 of the 3-phase execution model. -Executes individual steps from an execution plan with IPFS-backed caching. -""" - -import json -import logging -import os -import socket -from pathlib import Path -from typing import Dict, List, Optional - -from celery import current_task - -# Import from the Celery app -import sys -sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -from celery_app import app -from claiming import ( - get_claimer, - claim_task, - complete_task, - fail_task, - ClaimStatus, -) -from cache_manager import get_cache_manager, L1CacheManager - -# Import artdag -try: - from artdag import Cache, NodeType - from artdag.executor import get_executor - from artdag.planning import ExecutionStep -except ImportError: - Cache = None - NodeType = None - get_executor = None - ExecutionStep = None - -logger = logging.getLogger(__name__) - - -def get_worker_id() -> str: - """Get a unique identifier for this worker.""" - hostname = socket.gethostname() - pid = os.getpid() - return f"{hostname}:{pid}" - - -@app.task(bind=True, name='tasks.execute_step') -def execute_step( - self, - step_json: str, - plan_id: str, - input_cache_ids: Dict[str, str], -) -> dict: - """ - Execute a single step from an execution plan. - - Uses hash-based claiming to prevent duplicate work. - Results are stored in IPFS-backed cache. 
- - Args: - step_json: JSON-serialized ExecutionStep - plan_id: ID of the parent execution plan - input_cache_ids: Mapping from input step_id to their cache_id - - Returns: - Dict with execution result - """ - if ExecutionStep is None: - raise ImportError("artdag.planning not available") - - step = ExecutionStep.from_json(step_json) - worker_id = get_worker_id() - task_id = self.request.id - - logger.info(f"Executing step {step.step_id} ({step.node_type}) cache_id={step.cache_id[:16]}...") - - # Get L1 cache manager (IPFS-backed) - cache_mgr = get_cache_manager() - - # Check if already cached (by cache_id as cid) - cached_path = cache_mgr.get_by_cid(step.cache_id) - if cached_path: - logger.info(f"Step {step.step_id} already cached at {cached_path}") - - # Mark as cached in claiming system - claimer = get_claimer() - claimer.mark_cached(step.cache_id, str(cached_path)) - - return { - "status": "cached", - "step_id": step.step_id, - "cache_id": step.cache_id, - "output_path": str(cached_path), - } - - # Try to claim the task - if not claim_task(step.cache_id, worker_id, task_id): - # Another worker is handling it - logger.info(f"Step {step.step_id} claimed by another worker, waiting...") - - claimer = get_claimer() - result = claimer.wait_for_completion(step.cache_id, timeout=600) - - if result and result.status == ClaimStatus.COMPLETED: - return { - "status": "completed_by_other", - "step_id": step.step_id, - "cache_id": step.cache_id, - "output_path": result.output_path, - } - elif result and result.status == ClaimStatus.CACHED: - return { - "status": "cached", - "step_id": step.step_id, - "cache_id": step.cache_id, - "output_path": result.output_path, - } - elif result and result.status == ClaimStatus.FAILED: - return { - "status": "failed", - "step_id": step.step_id, - "cache_id": step.cache_id, - "error": result.error, - } - else: - return { - "status": "timeout", - "step_id": step.step_id, - "cache_id": step.cache_id, - "error": "Timeout waiting for other 
worker", - } - - # We have the claim, update to running - claimer = get_claimer() - claimer.update_status(step.cache_id, worker_id, ClaimStatus.RUNNING) - - try: - # Handle SOURCE nodes - if step.node_type == "SOURCE": - cid = step.config.get("cid") - if not cid: - raise ValueError(f"SOURCE step missing cid") - - # Look up in cache - path = cache_mgr.get_by_cid(cid) - if not path: - raise ValueError(f"SOURCE input not found in cache: {cid[:16]}...") - - output_path = str(path) - complete_task(step.cache_id, worker_id, output_path) - return { - "status": "completed", - "step_id": step.step_id, - "cache_id": step.cache_id, - "output_path": output_path, - } - - # Handle _LIST virtual nodes - if step.node_type == "_LIST": - item_paths = [] - for item_id in step.config.get("items", []): - item_cache_id = input_cache_ids.get(item_id) - if item_cache_id: - path = cache_mgr.get_by_cid(item_cache_id) - if path: - item_paths.append(str(path)) - - complete_task(step.cache_id, worker_id, json.dumps(item_paths)) - return { - "status": "completed", - "step_id": step.step_id, - "cache_id": step.cache_id, - "output_path": None, - "item_paths": item_paths, - } - - # Handle COMPOUND nodes (collapsed effect chains) - if step.node_type == "COMPOUND": - filter_chain = step.config.get("filter_chain", []) - if not filter_chain: - raise ValueError("COMPOUND step has empty filter_chain") - - # Resolve input paths - input_paths = [] - for input_step_id in step.input_steps: - input_cache_id = input_cache_ids.get(input_step_id) - if not input_cache_id: - raise ValueError(f"No cache_id for input step: {input_step_id}") - path = cache_mgr.get_by_cid(input_cache_id) - if not path: - raise ValueError(f"Input not in cache: {input_cache_id[:16]}...") - input_paths.append(Path(path)) - - if not input_paths: - raise ValueError("COMPOUND step has no inputs") - - # Build FFmpeg filter graph from chain - import subprocess - import tempfile - - filters = [] - for filter_item in filter_chain: - 
filter_type = filter_item.get("type", "") - filter_config = filter_item.get("config", {}) - - if filter_type == "TRANSFORM": - effects = filter_config.get("effects", {}) - for eff_name, eff_value in effects.items(): - if eff_name == "saturation": - filters.append(f"eq=saturation={eff_value}") - elif eff_name == "brightness": - filters.append(f"eq=brightness={eff_value}") - elif eff_name == "contrast": - filters.append(f"eq=contrast={eff_value}") - elif eff_name == "hue": - filters.append(f"hue=h={eff_value}") - - elif filter_type == "RESIZE": - width = filter_config.get("width", -1) - height = filter_config.get("height", -1) - mode = filter_config.get("mode", "fit") - if mode == "fit": - filters.append(f"scale={width}:{height}:force_original_aspect_ratio=decrease") - elif mode == "fill": - filters.append(f"scale={width}:{height}:force_original_aspect_ratio=increase,crop={width}:{height}") - else: - filters.append(f"scale={width}:{height}") - - output_dir = Path(tempfile.mkdtemp()) - output_path = output_dir / f"compound_{step.cache_id[:16]}.mp4" - - cmd = ["ffmpeg", "-y", "-i", str(input_paths[0])] - - # Handle segment timing - for filter_item in filter_chain: - if filter_item.get("type") == "SEGMENT": - seg_config = filter_item.get("config", {}) - if "start" in seg_config: - cmd.extend(["-ss", str(seg_config["start"])]) - if "end" in seg_config: - duration = seg_config["end"] - seg_config.get("start", 0) - cmd.extend(["-t", str(duration)]) - elif "duration" in seg_config: - cmd.extend(["-t", str(seg_config["duration"])]) - - if filters: - cmd.extend(["-vf", ",".join(filters)]) - - cmd.extend(["-c:v", "libx264", "-c:a", "aac", str(output_path)]) - - logger.info(f"Running COMPOUND FFmpeg: {' '.join(cmd)}") - result = subprocess.run(cmd, capture_output=True, text=True) - - if result.returncode != 0: - raise RuntimeError(f"FFmpeg failed: {result.stderr}") - - cached_file, ipfs_cid = cache_mgr.put( - source_path=output_path, - node_type="COMPOUND", - 
node_id=step.cache_id, - ) - - logger.info(f"COMPOUND step {step.step_id} completed with {len(filter_chain)} filters") - complete_task(step.cache_id, worker_id, str(cached_file.path)) - - import shutil - if output_dir.exists(): - shutil.rmtree(output_dir, ignore_errors=True) - - return { - "status": "completed", - "step_id": step.step_id, - "cache_id": step.cache_id, - "output_path": str(cached_file.path), - "cid": cached_file.cid, - "ipfs_cid": ipfs_cid, - "filter_count": len(filter_chain), - } - - # Get executor for this node type - try: - node_type = NodeType[step.node_type] - except KeyError: - node_type = step.node_type - - executor = get_executor(node_type) - if executor is None: - raise ValueError(f"No executor for node type: {step.node_type}") - - # Resolve input paths from cache - input_paths = [] - for input_step_id in step.input_steps: - input_cache_id = input_cache_ids.get(input_step_id) - if not input_cache_id: - raise ValueError(f"No cache_id for input step: {input_step_id}") - - path = cache_mgr.get_by_cid(input_cache_id) - if not path: - raise ValueError(f"Input not in cache: {input_cache_id[:16]}...") - - input_paths.append(Path(path)) - - # Create temp output path - import tempfile - output_dir = Path(tempfile.mkdtemp()) - output_path = output_dir / f"output_{step.cache_id[:16]}.mp4" - - # Execute - logger.info(f"Running executor for {step.node_type} with {len(input_paths)} inputs") - result_path = executor.execute(step.config, input_paths, output_path) - - # Store in IPFS-backed cache - cached_file, ipfs_cid = cache_mgr.put( - source_path=result_path, - node_type=step.node_type, - node_id=step.cache_id, - ) - - logger.info(f"Step {step.step_id} completed, IPFS CID: {ipfs_cid}") - - # Mark completed - complete_task(step.cache_id, worker_id, str(cached_file.path)) - - # Build outputs list (for multi-output support) - outputs = [] - if step.outputs: - # Use pre-defined outputs from step - for output_def in step.outputs: - outputs.append({ - "name": 
output_def.name, - "cache_id": output_def.cache_id, - "media_type": output_def.media_type, - "index": output_def.index, - "path": str(cached_file.path), - "cid": cached_file.cid, - "ipfs_cid": ipfs_cid, - }) - else: - # Single output (backwards compat) - outputs.append({ - "name": step.name or step.step_id, - "cache_id": step.cache_id, - "media_type": "video/mp4", - "index": 0, - "path": str(cached_file.path), - "cid": cached_file.cid, - "ipfs_cid": ipfs_cid, - }) - - # Cleanup temp - if output_dir.exists(): - import shutil - shutil.rmtree(output_dir, ignore_errors=True) - - return { - "status": "completed", - "step_id": step.step_id, - "name": step.name, - "cache_id": step.cache_id, - "output_path": str(cached_file.path), - "cid": cached_file.cid, - "ipfs_cid": ipfs_cid, - "outputs": outputs, - } - - except Exception as e: - logger.error(f"Step {step.step_id} failed: {e}") - fail_task(step.cache_id, worker_id, str(e)) - - return { - "status": "failed", - "step_id": step.step_id, - "cache_id": step.cache_id, - "error": str(e), - } - - diff --git a/tasks/execute_sexp.py b/tasks/execute_sexp.py deleted file mode 100644 index 10a947d..0000000 --- a/tasks/execute_sexp.py +++ /dev/null @@ -1,582 +0,0 @@ -""" -S-expression step execution task. - -Executes individual steps received as S-expressions. -The S-expression is the canonical format - workers verify -cache_ids by hashing the received S-expression. 
-""" - -import json -import logging -import os -import socket -from pathlib import Path -from typing import Dict, Optional - -from celery import current_task - -# Import from the Celery app -import sys -sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -from celery_app import app -from claiming import ( - get_claimer, - claim_task, - complete_task, - fail_task, - ClaimStatus, -) -from cache_manager import get_cache_manager - -# Import artdag S-expression modules -try: - from artdag.sexp import parse, Symbol, Keyword - from artdag import NodeType - from artdag.executor import get_executor -except ImportError: - parse = None - Symbol = None - Keyword = None - NodeType = None - get_executor = None - -logger = logging.getLogger(__name__) - - -def get_worker_id() -> str: - """Get a unique identifier for this worker.""" - hostname = socket.gethostname() - pid = os.getpid() - return f"{hostname}:{pid}" - - -def sexp_to_config(sexp) -> Dict: - """ - Convert parsed S-expression to config dict. 
- - Input: (effect :hash "abc123" :inputs ["step1"]) - Output: {"node_type": "EFFECT", "hash": "abc123", "inputs": ["step1"]} - """ - if not isinstance(sexp, list) or len(sexp) < 1: - raise ValueError(f"Invalid step S-expression: {sexp}") - - # First element is the node type - head = sexp[0] - if isinstance(head, Symbol): - node_type = head.name.upper() - else: - node_type = str(head).upper() - - config = {"node_type": node_type} - - # Parse keyword arguments - i = 1 - while i < len(sexp): - item = sexp[i] - if isinstance(item, Keyword): - if i + 1 < len(sexp): - key = item.name.replace('-', '_') - value = sexp[i + 1] - config[key] = value - i += 2 - else: - i += 1 - else: - # Positional argument - i += 1 - - return config - - -@app.task(bind=True, name='tasks.execute_step_sexp') -def execute_step_sexp( - self, - step_sexp: str, - step_id: str, - cache_id: str, - plan_id: str, - input_cache_ids: Dict[str, str], -) -> dict: - """ - Execute a single step from an S-expression. - - The step is received as a serialized S-expression string. - Workers can verify the cache_id by hashing the S-expression. 
- - Args: - step_sexp: Serialized S-expression for the step - step_id: Human-readable step identifier - cache_id: Expected cache_id (SHA3-256 of step_sexp) - plan_id: ID of the parent execution plan - input_cache_ids: Mapping from input step_id to their cache_id - - Returns: - Dict with execution result - """ - if parse is None: - raise ImportError("artdag.sexp not available") - - worker_id = get_worker_id() - task_id = self.request.id - - logger.info(f"Executing step {step_id} cache_id={cache_id[:16]}...") - logger.debug(f"Step S-expression: {step_sexp[:100]}...") - - # Parse the S-expression - try: - parsed = parse(step_sexp) - config = sexp_to_config(parsed) - node_type = config.pop("node_type") - except Exception as e: - logger.error(f"Failed to parse step S-expression: {e}") - return { - "status": "failed", - "step_id": step_id, - "cache_id": cache_id, - "error": f"Parse error: {e}", - } - - # Get cache manager - cache_mgr = get_cache_manager() - - # Check if already cached - cached_path = cache_mgr.get_by_cid(cache_id) - if cached_path: - logger.info(f"Step {step_id} already cached at {cached_path}") - - claimer = get_claimer() - claimer.mark_cached(cache_id, str(cached_path)) - - return { - "status": "cached", - "step_id": step_id, - "cache_id": cache_id, - "output_path": str(cached_path), - } - - # Try to claim the task - if not claim_task(cache_id, worker_id, task_id): - logger.info(f"Step {step_id} claimed by another worker, waiting...") - - claimer = get_claimer() - result = claimer.wait_for_completion(cache_id, timeout=600) - - if result and result.status == ClaimStatus.COMPLETED: - return { - "status": "completed_by_other", - "step_id": step_id, - "cache_id": cache_id, - "output_path": result.output_path, - } - elif result and result.status == ClaimStatus.CACHED: - return { - "status": "cached", - "step_id": step_id, - "cache_id": cache_id, - "output_path": result.output_path, - } - elif result and result.status == ClaimStatus.FAILED: - return { - 
"status": "failed", - "step_id": step_id, - "cache_id": cache_id, - "error": result.error, - } - else: - return { - "status": "timeout", - "step_id": step_id, - "cache_id": cache_id, - "error": "Timeout waiting for other worker", - } - - # We have the claim, update to running - claimer = get_claimer() - claimer.update_status(cache_id, worker_id, ClaimStatus.RUNNING) - - try: - # Handle SOURCE nodes - if node_type == "SOURCE": - # Support both :cid (new IPFS) and :hash (legacy) - content_id = config.get("cid") or config.get("hash") - if not content_id: - raise ValueError("SOURCE step missing :cid or :hash") - - path = cache_mgr.get_by_cid(content_id) - if not path: - raise ValueError(f"SOURCE input not found: {content_id[:16]}...") - - output_path = str(path) - complete_task(cache_id, worker_id, output_path) - return { - "status": "completed", - "step_id": step_id, - "cache_id": cache_id, - "output_path": output_path, - } - - # Handle EFFECT nodes - if node_type == "EFFECT": - effect_hash = config.get("cid") or config.get("hash") - if not effect_hash: - raise ValueError("EFFECT step missing :cid") - - # Get input paths - inputs = config.get("inputs", []) - input_paths = [] - for inp in inputs: - inp_cache_id = input_cache_ids.get(inp, inp) - path = cache_mgr.get_by_cid(inp_cache_id) - if not path: - raise ValueError(f"Input not found: {inp_cache_id[:16]}...") - input_paths.append(Path(path)) - - # Get executor - try: - executor = get_executor(NodeType.SOURCE) # Effects use SOURCE executor for now - except: - executor = None - - if executor is None: - # Fallback: copy input to output (identity-like behavior) - if input_paths: - output_path = str(input_paths[0]) - complete_task(cache_id, worker_id, output_path) - return { - "status": "completed", - "step_id": step_id, - "cache_id": cache_id, - "output_path": output_path, - } - raise ValueError(f"No executor for EFFECT and no inputs") - - # Handle COMPOUND nodes (collapsed effect chains) - if node_type == "COMPOUND": - 
filter_chain = config.get("filter_chain", []) - if not filter_chain: - raise ValueError("COMPOUND step has empty filter_chain") - - # Get input paths - inputs = config.get("inputs", []) - input_paths = [] - for inp in inputs: - inp_cache_id = input_cache_ids.get(inp, inp) - path = cache_mgr.get_by_cid(inp_cache_id) - if not path: - raise ValueError(f"Input not found: {inp_cache_id[:16]}...") - input_paths.append(Path(path)) - - if not input_paths: - raise ValueError("COMPOUND step has no inputs") - - # Build FFmpeg filter graph from chain - filters = [] - for i, filter_item in enumerate(filter_chain): - filter_type = filter_item.get("type", "") - filter_config = filter_item.get("config", {}) - - if filter_type == "EFFECT": - # Effect - for now identity-like, can be extended - effect_hash = filter_config.get("cid") or filter_config.get("hash") or filter_config.get("effect") - # TODO: resolve effect to actual FFmpeg filter - # For now, skip identity-like effects - pass - - elif filter_type == "TRANSFORM": - # Transform effects map to FFmpeg filters - effects = filter_config.get("effects", {}) - for eff_name, eff_value in effects.items(): - if eff_name == "saturation": - filters.append(f"eq=saturation={eff_value}") - elif eff_name == "brightness": - filters.append(f"eq=brightness={eff_value}") - elif eff_name == "contrast": - filters.append(f"eq=contrast={eff_value}") - elif eff_name == "hue": - filters.append(f"hue=h={eff_value}") - - elif filter_type == "RESIZE": - width = filter_config.get("width", -1) - height = filter_config.get("height", -1) - mode = filter_config.get("mode", "fit") - if mode == "fit": - filters.append(f"scale={width}:{height}:force_original_aspect_ratio=decrease") - elif mode == "fill": - filters.append(f"scale={width}:{height}:force_original_aspect_ratio=increase,crop={width}:{height}") - else: - filters.append(f"scale={width}:{height}") - - elif filter_type == "SEGMENT": - # Segment handled via -ss and -t, not filter - pass - - # Create temp 
output - import tempfile - import subprocess - - output_dir = Path(tempfile.mkdtemp()) - output_path = output_dir / f"compound_{cache_id[:16]}.mp4" - - # Build FFmpeg command - input_path = input_paths[0] - cmd = ["ffmpeg", "-y", "-i", str(input_path)] - - # Handle segment timing if present - for filter_item in filter_chain: - if filter_item.get("type") == "SEGMENT": - seg_config = filter_item.get("config", {}) - if "start" in seg_config: - cmd.extend(["-ss", str(seg_config["start"])]) - if "end" in seg_config: - duration = seg_config["end"] - seg_config.get("start", 0) - cmd.extend(["-t", str(duration)]) - elif "duration" in seg_config: - cmd.extend(["-t", str(seg_config["duration"])]) - - # Add filter graph if any - if filters: - cmd.extend(["-vf", ",".join(filters)]) - - # Output options - cmd.extend(["-c:v", "libx264", "-c:a", "aac", str(output_path)]) - - logger.info(f"Running COMPOUND FFmpeg: {' '.join(cmd)}") - result = subprocess.run(cmd, capture_output=True, text=True) - - if result.returncode != 0: - raise RuntimeError(f"FFmpeg failed: {result.stderr}") - - # Store in cache - cached_file, ipfs_cid = cache_mgr.put( - source_path=output_path, - node_type="COMPOUND", - node_id=cache_id, - ) - - logger.info(f"COMPOUND step {step_id} completed with {len(filter_chain)} filters, IPFS CID: {ipfs_cid}") - complete_task(cache_id, worker_id, str(cached_file.path)) - - # Cleanup temp - if output_dir.exists(): - import shutil - shutil.rmtree(output_dir, ignore_errors=True) - - return { - "status": "completed", - "step_id": step_id, - "cache_id": cache_id, - "output_path": str(cached_file.path), - "cid": cached_file.cid, - "ipfs_cid": ipfs_cid, - "filter_count": len(filter_chain), - } - - # Get executor for other node types - try: - node_type_enum = NodeType[node_type] - except (KeyError, TypeError): - node_type_enum = node_type - - executor = get_executor(node_type_enum) - if executor is None: - raise ValueError(f"No executor for node type: {node_type}") - - # Resolve 
input paths - inputs = config.pop("inputs", []) - input_paths = [] - for inp in inputs: - inp_cache_id = input_cache_ids.get(inp, inp) - path = cache_mgr.get_by_cid(inp_cache_id) - if not path: - raise ValueError(f"Input not found: {inp_cache_id[:16]}...") - input_paths.append(Path(path)) - - # Create temp output - import tempfile - output_dir = Path(tempfile.mkdtemp()) - output_path = output_dir / f"output_{cache_id[:16]}.mp4" - - # Execute - logger.info(f"Running executor for {node_type} with {len(input_paths)} inputs") - result_path = executor.execute(config, input_paths, output_path) - - # Store in cache - cached_file, ipfs_cid = cache_mgr.put( - source_path=result_path, - node_type=node_type, - node_id=cache_id, - ) - - logger.info(f"Step {step_id} completed, IPFS CID: {ipfs_cid}") - complete_task(cache_id, worker_id, str(cached_file.path)) - - # Cleanup temp - if output_dir.exists(): - import shutil - shutil.rmtree(output_dir, ignore_errors=True) - - return { - "status": "completed", - "step_id": step_id, - "cache_id": cache_id, - "output_path": str(cached_file.path), - "cid": cached_file.cid, - "ipfs_cid": ipfs_cid, - } - - except Exception as e: - logger.error(f"Step {step_id} failed: {e}") - fail_task(cache_id, worker_id, str(e)) - - return { - "status": "failed", - "step_id": step_id, - "cache_id": cache_id, - "error": str(e), - } - - -@app.task(bind=True, name='tasks.run_plan_sexp') -def run_plan_sexp( - self, - plan_sexp: str, - run_id: Optional[str] = None, -) -> dict: - """ - Execute a complete S-expression execution plan. 
- - Args: - plan_sexp: Serialized S-expression plan - run_id: Optional run ID for tracking - - Returns: - Dict with execution results - """ - if parse is None: - raise ImportError("artdag.sexp not available") - - from artdag.sexp.scheduler import PlanScheduler - from artdag.sexp.planner import ExecutionPlanSexp, PlanStep - - logger.info(f"Running plan from S-expression (run_id={run_id})") - - # Parse the plan S-expression - parsed = parse(plan_sexp) - - # Extract plan metadata and steps - plan_id = None - recipe_id = None - recipe_hash = None - inputs = {} - steps = [] - output_step_id = None - - i = 1 - while i < len(parsed): - item = parsed[i] - - if isinstance(item, Keyword): - key = item.name - if i + 1 < len(parsed): - value = parsed[i + 1] - - if key == "id": - plan_id = value - elif key == "recipe": - recipe_id = value - elif key == "recipe-hash": - recipe_hash = value - elif key == "output": - output_step_id = value - - i += 2 - else: - i += 1 - - elif isinstance(item, list) and len(item) > 0: - head = item[0] - if isinstance(head, Symbol): - if head.name == "inputs": - # Parse inputs block - for j in range(1, len(item)): - inp = item[j] - if isinstance(inp, list) and len(inp) >= 2: - name = inp[0].name if isinstance(inp[0], Symbol) else str(inp[0]) - value = inp[1] - inputs[name] = value - - elif head.name == "step": - # Parse step - step_id = item[1] if len(item) > 1 else None - step_cache_id = None - step_level = 0 - step_node = None - - j = 2 - while j < len(item): - sub = item[j] - if isinstance(sub, Keyword): - if sub.name == "cache-id" and j + 1 < len(item): - step_cache_id = item[j + 1] - j += 2 - elif sub.name == "level" and j + 1 < len(item): - step_level = item[j + 1] - j += 2 - else: - j += 1 - elif isinstance(sub, list): - step_node = sub - j += 1 - else: - j += 1 - - if step_id and step_cache_id and step_node: - # Convert step_node to config - config = sexp_to_config(step_node) - node_type = config.pop("node_type") - step_inputs = 
config.pop("inputs", []) - - steps.append(PlanStep( - step_id=step_id, - node_type=node_type, - config=config, - inputs=step_inputs if isinstance(step_inputs, list) else [], - cache_id=step_cache_id, - level=step_level, - )) - i += 1 - else: - i += 1 - - # Create plan object - plan = ExecutionPlanSexp( - plan_id=plan_id or "unknown", - recipe_id=recipe_id or "unknown", - recipe_hash=recipe_hash or "", - steps=steps, - output_step_id=output_step_id or (steps[-1].step_id if steps else ""), - inputs=inputs, - ) - - # Create scheduler and run - cache_mgr = get_cache_manager() - scheduler = PlanScheduler( - cache_manager=cache_mgr, - celery_app=app, - execute_task_name='tasks.execute_step_sexp', - ) - - result = scheduler.schedule(plan) - - return { - "status": result.status, - "run_id": run_id, - "plan_id": result.plan_id, - "output_cache_id": result.output_cache_id, - "output_path": result.output_path, - "output_ipfs_cid": result.output_ipfs_cid, - "steps_completed": result.steps_completed, - "steps_cached": result.steps_cached, - "steps_failed": result.steps_failed, - "error": result.error, - } diff --git a/tasks/orchestrate.py b/tasks/orchestrate.py deleted file mode 100644 index 5e68cb9..0000000 --- a/tasks/orchestrate.py +++ /dev/null @@ -1,514 +0,0 @@ -""" -Plan orchestration tasks. - -Coordinates the full 3-phase execution: -1. Analyze inputs -2. Generate plan -3. Execute steps level by level - -Uses IPFS-backed cache for durability. 
-""" - -import json -import logging -import os -from pathlib import Path -from typing import Dict, List, Optional - -from celery import current_task, group, chain - -# Import from the Celery app -import sys -sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -from celery_app import app -from claiming import get_claimer -from cache_manager import get_cache_manager - -# Import artdag modules -try: - from artdag import Cache - from artdag.analysis import Analyzer, AnalysisResult - from artdag.planning import RecipePlanner, ExecutionPlan, Recipe -except ImportError: - Cache = None - Analyzer = None - AnalysisResult = None - RecipePlanner = None - ExecutionPlan = None - Recipe = None - -from .execute import execute_step - -logger = logging.getLogger(__name__) - -# Cache directories -CACHE_DIR = Path(os.environ.get('CACHE_DIR', str(Path.home() / ".artdag" / "cache"))) -ANALYSIS_CACHE_DIR = CACHE_DIR / 'analysis' -PLAN_CACHE_DIR = CACHE_DIR / 'plans' - - -@app.task(bind=True, name='tasks.run_plan') -def run_plan( - self, - plan_json: str, - run_id: Optional[str] = None, -) -> dict: - """ - Execute a complete execution plan. - - Runs steps level by level, with parallel execution within each level. - Results are stored in IPFS-backed cache. - - Args: - plan_json: JSON-serialized ExecutionPlan - run_id: Optional run ID for tracking - - Returns: - Dict with execution results - """ - if ExecutionPlan is None: - raise ImportError("artdag.planning not available") - - plan = ExecutionPlan.from_json(plan_json) - cache_mgr = get_cache_manager() - - logger.info(f"Executing plan {plan.plan_id[:16]}... 
({len(plan.steps)} steps)") - - # Build initial cache_ids mapping (step_id -> cache_id) - cache_ids = {} - for step in plan.steps: - cache_ids[step.step_id] = step.cache_id - - # Also map input hashes - for name, cid in plan.input_hashes.items(): - cache_ids[name] = cid - - # Group steps by level - steps_by_level = plan.get_steps_by_level() - max_level = max(steps_by_level.keys()) if steps_by_level else 0 - - results_by_step = {} - total_cached = 0 - total_executed = 0 - - for level in range(max_level + 1): - level_steps = steps_by_level.get(level, []) - if not level_steps: - continue - - logger.info(f"Executing level {level}: {len(level_steps)} steps") - - # Check which steps need execution - steps_to_run = [] - - for step in level_steps: - # Check if cached - cached_path = cache_mgr.get_by_cid(step.cache_id) - if cached_path: - results_by_step[step.step_id] = { - "status": "cached", - "cache_id": step.cache_id, - "output_path": str(cached_path), - } - total_cached += 1 - else: - steps_to_run.append(step) - - if not steps_to_run: - logger.info(f"Level {level}: all steps cached") - continue - - # Build input cache_ids for this level - level_cache_ids = dict(cache_ids) - - # Execute steps in parallel - tasks = [ - execute_step.s(step.to_json(), plan.plan_id, level_cache_ids) - for step in steps_to_run - ] - - job = group(tasks) - async_results = job.apply_async() - - # Wait for completion - try: - step_results = async_results.get(timeout=3600) - except Exception as e: - logger.error(f"Level {level} execution failed: {e}") - return { - "status": "failed", - "error": str(e), - "level": level, - "results": results_by_step, - "run_id": run_id, - } - - # Process results - for result in step_results: - step_id = result.get("step_id") - cache_id = result.get("cache_id") - - results_by_step[step_id] = result - cache_ids[step_id] = cache_id - - if result.get("status") in ("completed", "cached", "completed_by_other"): - total_executed += 1 - elif result.get("status") == 
"failed": - logger.error(f"Step {step_id} failed: {result.get('error')}") - return { - "status": "failed", - "error": f"Step {step_id} failed: {result.get('error')}", - "level": level, - "results": results_by_step, - "run_id": run_id, - } - - # Get final output - output_step = plan.get_step(plan.output_step) - output_cache_id = output_step.cache_id if output_step else None - output_path = None - output_ipfs_cid = None - output_name = plan.output_name - - if output_cache_id: - output_path = cache_mgr.get_by_cid(output_cache_id) - output_ipfs_cid = cache_mgr.get_ipfs_cid(output_cache_id) - - # Build list of all outputs with their names and artifacts - all_outputs = [] - for step in plan.steps: - step_result = results_by_step.get(step.step_id, {}) - step_outputs = step_result.get("outputs", []) - - # If no outputs in result, build from step definition - if not step_outputs and step.outputs: - for output_def in step.outputs: - output_cache_path = cache_mgr.get_by_cid(output_def.cache_id) - output_ipfs = cache_mgr.get_ipfs_cid(output_def.cache_id) if output_cache_path else None - all_outputs.append({ - "name": output_def.name, - "step_id": step.step_id, - "step_name": step.name, - "cache_id": output_def.cache_id, - "media_type": output_def.media_type, - "path": str(output_cache_path) if output_cache_path else None, - "ipfs_cid": output_ipfs, - "status": "cached" if output_cache_path else "missing", - }) - else: - for output in step_outputs: - all_outputs.append({ - **output, - "step_id": step.step_id, - "step_name": step.name, - "status": "completed", - }) - - return { - "status": "completed", - "run_id": run_id, - "plan_id": plan.plan_id, - "plan_name": plan.name, - "recipe_name": plan.recipe_name, - "output_name": output_name, - "output_cache_id": output_cache_id, - "output_path": str(output_path) if output_path else None, - "output_ipfs_cid": output_ipfs_cid, - "total_steps": len(plan.steps), - "cached": total_cached, - "executed": total_executed, - "results": 
results_by_step, - "outputs": all_outputs, - } - - -def _extract_analysis_from_recipe(compiled_recipe) -> List[Dict]: - """ - Extract analysis nodes from a compiled recipe. - - Finds all (analyze ...) nodes and returns their configurations. - Analysis nodes are identified by type "ANALYZE" or by having - an "analyze" config key. - """ - analysis_nodes = [] - nodes = compiled_recipe.nodes - - if isinstance(nodes, dict): - nodes = list(nodes.values()) - - for node in nodes: - node_type = node.get("type", "").upper() - config = node.get("config", {}) - - # Check if this is an analysis node - if node_type == "ANALYZE" or config.get("analyze"): - analysis_nodes.append({ - "node_id": node.get("id"), - "input_ref": config.get("input") or config.get("source"), - "feature": config.get("feature") or config.get("analyze"), - "config": config, - }) - - return analysis_nodes - - -@app.task(bind=True, name='tasks.run_recipe') -def run_recipe( - self, - recipe_sexp: str, - input_hashes: Dict[str, str], - run_id: Optional[str] = None, -) -> dict: - """ - Run a complete recipe through all phases. - - The recipe S-expression declares what analysis is needed. - Analysis nodes in the recipe are executed first, then their - outputs are used to generate the execution plan. - - 1. Parse: Compile recipe S-expression - 2. Analyze: Run analysis nodes from recipe - 3. Plan: Generate execution plan using analysis results - 4. 
Execute: Run the plan - - Args: - recipe_sexp: Recipe S-expression content - input_hashes: Mapping from input name to content hash - run_id: Optional run ID for tracking - - Returns: - Dict with final results - """ - # Import S-expression compiler - try: - from artdag.sexp import compile_string - except ImportError: - raise ImportError("artdag.sexp not available") - - if Analyzer is None: - raise ImportError("artdag.analysis not available") - - cache_mgr = get_cache_manager() - - logger.info(f"Running recipe with {len(input_hashes)} inputs") - - # Phase 1: Parse recipe - logger.info("Phase 1: Parsing recipe S-expression...") - - try: - compiled = compile_string(recipe_sexp) - except Exception as e: - return {"status": "failed", "error": f"Recipe parse error: {e}"} - - logger.info(f"Parsed recipe: {compiled.name}") - - # Phase 2: Run analysis nodes from recipe - logger.info("Phase 2: Running analysis from recipe...") - - analysis_nodes = _extract_analysis_from_recipe(compiled) - logger.info(f"Found {len(analysis_nodes)} analysis nodes in recipe") - - ANALYSIS_CACHE_DIR.mkdir(parents=True, exist_ok=True) - analyzer = Analyzer(cache_dir=ANALYSIS_CACHE_DIR) - - analysis_results = {} - for analysis_node in analysis_nodes: - input_ref = analysis_node["input_ref"] - feature = analysis_node["feature"] - node_id = analysis_node["node_id"] - - # Resolve input reference to content hash - cid = input_hashes.get(input_ref) - if not cid: - logger.warning(f"Analysis node {node_id}: input '{input_ref}' not in input_hashes") - continue - - path = cache_mgr.get_by_cid(cid) - if not path: - logger.warning(f"Analysis node {node_id}: content {cid[:16]}... 
not in cache") - continue - - try: - # Run analysis for the specific feature - features = [feature] if feature else ["beats", "energy"] - result = analyzer.analyze( - input_hash=cid, - features=features, - input_path=Path(path), - ) - # Store result keyed by node_id so plan can reference it - analysis_results[node_id] = result - # Also store by cid for compatibility - analysis_results[cid] = result - logger.info(f"Analysis {node_id}: feature={feature}, tempo={result.tempo}") - except Exception as e: - logger.warning(f"Analysis failed for {node_id}: {e}") - - logger.info(f"Completed {len(analysis_results)} analysis results") - - # Phase 3: Generate plan - logger.info("Phase 3: Generating execution plan...") - - # Use the S-expression planner if available - try: - from artdag.sexp.planner import create_plan - plan = create_plan(compiled, inputs=input_hashes) - except ImportError: - # Fall back to legacy planner - if RecipePlanner is None: - raise ImportError("No planner available") - recipe = Recipe.from_dict(compiled.to_dict()) - planner = RecipePlanner(use_tree_reduction=True) - plan = planner.plan( - recipe=recipe, - input_hashes=input_hashes, - analysis=analysis_results, - ) - - logger.info(f"Generated plan with {len(plan.steps)} steps") - - # Save plan as S-expression through cache manager (goes to IPFS) - import tempfile - - plan_content = plan.to_sexp_string() if hasattr(plan, 'to_sexp_string') else plan.to_json() - plan_suffix = ".sexp" if hasattr(plan, 'to_sexp_string') else ".json" - - with tempfile.NamedTemporaryFile(delete=False, suffix=plan_suffix, mode="w") as tmp: - tmp.write(plan_content) - tmp_path = Path(tmp.name) - - # Store in cache (content-addressed, auto-pins to IPFS) - # Plan is just another node output - no special treatment needed - cached, plan_ipfs_cid = cache_mgr.put(tmp_path, node_type="plan", move=True) - plan_cache_id = plan_ipfs_cid or cached.cid # Prefer IPFS CID - logger.info(f"Plan cached: cid={plan_cache_id}, 
ipfs={plan_ipfs_cid}") - - # Phase 4: Execute - logger.info("Phase 4: Executing plan...") - - result = run_plan(plan.to_json(), run_id=run_id) - - return { - "status": result.get("status"), - "run_id": run_id, - "recipe": compiled.name, - "plan_id": plan.plan_id, - "plan_cache_id": plan_cache_id, - "plan_ipfs_cid": plan_ipfs_cid, - "output_path": result.get("output_path"), - "output_cache_id": result.get("output_cache_id"), - "output_ipfs_cid": result.get("output_ipfs_cid"), - "analysis_count": len(analysis_results), - "total_steps": len(plan.steps), - "cached": result.get("cached", 0), - "executed": result.get("executed", 0), - "error": result.get("error"), - } - - -@app.task(bind=True, name='tasks.generate_plan') -def generate_plan( - self, - recipe_sexp: str, - input_hashes: Dict[str, str], -) -> dict: - """ - Generate an execution plan without executing it. - - Useful for: - - Previewing what will be executed - - Checking cache status - - Debugging recipe issues - - Args: - recipe_sexp: Recipe S-expression content - input_hashes: Mapping from input name to content hash - - Returns: - Dict with plan details - """ - try: - from artdag.sexp import compile_string - except ImportError: - raise ImportError("artdag.sexp not available") - - if Analyzer is None: - raise ImportError("artdag.analysis not available") - - cache_mgr = get_cache_manager() - - # Parse recipe - try: - compiled = compile_string(recipe_sexp) - except Exception as e: - return {"status": "failed", "error": f"Recipe parse error: {e}"} - - # Extract and run analysis nodes from recipe - analysis_nodes = _extract_analysis_from_recipe(compiled) - - ANALYSIS_CACHE_DIR.mkdir(parents=True, exist_ok=True) - analyzer = Analyzer(cache_dir=ANALYSIS_CACHE_DIR) - - analysis_results = {} - for analysis_node in analysis_nodes: - input_ref = analysis_node["input_ref"] - feature = analysis_node["feature"] - node_id = analysis_node["node_id"] - - cid = input_hashes.get(input_ref) - if not cid: - continue - - path = 
cache_mgr.get_by_cid(cid) - if path: - try: - features = [feature] if feature else ["beats", "energy"] - result = analyzer.analyze( - input_hash=cid, - features=features, - input_path=Path(path), - ) - analysis_results[node_id] = result - analysis_results[cid] = result - except Exception as e: - logger.warning(f"Analysis failed for {node_id}: {e}") - - # Generate plan - try: - from artdag.sexp.planner import create_plan - plan = create_plan(compiled, inputs=input_hashes) - except ImportError: - if RecipePlanner is None: - raise ImportError("No planner available") - recipe = Recipe.from_dict(compiled.to_dict()) - planner = RecipePlanner(use_tree_reduction=True) - plan = planner.plan( - recipe=recipe, - input_hashes=input_hashes, - analysis=analysis_results, - ) - - # Check cache status for each step - steps_status = [] - for step in plan.steps: - cached = cache_mgr.has_content(step.cache_id) - steps_status.append({ - "step_id": step.step_id, - "node_type": step.node_type, - "cache_id": step.cache_id, - "level": step.level, - "cached": cached, - }) - - cached_count = sum(1 for s in steps_status if s["cached"]) - - return { - "status": "planned", - "recipe": compiled.name, - "plan_id": plan.plan_id, - "total_steps": len(plan.steps), - "cached_steps": cached_count, - "pending_steps": len(plan.steps) - cached_count, - "steps": steps_status, - "plan_json": plan.to_json(), - } diff --git a/tasks/streaming.py b/tasks/streaming.py new file mode 100644 index 0000000..558f362 --- /dev/null +++ b/tasks/streaming.py @@ -0,0 +1,303 @@ +""" +Streaming video rendering task. + +Executes S-expression recipes for frame-by-frame video processing. +Supports CID and friendly name references for assets. 
+""" + +import hashlib +import logging +import os +import sys +import tempfile +from pathlib import Path +from typing import Dict, Optional + +from celery import current_task + +# Add parent directory to path for imports +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from celery_app import app +from cache_manager import get_cache_manager + +logger = logging.getLogger(__name__) + + +def resolve_asset(ref: str, actor_id: Optional[str] = None) -> Optional[Path]: + """ + Resolve an asset reference (CID or friendly name) to a file path. + + Args: + ref: CID or friendly name (e.g., "my-video" or "QmXyz...") + actor_id: User ID for friendly name resolution + + Returns: + Path to the asset file, or None if not found + """ + cache_mgr = get_cache_manager() + + # Try as direct CID first + path = cache_mgr.get_by_cid(ref) + if path and path.exists(): + logger.info(f"Resolved {ref[:16]}... as CID to {path}") + return path + + # Try as friendly name if actor_id provided + if actor_id: + import asyncio + from database import resolve_friendly_name + + try: + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + try: + cid = loop.run_until_complete(resolve_friendly_name(actor_id, ref)) + finally: + loop.close() + + if cid: + path = cache_mgr.get_by_cid(cid) + if path and path.exists(): + logger.info(f"Resolved '{ref}' via friendly name to {path}") + return path + except Exception as e: + logger.warning(f"Failed to resolve friendly name '{ref}': {e}") + + logger.warning(f"Could not resolve asset reference: {ref}") + return None + + +class CIDVideoSource: + """ + Video source that resolves CIDs to file paths. + + Wraps the streaming VideoSource to work with cached assets. 
+ """ + + def __init__(self, cid: str, fps: float = 30, actor_id: Optional[str] = None): + self.cid = cid + self.fps = fps + self.actor_id = actor_id + self._source = None + + def _ensure_source(self): + if self._source is None: + path = resolve_asset(self.cid, self.actor_id) + if not path: + raise ValueError(f"Could not resolve video source: {self.cid}") + + from streaming.stream_sexp_generic import VideoSource + # Import from primitives where VideoSource is defined + from sexp_effects.primitive_libs.streaming import VideoSource + self._source = VideoSource(str(path), self.fps) + + def read_at(self, t: float): + self._ensure_source() + return self._source.read_at(t) + + def read(self): + self._ensure_source() + return self._source.read() + + @property + def size(self): + self._ensure_source() + return self._source.size + + @property + def duration(self): + self._ensure_source() + return self._source._duration + + def close(self): + if self._source: + self._source.close() + + +class CIDAudioAnalyzer: + """ + Audio analyzer that resolves CIDs to file paths. 
+ """ + + def __init__(self, cid: str, actor_id: Optional[str] = None): + self.cid = cid + self.actor_id = actor_id + self._analyzer = None + + def _ensure_analyzer(self): + if self._analyzer is None: + path = resolve_asset(self.cid, self.actor_id) + if not path: + raise ValueError(f"Could not resolve audio source: {self.cid}") + + from sexp_effects.primitive_libs.streaming import AudioAnalyzer + self._analyzer = AudioAnalyzer(str(path)) + + def get_energy(self, t: float) -> float: + self._ensure_analyzer() + return self._analyzer.get_energy(t) + + def get_beat(self, t: float) -> bool: + self._ensure_analyzer() + return self._analyzer.get_beat(t) + + def get_beat_count(self, t: float) -> int: + self._ensure_analyzer() + return self._analyzer.get_beat_count(t) + + @property + def duration(self): + self._ensure_analyzer() + return self._analyzer.duration + + +def create_cid_primitives(actor_id: Optional[str] = None): + """ + Create CID-aware primitive functions. + + Returns dict of primitives that resolve CIDs before creating sources. + """ + def prim_make_video_source_cid(cid: str, fps: float = 30): + return CIDVideoSource(cid, fps, actor_id) + + def prim_make_audio_analyzer_cid(cid: str): + return CIDAudioAnalyzer(cid, actor_id) + + return { + 'streaming:make-video-source': prim_make_video_source_cid, + 'streaming:make-audio-analyzer': prim_make_audio_analyzer_cid, + } + + +@app.task(bind=True, name='tasks.run_stream') +def run_stream( + self, + recipe_sexp: str, + output_name: str = "output.mp4", + duration: Optional[float] = None, + fps: Optional[float] = None, + actor_id: Optional[str] = None, + sources_sexp: Optional[str] = None, + audio_sexp: Optional[str] = None, +) -> dict: + """ + Execute a streaming S-expression recipe. 
+ + Args: + recipe_sexp: The recipe S-expression content + output_name: Name for the output file + duration: Optional duration override (seconds) + fps: Optional FPS override + actor_id: User ID for friendly name resolution + sources_sexp: Optional sources config S-expression + audio_sexp: Optional audio config S-expression + + Returns: + Dict with output_cid, output_path, and status + """ + task_id = self.request.id + logger.info(f"Starting stream task {task_id}") + + self.update_state(state='INITIALIZING', meta={'progress': 0}) + + # Get the app directory for primitive/effect paths + app_dir = Path(__file__).parent.parent # celery/ + sexp_effects_dir = app_dir / "sexp_effects" + effects_dir = app_dir / "effects" + templates_dir = app_dir / "templates" + + # Create temp directory for work + work_dir = Path(tempfile.mkdtemp(prefix="stream_")) + recipe_path = work_dir / "recipe.sexp" + output_path = work_dir / output_name + + # Create symlinks to effect directories so relative paths work + (work_dir / "sexp_effects").symlink_to(sexp_effects_dir) + (work_dir / "effects").symlink_to(effects_dir) + (work_dir / "templates").symlink_to(templates_dir) + + try: + # Write recipe to temp file + recipe_path.write_text(recipe_sexp) + + # Write optional config files + sources_path = None + if sources_sexp: + sources_path = work_dir / "sources.sexp" + sources_path.write_text(sources_sexp) + + audio_path = None + if audio_sexp: + audio_path = work_dir / "audio.sexp" + audio_path.write_text(audio_sexp) + + self.update_state(state='RENDERING', meta={'progress': 5}) + + # Import the streaming interpreter + from streaming.stream_sexp_generic import StreamInterpreter + + # Create interpreter + interp = StreamInterpreter(str(recipe_path)) + + # Set primitive library directory explicitly + interp.primitive_lib_dir = sexp_effects_dir / "primitive_libs" + + if fps: + interp.config['fps'] = fps + if sources_path: + interp.sources_config = sources_path + if audio_path: + interp.audio_config 
= audio_path + + # Override primitives with CID-aware versions + cid_prims = create_cid_primitives(actor_id) + interp.primitives.update(cid_prims) + + # Run rendering to file + logger.info(f"Rendering to {output_path}") + interp.run(duration=duration, output=str(output_path)) + + self.update_state(state='CACHING', meta={'progress': 90}) + + # Store output in cache + if output_path.exists(): + cache_mgr = get_cache_manager() + cached_file, ipfs_cid = cache_mgr.put( + source_path=output_path, + node_type="STREAM_OUTPUT", + node_id=f"stream_{task_id}", + ) + + logger.info(f"Stream output cached: CID={cached_file.cid}, IPFS={ipfs_cid}") + + return { + "status": "completed", + "task_id": task_id, + "output_cid": cached_file.cid, + "ipfs_cid": ipfs_cid, + "output_path": str(cached_file.path), + } + else: + return { + "status": "failed", + "task_id": task_id, + "error": "Output file not created", + } + + except Exception as e: + logger.error(f"Stream task {task_id} failed: {e}") + import traceback + traceback.print_exc() + + return { + "status": "failed", + "task_id": task_id, + "error": str(e), + } + + finally: + # Cleanup temp directory + import shutil + if work_dir.exists(): + shutil.rmtree(work_dir, ignore_errors=True) diff --git a/templates/crossfade-zoom.sexp b/templates/crossfade-zoom.sexp new file mode 100644 index 0000000..fc6d9ad --- /dev/null +++ b/templates/crossfade-zoom.sexp @@ -0,0 +1,25 @@ +;; Crossfade with Zoom Transition +;; +;; Macro for transitioning between two frames with a zoom effect. +;; Active frame zooms out while next frame zooms in. +;; +;; Required context: +;; - zoom effect must be loaded +;; - blend effect must be loaded +;; +;; Parameters: +;; active-frame: current frame +;; next-frame: frame to transition to +;; fade-amt: transition progress (0 = all active, 1 = all next) +;; +;; Usage: +;; (include :path "../templates/crossfade-zoom.sexp") +;; ... 
+;; (crossfade-zoom active-frame next-frame 0.5) + +(defmacro crossfade-zoom (active-frame next-frame fade-amt) + (let [active-zoom (+ 1.0 fade-amt) + active-zoomed (zoom active-frame :amount active-zoom) + next-zoom (+ 0.1 (* fade-amt 0.9)) + next-zoomed (zoom next-frame :amount next-zoom)] + (blend active-zoomed next-zoomed :opacity fade-amt))) diff --git a/templates/cycle-crossfade.sexp b/templates/cycle-crossfade.sexp new file mode 100644 index 0000000..40a87ca --- /dev/null +++ b/templates/cycle-crossfade.sexp @@ -0,0 +1,65 @@ +;; cycle-crossfade template +;; +;; Generalized cycling zoom-crossfade for any number of video layers. +;; Cycles through videos with smooth zoom-based crossfade transitions. +;; +;; Parameters: +;; beat-data - beat analysis node (drives timing) +;; input-videos - list of video nodes to cycle through +;; init-clen - initial cycle length in beats +;; +;; NOTE: The parameter is named "input-videos" (not "videos") because +;; template substitution replaces param names everywhere in the AST. +;; The planner's _expand_slice_on injects env["videos"] at plan time, +;; so (len videos) inside the lambda references that injected value. 
+ +(deftemplate cycle-crossfade + (beat-data input-videos init-clen) + + (slice-on beat-data + :videos input-videos + :init {:cycle 0 :beat 0 :clen init-clen} + :fn (lambda [acc i start end] + (let [beat (get acc "beat") + clen (get acc "clen") + active (get acc "cycle") + n (len videos) + phase3 (* beat 3) + wt (lambda [p] + (let [prev (mod (+ p (- n 1)) n)] + (if (= active p) + (if (< phase3 clen) 1.0 + (if (< phase3 (* clen 2)) + (- 1.0 (* (/ (- phase3 clen) clen) 1.0)) + 0.0)) + (if (= active prev) + (if (< phase3 clen) 0.0 + (if (< phase3 (* clen 2)) + (* (/ (- phase3 clen) clen) 1.0) + 1.0)) + 0.0)))) + zm (lambda [p] + (let [prev (mod (+ p (- n 1)) n)] + (if (= active p) + ;; Active video: normal -> zoom out during transition -> tiny + (if (< phase3 clen) 1.0 + (if (< phase3 (* clen 2)) + (+ 1.0 (* (/ (- phase3 clen) clen) 1.0)) + 0.1)) + (if (= active prev) + ;; Incoming video: tiny -> zoom in during transition -> normal + (if (< phase3 clen) 0.1 + (if (< phase3 (* clen 2)) + (+ 0.1 (* (/ (- phase3 clen) clen) 0.9)) + 1.0)) + 0.1)))) + new-acc (if (< (+ beat 1) clen) + (dict :cycle active :beat (+ beat 1) :clen clen) + (dict :cycle (mod (+ active 1) n) :beat 0 + :clen (+ 40 (mod (* i 7) 41))))] + {:layers (map (lambda [p] + {:video p :effects [{:effect zoom :amount (zm p)}]}) + (range 0 n)) + :compose {:effect blend_multi :mode "alpha" + :weights (map (lambda [p] (wt p)) (range 0 n))} + :acc new-acc})))) diff --git a/templates/process-pair.sexp b/templates/process-pair.sexp new file mode 100644 index 0000000..6720cd2 --- /dev/null +++ b/templates/process-pair.sexp @@ -0,0 +1,112 @@ +;; process-pair template +;; +;; Reusable video-pair processor: takes a single video source, creates two +;; clips (A and B) with opposite rotations and sporadic effects, blends them, +;; and applies a per-pair slow rotation driven by a beat scan. 
+;;
+;; All sporadic triggers (invert, hue-shift, ascii) and pair-level controls
+;; (blend opacity, rotation) are defined internally using seed offsets.
+;;
+;; Parameters:
+;;   video     - source video node
+;;   energy    - energy analysis node (drives rotation/zoom amounts)
+;;   beat-data - beat analysis node (drives sporadic triggers)
+;;   rng       - RNG object from (make-rng seed) for auto-derived seeds
+;;   rot-dir   - initial rotation direction: 1 (clockwise) or -1 (anti-clockwise)
+;;   rot-a/b   - rotation ranges for clip A/B (e.g. [0 45])
+;;   zoom-a/b  - zoom ranges for clip A/B (e.g. [1 1.5])
+
+(deftemplate process-pair
+  (video energy beat-data rng rot-dir rot-a rot-b zoom-a zoom-b)
+
+  ;; --- Sporadic triggers for clip A ---
+  ;; Each trigger is a countdown scan: while acc > 0 the effect is active and
+  ;; the counter decrements once per beat; at zero, a dice roll may re-arm it.
+
+  ;; Invert: 10% chance per beat, lasts 1-5 beats
+  (def inv-a (scan beat-data :rng rng :init 0
+    :step (if (> acc 0) (- acc 1) (if (< (rand) 0.1) (rand-int 1 5) 0))
+    :emit (if (> acc 0) 1 0)))
+
+  ;; Hue shift: 10% chance, random hue 30-330 deg, lasts 1-5 beats
+  (def hue-a (scan beat-data :rng rng :init (dict :rem 0 :hue 0)
+    :step (if (> rem 0)
+            (dict :rem (- rem 1) :hue hue)
+            (if (< (rand) 0.1)
+              (dict :rem (rand-int 1 5) :hue (rand-range 30 330))
+              (dict :rem 0 :hue 0)))
+    :emit (if (> rem 0) hue 0)))
+
+  ;; ASCII art: 5% chance, lasts 1-3 beats
+  (def ascii-a (scan beat-data :rng rng :init 0
+    :step (if (> acc 0) (- acc 1) (if (< (rand) 0.05) (rand-int 1 3) 0))
+    :emit (if (> acc 0) 1 0)))
+
+  ;; --- Sporadic triggers for clip B (offset seeds) ---
+  ;; NOTE(review): these scans receive the same rng object as the A scans;
+  ;; "offset seeds" presumably means each scan draws further along the shared
+  ;; stream so B de-correlates from A -- confirm against the scan implementation.
+
+  (def inv-b (scan beat-data :rng rng :init 0
+    :step (if (> acc 0) (- acc 1) (if (< (rand) 0.1) (rand-int 1 5) 0))
+    :emit (if (> acc 0) 1 0)))
+
+  (def hue-b (scan beat-data :rng rng :init (dict :rem 0 :hue 0)
+    :step (if (> rem 0)
+            (dict :rem (- rem 1) :hue hue)
+            (if (< (rand) 0.1)
+              (dict :rem (rand-int 1 5) :hue (rand-range 30 330))
+              (dict :rem 0 :hue 0)))
+    :emit (if (> rem 0) hue 0)))
+
+  (def ascii-b (scan beat-data :rng rng :init 0
+    :step (if (> acc 0) (- acc 1) (if (< (rand) 0.05) (rand-int 1 3) 0))
+    :emit (if (> acc 0) 1 0)))
+
+  ;; --- Pair-level controls ---
+
+  ;; Internal A/B blend: randomly show A (0), both (0.5), or B (1), every 1-11 beats
+  (def pair-mix (scan beat-data :rng rng
+    :init (dict :rem 0 :opacity 0.5)
+    :step (if (> rem 0)
+            (dict :rem (- rem 1) :opacity opacity)
+            (dict :rem (rand-int 1 11) :opacity (* (rand-int 0 2) 0.5)))
+    :emit opacity))
+
+  ;; Per-pair rotation: one full rotation every 20-30 beats, alternating direction.
+  ;; Angle advances by 360/clen per beat; on the reversal beat the angle holds.
+  (def pair-rot (scan beat-data :rng rng
+    :init (dict :beat 0 :clen 25 :dir rot-dir :angle 0)
+    :step (if (< (+ beat 1) clen)
+            (dict :beat (+ beat 1) :clen clen :dir dir
+                  :angle (+ angle (* dir (/ 360 clen))))
+            (dict :beat 0 :clen (rand-int 20 30) :dir (* dir -1)
+                  :angle angle))
+    :emit angle))
+
+  ;; --- Clip A processing ---
+  (def clip-a (-> video (segment :start 0 :duration (bind energy duration))))
+  (def rotated-a (-> clip-a
+    (effect rotate :angle (bind energy values :range rot-a))
+    (effect zoom :amount (bind energy values :range zoom-a))
+    (effect invert :amount (bind inv-a values))
+    (effect hue_shift :degrees (bind hue-a values))
+    ;; ASCII disabled - too slow without GPU
+    ;; (effect ascii_art
+    ;;   :char_size (bind energy values :range [4 32])
+    ;;   :mix (bind ascii-a values))
+    ))
+
+  ;; --- Clip B processing ---
+  (def clip-b (-> video (segment :start 0 :duration (bind energy duration))))
+  (def rotated-b (-> clip-b
+    (effect rotate :angle (bind energy values :range rot-b))
+    (effect zoom :amount (bind energy values :range zoom-b))
+    (effect invert :amount (bind inv-b values))
+    (effect hue_shift :degrees (bind hue-b values))
+    ;; ASCII disabled - too slow without GPU
+    ;; (effect ascii_art
+    ;;   :char_size (bind energy values :range [4 32])
+    ;;   :mix (bind ascii-b values))
+    ))
+
+  ;; --- Blend A+B and apply pair rotation ---
+  (-> rotated-a
+    (effect blend rotated-b
+      :mode "alpha" :opacity (bind pair-mix values) :resize_mode "fit")
+    (effect rotate
+      :angle (bind pair-rot values))))
diff --git a/templates/scan-oscillating-spin.sexp b/templates/scan-oscillating-spin.sexp
new file mode 100644
index 0000000..051f079
--- /dev/null
+++ b/templates/scan-oscillating-spin.sexp
@@ -0,0 +1,28 @@
+;; Oscillating Spin Scan
+;;
+;; Accumulates rotation angle on each beat, reversing direction
+;; periodically for an oscillating effect.
+;;
+;; Required context:
+;; - music: audio analyzer from (streaming:make-audio-analyzer ...)
+;;
+;; Provides scan: spin
+;; Bind with: (bind spin :angle)  ;; cumulative rotation angle
+;;
+;; Behavior:
+;; - Rotates 14.4 degrees per beat (completes 360 in 25 beats)
+;; - After 20-30 beats, reverses direction
+;; - Creates a swinging/oscillating rotation effect
+;;
+;; Usage:
+;; (include :path "../templates/scan-oscillating-spin.sexp")
+;;
+;; In frame:
+;; (rotate frame :angle (bind spin :angle))
+
+(scan spin (streaming:audio-beat music t)
+  :init {:angle 0 :dir 1 :left 25}
+  :step (if (> left 0)
+          (dict :angle (+ angle (* dir 14.4)) :dir dir :left (- left 1))
+          ;; Reverse direction; next leg lasts 20-30 beats, derived from the
+          ;; current beat count (mod 11) so it varies deterministically.
+          (dict :angle angle :dir (* dir -1)
+                :left (+ 20 (mod (streaming:audio-beat-count music t) 11)))))
diff --git a/templates/scan-ripple-drops.sexp b/templates/scan-ripple-drops.sexp
new file mode 100644
index 0000000..7caf720
--- /dev/null
+++ b/templates/scan-ripple-drops.sexp
@@ -0,0 +1,41 @@
+;; Beat-Triggered Ripple Drops Scan
+;;
+;; Creates random ripple drops triggered by audio beats.
+;; Each drop has a random center position and duration.
+;;
+;; Required context:
+;; - music: audio analyzer from (streaming:make-audio-analyzer ...)
+;; - core primitives loaded
+;;
+;; Provides scan: ripple-state
+;; Bind with: (bind ripple-state :gate)  ;; 0 or 1
+;;            (bind ripple-state :cx)    ;; center x (0-1)
+;;            (bind ripple-state :cy)    ;; center y (0-1)
+;;
+;; Tunables (hardcoded in the scan body below -- not actual parameters):
+;;   trigger-chance: probability per beat (0.15)
+;;   min-duration: minimum beats (1)
+;;   max-duration: maximum beats (15)
+;;
+;; Usage:
+;; (include :path "../templates/scan-ripple-drops.sexp")
+;; ;; Uses default: 15% chance, 1-15 beat duration
+;;
+;; In frame:
+;; (let [rip-gate (bind ripple-state :gate)
+;;       rip-amp (* rip-gate (core:map-range e 0 1 5 50))]
+;;   (ripple frame
+;;     :amplitude rip-amp
+;;     :center_x (bind ripple-state :cx)
+;;     :center_y (bind ripple-state :cy)))
+
+(scan ripple-state (streaming:audio-beat music t)
+  :init {:gate 0 :cx 0.5 :cy 0.5 :left 0}
+  :step (if (> left 0)
+          ;; Drop in progress: hold the center, count down remaining beats.
+          (dict :gate 1 :cx cx :cy cy :left (- left 1))
+          (if (< (core:rand) 0.15)
+            ;; New drop: random center inside the middle 60% of the frame,
+            ;; lasting 1-15 beats (beat count mod 15, plus 1).
+            (dict :gate 1
+                  :cx (+ 0.2 (* (core:rand) 0.6))
+                  :cy (+ 0.2 (* (core:rand) 0.6))
+                  :left (+ 1 (mod (streaming:audio-beat-count music t) 15)))
+            (dict :gate 0 :cx 0.5 :cy 0.5 :left 0))))
diff --git a/templates/standard-effects.sexp b/templates/standard-effects.sexp
new file mode 100644
index 0000000..9e97f34
--- /dev/null
+++ b/templates/standard-effects.sexp
@@ -0,0 +1,22 @@
+;; Standard Effects Bundle
+;;
+;; Loads commonly-used video effects.
+;; Include after primitives are loaded.
+;;
+;; Effects provided:
+;; - rotate: rotation by angle
+;; - zoom: scale in/out
+;; - blend: alpha blend two frames
+;; - ripple: water ripple distortion
+;; - invert: color inversion
+;; - hue_shift: hue rotation
+;;
+;; Usage:
+;; (include :path "../templates/standard-effects.sexp")
+
+(effect rotate :path "../sexp_effects/effects/rotate.sexp")
+(effect zoom :path "../sexp_effects/effects/zoom.sexp")
+(effect blend :path "../sexp_effects/effects/blend.sexp")
+(effect ripple :path "../sexp_effects/effects/ripple.sexp")
+(effect invert :path "../sexp_effects/effects/invert.sexp")
+(effect hue_shift :path "../sexp_effects/effects/hue_shift.sexp")
diff --git a/templates/standard-primitives.sexp b/templates/standard-primitives.sexp
new file mode 100644
index 0000000..6e2c62d
--- /dev/null
+++ b/templates/standard-primitives.sexp
@@ -0,0 +1,14 @@
+;; Standard Primitives Bundle
+;;
+;; Loads all commonly-used primitive libraries.
+;; Include this at the top of streaming recipes.
+;;
+;; Usage:
+;; (include :path "../templates/standard-primitives.sexp")
+
+(require-primitives "geometry")
+(require-primitives "core")
+(require-primitives "image")
+(require-primitives "blending")
+(require-primitives "color_ops")
+(require-primitives "streaming")
diff --git a/templates/stream-process-pair.sexp b/templates/stream-process-pair.sexp
new file mode 100644
index 0000000..55f408e
--- /dev/null
+++ b/templates/stream-process-pair.sexp
@@ -0,0 +1,72 @@
+;; stream-process-pair template (streaming-compatible)
+;;
+;; Macro for processing a video source pair with full effects.
+;; Reads source, applies A/B effects (rotate, zoom, invert, hue), blends,
+;; and applies pair-level rotation.
+;;
+;; Required context (must be defined in calling scope):
+;; - sources: array of video sources
+;; - pair-configs: array of {:dir :rot-a :rot-b :zoom-a :zoom-b} configs
+;; - pair-states: array from (bind pairs :states)
+;; - now: current time (t)
+;; - e: audio energy (0-1)
+;;
+;; Required effects (must be loaded):
+;; - rotate, zoom, invert, hue_shift, blend
+;;
+;; Usage:
+;; (include :path "../templates/stream-process-pair.sexp")
+;; ...in frame pipeline...
+;; (let [pair-states (bind pairs :states)
+;;       now t
+;;       e (streaming:audio-energy music now)]
+;;   (process-pair 0))  ;; process source at index 0
+
+(require-primitives "core")
+
+(defmacro process-pair (src-idx)
+  (let [src (nth sources src-idx)
+        frame (streaming:source-read src now)
+        cfg (nth pair-configs src-idx)
+        state (nth pair-states src-idx)
+
+        ;; Get state values (invert uses countdown > 0)
+        inv-a-active (if (> (get state :inv-a) 0) 1 0)
+        inv-b-active (if (> (get state :inv-b) 0) 1 0)
+        ;; Hue is active only when countdown > 0
+        hue-a-val (if (> (get state :hue-a) 0) (get state :hue-a-val) 0)
+        hue-b-val (if (> (get state :hue-b) 0) (get state :hue-b-val) 0)
+        mix-opacity (get state :mix)
+        ;; Rotation direction is applied at use time here, scaling the
+        ;; accumulated angle by the pair's configured :dir.
+        pair-rot-angle (* (get state :angle) (get cfg :dir))
+
+        ;; Get config values for energy-mapped ranges
+        rot-a-max (get cfg :rot-a)
+        rot-b-max (get cfg :rot-b)
+        zoom-a-max (get cfg :zoom-a)
+        zoom-b-max (get cfg :zoom-b)
+
+        ;; Energy-driven rotation and zoom: e in [0,1] maps linearly onto
+        ;; [0, rot-max] for rotation and [1, zoom-max] for zoom.
+        rot-a (core:map-range e 0 1 0 rot-a-max)
+        rot-b (core:map-range e 0 1 0 rot-b-max)
+        zoom-a (core:map-range e 0 1 1 zoom-a-max)
+        zoom-b (core:map-range e 0 1 1 zoom-b-max)
+
+        ;; Apply effects to clip A
+        clip-a (-> frame
+          (rotate :angle rot-a)
+          (zoom :amount zoom-a)
+          (invert :amount inv-a-active)
+          (hue_shift :degrees hue-a-val))
+
+        ;; Apply effects to clip B
+        clip-b (-> frame
+          (rotate :angle rot-b)
+          (zoom :amount zoom-b)
+          (invert :amount inv-b-active)
+          (hue_shift :degrees hue-b-val))
+
+        ;; Blend A+B
+        ;; NOTE(review): unlike templates/process-pair.sexp, this blend passes
+        ;; no :mode or :resize_mode and so relies on blend's defaults --
+        ;; confirm they match "alpha"/"fit".
+        blended (blend clip-a clip-b :opacity mix-opacity)]
+
+    ;; Apply pair-level rotation
+    (rotate blended :angle pair-rot-angle)))