From 95fcc67dcc1c16348398fa134c5d108f3a08a12c Mon Sep 17 00:00:00 2001 From: gilesb Date: Mon, 2 Feb 2026 17:48:04 +0000 Subject: [PATCH] Add generic streaming interpreter with configurable sources/audio - Add stream_sexp_generic.py: fully generic sexp interpreter - Add streaming primitives for video sources and audio analysis - Add config system for external sources and audio files - Add templates for reusable scans and macros - Fix video/audio stream mapping in file output - Add dynamic source cycling based on sources array length - Remove old Python effect files (migrated to sexp) - Update sexp effects to use namespaced primitives Co-Authored-By: Claude Opus 4.5 --- analysis.sexp | 13 - configs/audio-dizzy.sexp | 17 + configs/audio-halleluwah.sexp | 17 + configs/sources-default.sexp | 38 + configs/sources-woods-half.sexp | 19 + configs/sources-woods.sexp | 39 + constructs/beat-alternate.sexp | 23 - constructs/cycle-effects-preset.sexp | 158 ---- constructs/slice-every-n.sexp | 56 -- constructs/slice-on.sexp | 47 - effect-presets/all-42.sexp | 65 -- effect-presets/chill-subtle.sexp | 13 - effect-presets/glitch-only.sexp | 13 - effect-presets/retro-vhs.sexp | 13 - effects/ascii_alternating_fx.sexp | 69 -- effects/ascii_alternating_rotate.sexp | 67 -- effects/ascii_art.py | 205 ---- effects/ascii_art.sexp | 34 - effects/ascii_art_fx_staged.sexp | 89 -- effects/ascii_art_staged.sexp | 59 -- effects/ascii_cell_effect_staged.sexp | 64 -- effects/ascii_dual_blend.sexp | 99 -- effects/ascii_fx_zone_staged.sexp | 66 -- effects/ascii_zones.sexp | 33 - effects/beam.py | 164 ---- effects/bloom.py | 108 --- effects/bloom.sexp | 21 - effects/blur.py | 54 -- effects/blur.sexp | 20 - effects/brightness.py | 52 -- effects/brightness.sexp | 20 - effects/cartoon.py | 117 --- effects/chromatic.py | 115 --- effects/color-adjust.sexp | 21 - effects/color_cycle.py | 104 --- effects/color_cycle.sexp | 20 - effects/color_grade.py | 139 --- effects/contrast.py | 52 -- 
effects/contrast.sexp | 20 - effects/crt.py | 193 ---- effects/crt.sexp | 21 - effects/datamosh.py | 141 --- effects/datamosh.sexp | 21 - effects/displacement.py | 170 ---- effects/echo.py | 139 --- effects/echo.sexp | 21 - effects/edge_detect.sexp | 22 - effects/emboss.py | 88 -- effects/emboss.sexp | 19 - effects/film_grain.py | 148 --- effects/film_grain.sexp | 19 - effects/fisheye.py | 116 --- effects/fisheye.sexp | 20 - effects/flip.py | 64 -- effects/flip.sexp | 19 - effects/grayscale.sexp | 14 - effects/hue_shift.py | 59 -- effects/hue_shift.sexp | 20 - effects/invert.py | 62 -- effects/invert.sexp | 14 - effects/kaleidoscope.py | 137 --- effects/kaleidoscope.sexp | 21 - effects/mirror.py | 86 -- effects/mirror.sexp | 19 - effects/motion_blur.py | 122 --- effects/neon_glow.py | 130 --- effects/neon_glow.sexp | 21 - effects/noise.py | 137 --- effects/noise.sexp | 19 - effects/outline.py | 111 --- effects/outline.sexp | 19 - effects/pixelate.py | 72 -- effects/pixelate.sexp | 20 - effects/pixelsort.py | 152 --- effects/pixelsort.sexp | 22 - effects/posterize.py | 79 -- effects/posterize.sexp | 20 - effects/quick_test.sexp | 178 ---- effects/quick_test_explicit.sexp | 150 +++ effects/random.py | 217 ----- effects/rgb_split.py | 106 --- effects/rgb_split.sexp | 19 - effects/ripple.sexp | 21 - effects/rotate.py | 110 --- effects/rotate.sexp | 20 - effects/saturation.py | 61 -- effects/saturation.sexp | 20 - effects/scanlines.py | 116 --- effects/scanlines.sexp | 21 - effects/scatter.py | 142 --- effects/sepia.sexp | 14 - effects/sharpen.py | 63 -- effects/sharpen.sexp | 20 - effects/shatter.py | 233 ----- effects/strobe.py | 90 -- effects/strobe.sexp | 20 - effects/swirl.py | 121 --- effects/swirl.sexp | 20 - effects/threshold.py | 85 -- effects/threshold.sexp | 20 - effects/tile_grid.py | 134 --- effects/tile_grid.sexp | 21 - effects/trails.py | 95 -- effects/trails.sexp | 20 - effects/vhs.py | 163 ---- effects/vignette.py | 116 --- effects/vignette.sexp | 19 - 
effects/wave.py | 108 --- effects/wave.sexp | 21 - effects/zoom.py | 91 -- effects/zoom.sexp | 20 - libs/all-effects.sexp | 63 -- libs/plan | 68 -- libs/standard-analyzers.sexp | 11 - libs/standard-constructs.sexp | 6 - recipe-1.sexp | 16 - recipe-all-effects-modular.sexp | 55 -- recipe-all-effects.sexp | 172 ---- recipe-bound copy.sexp | 45 - recipe-bound.sexp | 67 -- recipe-construct.sexp | 24 - recipe-lambda.sexp | 28 - recipe-parametric.sexp | 33 - recipe-sexp-slice.sexp | 30 - recipe-single-effect.sexp | 94 -- recipe.sexp | 25 - sexp_effects/effects/blend.sexp | 69 +- sexp_effects/effects/blend_multi.sexp | 19 +- sexp_effects/effects/bloom.sexp | 5 +- sexp_effects/effects/blur.sexp | 5 +- sexp_effects/effects/brightness.sexp | 3 +- sexp_effects/effects/color-adjust.sexp | 6 +- sexp_effects/effects/color_cycle.sexp | 1 + sexp_effects/effects/contrast.sexp | 3 +- sexp_effects/effects/crt.sexp | 5 +- sexp_effects/effects/echo.sexp | 3 +- sexp_effects/effects/edge_detect.sexp | 3 +- sexp_effects/effects/emboss.sexp | 3 +- sexp_effects/effects/film_grain.sexp | 1 + sexp_effects/effects/fisheye.sexp | 9 +- sexp_effects/effects/flip.sexp | 5 +- sexp_effects/effects/grayscale.sexp | 3 +- sexp_effects/effects/hue_shift.sexp | 2 +- sexp_effects/effects/invert.sexp | 2 +- sexp_effects/effects/kaleidoscope.sexp | 9 +- sexp_effects/effects/layer.sexp | 46 +- sexp_effects/effects/mirror.sexp | 21 +- sexp_effects/effects/neon_glow.sexp | 7 +- sexp_effects/effects/outline.sexp | 7 +- sexp_effects/effects/pixelate.sexp | 9 +- sexp_effects/effects/posterize.sexp | 8 +- sexp_effects/effects/resize-frame.sexp | 3 +- sexp_effects/effects/ripple.sexp | 10 +- sexp_effects/effects/rotate.sexp | 2 +- sexp_effects/effects/saturation.sexp | 3 +- sexp_effects/effects/scanlines.sexp | 3 +- sexp_effects/effects/sepia.sexp | 6 +- sexp_effects/effects/sharpen.sexp | 6 +- sexp_effects/effects/strobe.sexp | 3 +- sexp_effects/effects/swirl.sexp | 9 +- sexp_effects/effects/threshold.sexp | 9 +- 
sexp_effects/effects/tile_grid.sexp | 7 +- sexp_effects/effects/trails.sexp | 7 +- sexp_effects/effects/vignette.sexp | 5 +- sexp_effects/effects/wave.sexp | 9 +- sexp_effects/effects/zoom.sexp | 2 +- sexp_effects/primitive_libs/core.py | 31 + sexp_effects/primitive_libs/streaming.py | 304 ++++++ streaming/output.py | 112 ++- streaming/pipeline.py | 846 +++++++++++++++++ streaming/recipe_adapter.py | 56 ++ streaming/stream_sexp.py | 1081 ++++++++++++++++++++++ streaming/stream_sexp_generic.py | 859 +++++++++++++++++ templates/crossfade-zoom.sexp | 25 + templates/scan-oscillating-spin.sexp | 28 + templates/scan-ripple-drops.sexp | 41 + templates/standard-effects.sexp | 22 + templates/standard-primitives.sexp | 14 + templates/stream-process-pair.sexp | 72 ++ 179 files changed, 3935 insertions(+), 8226 deletions(-) delete mode 100644 analysis.sexp create mode 100644 configs/audio-dizzy.sexp create mode 100644 configs/audio-halleluwah.sexp create mode 100644 configs/sources-default.sexp create mode 100644 configs/sources-woods-half.sexp create mode 100644 configs/sources-woods.sexp delete mode 100644 constructs/beat-alternate.sexp delete mode 100644 constructs/cycle-effects-preset.sexp delete mode 100644 constructs/slice-every-n.sexp delete mode 100644 constructs/slice-on.sexp delete mode 100644 effect-presets/all-42.sexp delete mode 100644 effect-presets/chill-subtle.sexp delete mode 100644 effect-presets/glitch-only.sexp delete mode 100644 effect-presets/retro-vhs.sexp delete mode 100644 effects/ascii_alternating_fx.sexp delete mode 100644 effects/ascii_alternating_rotate.sexp delete mode 100644 effects/ascii_art.py delete mode 100644 effects/ascii_art.sexp delete mode 100644 effects/ascii_art_fx_staged.sexp delete mode 100644 effects/ascii_art_staged.sexp delete mode 100644 effects/ascii_cell_effect_staged.sexp delete mode 100644 effects/ascii_dual_blend.sexp delete mode 100644 effects/ascii_fx_zone_staged.sexp delete mode 100644 effects/ascii_zones.sexp delete mode 
100644 effects/beam.py delete mode 100644 effects/bloom.py delete mode 100644 effects/bloom.sexp delete mode 100644 effects/blur.py delete mode 100644 effects/blur.sexp delete mode 100644 effects/brightness.py delete mode 100644 effects/brightness.sexp delete mode 100644 effects/cartoon.py delete mode 100644 effects/chromatic.py delete mode 100644 effects/color-adjust.sexp delete mode 100644 effects/color_cycle.py delete mode 100644 effects/color_cycle.sexp delete mode 100644 effects/color_grade.py delete mode 100644 effects/contrast.py delete mode 100644 effects/contrast.sexp delete mode 100644 effects/crt.py delete mode 100644 effects/crt.sexp delete mode 100644 effects/datamosh.py delete mode 100644 effects/datamosh.sexp delete mode 100644 effects/displacement.py delete mode 100644 effects/echo.py delete mode 100644 effects/echo.sexp delete mode 100644 effects/edge_detect.sexp delete mode 100644 effects/emboss.py delete mode 100644 effects/emboss.sexp delete mode 100644 effects/film_grain.py delete mode 100644 effects/film_grain.sexp delete mode 100644 effects/fisheye.py delete mode 100644 effects/fisheye.sexp delete mode 100644 effects/flip.py delete mode 100644 effects/flip.sexp delete mode 100644 effects/grayscale.sexp delete mode 100644 effects/hue_shift.py delete mode 100644 effects/hue_shift.sexp delete mode 100644 effects/invert.py delete mode 100644 effects/invert.sexp delete mode 100644 effects/kaleidoscope.py delete mode 100644 effects/kaleidoscope.sexp delete mode 100644 effects/mirror.py delete mode 100644 effects/mirror.sexp delete mode 100644 effects/motion_blur.py delete mode 100644 effects/neon_glow.py delete mode 100644 effects/neon_glow.sexp delete mode 100644 effects/noise.py delete mode 100644 effects/noise.sexp delete mode 100644 effects/outline.py delete mode 100644 effects/outline.sexp delete mode 100644 effects/pixelate.py delete mode 100644 effects/pixelate.sexp delete mode 100644 effects/pixelsort.py delete mode 100644 
effects/pixelsort.sexp delete mode 100644 effects/posterize.py delete mode 100644 effects/posterize.sexp delete mode 100644 effects/quick_test.sexp create mode 100644 effects/quick_test_explicit.sexp delete mode 100644 effects/random.py delete mode 100644 effects/rgb_split.py delete mode 100644 effects/rgb_split.sexp delete mode 100644 effects/ripple.sexp delete mode 100644 effects/rotate.py delete mode 100644 effects/rotate.sexp delete mode 100644 effects/saturation.py delete mode 100644 effects/saturation.sexp delete mode 100644 effects/scanlines.py delete mode 100644 effects/scanlines.sexp delete mode 100644 effects/scatter.py delete mode 100644 effects/sepia.sexp delete mode 100644 effects/sharpen.py delete mode 100644 effects/sharpen.sexp delete mode 100644 effects/shatter.py delete mode 100644 effects/strobe.py delete mode 100644 effects/strobe.sexp delete mode 100644 effects/swirl.py delete mode 100644 effects/swirl.sexp delete mode 100644 effects/threshold.py delete mode 100644 effects/threshold.sexp delete mode 100644 effects/tile_grid.py delete mode 100644 effects/tile_grid.sexp delete mode 100644 effects/trails.py delete mode 100644 effects/trails.sexp delete mode 100644 effects/vhs.py delete mode 100644 effects/vignette.py delete mode 100644 effects/vignette.sexp delete mode 100644 effects/wave.py delete mode 100644 effects/wave.sexp delete mode 100644 effects/zoom.py delete mode 100644 effects/zoom.sexp delete mode 100644 libs/all-effects.sexp delete mode 100644 libs/plan delete mode 100644 libs/standard-analyzers.sexp delete mode 100644 libs/standard-constructs.sexp delete mode 100644 recipe-1.sexp delete mode 100644 recipe-all-effects-modular.sexp delete mode 100644 recipe-all-effects.sexp delete mode 100644 recipe-bound copy.sexp delete mode 100644 recipe-bound.sexp delete mode 100644 recipe-construct.sexp delete mode 100644 recipe-lambda.sexp delete mode 100644 recipe-parametric.sexp delete mode 100644 recipe-sexp-slice.sexp delete mode 100644 
recipe-single-effect.sexp delete mode 100644 recipe.sexp create mode 100644 sexp_effects/primitive_libs/streaming.py create mode 100644 streaming/pipeline.py create mode 100644 streaming/stream_sexp.py create mode 100644 streaming/stream_sexp_generic.py create mode 100644 templates/crossfade-zoom.sexp create mode 100644 templates/scan-oscillating-spin.sexp create mode 100644 templates/scan-ripple-drops.sexp create mode 100644 templates/standard-effects.sexp create mode 100644 templates/standard-primitives.sexp create mode 100644 templates/stream-process-pair.sexp diff --git a/analysis.sexp b/analysis.sexp deleted file mode 100644 index b132de1..0000000 --- a/analysis.sexp +++ /dev/null @@ -1,13 +0,0 @@ -(analysis - (beats-data - :tempo 137.195 - :times (0.437333 0.874667 1.312 1.76 2.19733 2.63467 3.072 3.50933 3.936 4.37333 4.8 5.248 5.68533 6.12267 6.56 6.99733 7.424 7.86133 8.288 8.736 9.17333 9.61067 9.984) - :count 22 - :duration 9.984 - ) - (bass-data - :times (0 0.0106667 0.0213333 0.032 0.0426667 0.0533333 0.064 0.0746667 0.0853333 0.096 0.106667 0.117333 0.128 0.138667 0.149333 0.16 0.170667 0.181333 0.192 0.202667 0.213333 0.224 0.234667 0.245333 0.256 0.266667 0.277333 0.288 0.298667 0.309333 0.32 0.330667 0.341333 0.352 0.362667 0.373333 0.384 0.394667 0.405333 0.416 0.426667 0.437333 0.448 0.458667 0.469333 0.48 0.490667 0.501333 0.512 0.522667 0.533333 0.544 0.554667 0.565333 0.576 0.586667 0.597333 0.608 0.618667 0.629333 0.64 0.650667 0.661333 0.672 0.682667 0.693333 0.704 0.714667 0.725333 0.736 0.746667 0.757333 0.768 0.778667 0.789333 0.8 0.810667 0.821333 0.832 0.842667 0.853333 0.864 0.874667 0.885333 0.896 0.906667 0.917333 0.928 0.938667 0.949333 0.96 0.970667 0.981333 0.992 1.00267 1.01333 1.024 1.03467 1.04533 1.056 1.06667 1.07733 1.088 1.09867 1.10933 1.12 1.13067 1.14133 1.152 1.16267 1.17333 1.184 1.19467 1.20533 1.216 1.22667 1.23733 1.248 1.25867 1.26933 1.28 1.29067 1.30133 1.312 1.32267 1.33333 1.344 1.35467 1.36533 1.376 1.38667 
1.39733 1.408 1.41867 1.42933 1.44 1.45067 1.46133 1.472 1.48267 1.49333 1.504 1.51467 1.52533 1.536 1.54667 1.55733 1.568 1.57867 1.58933 1.6 1.61067 1.62133 1.632 1.64267 1.65333 1.664 1.67467 1.68533 1.696 1.70667 1.71733 1.728 1.73867 1.74933 1.76 1.77067 1.78133 1.792 1.80267 1.81333 1.824 1.83467 1.84533 1.856 1.86667 1.87733 1.888 1.89867 1.90933 1.92 1.93067 1.94133 1.952 1.96267 1.97333 1.984 1.99467 2.00533 2.016 2.02667 2.03733 2.048 2.05867 2.06933 2.08 2.09067 2.10133 2.112 2.12267 2.13333 2.144 2.15467 2.16533 2.176 2.18667 2.19733 2.208 2.21867 2.22933 2.24 2.25067 2.26133 2.272 2.28267 2.29333 2.304 2.31467 2.32533 2.336 2.34667 2.35733 2.368 2.37867 2.38933 2.4 2.41067 2.42133 2.432 2.44267 2.45333 2.464 2.47467 2.48533 2.496 2.50667 2.51733 2.528 2.53867 2.54933 2.56 2.57067 2.58133 2.592 2.60267 2.61333 2.624 2.63467 2.64533 2.656 2.66667 2.67733 2.688 2.69867 2.70933 2.72 2.73067 2.74133 2.752 2.76267 2.77333 2.784 2.79467 2.80533 2.816 2.82667 2.83733 2.848 2.85867 2.86933 2.88 2.89067 2.90133 2.912 2.92267 2.93333 2.944 2.95467 2.96533 2.976 2.98667 2.99733 3.008 3.01867 3.02933 3.04 3.05067 3.06133 3.072 3.08267 3.09333 3.104 3.11467 3.12533 3.136 3.14667 3.15733 3.168 3.17867 3.18933 3.2 3.21067 3.22133 3.232 3.24267 3.25333 3.264 3.27467 3.28533 3.296 3.30667 3.31733 3.328 3.33867 3.34933 3.36 3.37067 3.38133 3.392 3.40267 3.41333 3.424 3.43467 3.44533 3.456 3.46667 3.47733 3.488 3.49867 3.50933 3.52 3.53067 3.54133 3.552 3.56267 3.57333 3.584 3.59467 3.60533 3.616 3.62667 3.63733 3.648 3.65867 3.66933 3.68 3.69067 3.70133 3.712 3.72267 3.73333 3.744 3.75467 3.76533 3.776 3.78667 3.79733 3.808 3.81867 3.82933 3.84 3.85067 3.86133 3.872 3.88267 3.89333 3.904 3.91467 3.92533 3.936 3.94667 3.95733 3.968 3.97867 3.98933 4 4.01067 4.02133 4.032 4.04267 4.05333 4.064 4.07467 4.08533 4.096 4.10667 4.11733 4.128 4.13867 4.14933 4.16 4.17067 4.18133 4.192 4.20267 4.21333 4.224 4.23467 4.24533 4.256 4.26667 4.27733 4.288 4.29867 4.30933 4.32 4.33067 
4.34133 4.352 4.36267 4.37333 4.384 4.39467 4.40533 4.416 4.42667 4.43733 4.448 4.45867 4.46933 4.48 4.49067 4.50133 4.512 4.52267 4.53333 4.544 4.55467 4.56533 4.576 4.58667 4.59733 4.608 4.61867 4.62933 4.64 4.65067 4.66133 4.672 4.68267 4.69333 4.704 4.71467 4.72533 4.736 4.74667 4.75733 4.768 4.77867 4.78933 4.8 4.81067 4.82133 4.832 4.84267 4.85333 4.864 4.87467 4.88533 4.896 4.90667 4.91733 4.928 4.93867 4.94933 4.96 4.97067 4.98133 4.992 5.00267 5.01333 5.024 5.03467 5.04533 5.056 5.06667 5.07733 5.088 5.09867 5.10933 5.12 5.13067 5.14133 5.152 5.16267 5.17333 5.184 5.19467 5.20533 5.216 5.22667 5.23733 5.248 5.25867 5.26933 5.28 5.29067 5.30133 5.312 5.32267 5.33333 5.344 5.35467 5.36533 5.376 5.38667 5.39733 5.408 5.41867 5.42933 5.44 5.45067 5.46133 5.472 5.48267 5.49333 5.504 5.51467 5.52533 5.536 5.54667 5.55733 5.568 5.57867 5.58933 5.6 5.61067 5.62133 5.632 5.64267 5.65333 5.664 5.67467 5.68533 5.696 5.70667 5.71733 5.728 5.73867 5.74933 5.76 5.77067 5.78133 5.792 5.80267 5.81333 5.824 5.83467 5.84533 5.856 5.86667 5.87733 5.888 5.89867 5.90933 5.92 5.93067 5.94133 5.952 5.96267 5.97333 5.984 5.99467 6.00533 6.016 6.02667 6.03733 6.048 6.05867 6.06933 6.08 6.09067 6.10133 6.112 6.12267 6.13333 6.144 6.15467 6.16533 6.176 6.18667 6.19733 6.208 6.21867 6.22933 6.24 6.25067 6.26133 6.272 6.28267 6.29333 6.304 6.31467 6.32533 6.336 6.34667 6.35733 6.368 6.37867 6.38933 6.4 6.41067 6.42133 6.432 6.44267 6.45333 6.464 6.47467 6.48533 6.496 6.50667 6.51733 6.528 6.53867 6.54933 6.56 6.57067 6.58133 6.592 6.60267 6.61333 6.624 6.63467 6.64533 6.656 6.66667 6.67733 6.688 6.69867 6.70933 6.72 6.73067 6.74133 6.752 6.76267 6.77333 6.784 6.79467 6.80533 6.816 6.82667 6.83733 6.848 6.85867 6.86933 6.88 6.89067 6.90133 6.912 6.92267 6.93333 6.944 6.95467 6.96533 6.976 6.98667 6.99733 7.008 7.01867 7.02933 7.04 7.05067 7.06133 7.072 7.08267 7.09333 7.104 7.11467 7.12533 7.136 7.14667 7.15733 7.168 7.17867 7.18933 7.2 7.21067 7.22133 7.232 7.24267 7.25333 7.264 
7.27467 7.28533 7.296 7.30667 7.31733 7.328 7.33867 7.34933 7.36 7.37067 7.38133 7.392 7.40267 7.41333 7.424 7.43467 7.44533 7.456 7.46667 7.47733 7.488 7.49867 7.50933 7.52 7.53067 7.54133 7.552 7.56267 7.57333 7.584 7.59467 7.60533 7.616 7.62667 7.63733 7.648 7.65867 7.66933 7.68 7.69067 7.70133 7.712 7.72267 7.73333 7.744 7.75467 7.76533 7.776 7.78667 7.79733 7.808 7.81867 7.82933 7.84 7.85067 7.86133 7.872 7.88267 7.89333 7.904 7.91467 7.92533 7.936 7.94667 7.95733 7.968 7.97867 7.98933 8 8.01067 8.02133 8.032 8.04267 8.05333 8.064 8.07467 8.08533 8.096 8.10667 8.11733 8.128 8.13867 8.14933 8.16 8.17067 8.18133 8.192 8.20267 8.21333 8.224 8.23467 8.24533 8.256 8.26667 8.27733 8.288 8.29867 8.30933 8.32 8.33067 8.34133 8.352 8.36267 8.37333 8.384 8.39467 8.40533 8.416 8.42667 8.43733 8.448 8.45867 8.46933 8.48 8.49067 8.50133 8.512 8.52267 8.53333 8.544 8.55467 8.56533 8.576 8.58667 8.59733 8.608 8.61867 8.62933 8.64 8.65067 8.66133 8.672 8.68267 8.69333 8.704 8.71467 8.72533 8.736 8.74667 8.75733 8.768 8.77867 8.78933 8.8 8.81067 8.82133 8.832 8.84267 8.85333 8.864 8.87467 8.88533 8.896 8.90667 8.91733 8.928 8.93867 8.94933 8.96 8.97067 8.98133 8.992 9.00267 9.01333 9.024 9.03467 9.04533 9.056 9.06667 9.07733 9.088 9.09867 9.10933 9.12 9.13067 9.14133 9.152 9.16267 9.17333 9.184 9.19467 9.20533 9.216 9.22667 9.23733 9.248 9.25867 9.26933 9.28 9.29067 9.30133 9.312 9.32267 9.33333 9.344 9.35467 9.36533 9.376 9.38667 9.39733 9.408 9.41867 9.42933 9.44 9.45067 9.46133 9.472 9.48267 9.49333 9.504 9.51467 9.52533 9.536 9.54667 9.55733 9.568 9.57867 9.58933 9.6 9.61067 9.62133 9.632 9.64267 9.65333 9.664 9.67467 9.68533 9.696 9.70667 9.71733 9.728 9.73867 9.74933 9.76 9.77067 9.78133 9.792 9.80267 9.81333 9.824 9.83467 9.84533 9.856 9.86667 9.87733 9.888 9.89867 9.90933 9.92 9.93067 9.94133 9.952 9.96267 9.97333 9.984) - :values (7.16216e-06 6.45309e-06 6.24163e-06 8.43058e-06 7.3364e-06 8.33967e-06 7.16933e-06 6.40957e-06 6.49916e-06 8.20697e-06 8.02302e-06 
7.87236e-06 1.21227e-05 8.24857e-06 4.717e-06 6.57983e-06 4.37932e-06 6.2521e-06 7.43115e-06 5.63364e-06 8.52022e-06 7.35037e-06 4.66317e-06 6.7811e-06 7.62028e-06 7.45616e-06 7.43458e-06 8.39454e-06 6.26045e-06 6.91363e-06 5.75392e-06 6.30496e-06 8.32314e-06 6.70732e-06 6.24163e-06 8.43058e-06 7.3364e-06 8.34238e-06 1.7279e-05 0.0312504 0.127171 0.156779 0.213277 0.264067 0.228376 0.238384 0.217415 0.20997 0.166879 0.155116 0.132226 0.130255 0.0951963 0.103571 0.104248 0.0780041 0.0948994 0.0827808 0.0825249 0.0789151 0.090206 0.0928191 0.0945696 0.097371 0.0963535 0.0648912 0.0725221 0.0669522 0.0636747 0.0687113 0.0750296 0.0683408 0.0724092 0.0731389 0.0597439 0.0825941 0.0738546 0.0694798 0.0720687 0.0731458 0.0703331 0.0821691 0.0851682 0.123414 0.132517 0.155994 0.118462 0.109342 0.102054 0.0715775 0.096919 0.0840867 0.0758435 0.0778733 0.0882507 0.0701659 0.0698105 0.0765435 0.0679319 0.0707417 0.0730689 0.0598419 0.0866238 0.380699 0.700534 0.690496 0.554694 0.473041 0.42336 0.33967 0.30334 0.23985 0.207732 0.191854 0.175046 0.173634 0.113132 0.123655 0.116205 0.135359 0.120024 0.117482 0.0964799 0.144343 0.204861 0.23033 0.204271 0.202869 0.225386 0.260025 0.233206 0.173568 0.165386 0.135257 0.126117 0.108307 0.116156 0.129393 0.109 0.145881 0.108262 0.0952238 0.0873363 0.131466 0.0981327 0.13161 0.120487 0.0857447 0.0790208 0.0899334 0.0815575 0.0996942 0.0906596 0.070164 0.0844994 0.0889698 0.0890347 0.096482 0.0880858 0.0825154 0.0929791 0.0753759 0.0918 0.0839455 0.106187 0.156017 0.173052 0.143864 0.129507 0.127141 0.105499 0.0909262 0.0884056 0.0889916 0.0634021 0.0795005 0.0900996 0.0570842 0.0659083 0.0712231 0.0676829 0.055935 0.0600593 0.0532016 0.206078 0.624478 0.829637 0.695123 0.662618 0.565682 0.432616 0.357212 0.311635 0.284791 0.232738 0.194049 0.200091 0.131367 0.11794 0.114917 0.119684 0.113432 0.105584 0.0914791 0.350021 0.830524 0.919146 0.967183 1 0.789667 0.642514 0.402096 0.356062 0.372625 0.262668 0.256799 0.238152 0.239011 
0.259009 0.240393 0.195865 0.210342 0.170598 0.115235 0.123494 0.172891 0.396452 0.527137 0.495694 0.502169 0.314289 0.303286 0.251372 0.170556 0.162313 0.163239 0.155657 0.134445 0.143141 0.15054 0.153207 0.172172 0.127843 0.111243 0.0908216 0.103453 0.140627 0.136314 0.13147 0.172442 0.227603 0.219058 0.185081 0.193927 0.192491 0.141449 0.165612 0.171496 0.115222 0.114057 0.139578 0.120337 0.10068 0.0787593 0.0683195 0.0684097 0.066809 0.114857 0.497881 0.920547 0.9325 0.788979 0.60633 0.532281 0.441695 0.391039 0.330509 0.291408 0.213403 0.237324 0.204736 0.153047 0.136729 0.150922 0.122362 0.124808 0.11872 0.185701 0.306793 0.2445 0.231741 0.218492 0.213972 0.217633 0.245314 0.232327 0.150716 0.0949742 0.0913199 0.079221 0.090395 0.0770564 0.100292 0.1031 0.0837269 0.0811178 0.0652714 0.0824131 0.0847641 0.100266 0.120247 0.0969764 0.106081 0.0910757 0.0590815 0.0567437 0.0743846 0.0486739 0.0640541 0.0484055 0.0556839 0.0754229 0.0581457 0.0501834 0.0478827 0.0527257 0.0493874 0.0893116 0.149908 0.155328 0.126655 0.115469 0.132254 0.0966001 0.117966 0.093855 0.0775226 0.0916065 0.0686379 0.0784045 0.0918643 0.073267 0.0711642 0.0711377 0.0501969 0.0583894 0.0556452 0.0756217 0.44287 0.906221 0.948975 0.772533 0.687945 0.596675 0.528724 0.433275 0.409462 0.358384 0.272122 0.27456 0.245432 0.262483 0.198251 0.142315 0.134197 0.11601 0.122026 0.202528 0.175024 0.214981 0.261781 0.224243 0.239647 0.226189 0.254256 0.188644 0.160832 0.137847 0.121601 0.0943244 0.138567 0.155566 0.127026 0.144832 0.118252 0.128691 0.116627 0.124978 0.0945879 0.115633 0.107412 0.111797 0.0825074 0.0830281 0.0930151 0.0881962 0.0952128 0.0804622 0.0821983 0.0895789 0.0986577 0.0641261 0.0756342 0.0935667 0.0827843 0.0883281 0.0758066 0.0694162 0.0832474 0.0685954 0.104091 0.110042 0.183639 0.148052 0.144141 0.121368 0.0693286 0.115004 0.126901 0.109386 0.103061 0.104243 0.077619 0.0804199 0.0913744 0.0826062 0.0844789 0.096114 0.0709375 0.0844898 0.348649 0.707439 0.696308 0.560395 
0.483835 0.424138 0.285367 0.309361 0.268134 0.212897 0.20254 0.180925 0.180307 0.140437 0.122159 0.113377 0.1452 0.136581 0.12094 0.0924773 0.152206 0.210191 0.222118 0.210824 0.19643 0.210775 0.237974 0.210584 0.198879 0.160678 0.138147 0.133646 0.112595 0.129571 0.139936 0.119597 0.156428 0.110461 0.0983066 0.0923733 0.132595 0.100783 0.131057 0.119132 0.087296 0.0737559 0.0694214 0.0830922 0.0992999 0.0955029 0.0695149 0.0783225 0.085667 0.0815775 0.0943693 0.0907773 0.0808274 0.0966216 0.0797136 0.0931839 0.0771002 0.103668 0.148217 0.15753 0.13534 0.130242 0.120796 0.0979305 0.0859062 0.0816456 0.0900397 0.0648289 0.0774075 0.0871007 0.0578754 0.0656776 0.0649346 0.0618054 0.0530827 0.0573399 0.052835 0.204659 0.6304 0.842557 0.663718 0.630267 0.537835 0.436743 0.380594 0.334736 0.274641 0.218428 0.182904 0.194093 0.130684 0.116426 0.115307 0.120491 0.115242 0.105039 0.089898 0.339287 0.833928 0.89201 0.847351 0.885868 0.718274 0.63349 0.435283 0.355222 0.398535 0.284555 0.283632 0.258276 0.232136 0.260572 0.245105 0.194172 0.215658 0.181625 0.115912 0.124685 0.172845 0.403153 0.518318 0.505032 0.518908 0.353772 0.314415 0.260417 0.187215 0.166755 0.155303 0.153631 0.134266 0.132239 0.145932 0.145072 0.170908 0.141179 0.117025 0.0932492 0.103141 0.139657 0.141631 0.142166 0.178162 0.236232 0.233171 0.19593 0.203656 0.201666 0.147291 0.155549 0.165642 0.115685 0.114077 0.140576 0.120949 0.10023 0.075488 0.0691524 0.0703872 0.0602554 0.105975 0.460764 0.863468 0.841344 0.743975 0.624344 0.535433 0.425757 0.389639 0.322511 0.279877 0.192502 0.213059 0.190344 0.155011 0.134013 0.148768 0.127813 0.133291 0.128182 0.176028 0.281791 0.223189 0.198992 0.193601 0.183436 0.195744 0.233913 0.221215 0.14886 0.0993742 0.0920248 0.0789306 0.0851811 0.0747424 0.100489 0.106375 0.0866896 0.0822401 0.0643018 0.0819463 0.081135 0.094469 0.116559 0.0946106 0.100833 0.0928059 0.0619034 0.056504 0.0725264 0.046511 0.0671074 0.0507017 0.0509314 0.0693126 0.0573645 0.0507336 
0.0471445 0.0522879 0.0480421 0.083083 0.15539 0.165325 0.145727 0.136045 0.134697 0.0870662 0.108761 0.0908244 0.0792352 0.0829125 0.060982 0.0723782 0.0907959 0.0720178 0.0660367 0.0675224 0.050675 0.0588428 0.0543639 0.0739238 0.448568 0.925567 0.973444 0.740911 0.69001 0.631465 0.523379 0.425562 0.407257 0.373598 0.28653 0.296789 0.255094 0.249436 0.199982 0.13872 0.124745 0.110177 0.109994 0.196876 0.175401 0.208955 0.266637 0.242249 0.264852 0.225198 0.239447 0.183968 0.162852 0.151754 0.120695 0.0952901 0.133652 0.155726 0.119348 0.130654 0.120284 0.134148 0.119943 0.127491 0.0998429 0.127342 0.117227 0.138459 0.108536 0.112109 0.116469 0.105608 0.103308 0.0997458 0.0908462 0.0925413 0.103308 0.0622741 0.0717011 0.0920471 0.0798529 0.0845619 0.0768434 0.064583 0.0791313 0.0695519 0.104025 0.120987 0.171366 0.141478 0.127739 0.108496 0.0727998 0.114553 0.127771 0.109442 0.110514 0.110202 0.0835657 0.0795321 0.0935803 0.0834574 0.084336 0.095042 0.0741728 0.0841201 0.330518 0.665087 0.693456 0.566006 0.488647 0.42792 0.337344 0.346076 0.284655 0.214681 0.212832 0.184396 0.174909 0.136491 0.12191 0.1182 0.141756 0.127448 0.118748 0.0850289 0.148361 0.222931 0.239708 0.228752 0.203162 0.235074 0.266685 0.241574 0.199221 0.144484 0.126863 0.1294 0.108858 0.127052 0.142572 0.120572 0.152882 0.111591 0.0881809 0.0888725 0.138569 0.106755 0.124997 0.113986 0.0841088 0.0744094 0.0903245 0.0870655 0.0998053 0.0834926 0.06468 0.0820826 0.0938939 0.0843508 0.0947001 0.091387 0.077998 0.0941699 0.074824 0.0904097 0.0831858 0.106969 0.160138 0.170449 0.160021 0.138937 0.137461 0.105947 0.0918812 0.0931718 0.0947702 0.0717733 0.075929 0.08621 0.0617874 0.0755435 0.0764485 0.0670138 0.054362 0.0621337 0.0601537 0.192354 0.604609 0.857971 0.728876 0.655009 0.530333 0.451029 0.391772 0.332011 0.269215 0.237235 0.198724 0.202796 0.135874 0.119565 0.112975 0.117327 0.110819 0.105813 0.0870918 0.328994 0.801941 0.90869 0.937387 0.987602 0.745804 0.658189 0.443015 0.385498 
0.388736 0.275883 0.255372 0.239243 0.216375 0.241192 0.228235 0.187837 0.203992 0.170158 0.109679 0.11758 0.159904 0.390653 0.517078 0.508666 0.531087 0.341772 0.319248 0.274061 0.17915 0.169531 0.160211 0.157662 0.140002 0.132297 0.142435 0.138881 0.171226 0.144367 0.123187 0.0987208 0.105159 0.134667 0.132355 0.139594 0.182268 0.248417 0.247857 0.194815 0.19428 0.189158 0.140931 0.151526 0.168047 0.119665 0.116234 0.13657 0.11837 0.0981858 0.0728398 0.0637859 0.0676442 0.0603401 0.107312 0.45778 0.875891 0.884144 0.702587 0.570179 0.504397 0.463568 0.381661 0.309783 0.277454 0.208983 0.233763 0.207871 0.165335 0.113466) - :duration 9.984 - ) -) \ No newline at end of file diff --git a/configs/audio-dizzy.sexp b/configs/audio-dizzy.sexp new file mode 100644 index 0000000..dc16087 --- /dev/null +++ b/configs/audio-dizzy.sexp @@ -0,0 +1,17 @@ +;; Audio Configuration - dizzy.mp3 +;; +;; Defines audio analyzer and playback for a recipe. +;; Pass to recipe with: --audio configs/audio-dizzy.sexp +;; +;; Provides: +;; - music: audio analyzer for beat/energy detection +;; - audio-playback: path for synchronized playback + +(require-primitives "streaming") + +;; Audio analyzer (provides beat detection and energy levels) +;; Paths relative to working directory (project root) +(def music (streaming:make-audio-analyzer "dizzy.mp3")) + +;; Audio playback path (for sync with video output) +(audio-playback "dizzy.mp3") diff --git a/configs/audio-halleluwah.sexp b/configs/audio-halleluwah.sexp new file mode 100644 index 0000000..5e4b812 --- /dev/null +++ b/configs/audio-halleluwah.sexp @@ -0,0 +1,17 @@ +;; Audio Configuration - dizzy.mp3 +;; +;; Defines audio analyzer and playback for a recipe. 
+;; Pass to recipe with: --audio configs/audio-dizzy.sexp +;; +;; Provides: +;; - music: audio analyzer for beat/energy detection +;; - audio-playback: path for synchronized playback + +(require-primitives "streaming") + +;; Audio analyzer (provides beat detection and energy levels) +;; Paths relative to working directory (project root) +(def music (streaming:make-audio-analyzer "woods_half/halleluwah.webm")) + +;; Audio playback path (for sync with video output) +(audio-playback "woods_half/halleluwah.webm") diff --git a/configs/sources-default.sexp b/configs/sources-default.sexp new file mode 100644 index 0000000..754bd92 --- /dev/null +++ b/configs/sources-default.sexp @@ -0,0 +1,38 @@ +;; Default Sources Configuration +;; +;; Defines video sources and per-pair effect configurations. +;; Pass to recipe with: --sources configs/sources-default.sexp +;; +;; Required by recipes using process-pair macro: +;; - sources: array of video sources +;; - pair-configs: array of effect configurations per source + +(require-primitives "streaming") + +;; Video sources array +;; Paths relative to working directory (project root) +(def sources [ + (streaming:make-video-source "monday.webm" 30) + (streaming:make-video-source "escher.webm" 30) + (streaming:make-video-source "2.webm" 30) + (streaming:make-video-source "disruptors.webm" 30) + (streaming:make-video-source "4.mp4" 30) + (streaming:make-video-source "ecstacy.mp4" 30) + (streaming:make-video-source "dopple.webm" 30) + (streaming:make-video-source "5.mp4" 30) +]) + +;; Per-pair effect config: rotation direction, rotation ranges, zoom ranges +;; :dir = rotation direction (1 or -1) +;; :rot-a, :rot-b = max rotation angles for clip A and B +;; :zoom-a, :zoom-b = max zoom amounts for clip A and B +(def pair-configs [ + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 0: monday + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 1: escher + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 2: vid2 + 
{:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 3: disruptors (reversed) + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 4: vid4 + {:dir 1 :rot-a 30 :rot-b -30 :zoom-a 1.3 :zoom-b 0.7} ;; 5: ecstacy (smaller) + {:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 6: dopple (reversed) + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 7: vid5 +]) diff --git a/configs/sources-woods-half.sexp b/configs/sources-woods-half.sexp new file mode 100644 index 0000000..d2feff8 --- /dev/null +++ b/configs/sources-woods-half.sexp @@ -0,0 +1,19 @@ +;; Half-resolution Woods Sources (960x540) +;; +;; Pass to recipe with: --sources configs/sources-woods-half.sexp + +(require-primitives "streaming") + +(def sources [ + (streaming:make-video-source "woods_half/1.webm" 30) + (streaming:make-video-source "woods_half/2.webm" 30) + (streaming:make-video-source "woods_half/3.webm" 30) + (streaming:make-video-source "woods_half/4.webm" 30) +]) + +(def pair-configs [ + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} + {:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} +]) diff --git a/configs/sources-woods.sexp b/configs/sources-woods.sexp new file mode 100644 index 0000000..717bfd9 --- /dev/null +++ b/configs/sources-woods.sexp @@ -0,0 +1,39 @@ +;; Default Sources Configuration +;; +;; Defines video sources and per-pair effect configurations. 
+;; Pass to recipe with: --sources configs/sources-woods.sexp +;; +;; Required by recipes using process-pair macro: +;; - sources: array of video sources +;; - pair-configs: array of effect configurations per source + +(require-primitives "streaming") + +;; Video sources array +;; Paths relative to working directory (project root) +(def sources [ + (streaming:make-video-source "woods/1.webm" 10) + (streaming:make-video-source "woods/2.webm" 10) + (streaming:make-video-source "woods/3.webm" 10) + (streaming:make-video-source "woods/4.webm" 10) + (streaming:make-video-source "woods/5.webm" 10) + (streaming:make-video-source "woods/6.webm" 10) + (streaming:make-video-source "woods/7.webm" 10) + (streaming:make-video-source "woods/8.webm" 10) +]) + +;; Per-pair effect config: rotation direction, rotation ranges, zoom ranges +;; :dir = rotation direction (1 or -1) +;; :rot-a, :rot-b = max rotation angles for clip A and B +;; :zoom-a, :zoom-b = max zoom amounts for clip A and B +(def pair-configs [ + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 0: woods/1 + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 1: woods/2 + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 2: woods/3 + {:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 3: woods/4 (reversed) + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 4: woods/5 + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 5: woods/6 + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 6: woods/7 + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 7: woods/8 + +]) diff --git a/constructs/beat-alternate.sexp b/constructs/beat-alternate.sexp deleted file mode 100644 index 255cdb5..0000000 --- a/constructs/beat-alternate.sexp +++ /dev/null @@ -1,23 +0,0 @@ -;; beat-alternate construct -;; Alternates between sources on each beat -;; -;; Usage in recipe: -;; (construct beat-alternate :path "constructs/beat-alternate.sexp") -;; (def segments (beat-alternate beats-data (list
video-a video-b))) - -(define-construct beat-alternate - :params ( - (analysis :type any :desc "Analysis data with :times") - (sources :type any :desc "List of source nodes to alternate between") - ) - ;; Body: map over time pairs, return segment descriptors - (let [times (get analysis :times) - pairs (zip-pairs (cons 0 times)) - n-sources (len sources)] - (map-indexed - (fn [i pair] - (dict :source (nth sources (mod i n-sources)) - :start (first pair) - :end (nth pair 1) - :effects (list))) - pairs))) diff --git a/constructs/cycle-effects-preset.sexp b/constructs/cycle-effects-preset.sexp deleted file mode 100644 index b9bcacb..0000000 --- a/constructs/cycle-effects-preset.sexp +++ /dev/null @@ -1,158 +0,0 @@ -;; cycle-effects-preset construct -;; Data-driven effect cycling using preset definitions -;; -;; Preset format (flat, no nested :params): -;; {:effect "brightness" :amount {:bind "bass" :range [-80 80]}} -;; {:effect "blur" :radius 5} -;; -;; Binding specs {:bind "analyzer" :range [min max]} are resolved to actual bindings - -(define-construct cycle-effects-preset - :params ( - (preset :type any :desc "List of effect preset definitions") - (videos :type any :desc "List of video source nodes") - (video_infos :type any :desc "List of video info analysis results") - (beats :type any :desc "Beat analysis data with :times") - (beats_per_segment :type int :default 4 :desc "Number of beats per segment") - ) - (let [num-effects (len preset) - num-videos (len videos) - ;; Extract durations from video-info analysis results - durations (map (fn [info] (get info :duration)) video_infos) - times (get beats :times) - grouped (chunk-every times beats_per_segment) - - ;; Resolve a param value - if it's a binding spec dict, create actual Binding - ;; Note: pass the analyzer NAME (string) to bind, not the data - it will be - ;; looked up at execution time in analysis_data - resolve-param (fn [param-value] - (if (dict? 
param-value) - (let [bind-name (get param-value :bind)] - (if bind-name - (let [range-spec (get param-value :range (list 0 1))] - (bind bind-name :range range-spec)) - param-value)) - param-value)) - - ;; Process effect spec - resolve any binding specs in params - ;; Effect spec is flat: {:effect "name" :param1 val1 :param2 {:bind ...}} - make-effect (fn [effect-spec] - (let [effect-name (get effect-spec :effect)] - ;; Build effect dict with resolved params - ;; Since we can't iterate dict keys, we check known params - ;; Only include params that exist (non-nil) in the spec - (let [result {:effect effect-name} - ;; Check each known param - amount (get effect-spec :amount nil) - degrees (get effect-spec :degrees nil) - speed (get effect-spec :speed nil) - level (get effect-spec :level nil) - levels (get effect-spec :levels nil) - radius (get effect-spec :radius nil) - intensity (get effect-spec :intensity nil) - contrast (get effect-spec :contrast nil) - brightness (get effect-spec :brightness nil) - strength (get effect-spec :strength nil) - amplitude (get effect-spec :amplitude nil) - wavelength (get effect-spec :wavelength nil) - frequency (get effect-spec :frequency nil) - segments-p (get effect-spec :segments nil) - rotation_speed (get effect-spec :rotation_speed nil) - factor (get effect-spec :factor nil) - angle (get effect-spec :angle nil) - direction (get effect-spec :direction nil) - block_size (get effect-spec :block_size nil) - char_size (get effect-spec :char_size nil) - color_mode (get effect-spec :color_mode nil) - low (get effect-spec :low nil) - high (get effect-spec :high nil) - thickness (get effect-spec :thickness nil) - glow_radius (get effect-spec :glow_radius nil) - glow_intensity (get effect-spec :glow_intensity nil) - line_spacing (get effect-spec :line_spacing nil) - vignette_amount (get effect-spec :vignette_amount nil) - spacing (get effect-spec :spacing nil) - offset_x (get effect-spec :offset_x nil) - num_echoes (get effect-spec 
:num_echoes nil) - decay (get effect-spec :decay nil) - persistence (get effect-spec :persistence nil) - rows (get effect-spec :rows nil) - cols (get effect-spec :cols nil) - threshold_low (get effect-spec :threshold_low nil) - threshold_high (get effect-spec :threshold_high nil) - corruption (get effect-spec :corruption nil)] - ;; Only add non-nil params to result - ;; Use cond to build up the dict (since we can't dynamically add keys) - ;; This is ugly but necessary without dict iteration - {:effect effect-name - :amount (if (nil? amount) nil (resolve-param amount)) - :degrees (if (nil? degrees) nil (resolve-param degrees)) - :speed speed - :level level - :levels levels - :radius (if (nil? radius) nil (resolve-param radius)) - :intensity (if (nil? intensity) nil (resolve-param intensity)) - :contrast (if (nil? contrast) nil (resolve-param contrast)) - :brightness (if (nil? brightness) nil (resolve-param brightness)) - :strength (if (nil? strength) nil (resolve-param strength)) - :amplitude (if (nil? amplitude) nil (resolve-param amplitude)) - :wavelength wavelength - :frequency frequency - :segments segments-p - :rotation_speed rotation_speed - :factor (if (nil? factor) nil (resolve-param factor)) - :angle (if (nil? angle) nil (resolve-param angle)) - :direction direction - :block_size (if (nil? block_size) nil (resolve-param block_size)) - :char_size char_size - :color_mode color_mode - :low low - :high high - :thickness thickness - :glow_radius glow_radius - :glow_intensity glow_intensity - :line_spacing line_spacing - :vignette_amount (if (nil? vignette_amount) nil (resolve-param vignette_amount)) - :spacing spacing - :offset_x (if (nil? offset_x) nil (resolve-param offset_x)) - :num_echoes num_echoes - :decay decay - :persistence persistence - :rows rows - :cols cols - :threshold_low threshold_low - :threshold_high threshold_high - :corruption (if (nil? 
corruption) nil (resolve-param corruption))}))) - - find-valid-video (fn [preferred-idx seg-duration] - (cond - (>= (nth durations preferred-idx) seg-duration) preferred-idx - (>= (nth durations (mod (+ preferred-idx 1) num-videos)) seg-duration) (mod (+ preferred-idx 1) num-videos) - (>= (nth durations (mod (+ preferred-idx 2) num-videos)) seg-duration) (mod (+ preferred-idx 2) num-videos) - :else nil))] - - (nth - (reduce - (fn [state group] - (let [acc (first state) - segments (nth state 1) - audio-start (first group) - audio-end (last group) - seg-duration (- audio-end audio-start) - vid-idx (find-valid-video (mod acc num-videos) seg-duration)] - (if (nil? vid-idx) - (list (inc acc) segments) - (let [src (nth videos vid-idx) - src-duration (nth durations vid-idx) - wrapped-start (mod audio-start src-duration) - effect-idx (mod acc num-effects) - effect-spec (nth preset effect-idx) - fx (make-effect effect-spec) - segment (dict :source src - :start wrapped-start - :duration seg-duration - :effects (list fx))] - (list (inc acc) (append segments segment)))))) - (list 0 (list)) - grouped) - 1))) diff --git a/constructs/slice-every-n.sexp b/constructs/slice-every-n.sexp deleted file mode 100644 index 2e4fff3..0000000 --- a/constructs/slice-every-n.sexp +++ /dev/null @@ -1,56 +0,0 @@ -;; slice-every-n construct - group every N beats into one segment -;; -;; Usage: -;; (construct slice-every-n :path "constructs/slice-every-n.sexp") -;; (def segments (slice-every-n beats-data 4 -;; :init 0 -;; :reducer (fn [acc i start end] -;; {:source video-a -;; :effects (list {:effect invert}) -;; :acc (inc acc)}))) -;; -;; Groups every N analysis times into one segment, calling reducer once per group - -(define-construct slice-every-n - :params ( - (analysis :type any :desc "Analysis data with :times") - (n :type int :default 4 :desc "Number of beats per segment") - (init :type any :default 0 :desc "Initial accumulator value") - (reducer :type any :desc "Reducer function (fn [acc 
i start end] ...)") - ) - ;; Reducer receives: (acc, i, start, end) where start/end are audio beat times - ;; Reducer returns: {:source src :effects fx :acc new-acc} - ;; Optionally include :start/:end to override (e.g., for wrapping/randomizing) - ;; :duration is calculated from start/end (use :duration to override) - ;; Return :skip true to skip this segment - (let [times (get analysis :times) - ;; Group times into chunks of n - grouped (chunk-every times n)] - (nth - (reduce - (fn [state group] - (let [acc (first state) - segments (nth state 1) - i (len segments) - audio-start (first group) - audio-end (last group) - audio-duration (- audio-end audio-start) - ;; Call user's reducer with audio beat times - result (reducer acc i audio-start audio-end) - new-acc (get result :acc)] - ;; Skip if reducer returns :skip true - (if (get result :skip false) - (list new-acc segments) - (let [;; Use reducer's start/end/duration if provided, else use audio times - seg-start (get result :start audio-start) - seg-end (get result :end audio-end) - seg-duration (get result :duration (- seg-end seg-start)) - segment (dict :source (get result :source) - :start seg-start - :end seg-end - :duration seg-duration - :effects (get result :effects))] - (list new-acc (append segments segment)))))) - (list init (list)) - grouped) - 1))) diff --git a/constructs/slice-on.sexp b/constructs/slice-on.sexp deleted file mode 100644 index a4c29aa..0000000 --- a/constructs/slice-on.sexp +++ /dev/null @@ -1,47 +0,0 @@ -;; slice-on construct - iterate over analysis times with user function -;; -;; Usage: -;; (construct slice-on :path "constructs/slice-on.sexp") -;; (def segments (slice-on beats-data -;; :init 0 -;; :reducer (fn [acc i start end] -;; {:source (nth (list video-a video-b) (mod acc 2)) -;; :effects (list) -;; :acc (inc acc)}))) -;; -;; The construct receives: -;; - First positional arg as 'analysis' (the analysis data with :times) -;; - :init as 'init' (initial accumulator value) -;; - 
:reducer as 'reducer' (the reducer lambda) - -(define-construct slice-on - :params ( - (analysis :type any :desc "Analysis data with :times") - (init :type any :default 0 :desc "Initial accumulator value") - (reducer :type any :desc "Reducer function (fn [acc i start end] ...)") - ) - ;; Get times from analysis data - (let [times (get analysis :times) - pairs (zip-pairs (cons 0 times))] - ;; Use nth to get second element of reduce result (the segments list) - (nth - (reduce - (fn [state pair] - (let [acc (first state) - segments (nth state 1) - i (len segments) - start (first pair) - end (nth pair 1) - ;; Call user's reducer function - result (reducer acc i start end) - ;; Extract new acc and build segment - new-acc (get result :acc) - segment (dict :source (get result :source) - :start start - :end end - :duration (- end start) - :effects (get result :effects))] - (list new-acc (append segments segment)))) - (list init (list)) - pairs) - 1))) diff --git a/effect-presets/all-42.sexp b/effect-presets/all-42.sexp deleted file mode 100644 index 8cc2e9f..0000000 --- a/effect-presets/all-42.sexp +++ /dev/null @@ -1,65 +0,0 @@ -;; All 42 Effects Preset -;; Flat format - params are direct keys, not nested under :params -;; Binding format: {:bind "analyzer-name" :range [min max]} - -(def all-42-preset - (list - ;; Color effects 0-9 - {:effect "invert"} - {:effect "grayscale"} - {:effect "sepia"} - {:effect "brightness" :amount {:bind "bass" :range [-80 80]}} - {:effect "contrast" :amount {:bind "energy" :range [0.5 2.5]}} - {:effect "saturation" :amount {:bind "bass" :range [0.2 3.0]}} - {:effect "hue_shift" :degrees {:bind "energy" :range [0 360]}} - {:effect "color_cycle" :speed 2} - {:effect "threshold" :level 128} - {:effect "posterize" :levels 6} - - ;; Blur/sharpen 10-13 - {:effect "blur" :radius {:bind "bass" :range [1 30]}} - {:effect "sharpen" :amount {:bind "energy" :range [0.5 4]}} - {:effect "bloom" :intensity 0.6 :radius 20} - {:effect "color-adjust" 
:brightness 20 :contrast 1.2} - - ;; Distortion 14-21 - {:effect "swirl" :strength {:bind "bass" :range [-6 6]}} - {:effect "fisheye" :strength {:bind "bass" :range [-0.5 0.8]}} - {:effect "wave" :amplitude {:bind "bass" :range [10 60]} :wavelength 60} - {:effect "ripple" :amplitude {:bind "bass" :range [10 40]} :frequency 6} - {:effect "kaleidoscope" :segments 6 :rotation_speed 30} - {:effect "zoom" :factor {:bind "bass" :range [0.8 1.5]}} - {:effect "rotate" :angle {:bind "energy" :range [-30 30]}} - {:effect "mirror" :direction "horizontal"} - - ;; Stylization 22-28 - {:effect "pixelate" :block_size {:bind "bass" :range [4 32]}} - {:effect "ascii_art" :char_size 8 :color_mode "color"} - {:effect "ascii_zones" :char_size 10} - {:effect "edge_detect" :low 50 :high 150} - {:effect "emboss" :strength 1.5} - {:effect "outline" :thickness 2} - {:effect "neon_glow" :glow_radius 20 :glow_intensity 2} - - ;; Retro/film 29-33 - {:effect "crt" :line_spacing 3 :vignette_amount 0.3} - {:effect "scanlines" :spacing 3 :intensity 0.4} - {:effect "film_grain" :intensity 0.25} - {:effect "vignette" :strength 0.6} - {:effect "noise" :amount {:bind "bass" :range [10 80]}} - - ;; Chromatic 34 - {:effect "rgb_split" :offset_x {:bind "bass" :range [5 40]}} - - ;; Temporal 35-37 - {:effect "echo" :num_echoes 4 :decay 0.5} - {:effect "trails" :persistence 0.7} - {:effect "strobe" :frequency 4} - - ;; Geometric 38-39 - {:effect "flip" :direction "horizontal"} - {:effect "tile_grid" :rows 2 :cols 2} - - ;; Glitch 40-41 - {:effect "pixelsort" :threshold_low 30 :threshold_high 220} - {:effect "datamosh" :corruption {:bind "bass" :range [0.2 0.8]} :block_size 24})) diff --git a/effect-presets/chill-subtle.sexp b/effect-presets/chill-subtle.sexp deleted file mode 100644 index 3ccf192..0000000 --- a/effect-presets/chill-subtle.sexp +++ /dev/null @@ -1,13 +0,0 @@ -;; Chill/Subtle Preset -;; Gentle color shifts and soft effects - -(def chill-subtle-preset - (list - {:effect "hue_shift" :params 
{:degrees {:bind "energy" :range [0 60]}}} - {:effect "saturation" :params {:amount {:bind "bass" :range [0.8 1.3]}}} - {:effect "brightness" :params {:amount {:bind "energy" :range [-10 15]}}} - {:effect "blur" :params {:radius {:bind "bass" :range [0 5]}}} - {:effect "vignette" :params {:strength {:bind "energy" :range [0.2 0.5]}}} - {:effect "film_grain" :params {:intensity {:bind "bass" :range [0.05 0.15]}}} - {:effect "bloom" :params {:intensity {:bind "energy" :range [0.2 0.5]} :radius 15}} - {:effect "color-adjust" :params {:brightness {:bind "bass" :range [-5 10]} :contrast 1.05}})) diff --git a/effect-presets/glitch-only.sexp b/effect-presets/glitch-only.sexp deleted file mode 100644 index e72f73f..0000000 --- a/effect-presets/glitch-only.sexp +++ /dev/null @@ -1,13 +0,0 @@ -;; Glitch-Only Preset -;; Just the aggressive digital artifacts - -(def glitch-only-preset - (list - {:effect "datamosh" :params {:corruption {:bind "bass" :range [0.3 0.9]} :block_size 16}} - {:effect "pixelsort" :params {:threshold_low 20 :threshold_high 200}} - {:effect "rgb_split" :params {:offset_x {:bind "bass" :range [10 60]}}} - {:effect "noise" :params {:amount {:bind "energy" :range [20 100]}}} - {:effect "scanlines" :params {:spacing 2 :intensity {:bind "bass" :range [0.3 0.8]}}} - {:effect "pixelate" :params {:block_size {:bind "bass" :range [8 48]}}} - {:effect "threshold" :params {:level {:bind "energy" :range [80 180]}}} - {:effect "posterize" :params {:levels {:bind "bass" :range [2 6]}}})) diff --git a/effect-presets/retro-vhs.sexp b/effect-presets/retro-vhs.sexp deleted file mode 100644 index 628051b..0000000 --- a/effect-presets/retro-vhs.sexp +++ /dev/null @@ -1,13 +0,0 @@ -;; Retro VHS Preset -;; 80s/90s video aesthetic - -(def retro-vhs-preset - (list - {:effect "crt" :params {:line_spacing 3 :vignette_amount {:bind "bass" :range [0.2 0.5]}}} - {:effect "scanlines" :params {:spacing 2 :intensity {:bind "energy" :range [0.3 0.6]}}} - {:effect "rgb_split" :params 
{:offset_x {:bind "bass" :range [2 12]}}} - {:effect "noise" :params {:amount {:bind "bass" :range [5 30]}}} - {:effect "saturation" :params {:amount {:bind "energy" :range [1.2 1.8]}}} - {:effect "blur" :params {:radius {:bind "bass" :range [0 3]}}} - {:effect "contrast" :params {:amount {:bind "energy" :range [1.1 1.4]}}} - {:effect "film_grain" :params {:intensity {:bind "bass" :range [0.1 0.3]}}})) diff --git a/effects/ascii_alternating_fx.sexp b/effects/ascii_alternating_fx.sexp deleted file mode 100644 index d1a4278..0000000 --- a/effects/ascii_alternating_fx.sexp +++ /dev/null @@ -1,69 +0,0 @@ -;; ASCII with Alternating Effects - Checkerboard of blur and RGB split -;; -;; Demonstrates using existing sexp effects within cell_effect lambdas. -;; Even cells get blur, odd cells get RGB split - creating a checkerboard pattern. - -(recipe "ascii_alternating_fx" - :version "1.0" - :description "ASCII art with alternating blur and RGB split effects per cell" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - :params ( - (cols :type int :default 40 :range [20 100] - :desc "Number of character columns") - (blur_amount :type float :default 3 :range [1 10] - :desc "Blur radius for blur cells") - (rgb_offset :type int :default 3 :range [1 10] - :desc "RGB split offset for split cells") - ) - - ;; Registry - (effect ascii_fx_zone :path "../sexp_effects/effects/ascii_fx_zone.sexp") - (analyzer energy :path "../../artdag-analyzers/energy/analyzer.py") - - ;; Source files - (def video (source :path "../monday.webm")) - (def audio (source :path "../dizzy.mp3")) - - ;; Stage 1: Analysis - (stage :analyze - :outputs [energy-data] - (def audio-clip (-> audio (segment :start 60 :duration 10))) - (def energy-data (-> audio-clip (analyze energy)))) - - ;; Stage 2: Process - apply effect with alternating cell effects - (stage :process - :requires [:analyze] - :inputs [energy-data] - :outputs [result audio-clip] - (def clip (-> video (segment 
:start 0 :duration 10))) - (def audio-clip (-> audio (segment :start 60 :duration 10))) - - ;; Apply effect with cell_effect lambda - ;; Checkerboard: (row + col) even = blur, odd = rgb_split - (def result (-> clip - (effect ascii_fx_zone - :cols cols - :char_size (bind energy-data values :range [12 24]) - :color_mode "color" - :background "black" - ;; Pass params to zone dict - :energy (bind energy-data values :range [0 1]) - :blur_amount blur_amount - :rgb_offset rgb_offset - ;; Cell effect: alternate between blur and rgb_split - ;; Uses existing sexp effects - each cell is just a small frame - :cell_effect (lambda [cell zone] - (if (= (mod (+ (get zone "row") (get zone "col")) 2) 0) - ;; Even cells: blur scaled by energy - (blur cell (* (get zone "blur_amount") (get zone "energy"))) - ;; Odd cells: rgb split scaled by energy - (rgb_split cell - (* (get zone "rgb_offset") (get zone "energy")) - 0))))))) - - ;; Stage 3: Output - (stage :output - :requires [:process] - :inputs [result audio-clip] - (mux result audio-clip))) diff --git a/effects/ascii_alternating_rotate.sexp b/effects/ascii_alternating_rotate.sexp deleted file mode 100644 index da04a8e..0000000 --- a/effects/ascii_alternating_rotate.sexp +++ /dev/null @@ -1,67 +0,0 @@ -;; ASCII with Alternating Rotation Directions -;; -;; Checkerboard pattern: even cells rotate clockwise, odd cells rotate counter-clockwise -;; Rotation amount scaled by energy and position (more at top-right) - -(recipe "ascii_alternating_rotate" - :version "1.0" - :description "ASCII art with alternating rotation directions per cell" - :minimal-primitives true - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - :params ( - (cols :type int :default 50 :range [20 100] - :desc "Number of character columns") - (rotation_scale :type float :default 60 :range [0 180] - :desc "Max rotation in degrees") - (duration :type float :default 10 :range [1 300] - :desc "Clip duration in seconds") - ) - - ;; 
Registry - (effect ascii_fx_zone :path "../sexp_effects/effects/ascii_fx_zone.sexp") - ;; Effects used in cell_effect lambda - (effect rotate :path "../sexp_effects/effects/rotate.sexp") - (analyzer energy :path "../../artdag-analyzers/energy/analyzer.py") - - ;; Source files - (def video (source :path "../monday.webm")) - (def audio (source :path "../dizzy.mp3")) - - ;; Stage 1: Analysis - (stage :analyze - :outputs [energy-data] - (def audio-clip (-> audio (segment :start 60 :duration duration))) - (def energy-data (-> audio-clip (analyze energy)))) - - ;; Stage 2: Process - (stage :process - :requires [:analyze] - :inputs [energy-data] - :outputs [result audio-clip] - (def clip (-> video (segment :start 0 :duration duration))) - (def audio-clip (-> audio (segment :start 60 :duration duration))) - - (def result (-> clip - (effect ascii_fx_zone - :cols cols - :char_size (bind energy-data values :range [10 20]) - :color_mode "color" - :background "black" - :energy (bind energy-data values :range [0 1]) - :rotation_scale rotation_scale - ;; Alternating rotation: even cells clockwise, odd cells counter-clockwise - ;; Scaled by energy * position (more at top-right) - :cell_effect (lambda [cell zone] - (rotate cell - (* (if (= (mod (+ (get zone "row") (get zone "col")) 2) 0) 1 -1) - (* (get zone "energy") - (get zone "rotation_scale") - (* 1.5 (+ (get zone "col-norm") - (- 1 (get zone "row-norm")))))))))))) - - ;; Stage 3: Output - (stage :output - :requires [:process] - :inputs [result audio-clip] - (mux result audio-clip))) diff --git a/effects/ascii_art.py b/effects/ascii_art.py deleted file mode 100644 index 408f013..0000000 --- a/effects/ascii_art.py +++ /dev/null @@ -1,205 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect ascii_art -@version 1.0.0 -@author artdag - -@description -ASCII art effect - converts video to ASCII character representation. 
-Renders the video using ASCII characters based on brightness, -creating a retro terminal aesthetic. - -@param char_size int - @range 4 32 - @default 8 - Size of each character cell in pixels. Sync to bass for reactive sizing. - -@param alphabet string - @enum standard blocks cyrillic greek japanese braille - @default standard - Character set to use for rendering. Each has different visual density. - -@param color_mode string - @enum mono color invert - @default color - Color rendering mode: - - mono: white on black - - color: preserve source colors - - invert: dark text on colored background - -@param contrast_boost float - @range 1 3 - @default 1.5 - Enhance contrast for better character separation. - -@param background list - @default [0, 0, 0] - Background color RGB. - -@example - (effect ascii_art :char_size 8 :color_mode "color") - -@example - ;; Japanese characters, reactive sizing - (effect ascii_art :alphabet "japanese" :char_size (bind bass :range [6 16])) - -@example - ;; Braille pattern for high detail - (effect ascii_art :alphabet "braille" :char_size 4) -""" - -import numpy as np -import cv2 - - -# Character sets ordered by visual density (light to dark) -ALPHABETS = { - # Classic ASCII gradient - "standard": " .`'^\",:;Il!i><~+_-?][}{1)(|/tfjrxnuvczXYUJCLQ0OZmwqpdbkhao*#MW&8%B@$", - - # Unicode block elements - naturally ordered by fill - "blocks": " ░▒▓█", - - # Cyrillic - ordered by visual complexity - "cyrillic": " ·гтпрсьоеаилнкчуцбдвжзмъыюяфщшэ", - - # Greek - ordered by visual weight - "greek": " ·ιτορεαηυικλνσςπμβγδζθξφψωΣΩΨΦ", - - # Japanese Katakana - ordered by stroke complexity - "japanese": " ·ノ一ヘイコニハヒフホメヨワヲンリルレロカキクケサシスセソタチツテト", - - # Braille patterns - high detail - "braille": " ⠁⠂⠃⠄⠅⠆⠇⠈⠉⠊⠋⠌⠍⠎⠏⠐⠑⠒⠓⠔⠕⠖⠗⠘⠙⠚⠛⠜⠝⠞⠟⠠⠡⠢⠣⠤⠥⠦⠧⠨⠩⠪⠫⠬⠭⠮⠯⠰⠱⠲⠳⠴⠵⠶⠷⠸⠹⠺⠻⠼⠽⠾⠿", -} - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply ASCII art effect to a video frame. 
- - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - char_size: character cell size (default 8) - - alphabet: character set name (default "standard") - - color_mode: mono/color/invert (default "color") - - contrast_boost: enhance contrast (default 1.5) - - background: RGB tuple (default [0,0,0]) - state: Persistent state dict (used for caching) - - Returns: - Tuple of (processed_frame, new_state) - """ - char_size = max(4, min(int(params.get("char_size", 8)), 32)) - alphabet_name = params.get("alphabet", "standard") - color_mode = params.get("color_mode", "color") - contrast_boost = max(1.0, min(params.get("contrast_boost", 1.5), 3.0)) - background = params.get("background", [0, 0, 0]) - - if state is None: - state = {} - - # Get character set - char_set = ALPHABETS.get(alphabet_name, ALPHABETS["standard"]) - - # Get or create character atlas cache - cache_key = f"{char_size}_{alphabet_name}" - if "atlas_cache" not in state or state.get("cache_key") != cache_key: - state["atlas"] = _create_char_atlas(char_size, char_set) - state["cache_key"] = cache_key - - atlas = state["atlas"] - - h, w = frame.shape[:2] - cols = w // char_size - rows = h // char_size - - if cols < 1 or rows < 1: - return frame, state - - # Crop frame to fit grid - grid_h, grid_w = rows * char_size, cols * char_size - frame_cropped = frame[:grid_h, :grid_w] - - # Downsample to get average color per cell - reshaped = frame_cropped.reshape(rows, char_size, cols, char_size, 3) - cell_colors = reshaped.mean(axis=(1, 3)).astype(np.uint8) - - # Convert to grayscale for brightness mapping - cell_gray = 0.299 * cell_colors[:,:,0] + 0.587 * cell_colors[:,:,1] + 0.114 * cell_colors[:,:,2] - - # Apply contrast boost - if contrast_boost > 1: - cell_gray = (cell_gray - 128) * contrast_boost + 128 - cell_gray = np.clip(cell_gray, 0, 255) - - # Map brightness to character indices - char_indices = ((cell_gray / 255) * (len(char_set) - 1)).astype(np.int32) - 
char_indices = np.clip(char_indices, 0, len(char_set) - 1) - - # Create output frame - if isinstance(background, (list, tuple)) and len(background) >= 3: - bg = background[:3] - else: - bg = [0, 0, 0] - result = np.full((grid_h, grid_w, 3), bg, dtype=np.uint8) - - # Render characters - for row in range(rows): - for col in range(cols): - char_idx = char_indices[row, col] - char = char_set[char_idx] - char_mask = atlas.get(char) - - if char_mask is None: - continue - - y1, x1 = row * char_size, col * char_size - - if color_mode == "mono": - color = np.array([255, 255, 255], dtype=np.uint8) - elif color_mode == "invert": - # Colored background, dark text - result[y1:y1+char_size, x1:x1+char_size] = cell_colors[row, col] - color = np.array([0, 0, 0], dtype=np.uint8) - else: # color - color = cell_colors[row, col] - - # Apply character mask - if char != ' ': - mask = char_mask > 0 - result[y1:y1+char_size, x1:x1+char_size][mask] = color - - # Pad to original size if needed - if result.shape[0] < h or result.shape[1] < w: - padded = np.full((h, w, 3), bg, dtype=np.uint8) - padded[:grid_h, :grid_w] = result - result = padded - - return result, state - - -def _create_char_atlas(char_size: int, char_set: str) -> dict: - """Pre-render all characters as masks.""" - font = cv2.FONT_HERSHEY_SIMPLEX - font_scale = char_size / 20.0 - thickness = max(1, int(char_size / 10)) - - atlas = {} - for char in char_set: - char_img = np.zeros((char_size, char_size), dtype=np.uint8) - if char != ' ': - try: - (text_w, text_h), baseline = cv2.getTextSize(char, font, font_scale, thickness) - text_x = (char_size - text_w) // 2 - text_y = (char_size + text_h) // 2 - cv2.putText(char_img, char, (text_x, text_y), font, font_scale, 255, thickness, cv2.LINE_AA) - except: - pass - atlas[char] = char_img - - return atlas diff --git a/effects/ascii_art.sexp b/effects/ascii_art.sexp deleted file mode 100644 index 386f32b..0000000 --- a/effects/ascii_art.sexp +++ /dev/null @@ -1,34 +0,0 @@ -;; ASCII art 
effect - render as text characters (music reactive) -;; Usage: python3 plan.py effects/ascii_art.sexp | python3 execute.py - -d . -o - | mpv --fs - -;; -;; Parameters: -;; color_mode: coloring mode ("color", "green", "white", default: "color") -;; char_size is bound to energy (wobbles with overall loudness) - -(recipe "ascii_art" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect ascii_art :path "sexp_effects/effects/ascii_art.sexp") - (analyzer energy :path "../artdag-analyzers/energy/analyzer.py") - - ;; Default parameter - (def color_mode "color") - - (def video (source :path "monday.webm")) - (def audio (source :path "dizzy.mp3")) - - (def clip (-> video (segment :start 0 :duration 10))) - ;; Audio from 60s where it's louder - (def audio-clip (-> audio (segment :start 60 :duration 10))) - - ;; Analyze just the audio clip - times are 0-10s relative to clip - (def energy-data (-> audio-clip (analyze energy))) - - ;; char_size bound to energy - no offset needed since analysis is on clip - (def result (-> clip - (effect ascii_art - :char_size (bind energy-data values :range [2 32]) - :color_mode color_mode))) - - (mux result audio-clip)) diff --git a/effects/ascii_art_fx_staged.sexp b/effects/ascii_art_fx_staged.sexp deleted file mode 100644 index 75e507d..0000000 --- a/effects/ascii_art_fx_staged.sexp +++ /dev/null @@ -1,89 +0,0 @@ -;; ASCII art FX effect with staged execution and per-character effects -;; -;; Run with --list-params to see all available parameters: -;; python3 run_staged.py effects/ascii_art_fx_staged.sexp --list-params - -(recipe "ascii_art_fx_staged" - :version "1.0" - :description "ASCII art FX with per-character effects" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - :params ( - ;; Colors - (color_mode :type string :default "color" - :desc "Character color: color, mono, invert, or any color name/hex") - (background_color :type string :default 
"black" - :desc "Background color name or hex value") - (invert_colors :type int :default 0 :range [0 1] - :desc "Swap foreground and background colors") - - ;; Character sizing - (char_size :type int :default 12 :range [4 32] - :desc "Base character cell size in pixels") - - ;; Per-character effects - (char_jitter :type float :default 0 :range [0 20] - :desc "Position jitter amount in pixels") - (char_scale :type float :default 1.0 :range [0.5 2.0] - :desc "Character scale factor") - (char_rotation :type float :default 0 :range [0 180] - :desc "Rotation amount in degrees") - (char_hue_shift :type float :default 0 :range [0 360] - :desc "Hue shift in degrees") - - ;; Modulation sources - (jitter_source :type string :default "none" - :choices [none luminance inv_luminance saturation position_x position_y random center_dist] - :desc "What drives jitter modulation") - (scale_source :type string :default "none" - :choices [none luminance inv_luminance saturation position_x position_y random center_dist] - :desc "What drives scale modulation") - (rotation_source :type string :default "none" - :choices [none luminance inv_luminance saturation position_x position_y random center_dist] - :desc "What drives rotation modulation") - (hue_source :type string :default "none" - :choices [none luminance inv_luminance saturation position_x position_y random center_dist] - :desc "What drives hue shift modulation") - ) - - ;; Registry - (effect ascii_art_fx :path "../sexp_effects/effects/ascii_art_fx.sexp") - (analyzer energy :path "../../artdag-analyzers/energy/analyzer.py") - - ;; Source files (not parameterized for now) - (def video (source :path "../monday.webm")) - (def audio (source :path "../dizzy.mp3")) - - ;; Stage 1: Analysis - (stage :analyze - :outputs [energy-data] - (def audio-clip (-> audio (segment :start 60 :duration 10))) - (def energy-data (-> audio-clip (analyze energy)))) - - ;; Stage 2: Process - apply effect - (stage :process - :requires [:analyze] - :inputs 
[energy-data] - :outputs [result audio-clip] - (def clip (-> video (segment :start 0 :duration 10))) - (def audio-clip (-> audio (segment :start 60 :duration 10))) - (def result (-> clip - (effect ascii_art_fx - :char_size (bind energy-data values :range [8 24]) - :color_mode color_mode - :background_color background_color - :invert_colors invert_colors - :char_jitter char_jitter - :char_scale char_scale - :char_rotation char_rotation - :char_hue_shift char_hue_shift - :jitter_source jitter_source - :scale_source scale_source - :rotation_source rotation_source - :hue_source hue_source)))) - - ;; Stage 3: Output - (stage :output - :requires [:process] - :inputs [result audio-clip] - (mux result audio-clip))) diff --git a/effects/ascii_art_staged.sexp b/effects/ascii_art_staged.sexp deleted file mode 100644 index 0a0dbb1..0000000 --- a/effects/ascii_art_staged.sexp +++ /dev/null @@ -1,59 +0,0 @@ -;; ASCII art effect with staged execution -;; -;; Stages: -;; :analyze - Run energy analysis on audio (cacheable) -;; :process - Segment media and apply effect -;; :output - Mux video with audio -;; -;; Usage: python3 run_staged.py effects/ascii_art_staged.sexp -;; -;; Parameters: -;; color_mode: coloring mode ("color", "green", "white", default: "color") -;; char_size is bound to energy (wobbles with overall loudness) - -(recipe "ascii_art_staged" - :version "1.0" - :description "ASCII art effect with staged execution" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - ;; Registry: effects and analyzers - (effect ascii_art :path "../sexp_effects/effects/ascii_art.sexp") - (analyzer energy :path "../../artdag-analyzers/energy/analyzer.py") - - ;; Pre-stage definitions (available to all stages) - (def color_mode "color") - (def background_color "black") - (def invert_colors 0) ;; 0=false, 1=true - (def video (source :path "../monday.webm")) - (def audio (source :path "../dizzy.mp3")) - - ;; Stage 1: Analysis - extract energy from audio - ;; 
This stage is expensive but cacheable - rerun with same input skips this - (stage :analyze - :outputs [energy-data] - ;; Audio from 60s where it's louder - (def audio-clip (-> audio (segment :start 60 :duration 10))) - (def energy-data (-> audio-clip (analyze energy)))) - - ;; Stage 2: Process - apply ASCII art effect with energy binding - (stage :process - :requires [:analyze] - :inputs [energy-data] - :outputs [result audio-clip] - ;; Video segment - (def clip (-> video (segment :start 0 :duration 10))) - ;; Audio clip for muxing (same segment as analysis) - (def audio-clip (-> audio (segment :start 60 :duration 10))) - ;; Apply effect with char_size bound to energy - (def result (-> clip - (effect ascii_art - :char_size (bind energy-data values :range [2 32]) - :color_mode color_mode - :background_color background_color - :invert_colors invert_colors)))) - - ;; Stage 3: Output - combine video and audio - (stage :output - :requires [:process] - :inputs [result audio-clip] - (mux result audio-clip))) diff --git a/effects/ascii_cell_effect_staged.sexp b/effects/ascii_cell_effect_staged.sexp deleted file mode 100644 index b165c57..0000000 --- a/effects/ascii_cell_effect_staged.sexp +++ /dev/null @@ -1,64 +0,0 @@ -;; ASCII Cell Effect - Demonstrates arbitrary per-cell effects via lambda -;; -;; Each character cell is a mini-frame that can have any effects applied. -;; The lambda receives the cell image and zone context (including bound analysis data). 
- -(recipe "ascii_cell_effect_staged" - :version "1.0" - :description "ASCII art with lambda-driven per-cell effects" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - :params ( - (cols :type int :default 60 :range [20 200] - :desc "Number of character columns") - (rotation_scale :type float :default 45 :range [0 90] - :desc "Max rotation in degrees at top-right corner") - ) - - ;; Registry - (effect ascii_fx_zone :path "../sexp_effects/effects/ascii_fx_zone.sexp") - (analyzer energy :path "../../artdag-analyzers/energy/analyzer.py") - - ;; Source files - (def video (source :path "../monday.webm")) - (def audio (source :path "../dizzy.mp3")) - - ;; Stage 1: Analysis - (stage :analyze - :outputs [energy-data] - (def audio-clip (-> audio (segment :start 60 :duration 10))) - (def energy-data (-> audio-clip (analyze energy)))) - - ;; Stage 2: Process - apply effect with cell_effect lambda - (stage :process - :requires [:analyze] - :inputs [energy-data] - :outputs [result audio-clip] - (def clip (-> video (segment :start 0 :duration 10))) - (def audio-clip (-> audio (segment :start 60 :duration 10))) - - ;; Apply effect with cell_effect lambda - ;; The lambda receives (cell zone) where: - ;; cell = the rendered character as a small image - ;; zone = dict with row, col, lum, sat, hue, energy, rotation_scale, etc. 
- (def result (-> clip - (effect ascii_fx_zone - :cols cols - :char_size (bind energy-data values :range [10 20]) - :color_mode "color" - :background "black" - ;; Pass bound values so they're available in zone dict - :energy (bind energy-data values :range [0 1]) - :rotation_scale rotation_scale - ;; Cell effect lambda: rotate each cell based on energy * position - :cell_effect (lambda [cell zone] - (rotate-cell cell - (* (* (get zone "energy") (get zone "rotation_scale")) - (* 1.5 (+ (get zone "col-norm") - (- 1 (get zone "row-norm"))))))))))) - - ;; Stage 3: Output - (stage :output - :requires [:process] - :inputs [result audio-clip] - (mux result audio-clip))) diff --git a/effects/ascii_dual_blend.sexp b/effects/ascii_dual_blend.sexp deleted file mode 100644 index 90bec29..0000000 --- a/effects/ascii_dual_blend.sexp +++ /dev/null @@ -1,99 +0,0 @@ -;; ASCII Dual Blend -;; -;; Applies ASCII alternating rotation effect to two video sources, -;; blends them together, and muxes with audio. -;; All synced to the same audio analysis. 
- -(recipe "ascii_dual_blend" - :version "1.0" - :description "Blend two ASCII-processed videos synced to audio" - :minimal-primitives true - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - :params ( - (cols :type int :default 50 :range [20 100] - :desc "Number of character columns") - (rotation_scale :type float :default 60 :range [0 180] - :desc "Max rotation in degrees") - (blend_opacity :type float :default 0.5 :range [0 1] - :desc "Blend opacity (0=video-a only, 1=video-b only)") - (blend_mode :type string :default "overlay" - :desc "Blend mode: alpha, add, multiply, screen, overlay, difference") - (duration :type float :default 10 :range [1 300] - :desc "Clip duration in seconds") - ) - - ;; Registry - effects and analyzers - (effect ascii_fx_zone :path "../sexp_effects/effects/ascii_fx_zone.sexp") - (effect rotate :path "../sexp_effects/effects/rotate.sexp") - (effect blend :path "../sexp_effects/effects/blend.sexp") - (analyzer energy :path "../../artdag-analyzers/energy/analyzer.py") - - ;; Source files - (def video-a (source :path "../monday.webm")) - (def video-b (source :path "../new.webm")) - (def audio (source :path "../dizzy.mp3")) - - ;; Stage 1: Analysis - (stage :analyze - :outputs [energy-data] - (def audio-clip (-> audio (segment :start 60 :duration duration))) - (def energy-data (-> audio-clip (analyze energy)))) - - ;; Stage 2: Process both videos - (stage :process - :requires [:analyze] - :inputs [energy-data] - :outputs [blended audio-clip] - - ;; Get audio clip for final mux - (def audio-clip (-> audio (segment :start 60 :duration duration))) - - ;; Process video A with ASCII effect - (def clip-a (-> video-a (segment :start 0 :duration duration))) - (def ascii-a (-> clip-a - (effect ascii_fx_zone - :cols cols - :char_size (bind energy-data values :range [10 20]) - :color_mode "color" - :background "black" - :energy (bind energy-data values :range [0 1]) - :rotation_scale rotation_scale - :cell_effect 
(lambda [cell zone] - (rotate cell - (* (if (= (mod (+ (get zone "row") (get zone "col")) 2) 0) 1 -1) - (* (get zone "energy") - (get zone "rotation_scale") - (* 1.5 (+ (get zone "col-norm") - (- 1 (get zone "row-norm"))))))))))) - - ;; Process video B with ASCII effect - (def clip-b (-> video-b (segment :start 0 :duration duration))) - (def ascii-b (-> clip-b - (effect ascii_fx_zone - :cols cols - :char_size (bind energy-data values :range [10 20]) - :color_mode "color" - :background "black" - :energy (bind energy-data values :range [0 1]) - :rotation_scale rotation_scale - :cell_effect (lambda [cell zone] - (rotate cell - (* (if (= (mod (+ (get zone "row") (get zone "col")) 2) 0) 1 -1) - (* (get zone "energy") - (get zone "rotation_scale") - (* 1.5 (+ (get zone "col-norm") - (- 1 (get zone "row-norm"))))))))))) - - ;; Blend the two ASCII videos using consistent effect syntax - (def blended (-> ascii-a - (effect blend ascii-b - :mode blend_mode - :opacity blend_opacity - :resize_mode "fit")))) - - ;; Stage 3: Output - (stage :output - :requires [:process] - :inputs [blended audio-clip] - (mux blended audio-clip))) diff --git a/effects/ascii_fx_zone_staged.sexp b/effects/ascii_fx_zone_staged.sexp deleted file mode 100644 index 25c4254..0000000 --- a/effects/ascii_fx_zone_staged.sexp +++ /dev/null @@ -1,66 +0,0 @@ -;; ASCII FX Zone effect with per-zone expression-driven effects -;; -;; Uses energy analysis to drive rotation based on position: -;; - Bottom-left = 0 rotation -;; - Top-right = max rotation (scaled by energy) - -(recipe "ascii_fx_zone_staged" - :version "1.0" - :description "ASCII art with per-zone expression-driven effects" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - :params ( - (cols :type int :default 80 :range [20 200] - :desc "Number of character columns") - (color_mode :type string :default "color" - :desc "Character color: color, mono, invert, or any color name/hex") - (background :type string :default 
"black" - :desc "Background color name or hex value") - (rotation_scale :type float :default 30 :range [0 90] - :desc "Max rotation in degrees at top-right corner") - ) - - ;; Registry - (effect ascii_fx_zone :path "../sexp_effects/effects/ascii_fx_zone.sexp") - (analyzer energy :path "../../artdag-analyzers/energy/analyzer.py") - - ;; Source files - (def video (source :path "../monday.webm")) - (def audio (source :path "../dizzy.mp3")) - - ;; Stage 1: Analysis - (stage :analyze - :outputs [energy-data] - (def audio-clip (-> audio (segment :start 60 :duration 10))) - (def energy-data (-> audio-clip (analyze energy)))) - - ;; Stage 2: Process - apply effect with zone expressions - (stage :process - :requires [:analyze] - :inputs [energy-data] - :outputs [result audio-clip] - (def clip (-> video (segment :start 0 :duration 10))) - (def audio-clip (-> audio (segment :start 60 :duration 10))) - - ;; Apply effect with lambdas - ;; Lambda receives zone dict: {row, col, row-norm, col-norm, lum, sat, hue, r, g, b, char} - ;; Plus any extra params like energy, rotation_scale - (def result (-> clip - (effect ascii_fx_zone - :char_size (bind energy-data values :range [8 24]) - :color_mode color_mode - :background background - ;; Pass energy as extra param so lambda can access it via zone dict - :energy (bind energy-data values :range [0 1]) - :rotation_scale rotation_scale - ;; Rotation: energy * scale * position (bottom-left=0, top-right=3) - :char_rotation (lambda [z] - (* (* (get z "energy") (get z "rotation_scale")) - (* 1.5 (+ (get z "col-norm") - (- 1 (get z "row-norm")))))))))) - - ;; Stage 3: Output - (stage :output - :requires [:process] - :inputs [result audio-clip] - (mux result audio-clip))) diff --git a/effects/ascii_zones.sexp b/effects/ascii_zones.sexp deleted file mode 100644 index ad7c85c..0000000 --- a/effects/ascii_zones.sexp +++ /dev/null @@ -1,33 +0,0 @@ -;; ASCII zones effect - region-based ASCII rendering -;; Usage: python3 plan.py 
effects/ascii_zones.sexp -p char_size=10 | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; char_size: character cell size in pixels (4 to 20, default: 10) - -(recipe "ascii_zones" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect ascii_zones :path "sexp_effects/effects/ascii_zones.sexp") - (analyzer energy :path "../artdag-analyzers/energy/analyzer.py") - - - ;; Default parameter (overridden by -p char_size=N) - (def char_size 10) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def audio (source :path "dizzy.mp3")) - (def audio-clip (-> audio (segment :start 60 :duration 10))) - - ;; Analyze just the audio clip - times are 0-10s relative to clip - (def energy-data (-> audio-clip (analyze energy))) - - - - ;; char_size bound to energy - no offset needed since analysis is on clip - (def result (-> clip - (effect ascii_zones - :char_size (bind energy-data values :range [2 32])))) - - (mux result audio-clip)) diff --git a/effects/beam.py b/effects/beam.py deleted file mode 100644 index 85a543b..0000000 --- a/effects/beam.py +++ /dev/null @@ -1,164 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy"] -# /// -""" -@effect beam -@version 1.0.0 -@author artdag - -@description -Beam effect. Creates animated light beams / lasers from a starting -point to an ending point with glow effect. - -@param start_x float - @range 0 1 - @default 0 - Beam start X position (0-1). - -@param start_y float - @range 0 1 - @default 0.5 - Beam start Y position (0-1). - -@param end_x float - @range 0 1 - @default 1 - Beam end X position (0-1). - -@param end_y float - @range 0 1 - @default 0.5 - Beam end Y position (0-1). - -@param thickness float - @range 1 50 - @default 5 - Beam core thickness in pixels. - -@param glow_radius float - @range 0 100 - @default 20 - Outer glow size in pixels. 
- -@param color list - @default [0, 255, 255] - Beam color RGB (default cyan). - -@param intensity float - @range 0 2 - @default 1.0 - Beam brightness. - -@param pulse bool - @default false - Enable pulsing animation. - -@param pulse_speed float - @range 0.1 10 - @default 2.0 - Pulse animation speed. - -@example - (effect beam :start_x 0 :start_y 0.5 :end_x 1 :end_y 0.5) - -@example - ;; Reactive laser - (effect beam :intensity (bind bass :range [0.5 2]) :color [255 0 0]) -""" - -import numpy as np - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply beam effect to a video frame. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - state: Persistent state dict - - Returns: - Tuple of (processed_frame, new_state) - """ - start_x = params.get("start_x", 0) - start_y = params.get("start_y", 0.5) - end_x = params.get("end_x", 1) - end_y = params.get("end_y", 0.5) - thickness = params.get("thickness", 5) - glow_radius = params.get("glow_radius", 20) - color = params.get("color", [0, 255, 255]) - intensity = params.get("intensity", 1.0) - pulse = params.get("pulse", False) - pulse_speed = params.get("pulse_speed", 2.0) - t = params.get("_time", 0) - - if state is None: - state = {} - - h, w = frame.shape[:2] - result = frame.copy().astype(np.float32) - - # Calculate beam endpoints in pixels - x1, y1 = int(start_x * w), int(start_y * h) - x2, y2 = int(end_x * w), int(end_y * h) - - # Apply pulse modulation - if pulse: - pulse_mod = 0.5 + 0.5 * np.sin(t * pulse_speed * 2 * np.pi) - intensity = intensity * pulse_mod - - # Create coordinate grids - y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) - - # Calculate distance from each pixel to the line segment - line_vec = np.array([x2 - x1, y2 - y1], dtype=np.float32) - line_len = np.sqrt(line_vec[0]**2 + line_vec[1]**2) - - if line_len < 1: - return frame, state - - line_unit = line_vec / line_len - - # Vector from start to each pixel 
- px = x_coords - x1 - py = y_coords - y1 - - # Project onto line - proj_len = px * line_unit[0] + py * line_unit[1] - proj_len = np.clip(proj_len, 0, line_len) - - # Closest point on line - closest_x = x1 + proj_len * line_unit[0] - closest_y = y1 + proj_len * line_unit[1] - - # Distance to closest point - dist = np.sqrt((x_coords - closest_x)**2 + (y_coords - closest_y)**2) - - # Get beam color - if isinstance(color, (list, tuple)) and len(color) >= 3: - beam_color = np.array(color[:3], dtype=np.float32) - else: - beam_color = np.array([0, 255, 255], dtype=np.float32) - - # Core beam (bright center) - core_mask = dist < thickness - core_intensity = intensity * (1 - dist[core_mask] / max(1, thickness)) - for c in range(3): - result[core_mask, c] = np.clip( - result[core_mask, c] + beam_color[c] * core_intensity, - 0, 255 - ) - - # Glow (fading outer region) - glow_mask = (dist >= thickness) & (dist < thickness + glow_radius) - glow_dist = dist[glow_mask] - thickness - glow_intensity = intensity * 0.5 * (1 - glow_dist / max(1, glow_radius)) ** 2 - for c in range(3): - result[glow_mask, c] = np.clip( - result[glow_mask, c] + beam_color[c] * glow_intensity, - 0, 255 - ) - - return result.astype(np.uint8), state diff --git a/effects/bloom.py b/effects/bloom.py deleted file mode 100644 index 089e4e0..0000000 --- a/effects/bloom.py +++ /dev/null @@ -1,108 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect bloom -@version 1.0.0 -@author artdag - -@description -Bloom effect. Creates a soft glow around bright areas, simulating -camera lens bloom. Great for dreamy or ethereal looks. - -@param intensity float - @range 0 2 - @default 0.5 - Bloom brightness. - -@param threshold int - @range 0 255 - @default 200 - Brightness threshold for bloom (pixels above this glow). - -@param radius int - @range 1 50 - @default 15 - Blur radius for the glow. 
- -@param soft_threshold bool - @default true - Use soft threshold (gradual) vs hard threshold. - -@param color_tint list - @default [255, 255, 255] - Tint color for the bloom. - -@example - (effect bloom :intensity 0.7 :threshold 180) - -@example - ;; Warm bloom - (effect bloom :intensity 0.6 :color_tint [255 200 150]) -""" - -import numpy as np -import cv2 - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply bloom effect to a video frame. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - intensity: bloom brightness (default 0.5) - - threshold: brightness cutoff (default 200) - - radius: blur radius (default 15) - - soft_threshold: gradual vs hard (default True) - - color_tint: RGB tint (default white) - state: Persistent state dict - - Returns: - Tuple of (processed_frame, new_state) - """ - intensity = params.get("intensity", 0.5) - threshold = int(params.get("threshold", 200)) - radius = max(1, int(params.get("radius", 15))) - soft_threshold = params.get("soft_threshold", True) - color_tint = params.get("color_tint", [255, 255, 255]) - - if state is None: - state = {} - - if intensity <= 0: - return frame, state - - # Convert to float - result = frame.astype(np.float32) - - # Get brightness (luminance) - lum = 0.299 * result[:, :, 0] + 0.587 * result[:, :, 1] + 0.114 * result[:, :, 2] - - # Create bloom mask - if soft_threshold: - # Soft threshold - gradual falloff - bloom_mask = np.clip((lum - threshold) / (255 - threshold + 1e-6), 0, 1) - else: - # Hard threshold - bloom_mask = (lum > threshold).astype(np.float32) - - # Extract bright areas - bloom = result * bloom_mask[:, :, np.newaxis] - - # Apply blur to create glow - ksize = radius * 2 + 1 - bloom = cv2.GaussianBlur(bloom, (ksize, ksize), 0) - - # Apply color tint - if isinstance(color_tint, (list, tuple)) and len(color_tint) >= 3: - tint = np.array(color_tint[:3], dtype=np.float32) / 255.0 - for c in range(3): - 
bloom[:, :, c] *= tint[c] - - # Add bloom to original (screen blend) - result = result + bloom * intensity - - return np.clip(result, 0, 255).astype(np.uint8), state diff --git a/effects/bloom.sexp b/effects/bloom.sexp deleted file mode 100644 index 50fcfdc..0000000 --- a/effects/bloom.sexp +++ /dev/null @@ -1,21 +0,0 @@ -;; Bloom effect - glow around bright areas -;; Usage: python3 plan.py effects/bloom.sexp -p intensity=0.6 -p radius=20 | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; intensity: bloom strength (0.0 to 2.0, default: 0.6) -;; radius: bloom spread in pixels (5 to 50, default: 20) - -(recipe "bloom" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect bloom :path "sexp_effects/effects/bloom.sexp") - - ;; Default parameters (overridden by -p intensity=N -p radius=N) - (def intensity 0.6) - (def radius 20) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect bloom :intensity intensity :radius radius))) - result) diff --git a/effects/blur.py b/effects/blur.py deleted file mode 100644 index 49aad81..0000000 --- a/effects/blur.py +++ /dev/null @@ -1,54 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect blur -@version 1.0.0 -@author artdag - -@description -Gaussian blur effect. Softens the image by averaging neighboring pixels. -Useful for dreamy effects or reducing detail on quiet parts. - -@param radius float - @range 0 50 - @default 5 - Blur radius in pixels. Higher = more blur. Must be positive. - -@example - (effect blur :radius 10) - -@example - ;; Blur on quiet, sharp on loud - (effect blur :radius (bind energy :range [20 0])) -""" - -import numpy as np -import cv2 - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply Gaussian blur to a video frame. 
- - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - radius: blur radius in pixels (default 5) - state: Persistent state dict (unused) - - Returns: - Tuple of (processed_frame, new_state) - """ - radius = params.get("radius", 5) - - if radius <= 0: - return frame, state - - # Kernel size must be odd - ksize = int(radius) * 2 + 1 - - result = cv2.GaussianBlur(frame, (ksize, ksize), 0) - - return result, state diff --git a/effects/blur.sexp b/effects/blur.sexp deleted file mode 100644 index 5992559..0000000 --- a/effects/blur.sexp +++ /dev/null @@ -1,20 +0,0 @@ -;; Blur effect - gaussian blur -;; Usage: python3 plan.py effects/blur.sexp -p radius=10 | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; radius: blur radius in pixels (1 to 50, default: 10) -;; higher = more blurry - -(recipe "blur" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect blur :path "sexp_effects/effects/blur.sexp") - - ;; Default parameter (overridden by -p radius=N) - (def radius 10) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect blur :radius radius))) - result) diff --git a/effects/brightness.py b/effects/brightness.py deleted file mode 100644 index 11a5f0c..0000000 --- a/effects/brightness.py +++ /dev/null @@ -1,52 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy"] -# /// -""" -@effect brightness -@version 1.0.0 -@author artdag - -@description -Adjusts the brightness of each frame by multiplying pixel values. -Values > 1.0 brighten, < 1.0 darken. Useful for pulse effects synced to music. - -@param factor float - @range 0 3 - @default 1.0 - Brightness multiplier. 0 = black, 1 = unchanged, 2 = double brightness. - Bind to bass/energy for reactive brightness pulses. 
- -@example - (effect brightness :factor 1.5) - -@example - ;; Pulse brighter on bass hits - (effect brightness :factor (bind bass :range [1.0 2.0] :transform sqrt)) -""" - -import numpy as np - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Adjust brightness of a video frame. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - factor: brightness multiplier (default 1.0) - state: Persistent state dict (unused) - - Returns: - Tuple of (processed_frame, new_state) - """ - factor = params.get("factor", 1.0) - - if factor == 1.0: - return frame, state - - # Apply brightness multiplier with clipping - result = np.clip(frame.astype(np.float32) * factor, 0, 255).astype(np.uint8) - - return result, state diff --git a/effects/brightness.sexp b/effects/brightness.sexp deleted file mode 100644 index 9ef1ccc..0000000 --- a/effects/brightness.sexp +++ /dev/null @@ -1,20 +0,0 @@ -;; Brightness effect - adjust image brightness -;; Usage: python3 plan.py effects/brightness.sexp [-p amount=30] | python3 execute.py - -d . 
-o output.mp4 -;; -;; Parameters: -;; amount: brightness adjustment (-255 to 255, default: 30) -;; negative = darker, positive = brighter - -(recipe "brightness" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect brightness :path "sexp_effects/effects/brightness.sexp") - - ;; Default parameter (overridden by -p amount=N) - (def amount 30) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect brightness :amount amount))) - result) diff --git a/effects/cartoon.py b/effects/cartoon.py deleted file mode 100644 index de362e2..0000000 --- a/effects/cartoon.py +++ /dev/null @@ -1,117 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "scipy"] -# /// -""" -@effect cartoon -@version 1.0.0 -@author artdag - -@description -Cartoon / Cel-shaded effect. Simplifies colors into flat regions -and adds dark edge outlines for a hand-drawn cartoon appearance. - -@param detail float - @range 0.1 1.0 - @default 0.5 - Edge detection sensitivity. Higher = more edges detected. - -@param edge_thickness int - @range 1 5 - @default 1 - Outline thickness in pixels. - -@param color_levels int - @range 2 32 - @default 6 - Number of color levels per channel. - -@param edge_color list - @default [0, 0, 0] - RGB color for edges (default black). - -@param blur_size int - @range 0 10 - @default 2 - Pre-blur for smoother color regions. - -@example - (effect cartoon :detail 0.6 :color_levels 4) - -@example - ;; Thick outlines, fewer colors - (effect cartoon :edge_thickness 3 :color_levels 3 :blur_size 4) -""" - -import numpy as np -from scipy import ndimage - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply cartoon effect to a video frame. 
- - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - detail: edge sensitivity 0.1-1.0 (default 0.5) - - edge_thickness: outline thickness (default 1) - - color_levels: posterization levels (default 6) - - edge_color: RGB tuple (default [0,0,0]) - - blur_size: pre-blur amount (default 2) - state: Persistent state dict - - Returns: - Tuple of (processed_frame, new_state) - """ - detail = np.clip(params.get("detail", 0.5), 0.1, 1.0) - edge_thickness = max(1, min(int(params.get("edge_thickness", 1)), 5)) - color_levels = max(2, min(int(params.get("color_levels", 6)), 32)) - edge_color = params.get("edge_color", [0, 0, 0]) - blur_size = max(0, int(params.get("blur_size", 2))) - - if state is None: - state = {} - - h, w = frame.shape[:2] - result = frame.copy().astype(np.float32) - - # Step 1: Blur to reduce noise and create smoother regions - if blur_size > 0: - for c in range(3): - result[:, :, c] = ndimage.uniform_filter(result[:, :, c], size=blur_size) - - # Step 2: Posterize colors (reduce to N levels) - step = 256 / color_levels - result = (np.floor(result / step) * step).astype(np.uint8) - - # Step 3: Detect edges using Sobel - gray = np.mean(frame, axis=2).astype(np.float32) - - sobel_x = ndimage.sobel(gray, axis=1) - sobel_y = ndimage.sobel(gray, axis=0) - edges = np.sqrt(sobel_x**2 + sobel_y**2) - - # Normalize and threshold - edge_max = edges.max() - if edge_max > 0: - edges = edges / edge_max - - edge_threshold = 1.0 - detail - edge_mask = edges > edge_threshold - - # Dilate edges for thickness - if edge_thickness > 1: - struct = ndimage.generate_binary_structure(2, 1) - for _ in range(edge_thickness - 1): - edge_mask = ndimage.binary_dilation(edge_mask, structure=struct) - - # Step 4: Apply edge color - if isinstance(edge_color, (list, tuple)) and len(edge_color) >= 3: - color = np.array(edge_color[:3], dtype=np.uint8) - else: - color = np.array([0, 0, 0], dtype=np.uint8) - - result[edge_mask] = color - - return 
result, state diff --git a/effects/chromatic.py b/effects/chromatic.py deleted file mode 100644 index 911beab..0000000 --- a/effects/chromatic.py +++ /dev/null @@ -1,115 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect chromatic -@version 1.0.0 -@author artdag - -@description -Chromatic aberration effect. Creates color fringing by offsetting -RGB channels radially from the center (lens distortion simulation). - -@param strength float - @range 0 50 - @default 10 - Aberration strength. Bind to bass for reactive effect. - -@param center_x float - @range 0 1 - @default 0.5 - Aberration center X. - -@param center_y float - @range 0 1 - @default 0.5 - Aberration center Y. - -@param radial bool - @default true - If true, aberration increases from center to edges. - -@example - (effect chromatic :strength 20) - -@example - ;; Beat-reactive chromatic aberration - (effect chromatic :strength (bind bass :range [0 30])) -""" - -import numpy as np -import cv2 - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply chromatic aberration effect to a video frame. 
- - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - strength: aberration amount (default 10) - - center_x: center X 0-1 (default 0.5) - - center_y: center Y 0-1 (default 0.5) - - radial: increase from center (default True) - state: Persistent state dict - - Returns: - Tuple of (processed_frame, new_state) - """ - strength = params.get("strength", 10) - center_x = params.get("center_x", 0.5) - center_y = params.get("center_y", 0.5) - radial = params.get("radial", True) - - if state is None: - state = {} - - if strength == 0: - return frame, state - - h, w = frame.shape[:2] - r, g, b = frame[:, :, 0], frame[:, :, 1], frame[:, :, 2] - - if radial: - # Create distance-from-center map - y_coords, x_coords = np.ogrid[:h, :w] - cx, cy = w * center_x, h * center_y - dist = np.sqrt((x_coords - cx)**2 + (y_coords - cy)**2) - max_dist = np.sqrt(cx**2 + cy**2) - dist_normalized = (dist / max_dist).astype(np.float32) - - # Create coordinate maps for remapping - map_x = np.tile(np.arange(w, dtype=np.float32), (h, 1)) - map_y = np.tile(np.arange(h, dtype=np.float32).reshape(-1, 1), (1, w)) - - # Direction from center - dx = (map_x - cx) / (dist + 1e-6) - dy = (map_y - cy) / (dist + 1e-6) - - # Apply radial offset to red channel (outward) - r_offset = strength * dist_normalized - r_map_x = (map_x + dx * r_offset).astype(np.float32) - r_map_y = (map_y + dy * r_offset).astype(np.float32) - r_shifted = cv2.remap(r, r_map_x, r_map_y, - cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE) - - # Apply radial offset to blue channel (inward) - b_offset = -strength * dist_normalized - b_map_x = (map_x + dx * b_offset).astype(np.float32) - b_map_y = (map_y + dy * b_offset).astype(np.float32) - b_shifted = cv2.remap(b, b_map_x, b_map_y, - cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE) - - return np.stack([r_shifted, g, b_shifted], axis=-1).astype(np.uint8), state - else: - # Simple uniform offset - offset = int(strength) - M_r = np.float32([[1, 
0, offset], [0, 1, 0]]) - M_b = np.float32([[1, 0, -offset], [0, 1, 0]]) - - r_shifted = cv2.warpAffine(r, M_r, (w, h), borderMode=cv2.BORDER_REPLICATE) - b_shifted = cv2.warpAffine(b, M_b, (w, h), borderMode=cv2.BORDER_REPLICATE) - - return np.stack([r_shifted, g, b_shifted], axis=-1).astype(np.uint8), state diff --git a/effects/color-adjust.sexp b/effects/color-adjust.sexp deleted file mode 100644 index d55f74d..0000000 --- a/effects/color-adjust.sexp +++ /dev/null @@ -1,21 +0,0 @@ -;; Color adjust effect - combined brightness and contrast -;; Usage: python3 plan.py effects/color-adjust.sexp -p brightness=20 -p contrast=1.2 | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; brightness: brightness adjustment (-255 to 255, default: 20) -;; contrast: contrast multiplier (0.0 to 3.0, default: 1.2) - -(recipe "color-adjust" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect color-adjust :path "sexp_effects/effects/color-adjust.sexp") - - ;; Default parameters (overridden by -p brightness=N -p contrast=N) - (def brightness 20) - (def contrast 1.2) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect color-adjust :brightness brightness :contrast contrast))) - result) diff --git a/effects/color_cycle.py b/effects/color_cycle.py deleted file mode 100644 index 7483bb8..0000000 --- a/effects/color_cycle.py +++ /dev/null @@ -1,104 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect color_cycle -@version 1.0.0 -@author artdag - -@description -Color cycle effect. Shifts all hues over time creating psychedelic -rainbow cycling. Great for trippy visuals. - -@param speed float - @range 0 10 - @default 1 - Cycle speed (rotations per second). - -@param offset float - @range 0 360 - @default 0 - Initial hue offset in degrees. 
- -@param saturation_boost float - @range 0 2 - @default 1 - Saturation multiplier. - -@param mode string - @enum all highlights shadows midtones - @default all - Which tones to affect. - -@example - (effect color_cycle :speed 0.5) - -@example - ;; Beat-synced color shift - (effect color_cycle :offset (bind beat_position :range [0 360])) -""" - -import numpy as np -import cv2 - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply color cycle effect to a video frame. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - speed: rotations per second (default 1) - - offset: initial hue offset (default 0) - - saturation_boost: saturation multiplier (default 1) - - mode: which tones to affect (default all) - state: Persistent state dict - - Returns: - Tuple of (processed_frame, new_state) - """ - speed = params.get("speed", 1) - offset = params.get("offset", 0) - saturation_boost = max(0, min(params.get("saturation_boost", 1), 2)) - mode = params.get("mode", "all") - t = params.get("_time", 0) - - if state is None: - state = {} - - # Calculate hue shift - hue_shift = int((offset + speed * t * 360) % 360) - - # Convert to HSV (OpenCV uses BGR, our frame is RGB) - frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) - hsv = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV).astype(np.float32) - - if mode == "all": - # Shift all hues - hsv[:, :, 0] = (hsv[:, :, 0] + hue_shift / 2) % 180 - hsv[:, :, 1] = np.clip(hsv[:, :, 1] * saturation_boost, 0, 255) - else: - # Calculate luminance mask - lum = hsv[:, :, 2] / 255.0 - - if mode == "highlights": - mask = np.clip((lum - 0.67) * 3, 0, 1) - elif mode == "shadows": - mask = np.clip(1 - lum * 3, 0, 1) - else: # midtones - shadow_mask = np.clip(1 - lum * 3, 0, 1) - highlight_mask = np.clip((lum - 0.67) * 3, 0, 1) - mask = 1 - shadow_mask - highlight_mask - - # Apply selective hue shift - shifted_hue = (hsv[:, :, 0] + hue_shift / 2) % 180 - hsv[:, :, 0] = 
hsv[:, :, 0] * (1 - mask) + shifted_hue * mask - - # Convert back - hsv = np.clip(hsv, 0, 255).astype(np.uint8) - result_bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) - result = cv2.cvtColor(result_bgr, cv2.COLOR_BGR2RGB) - - return result, state diff --git a/effects/color_cycle.sexp b/effects/color_cycle.sexp deleted file mode 100644 index c3f3901..0000000 --- a/effects/color_cycle.sexp +++ /dev/null @@ -1,20 +0,0 @@ -;; Color cycle effect - animated hue rotation over time -;; Usage: python3 plan.py effects/color_cycle.sexp -p speed=2 | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; speed: rotation speed multiplier (0.1 to 10, default: 2) -;; higher = faster color cycling - -(recipe "color_cycle" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect color_cycle :path "sexp_effects/effects/color_cycle.sexp") - - ;; Default parameter (overridden by -p speed=N) - (def speed 2) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect color_cycle :speed speed))) - result) diff --git a/effects/color_grade.py b/effects/color_grade.py deleted file mode 100644 index 38ddf78..0000000 --- a/effects/color_grade.py +++ /dev/null @@ -1,139 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect color_grade -@version 1.0.0 -@author artdag - -@description -Color grading effect. Applies cinematic color adjustments including -shadows/midtones/highlights tinting, lift/gamma/gain, and temperature. - -@param shadows list - @default [0, 0, 0] - RGB tint for dark areas. - -@param midtones list - @default [0, 0, 0] - RGB tint for middle tones. - -@param highlights list - @default [0, 0, 0] - RGB tint for bright areas. - -@param lift float - @range -0.5 0.5 - @default 0 - Raise/lower shadow levels. - -@param gamma float - @range 0.5 2 - @default 1 - Midtone brightness curve. 
- -@param gain float - @range 0.5 2 - @default 1 - Highlight intensity. - -@param temperature float - @range -100 100 - @default 0 - Color temperature (-100 = cool/blue, +100 = warm/orange). - -@param tint float - @range -100 100 - @default 0 - Green/magenta tint (-100 = green, +100 = magenta). - -@example - (effect color_grade :temperature 30 :shadows [0 0 20]) - -@example - ;; Cinematic teal-orange look - (effect color_grade :shadows [0 10 20] :highlights [20 10 0]) -""" - -import numpy as np -import cv2 - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply color grading to a video frame. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - state: Persistent state dict - - Returns: - Tuple of (processed_frame, new_state) - """ - shadows = params.get("shadows", [0, 0, 0]) - midtones = params.get("midtones", [0, 0, 0]) - highlights = params.get("highlights", [0, 0, 0]) - lift = params.get("lift", 0) - gamma = max(0.5, min(params.get("gamma", 1), 2)) - gain = max(0.5, min(params.get("gain", 1), 2)) - temperature = params.get("temperature", 0) - tint = params.get("tint", 0) - - if state is None: - state = {} - - result = frame.astype(np.float32) / 255.0 - - # Apply lift (shadows) - result = result + lift - - # Apply gamma (midtones) - result = np.power(np.clip(result, 0.001, 1), 1 / gamma) - - # Apply gain (highlights) - result = result * gain - - # Convert tints to float - if isinstance(shadows, (list, tuple)) and len(shadows) >= 3: - shadows = np.array(shadows[:3], dtype=np.float32) / 255.0 - else: - shadows = np.zeros(3, dtype=np.float32) - - if isinstance(midtones, (list, tuple)) and len(midtones) >= 3: - midtones = np.array(midtones[:3], dtype=np.float32) / 255.0 - else: - midtones = np.zeros(3, dtype=np.float32) - - if isinstance(highlights, (list, tuple)) and len(highlights) >= 3: - highlights = np.array(highlights[:3], dtype=np.float32) / 255.0 - else: - highlights = np.zeros(3, 
dtype=np.float32) - - # Calculate luminance for zone-based grading - lum = 0.299 * result[:, :, 0] + 0.587 * result[:, :, 1] + 0.114 * result[:, :, 2] - - # Create zone masks - shadow_mask = np.clip(1 - lum * 3, 0, 1)[:, :, np.newaxis] - highlight_mask = np.clip((lum - 0.67) * 3, 0, 1)[:, :, np.newaxis] - midtone_mask = 1 - shadow_mask - highlight_mask - - # Apply zone tints - for c in range(3): - result[:, :, c] += shadows[c] * shadow_mask[:, :, 0] - result[:, :, c] += midtones[c] * midtone_mask[:, :, 0] - result[:, :, c] += highlights[c] * highlight_mask[:, :, 0] - - # Apply temperature (blue <-> orange) - if temperature != 0: - temp_shift = temperature / 100.0 - result[:, :, 0] += temp_shift * 0.3 # Red - result[:, :, 2] -= temp_shift * 0.3 # Blue - - # Apply tint (green <-> magenta) - if tint != 0: - tint_shift = tint / 100.0 - result[:, :, 1] -= tint_shift * 0.2 # Green - - return (np.clip(result, 0, 1) * 255).astype(np.uint8), state diff --git a/effects/contrast.py b/effects/contrast.py deleted file mode 100644 index d14c497..0000000 --- a/effects/contrast.py +++ /dev/null @@ -1,52 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy"] -# /// -""" -@effect contrast -@version 1.0.0 -@author artdag - -@description -Adjusts contrast by scaling pixel values around the midpoint (128). -Higher values increase contrast, lower values flatten the image. - -@param factor float - @range 0 3 - @default 1.0 - Contrast multiplier. 0.5 = low contrast, 1 = unchanged, 2 = high contrast. - -@example - (effect contrast :factor 1.5) - -@example - ;; Dramatic contrast on energy peaks - (effect contrast :factor (bind energy :range [1.0 2.0])) -""" - -import numpy as np - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Adjust contrast of a video frame. 
- - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - factor: contrast multiplier (default 1.0) - state: Persistent state dict (unused) - - Returns: - Tuple of (processed_frame, new_state) - """ - factor = params.get("factor", 1.0) - - if factor == 1.0: - return frame, state - - # Adjust contrast around midpoint (128) - img_float = frame.astype(np.float32) - result = 128 + factor * (img_float - 128) - - return np.clip(result, 0, 255).astype(np.uint8), state diff --git a/effects/contrast.sexp b/effects/contrast.sexp deleted file mode 100644 index 2c4a2ef..0000000 --- a/effects/contrast.sexp +++ /dev/null @@ -1,20 +0,0 @@ -;; Contrast effect - adjust image contrast -;; Usage: python3 plan.py effects/contrast.sexp -p amount=1.5 | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; amount: contrast multiplier (0.0 to 3.0, default: 1.5) -;; <1 = less contrast, >1 = more contrast - -(recipe "contrast" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect contrast :path "sexp_effects/effects/contrast.sexp") - - ;; Default parameter (overridden by -p amount=N) - (def amount 1.5) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect contrast :amount amount))) - result) diff --git a/effects/crt.py b/effects/crt.py deleted file mode 100644 index 5690c05..0000000 --- a/effects/crt.py +++ /dev/null @@ -1,193 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect crt -@version 1.0.0 -@author artdag - -@description -CRT / Scanlines effect. Simulates CRT monitor aesthetics with visible -scan lines, optional RGB subpixels, barrel distortion, and vignette. - -@param line_spacing int - @range 1 10 - @default 2 - Pixels between scanlines. - -@param line_opacity float - @range 0 1 - @default 0.3 - Darkness of scanlines. 
- -@param rgb_subpixels bool - @default false - Show RGB subpixel pattern. - -@param curvature float - @range 0 0.5 - @default 0 - Barrel distortion amount for curved screen look. - -@param vignette float - @range 0 1 - @default 0 - Dark corners effect. - -@param bloom float - @range 0 1 - @default 0 - Glow/blur on bright areas. - -@param flicker float - @range 0 0.3 - @default 0 - Brightness variation. - -@param seed int - @default 42 - Random seed for flicker. - -@state rng DeterministicRNG - Random number generator for flicker. - -@example - (effect crt :line_spacing 2 :line_opacity 0.4) - -@example - ;; Full retro CRT look - (effect crt :curvature 0.2 :vignette 0.3 :rgb_subpixels true :bloom 0.2) -""" - -import numpy as np -import cv2 -from pathlib import Path -import sys - -# Import DeterministicRNG from same directory -_effects_dir = Path(__file__).parent -if str(_effects_dir) not in sys.path: - sys.path.insert(0, str(_effects_dir)) -from random import DeterministicRNG - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply CRT effect to a video frame. 
- - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - state: Persistent state dict - - Returns: - Tuple of (processed_frame, new_state) - """ - line_spacing = max(1, int(params.get("line_spacing", 2))) - line_opacity = params.get("line_opacity", 0.3) - rgb_subpixels = params.get("rgb_subpixels", False) - curvature = params.get("curvature", 0) - vignette = params.get("vignette", 0) - bloom = params.get("bloom", 0) - flicker = params.get("flicker", 0) - seed = int(params.get("seed", 42)) - - if state is None: - state = {} - - # Initialize RNG - if "rng" not in state: - state["rng"] = DeterministicRNG(seed) - rng = state["rng"] - - h, w = frame.shape[:2] - result = frame.astype(np.float32).copy() - - # Apply barrel distortion (curvature) - if curvature > 0: - result = _apply_curvature(result, curvature) - - # Apply bloom (glow on bright areas) - if bloom > 0: - result = _apply_bloom(result, bloom) - - # Apply scanlines - if line_opacity > 0: - for y in range(0, h, line_spacing): - result[y, :] = result[y, :] * (1 - line_opacity) - - # Apply RGB subpixel pattern - if rgb_subpixels: - for x in range(w): - col_type = x % 3 - if col_type == 0: - result[:, x, 0] *= 1.2 - result[:, x, 1] *= 0.8 - result[:, x, 2] *= 0.8 - elif col_type == 1: - result[:, x, 0] *= 0.8 - result[:, x, 1] *= 1.2 - result[:, x, 2] *= 0.8 - else: - result[:, x, 0] *= 0.8 - result[:, x, 1] *= 0.8 - result[:, x, 2] *= 1.2 - - # Apply vignette - if vignette > 0: - y_coords, x_coords = np.ogrid[:h, :w] - center_x, center_y = w / 2, h / 2 - dist = np.sqrt((x_coords - center_x)**2 + (y_coords - center_y)**2) - max_dist = np.sqrt(center_x**2 + center_y**2) - vignette_mask = 1 - (dist / max_dist) * vignette - vignette_mask = np.clip(vignette_mask, 0, 1) - result = result * vignette_mask[:, :, np.newaxis] - - # Apply flicker - if flicker > 0: - flicker_amount = 1.0 + rng.uniform(-flicker, flicker) - result = result * flicker_amount - - return np.clip(result, 0, 
255).astype(np.uint8), state - - -def _apply_curvature(frame: np.ndarray, strength: float) -> np.ndarray: - """Apply barrel distortion.""" - h, w = frame.shape[:2] - - y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) - - # Normalize to -1 to 1 - x_norm = (x_coords - w / 2) / (w / 2) - y_norm = (y_coords - h / 2) / (h / 2) - - # Calculate radius - r = np.sqrt(x_norm**2 + y_norm**2) - - # Apply barrel distortion - r_distorted = r * (1 + strength * r**2) - - # Scale factor - scale = np.where(r > 0, r_distorted / r, 1) - - # New coordinates - new_x = (x_norm * scale * (w / 2) + w / 2).astype(np.float32) - new_y = (y_norm * scale * (h / 2) + h / 2).astype(np.float32) - - result = cv2.remap(frame.astype(np.uint8), new_x, new_y, - cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, - borderValue=(0, 0, 0)) - return result.astype(np.float32) - - -def _apply_bloom(frame: np.ndarray, strength: float) -> np.ndarray: - """Apply bloom (glow on bright areas).""" - gray = cv2.cvtColor(frame.astype(np.uint8), cv2.COLOR_RGB2GRAY) - _, bright = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY) - - bloom = cv2.GaussianBlur(bright, (21, 21), 0) - bloom = cv2.cvtColor(bloom, cv2.COLOR_GRAY2RGB) - - result = frame + bloom.astype(np.float32) * strength * 0.5 - return result diff --git a/effects/crt.sexp b/effects/crt.sexp deleted file mode 100644 index 6876a44..0000000 --- a/effects/crt.sexp +++ /dev/null @@ -1,21 +0,0 @@ -;; CRT effect - old TV/monitor look with scanlines and vignette -;; Usage: python3 plan.py effects/crt.sexp -p line_spacing=3 -p vignette_amount=0.3 | python3 execute.py - -d . 
-o output.mp4 -;; -;; Parameters: -;; line_spacing: pixels between scanlines (1 to 8, default: 3) -;; vignette_amount: edge darkening strength (0 to 1, default: 0.3) - -(recipe "crt" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect crt :path "sexp_effects/effects/crt.sexp") - - ;; Default parameters (overridden by -p line_spacing=N -p vignette_amount=N) - (def line_spacing 3) - (def vignette_amount 0.3) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect crt :line_spacing line_spacing :vignette_amount vignette_amount))) - result) diff --git a/effects/datamosh.py b/effects/datamosh.py deleted file mode 100644 index 2e638cb..0000000 --- a/effects/datamosh.py +++ /dev/null @@ -1,141 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy"] -# /// -""" -@effect datamosh -@version 1.0.0 -@author artdag - -@description -Digital corruption / glitch block effect. Randomly corrupts rectangular -blocks by shifting, swapping, or duplicating from previous frames. -Simulates video compression artifacts. - -@param block_size int - @range 8 128 - @default 32 - Size of corruption blocks in pixels. - -@param corruption float - @range 0 1 - @default 0.3 - Probability of corrupting each block. Bind to energy for reactive glitch. - -@param max_offset int - @range 0 200 - @default 50 - Maximum pixel offset when shifting blocks. - -@param color_corrupt bool - @default true - Also apply color channel shifts to blocks. - -@param seed int - @default 42 - Random seed for deterministic glitch patterns. - -@state previous_frame ndarray - Stores previous frame for frame-blending corruption. - -@state rng DeterministicRNG - Random number generator for reproducible results. 
- -@example - (effect datamosh :corruption 0.4) - -@example - ;; Heavy glitch on energy peaks - (effect datamosh :corruption (bind energy :range [0 0.8]) :block_size 16) - -@example - ;; Reproducible glitch with seed - (effect datamosh :corruption 0.5 :seed 12345) -""" - -import numpy as np -from pathlib import Path -import sys - -# Import DeterministicRNG from same directory -_effects_dir = Path(__file__).parent -if str(_effects_dir) not in sys.path: - sys.path.insert(0, str(_effects_dir)) -from random import DeterministicRNG - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply datamosh/glitch block effect to a video frame. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - block_size: corruption block size (default 32) - - corruption: probability 0-1 (default 0.3) - - max_offset: max shift in pixels (default 50) - - color_corrupt: apply color shifts (default True) - state: Persistent state dict - - previous_frame: last frame for duplication effect - - Returns: - Tuple of (processed_frame, new_state) - """ - block_size = max(8, min(int(params.get("block_size", 32)), 128)) - corruption = max(0, min(params.get("corruption", 0.3), 1)) - max_offset = int(params.get("max_offset", 50)) - color_corrupt = params.get("color_corrupt", True) - seed = int(params.get("seed", 42)) - - if state is None: - state = {} - - # Initialize RNG - if "rng" not in state: - state["rng"] = DeterministicRNG(seed) - rng = state["rng"] - - if corruption == 0: - state["previous_frame"] = frame.copy() - return frame, state - - h, w = frame.shape[:2] - result = frame.copy() - prev_frame = state.get("previous_frame") - - # Process blocks - for by in range(0, h, block_size): - for bx in range(0, w, block_size): - bh = min(block_size, h - by) - bw = min(block_size, w - bx) - - if rng.uniform() < corruption: - corruption_type = rng.choice(["shift", "duplicate", "color", "swap"]) - - if corruption_type == "shift" 
and max_offset > 0: - ox = rng.randint(-max_offset, max_offset) - oy = rng.randint(-max_offset, max_offset) - src_x = max(0, min(bx + ox, w - bw)) - src_y = max(0, min(by + oy, h - bh)) - result[by:by+bh, bx:bx+bw] = frame[src_y:src_y+bh, src_x:src_x+bw] - - elif corruption_type == "duplicate" and prev_frame is not None: - if prev_frame.shape == frame.shape: - result[by:by+bh, bx:bx+bw] = prev_frame[by:by+bh, bx:bx+bw] - - elif corruption_type == "color" and color_corrupt: - block = result[by:by+bh, bx:bx+bw].copy() - shift = rng.randint(1, 3) - channel = rng.randint(0, 2) - block[:, :, channel] = np.roll(block[:, :, channel], shift, axis=0) - result[by:by+bh, bx:bx+bw] = block - - elif corruption_type == "swap": - other_bx = rng.randint(0, max(0, w - bw - 1)) - other_by = rng.randint(0, max(0, h - bh - 1)) - temp = result[by:by+bh, bx:bx+bw].copy() - result[by:by+bh, bx:bx+bw] = frame[other_by:other_by+bh, other_bx:other_bx+bw] - result[other_by:other_by+bh, other_bx:other_bx+bw] = temp - - state["previous_frame"] = frame.copy() - return result, state diff --git a/effects/datamosh.sexp b/effects/datamosh.sexp deleted file mode 100644 index 14f9ade..0000000 --- a/effects/datamosh.sexp +++ /dev/null @@ -1,21 +0,0 @@ -;; Datamosh effect - glitch art block corruption -;; Usage: python3 plan.py effects/datamosh.sexp -p corruption=0.5 -p block_size=24 | python3 execute.py - -d . 
-o output.mp4 -;; -;; Parameters: -;; corruption: corruption intensity (0.1 to 1.0, default: 0.5) -;; block_size: size of corrupted blocks (8 to 64, default: 24) - -(recipe "datamosh" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect datamosh :path "sexp_effects/effects/datamosh.sexp") - - ;; Default parameters (overridden by -p corruption=N -p block_size=N) - (def corruption 0.5) - (def block_size 24) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect datamosh :corruption corruption :block_size block_size))) - result) diff --git a/effects/displacement.py b/effects/displacement.py deleted file mode 100644 index 32251b4..0000000 --- a/effects/displacement.py +++ /dev/null @@ -1,170 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect displacement -@version 1.0.0 -@author artdag - -@description -Displacement effect. Warps the image based on a pattern (sine waves, -noise, or radial). Creates flowing, liquid-like distortions. - -@param amount float - @range 0 100 - @default 20 - Displacement strength in pixels. - -@param pattern string - @enum sine noise radial turbulence - @default sine - Displacement pattern type: - - sine: smooth sine wave - - noise: random displacement - - radial: swirl from center - - turbulence: multi-scale noise - -@param frequency float - @range 1 50 - @default 10 - Pattern frequency (waves per frame width). - -@param speed float - @range 0 10 - @default 1 - Animation speed. - -@param direction string - @enum horizontal vertical both - @default both - Displacement direction. - -@param seed int - @default 42 - Random seed for noise patterns. - -@state rng DeterministicRNG - Random number generator. 
- -@example - (effect displacement :amount 30 :pattern "sine" :frequency 5) - -@example - ;; Reactive turbulence - (effect displacement :amount (bind energy :range [10 50]) :pattern "turbulence") -""" - -import numpy as np -import cv2 -from pathlib import Path -import sys - -# Import DeterministicRNG from same directory -_effects_dir = Path(__file__).parent -if str(_effects_dir) not in sys.path: - sys.path.insert(0, str(_effects_dir)) -from random import DeterministicRNG - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply displacement effect to a video frame. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - state: Persistent state dict - - Returns: - Tuple of (processed_frame, new_state) - """ - amount = params.get("amount", 20) - pattern = params.get("pattern", "sine") - frequency = max(1, params.get("frequency", 10)) - speed = params.get("speed", 1) - direction = params.get("direction", "both") - seed = int(params.get("seed", 42)) - t = params.get("_time", 0) - - if state is None: - state = {} - - if amount == 0: - return frame, state - - # Initialize RNG - if "rng" not in state: - state["rng"] = DeterministicRNG(seed) - - h, w = frame.shape[:2] - - # Create base coordinate maps - map_x = np.tile(np.arange(w, dtype=np.float32), (h, 1)) - map_y = np.tile(np.arange(h, dtype=np.float32).reshape(-1, 1), (1, w)) - - # Generate displacement based on pattern - if pattern == "sine": - # Sine wave displacement - phase = t * speed * 2 * np.pi - if direction in ["horizontal", "both"]: - map_x = map_x + amount * np.sin(2 * np.pi * map_y / h * frequency + phase) - if direction in ["vertical", "both"]: - map_y = map_y + amount * np.sin(2 * np.pi * map_x / w * frequency + phase) - - elif pattern == "noise": - # Generate noise displacement - rng = state["rng"] - if "noise_x" not in state or state.get("noise_size") != (h, w): - state["noise_x"] = np.array([[rng.uniform(-1, 1) for _ in 
range(w)] for _ in range(h)], dtype=np.float32) - state["noise_y"] = np.array([[rng.uniform(-1, 1) for _ in range(w)] for _ in range(h)], dtype=np.float32) - state["noise_size"] = (h, w) - - if direction in ["horizontal", "both"]: - map_x = map_x + amount * state["noise_x"] - if direction in ["vertical", "both"]: - map_y = map_y + amount * state["noise_y"] - - elif pattern == "radial": - # Radial/swirl displacement - cx, cy = w / 2, h / 2 - y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) - dx = x_coords - cx - dy = y_coords - cy - dist = np.sqrt(dx**2 + dy**2) + 1e-6 - angle = np.arctan2(dy, dx) - - # Swirl amount varies with distance and time - swirl = amount * 0.01 * np.sin(dist / (w / frequency) + t * speed * 2 * np.pi) - - new_angle = angle + swirl - if direction in ["horizontal", "both"]: - map_x = cx + dist * np.cos(new_angle) - if direction in ["vertical", "both"]: - map_y = cy + dist * np.sin(new_angle) - - elif pattern == "turbulence": - # Multi-scale noise - rng = state["rng"] - disp_x = np.zeros((h, w), dtype=np.float32) - disp_y = np.zeros((h, w), dtype=np.float32) - - for scale in [1, 2, 4]: - sh, sw = h // scale, w // scale - noise_x = np.array([[rng.uniform(-1, 1) for _ in range(sw)] for _ in range(sh)], dtype=np.float32) - noise_y = np.array([[rng.uniform(-1, 1) for _ in range(sw)] for _ in range(sh)], dtype=np.float32) - if scale > 1: - noise_x = cv2.resize(noise_x, (w, h)) - noise_y = cv2.resize(noise_y, (w, h)) - disp_x += noise_x / scale - disp_y += noise_y / scale - - if direction in ["horizontal", "both"]: - map_x = map_x + amount * disp_x - if direction in ["vertical", "both"]: - map_y = map_y + amount * disp_y - - # Apply remapping - result = cv2.remap(frame, map_x, map_y, cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT) - - return result, state diff --git a/effects/echo.py b/effects/echo.py deleted file mode 100644 index 8a144de..0000000 --- a/effects/echo.py +++ /dev/null @@ -1,139 +0,0 @@ -# /// script -# requires-python = 
">=3.10" -# dependencies = ["numpy"] -# /// -""" -@effect echo -@version 1.0.0 -@author artdag - -@description -Motion trail / echo effect. Blends current frame with previous frames -to create ghosting/trailing effects. Great for fast movement scenes. - -Uses a frame buffer in state to store recent frames for blending. - -@param num_echoes int - @range 1 20 - @default 4 - Number of trailing frames to blend. - -@param decay float - @range 0 1 - @default 0.5 - Opacity ratio between successive echoes. 0.5 = each echo half as bright. - -@param blend_mode string - @enum blend add screen maximum - @default blend - How to combine echoes: - - blend: weighted average - - add: sum (can overexpose) - - screen: like add but resists overexposure - - maximum: brightest pixel wins - -@state frame_buffer list - Circular buffer of recent frames. - -@example - (effect echo :num_echoes 6 :decay 0.6) - -@example - ;; More echoes on energy - (effect echo :num_echoes (bind energy :range [2 10])) -""" - -import numpy as np - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply echo/motion trail effect to a video frame. 
- - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - num_echoes: number of trailing frames (default 4) - - decay: opacity decay ratio (default 0.5) - - blend_mode: blend/add/screen/maximum (default blend) - state: Persistent state dict - - frame_buffer: list of recent frames - - Returns: - Tuple of (processed_frame, new_state) - """ - num_echoes = max(1, min(int(params.get("num_echoes", 4)), 20)) - decay = max(0, min(params.get("decay", 0.5), 1)) - blend_mode = params.get("blend_mode", "blend") - - if state is None: - state = {} - - # Initialize frame buffer - if "frame_buffer" not in state: - state["frame_buffer"] = [] - - buffer = state["frame_buffer"] - - # Add current frame to buffer - buffer.append(frame.copy()) - - # Limit buffer size - max_buffer = num_echoes + 5 - while len(buffer) > max_buffer: - buffer.pop(0) - - # Collect frames and intensities for blending - frames = [] - intensities = [] - intensity = 1.0 - - # Current frame first, then older frames - for i in range(min(num_echoes + 1, len(buffer))): - idx = len(buffer) - 1 - i - if idx >= 0: - frames.append(buffer[idx].astype(np.float32)) - intensities.append(intensity) - intensity *= decay - - if not frames: - return frame, state - - # Blend frames according to mode - result = _blend_frames(frames, intensities, blend_mode) - - return np.clip(result, 0, 255).astype(np.uint8), state - - -def _blend_frames(frames, intensities, blend_mode): - """Blend multiple frames according to blend mode.""" - if not frames: - return frames[0] - - if blend_mode == "add": - result = np.zeros_like(frames[0]) - for frame, intensity in zip(frames, intensities): - result += frame * intensity - return result - - elif blend_mode == "screen": - result = np.zeros_like(frames[0]) - for frame, intensity in zip(frames, intensities): - weighted = (frame / 255.0) * intensity - result = 255 * (1 - (1 - result / 255.0) * (1 - weighted)) - return result - - elif blend_mode == "maximum": - 
result = frames[0] * intensities[0] - for frame, intensity in zip(frames[1:], intensities[1:]): - result = np.maximum(result, frame * intensity) - return result - - else: # blend - weighted average - total = sum(intensities) - if total == 0: - return frames[0] - result = np.zeros_like(frames[0]) - for frame, intensity in zip(frames, intensities): - result += frame * (intensity / total) - return result diff --git a/effects/echo.sexp b/effects/echo.sexp deleted file mode 100644 index 2cee146..0000000 --- a/effects/echo.sexp +++ /dev/null @@ -1,21 +0,0 @@ -;; Echo effect - temporal ghosting/trails -;; Usage: python3 plan.py effects/echo.sexp -p num_echoes=4 -p decay=0.5 | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; num_echoes: number of ghost frames (1 to 10, default: 4) -;; decay: fade rate per echo (0.2 to 0.9, default: 0.5) - -(recipe "echo" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect echo :path "sexp_effects/effects/echo.sexp") - - ;; Default parameters (overridden by -p num_echoes=N -p decay=N) - (def num_echoes 4) - (def decay 0.5) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect echo :num_echoes num_echoes :decay decay))) - result) diff --git a/effects/edge_detect.sexp b/effects/edge_detect.sexp deleted file mode 100644 index a1d6743..0000000 --- a/effects/edge_detect.sexp +++ /dev/null @@ -1,22 +0,0 @@ -;; Edge detect effect - Canny edge detection -;; Usage: python3 plan.py effects/edge_detect.sexp -p low=50 -p high=150 | python3 execute.py - -d . 
-o output.mp4 -;; -;; Parameters: -;; low: lower threshold (10 to 100, default: 50) -;; high: upper threshold (100 to 300, default: 150) -;; edges between low-high are kept if connected to strong edges - -(recipe "edge_detect" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect edge_detect :path "sexp_effects/effects/edge_detect.sexp") - - ;; Default parameters (overridden by -p low=N -p high=N) - (def low 50) - (def high 150) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect edge_detect :low low :high high))) - result) diff --git a/effects/emboss.py b/effects/emboss.py deleted file mode 100644 index 6900307..0000000 --- a/effects/emboss.py +++ /dev/null @@ -1,88 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect emboss -@version 1.0.0 -@author artdag - -@description -Emboss / relief effect. Creates a 3D raised appearance by highlighting -edges from a simulated light direction. Great for sculptural looks. - -@param strength float - @range 0.5 3 - @default 1.0 - Emboss intensity. - -@param direction float - @range 0 360 - @default 135 - Light direction in degrees. Bind to beat for rotating light. - -@param blend float - @range 0 1 - @default 0.3 - Blend with original (0 = full emboss, 1 = original). - -@example - (effect emboss :strength 1.5) - -@example - ;; Rotating light direction - (effect emboss :direction (bind beat_position :range [0 360])) -""" - -import numpy as np -import cv2 - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply emboss effect to a video frame. 
- - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - strength: emboss intensity (default 1.0) - - direction: light angle in degrees (default 135) - - blend: mix with original (default 0.3) - state: Persistent state dict (unused) - - Returns: - Tuple of (processed_frame, new_state) - """ - strength = params.get("strength", 1.0) - direction = params.get("direction", 135) - blend = params.get("blend", 0.3) - - # Calculate kernel based on direction - angle_rad = np.deg2rad(direction) - dx = np.cos(angle_rad) - dy = np.sin(angle_rad) - - # Create emboss kernel - kernel = np.array([ - [-strength * dy - strength * dx, -strength * dy, -strength * dy + strength * dx], - [-strength * dx, 1, strength * dx], - [strength * dy - strength * dx, strength * dy, strength * dy + strength * dx] - ], dtype=np.float32) - - # Apply to grayscale - gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY).astype(np.float32) - embossed = cv2.filter2D(gray, -1, kernel) - - # Normalize - embossed = embossed + 128 - embossed = np.clip(embossed, 0, 255) - - # Convert to RGB - embossed_rgb = cv2.cvtColor(embossed.astype(np.uint8), cv2.COLOR_GRAY2RGB) - - # Blend with original - if blend > 0: - result = frame.astype(np.float32) * blend + embossed_rgb.astype(np.float32) * (1 - blend) - return np.clip(result, 0, 255).astype(np.uint8), state - - return embossed_rgb, state diff --git a/effects/emboss.sexp b/effects/emboss.sexp deleted file mode 100644 index 81a3f74..0000000 --- a/effects/emboss.sexp +++ /dev/null @@ -1,19 +0,0 @@ -;; Emboss effect - 3D raised surface look -;; Usage: python3 plan.py effects/emboss.sexp -p strength=1.5 | python3 execute.py - -d . 
-o output.mp4 -;; -;; Parameters: -;; strength: emboss intensity (0.5 to 3.0, default: 1.5) - -(recipe "emboss" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect emboss :path "sexp_effects/effects/emboss.sexp") - - ;; Default parameter (overridden by -p strength=N) - (def strength 1.5) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect emboss :strength strength))) - result) diff --git a/effects/film_grain.py b/effects/film_grain.py deleted file mode 100644 index 445358b..0000000 --- a/effects/film_grain.py +++ /dev/null @@ -1,148 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect film_grain -@version 1.0.0 -@author artdag - -@description -Film grain / Noise effect. Adds realistic film grain texture. -Great for vintage aesthetics and subtle texture. - -@param intensity float - @range 0 1 - @default 0.2 - Noise intensity. Bind to energy for reactive grain. - -@param grain_size float - @range 0.5 5 - @default 1.0 - Size of grain particles. Larger = coarser grain. - -@param colored bool - @default false - Use colored noise instead of monochrome. - -@param temporal_variation float - @range 0 1 - @default 1.0 - How much grain changes frame-to-frame. 0 = static, 1 = full animation. - -@param seed int - @default 42 - Random seed for reproducible grain. - -@state rng DeterministicRNG - Random number generator for consistent grain. 
- -@example - (effect film_grain :intensity 0.3) - -@example - ;; Colored grain, reactive to energy - (effect film_grain :intensity (bind energy :range [0.1 0.5]) :colored true) -""" - -import numpy as np -import cv2 -from pathlib import Path -import sys - -# Import DeterministicRNG from same directory -_effects_dir = Path(__file__).parent -if str(_effects_dir) not in sys.path: - sys.path.insert(0, str(_effects_dir)) -from random import DeterministicRNG - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply film grain effect to a video frame. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - intensity: noise intensity 0-1 (default 0.2) - - grain_size: grain particle size (default 1.0) - - colored: use colored noise (default False) - - temporal_variation: grain animation 0-1 (default 1.0) - - seed: random seed (default 42) - state: Persistent state dict - - rng: DeterministicRNG instance - - frame_count: frame counter - - Returns: - Tuple of (processed_frame, new_state) - """ - intensity = params.get("intensity", 0.2) - grain_size = max(0.5, params.get("grain_size", 1.0)) - colored = params.get("colored", False) - temporal_var = params.get("temporal_variation", 1.0) - seed = int(params.get("seed", 42)) - - if state is None: - state = {} - - if intensity <= 0: - return frame, state - - # Initialize RNG - if "rng" not in state: - state["rng"] = DeterministicRNG(seed) - state["frame_count"] = 0 - state["static_noise"] = None - - rng = state["rng"] - frame_count = state["frame_count"] - - h, w = frame.shape[:2] - - # Calculate noise dimensions - if grain_size > 1: - noise_h = max(1, int(h / grain_size)) - noise_w = max(1, int(w / grain_size)) - else: - noise_h, noise_w = h, w - - # Generate noise based on temporal variation - if temporal_var >= 1.0 or state.get("static_noise") is None: - # Generate new noise - if colored: - noise = np.array([[[rng.gaussian(0, 1) for _ in range(3)] - 
for _ in range(noise_w)] - for _ in range(noise_h)]) * intensity * 50 - else: - noise_2d = np.array([[rng.gaussian(0, 1) - for _ in range(noise_w)] - for _ in range(noise_h)]) * intensity * 50 - noise = np.stack([noise_2d, noise_2d, noise_2d], axis=-1) - - if temporal_var < 1.0: - state["static_noise"] = noise - else: - # Blend static and new noise - static = state["static_noise"] - if colored: - new_noise = np.array([[[rng.gaussian(0, 1) for _ in range(3)] - for _ in range(noise_w)] - for _ in range(noise_h)]) * intensity * 50 - else: - noise_2d = np.array([[rng.gaussian(0, 1) - for _ in range(noise_w)] - for _ in range(noise_h)]) * intensity * 50 - new_noise = np.stack([noise_2d, noise_2d, noise_2d], axis=-1) - - noise = static * (1 - temporal_var) + new_noise * temporal_var - - # Scale noise up if using larger grain - if grain_size > 1: - noise = cv2.resize(noise.astype(np.float32), (w, h), interpolation=cv2.INTER_LINEAR) - - # Add noise to frame - result = frame.astype(np.float32) + noise - - state["frame_count"] = frame_count + 1 - - return np.clip(result, 0, 255).astype(np.uint8), state diff --git a/effects/film_grain.sexp b/effects/film_grain.sexp deleted file mode 100644 index 24984b8..0000000 --- a/effects/film_grain.sexp +++ /dev/null @@ -1,19 +0,0 @@ -;; Film grain effect - analog film noise -;; Usage: python3 plan.py effects/film_grain.sexp -p intensity=0.25 | python3 execute.py - -d . 
-o output.mp4 -;; -;; Parameters: -;; intensity: grain amount (0.05 to 0.5, default: 0.25) - -(recipe "film_grain" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect film_grain :path "sexp_effects/effects/film_grain.sexp") - - ;; Default parameter (overridden by -p intensity=N) - (def intensity 0.25) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect film_grain :intensity intensity))) - result) diff --git a/effects/fisheye.py b/effects/fisheye.py deleted file mode 100644 index a785598..0000000 --- a/effects/fisheye.py +++ /dev/null @@ -1,116 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect fisheye -@version 1.0.0 -@author artdag - -@description -Barrel/fisheye lens distortion. Positive values bulge outward (fisheye), -negative values pinch inward (pincushion). Great for emphasis effects. - -@param strength float - @range -1 1 - @default 0.3 - Distortion strength. Positive = fisheye bulge, negative = pincushion pinch. - -@param center_x float - @range 0 1 - @default 0.5 - Horizontal center of distortion (0 = left, 1 = right). - -@param center_y float - @range 0 1 - @default 0.5 - Vertical center of distortion (0 = top, 1 = bottom). - -@param zoom_correction bool - @default true - Automatically zoom to hide black edges on fisheye. - -@example - (effect fisheye :strength 0.5) - -@example - ;; Pulse fisheye on bass - (effect fisheye :strength (bind bass :range [0 0.8] :transform sqrt)) - -@example - ;; Pincushion effect - (effect fisheye :strength -0.3) -""" - -import numpy as np -import cv2 - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply fisheye/barrel distortion to a video frame. 
- - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - strength: distortion strength -1 to 1 (default 0.3) - - center_x: horizontal center 0-1 (default 0.5) - - center_y: vertical center 0-1 (default 0.5) - - zoom_correction: auto-zoom for fisheye (default True) - state: Persistent state dict (unused) - - Returns: - Tuple of (processed_frame, new_state) - """ - strength = params.get("strength", 0.3) - center_x = params.get("center_x", 0.5) - center_y = params.get("center_y", 0.5) - zoom_correction = params.get("zoom_correction", True) - - if strength == 0: - return frame, state - - h, w = frame.shape[:2] - - # Calculate center in pixels - cx = w * center_x - cy = h * center_y - - # Create coordinate grids - y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) - - # Normalize coordinates relative to center - x_norm = (x_coords - cx) / (w / 2) - y_norm = (y_coords - cy) / (h / 2) - - # Calculate radius from center - r = np.sqrt(x_norm**2 + y_norm**2) - - # Apply barrel/pincushion distortion - if strength > 0: - # Barrel distortion (fisheye) - r_distorted = r * (1 + strength * r**2) - else: - # Pincushion distortion - r_distorted = r / (1 - strength * r**2 + 0.001) - - # Calculate scale factor - scale = np.where(r > 0, r_distorted / r, 1) - - # Apply zoom correction to hide black edges - if zoom_correction and strength > 0: - zoom = 1 + strength * 0.5 - scale = scale / zoom - - # Calculate new coordinates - new_x = (x_norm * scale * (w / 2) + cx).astype(np.float32) - new_y = (y_norm * scale * (h / 2) + cy).astype(np.float32) - - # Remap - result = cv2.remap( - frame, new_x, new_y, - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_REFLECT - ) - - return result, state diff --git a/effects/fisheye.sexp b/effects/fisheye.sexp deleted file mode 100644 index 13accae..0000000 --- a/effects/fisheye.sexp +++ /dev/null @@ -1,20 +0,0 @@ -;; Fisheye effect - barrel/pincushion distortion -;; Usage: python3 plan.py effects/fisheye.sexp -p 
strength=0.5 | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; strength: distortion amount (-1.0 to 1.0, default: 0.5) -;; positive = barrel (bulge), negative = pincushion - -(recipe "fisheye" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect fisheye :path "sexp_effects/effects/fisheye.sexp") - - ;; Default parameter (overridden by -p strength=N) - (def strength 0.5) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect fisheye :strength strength))) - result) diff --git a/effects/flip.py b/effects/flip.py deleted file mode 100644 index 57c08f7..0000000 --- a/effects/flip.py +++ /dev/null @@ -1,64 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy"] -# /// -""" -@effect flip -@version 1.0.0 -@author artdag - -@description -Flips the frame horizontally and/or vertically. -Can be used for mirror effects or beat-triggered flips. - -@param horizontal bool - @default false - Flip horizontally (left-right mirror). - -@param vertical bool - @default false - Flip vertically (top-bottom mirror). - -@example - (effect flip :horizontal true) - -@example - (effect flip :vertical true) - -@example - (effect flip :horizontal true :vertical true) ; 180 degree rotation -""" - -import numpy as np - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Flip a video frame horizontally and/or vertically. 
- - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - horizontal: flip left-right (default False) - - vertical: flip top-bottom (default False) - state: Persistent state dict (unused) - - Returns: - Tuple of (processed_frame, new_state) - """ - horizontal = params.get("horizontal", False) - vertical = params.get("vertical", False) - - result = frame - - if horizontal: - result = np.flip(result, axis=1) - - if vertical: - result = np.flip(result, axis=0) - - # Ensure contiguous array after flips - if horizontal or vertical: - result = np.ascontiguousarray(result) - - return result, state diff --git a/effects/flip.sexp b/effects/flip.sexp deleted file mode 100644 index 9aac79a..0000000 --- a/effects/flip.sexp +++ /dev/null @@ -1,19 +0,0 @@ -;; Flip effect - flip image horizontally or vertically -;; Usage: python3 plan.py effects/flip.sexp -p direction=horizontal | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; direction: flip axis ("horizontal" or "vertical", default: "horizontal") - -(recipe "flip" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect flip :path "sexp_effects/effects/flip.sexp") - - ;; Default parameter (overridden by -p direction=horizontal|vertical) - (def direction "horizontal") - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect flip :direction direction))) - result) diff --git a/effects/grayscale.sexp b/effects/grayscale.sexp deleted file mode 100644 index e14bf10..0000000 --- a/effects/grayscale.sexp +++ /dev/null @@ -1,14 +0,0 @@ -;; Grayscale effect - converts to black and white -;; Usage: python3 plan.py effects/grayscale.sexp | python3 execute.py - -d . 
-o output.mp4 -;; No parameters - -(recipe "grayscale" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect grayscale :path "sexp_effects/effects/grayscale.sexp") - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect grayscale))) - result) diff --git a/effects/hue_shift.py b/effects/hue_shift.py deleted file mode 100644 index db35929..0000000 --- a/effects/hue_shift.py +++ /dev/null @@ -1,59 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect hue_shift -@version 1.0.0 -@author artdag - -@description -Rotates the hue of all colors by a specified amount (0-360 degrees). -Creates rainbow cycling effects when animated over time. - -@param shift float - @range 0 360 - @default 0 - Hue rotation in degrees. 0/360 = unchanged, 180 = complementary colors. - -@example - (effect hue_shift :shift 180) ; complementary colors - -@example - ;; Rainbow cycling based on time (use with frame counter in state) - (effect hue_shift :shift (bind beat_position :range [0 360])) -""" - -import numpy as np -import cv2 - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Shift hue of a video frame. 
- - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - shift: hue rotation in degrees (default 0) - state: Persistent state dict (unused) - - Returns: - Tuple of (processed_frame, new_state) - """ - shift = params.get("shift", 0) - - if shift == 0: - return frame, state - - # Convert RGB to HSV (OpenCV uses H: 0-179, S: 0-255, V: 0-255) - hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV) - - # Shift hue (OpenCV hue is 0-179, so divide by 2) - hue_shift = int((shift % 360) / 2) - hsv[:, :, 0] = (hsv[:, :, 0].astype(np.int16) + hue_shift) % 180 - - # Convert back to RGB - result = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB) - - return result, state diff --git a/effects/hue_shift.sexp b/effects/hue_shift.sexp deleted file mode 100644 index df1df59..0000000 --- a/effects/hue_shift.sexp +++ /dev/null @@ -1,20 +0,0 @@ -;; Hue shift effect - rotate colors around color wheel -;; Usage: python3 plan.py effects/hue_shift.sexp -p degrees=90 | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; degrees: hue rotation in degrees (0 to 360, default: 90) -;; 90 = quarter rotation, 180 = complementary colors - -(recipe "hue_shift" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect hue_shift :path "sexp_effects/effects/hue_shift.sexp") - - ;; Default parameter (overridden by -p degrees=N) - (def degrees 90) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect hue_shift :degrees degrees))) - result) diff --git a/effects/invert.py b/effects/invert.py deleted file mode 100644 index a92db36..0000000 --- a/effects/invert.py +++ /dev/null @@ -1,62 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy"] -# /// -""" -@effect invert -@version 1.0.0 -@author artdag - -@description -Inverts the colors of each frame. Can be partial (blended with original) -based on intensity parameter. 
Useful for beat-reactive flashing effects. - -@param intensity float - @range 0 100 - @default 100 - Intensity of the inversion effect (0 = original, 100 = fully inverted). - Bind to audio analysis for reactive effects. - -@example - (effect invert) - -@example - (effect invert :intensity 50) - -@example - ;; Beat-reactive inversion - (effect invert :intensity (bind bass :range [0 100] :transform sqrt)) -""" - -import numpy as np - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Invert colors of a video frame. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - intensity: 0-100, how much to invert (default 100) - state: Persistent state dict (unused for this effect) - - Returns: - Tuple of (processed_frame, new_state) - """ - intensity = params.get("intensity", 100) - - # Normalize intensity to 0-1 - t = intensity / 100.0 - - if t <= 0: - return frame, state - - if t >= 1: - return (255 - frame), state - - # Partial inversion: blend between original and inverted - inverted = 255 - frame - result = (frame * (1 - t) + inverted * t).astype(np.uint8) - - return result, state diff --git a/effects/invert.sexp b/effects/invert.sexp deleted file mode 100644 index a355967..0000000 --- a/effects/invert.sexp +++ /dev/null @@ -1,14 +0,0 @@ -;; Invert effect - inverts all colors -;; Usage: python3 plan.py effects/invert.sexp | python3 execute.py - -d . 
-o output.mp4 -;; No parameters - -(recipe "invert" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect invert :path "sexp_effects/effects/invert.sexp") - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect invert))) - result) diff --git a/effects/kaleidoscope.py b/effects/kaleidoscope.py deleted file mode 100644 index d998dd8..0000000 --- a/effects/kaleidoscope.py +++ /dev/null @@ -1,137 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect kaleidoscope -@version 1.0.0 -@author artdag - -@description -Kaleidoscope effect. Creates mesmerizing mandala-like patterns by -dividing the frame into pie-slice segments and reflecting them. -Great for psychedelic visuals. - -@param segments int - @range 3 16 - @default 6 - Number of symmetry segments. - -@param rotation float - @range 0 360 - @default 0 - Base rotation angle in degrees. - -@param rotation_speed float - @range -180 180 - @default 0 - Continuous rotation speed in degrees/second. - -@param center_x float - @range 0 1 - @default 0.5 - Center X position (0-1). - -@param center_y float - @range 0 1 - @default 0.5 - Center Y position (0-1). - -@param zoom float - @range 0.5 3.0 - @default 1.0 - Zoom factor for the source region. - -@state cumulative_rotation float - Tracks rotation over time. - -@example - (effect kaleidoscope :segments 8 :rotation_speed 30) - -@example - ;; Beat-reactive segments - (effect kaleidoscope :segments (bind bass :range [4 12]) :zoom 1.5) -""" - -import numpy as np -import cv2 - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply kaleidoscope effect to a video frame. 
- - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - segments: number of segments 3-16 (default 6) - - rotation: base rotation degrees (default 0) - - rotation_speed: degrees per second (default 0) - - center_x: center X 0-1 (default 0.5) - - center_y: center Y 0-1 (default 0.5) - - zoom: zoom factor 0.5-3 (default 1.0) - state: Persistent state dict - - Returns: - Tuple of (processed_frame, new_state) - """ - segments = max(3, min(int(params.get("segments", 6)), 16)) - rotation = params.get("rotation", 0) - rotation_speed = params.get("rotation_speed", 0) - center_x = params.get("center_x", 0.5) - center_y = params.get("center_y", 0.5) - zoom = max(0.5, min(params.get("zoom", 1.0), 3.0)) - - # Get time for animation - t = params.get("_time", 0) - - if state is None: - state = {} - - h, w = frame.shape[:2] - - # Calculate center in pixels - cx = int(w * center_x) - cy = int(h * center_y) - - # Total rotation including time-based animation - total_rotation = rotation + rotation_speed * t - - # Calculate the angle per segment - segment_angle = 2 * np.pi / segments - - # Create coordinate maps - y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) - - # Translate to center - x_centered = x_coords - cx - y_centered = y_coords - cy - - # Convert to polar coordinates - r = np.sqrt(x_centered**2 + y_centered**2) - theta = np.arctan2(y_centered, x_centered) - - # Apply rotation - theta = theta - np.deg2rad(total_rotation) - - # Fold angle into first segment and mirror - theta_normalized = theta % (2 * np.pi) - segment_idx = (theta_normalized / segment_angle).astype(int) - theta_in_segment = theta_normalized - segment_idx * segment_angle - - # Mirror alternating segments - mirror_mask = (segment_idx % 2) == 1 - theta_in_segment = np.where(mirror_mask, segment_angle - theta_in_segment, theta_in_segment) - - # Apply zoom - r = r / zoom - - # Convert back to Cartesian (source coordinates) - src_x = (r * 
np.cos(theta_in_segment) + cx).astype(np.float32) - src_y = (r * np.sin(theta_in_segment) + cy).astype(np.float32) - - # Remap - result = cv2.remap(frame, src_x, src_y, - cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT) - - return result, state diff --git a/effects/kaleidoscope.sexp b/effects/kaleidoscope.sexp deleted file mode 100644 index 7c039bc..0000000 --- a/effects/kaleidoscope.sexp +++ /dev/null @@ -1,21 +0,0 @@ -;; Kaleidoscope effect - radial mirror segments -;; Usage: python3 plan.py effects/kaleidoscope.sexp -p segments=6 -p rotation_speed=30 | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; segments: number of mirror segments (2 to 16, default: 6) -;; rotation_speed: rotation in degrees per second (0 to 180, default: 30) - -(recipe "kaleidoscope" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect kaleidoscope :path "sexp_effects/effects/kaleidoscope.sexp") - - ;; Default parameters (overridden by -p segments=N -p rotation_speed=N) - (def segments 6) - (def rotation_speed 30) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect kaleidoscope :segments segments :rotation_speed rotation_speed))) - result) diff --git a/effects/mirror.py b/effects/mirror.py deleted file mode 100644 index b412c22..0000000 --- a/effects/mirror.py +++ /dev/null @@ -1,86 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy"] -# /// -""" -@effect mirror -@version 1.0.0 -@author artdag - -@description -Creates mirror/reflection effects by duplicating and flipping portions -of the frame. Can create kaleidoscope-like symmetry effects. 
- -@param mode string - @enum left right top bottom quad - @default left - Mirror mode: - - left: Left half mirrored to right - - right: Right half mirrored to left - - top: Top half mirrored to bottom - - bottom: Bottom half mirrored to top - - quad: Four-way symmetry (kaleidoscope) - -@example - (effect mirror :mode "left") - -@example - (effect mirror :mode "quad") ; kaleidoscope effect -""" - -import numpy as np - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Create mirror/reflection effects. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - mode: mirror mode (default "left") - state: Persistent state dict (unused) - - Returns: - Tuple of (processed_frame, new_state) - """ - mode = params.get("mode", "left") - - h, w = frame.shape[:2] - result = frame.copy() - - if mode == "left": - # Mirror left half to right - half = w // 2 - result[:, half:] = np.flip(result[:, :half], axis=1)[:, :w - half] - - elif mode == "right": - # Mirror right half to left - half = w // 2 - result[:, :half] = np.flip(result[:, half:], axis=1)[:, :half] - - elif mode == "top": - # Mirror top half to bottom - half = h // 2 - result[half:, :] = np.flip(result[:half, :], axis=0)[:h - half, :] - - elif mode == "bottom": - # Mirror bottom half to top - half = h // 2 - result[:half, :] = np.flip(result[half:, :], axis=0)[:half, :] - - elif mode == "quad": - # Four-way symmetry: use top-left quadrant - half_h = h // 2 - half_w = w // 2 - - # Get top-left quadrant - tl = result[:half_h, :half_w] - - # Mirror to top-right - result[:half_h, half_w:] = np.flip(tl, axis=1)[:, :w - half_w] - - # Mirror top half to bottom - result[half_h:, :] = np.flip(result[:half_h, :], axis=0)[:h - half_h, :] - - return np.ascontiguousarray(result), state diff --git a/effects/mirror.sexp b/effects/mirror.sexp deleted file mode 100644 index b30008f..0000000 --- a/effects/mirror.sexp +++ /dev/null @@ -1,19 +0,0 @@ -;; Mirror 
effect - reflect image -;; Usage: python3 plan.py effects/mirror.sexp -p direction=horizontal | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; direction: mirror axis ("horizontal" or "vertical", default: "horizontal") - -(recipe "mirror" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect mirror :path "sexp_effects/effects/mirror.sexp") - - ;; Default parameter (overridden by -p direction=horizontal|vertical) - (def direction "horizontal") - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect mirror :direction direction))) - result) diff --git a/effects/motion_blur.py b/effects/motion_blur.py deleted file mode 100644 index 4f17f9d..0000000 --- a/effects/motion_blur.py +++ /dev/null @@ -1,122 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect motion_blur -@version 1.0.0 -@author artdag - -@description -Motion blur effect. Creates directional blur simulating camera or -subject motion. Great for speed effects. - -@param amount int - @range 1 100 - @default 20 - Blur length in pixels. - -@param angle float - @range 0 360 - @default 0 - Blur direction in degrees (0 = horizontal right). - -@param zoom bool - @default false - Radial zoom blur instead of directional. - -@param zoom_center_x float - @range 0 1 - @default 0.5 - Zoom blur center X. - -@param zoom_center_y float - @range 0 1 - @default 0.5 - Zoom blur center Y. - -@example - (effect motion_blur :amount 30 :angle 45) - -@example - ;; Zoom blur - (effect motion_blur :amount 20 :zoom true) -""" - -import numpy as np -import cv2 - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply motion blur effect to a video frame. 
- - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - amount: blur length (default 20) - - angle: blur direction (default 0) - - zoom: radial zoom blur (default False) - - zoom_center_x/y: zoom center (default 0.5) - state: Persistent state dict - - Returns: - Tuple of (processed_frame, new_state) - """ - amount = max(1, int(params.get("amount", 20))) - angle = params.get("angle", 0) - zoom = params.get("zoom", False) - zoom_center_x = params.get("zoom_center_x", 0.5) - zoom_center_y = params.get("zoom_center_y", 0.5) - - if state is None: - state = {} - - if amount <= 1: - return frame, state - - h, w = frame.shape[:2] - - if zoom: - # Radial zoom blur - cx, cy = int(w * zoom_center_x), int(h * zoom_center_y) - - # Accumulate multiple scaled versions - result = np.zeros_like(frame, dtype=np.float32) - num_samples = min(amount, 20) - - for i in range(num_samples): - scale = 1.0 + (i / num_samples) * (amount / 100.0) - M = cv2.getRotationMatrix2D((cx, cy), 0, scale) - scaled = cv2.warpAffine(frame, M, (w, h), borderMode=cv2.BORDER_REFLECT) - result += scaled.astype(np.float32) / num_samples - - return result.astype(np.uint8), state - else: - # Directional motion blur using a line kernel - angle_rad = np.deg2rad(angle) - dx = np.cos(angle_rad) - dy = np.sin(angle_rad) - - # Create motion blur kernel - kernel_size = amount - kernel = np.zeros((kernel_size, kernel_size), dtype=np.float32) - - # Draw a line in the kernel - cx, cy = kernel_size // 2, kernel_size // 2 - for i in range(kernel_size): - t = (i - cx) / max(1, cx) - x = int(cx + t * cx * dx) - y = int(cy + t * cy * dy) - if 0 <= x < kernel_size and 0 <= y < kernel_size: - kernel[y, x] = 1 - - # Normalize kernel - kernel_sum = kernel.sum() - if kernel_sum > 0: - kernel /= kernel_sum - - # Apply kernel - result = cv2.filter2D(frame, -1, kernel) - - return result, state diff --git a/effects/neon_glow.py b/effects/neon_glow.py deleted file mode 100644 index 
ebfa62d..0000000 --- a/effects/neon_glow.py +++ /dev/null @@ -1,130 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect neon_glow -@version 1.0.0 -@author artdag - -@description -Neon edge glow effect. Detects edges and applies a glowing colored outline. -Great for cyberpunk/synthwave aesthetics synced to music. - -@param glow_radius float - @range 0 50 - @default 15 - Blur radius for the glow. Bind to bass for pulsing glow. - -@param glow_intensity float - @range 0.5 5 - @default 2.0 - Brightness multiplier for the glow. - -@param edge_low float - @range 10 200 - @default 50 - Lower threshold for edge detection. - -@param edge_high float - @range 50 300 - @default 150 - Upper threshold for edge detection. - -@param color_r int - @range 0 255 - @default 0 - Red component of glow color. - -@param color_g int - @range 0 255 - @default 255 - Green component of glow color. - -@param color_b int - @range 0 255 - @default 255 - Blue component of glow color. - -@param background float - @range 0 1 - @default 0.3 - How much of the original image shows through (0 = glow only). - -@example - (effect neon_glow :glow_radius 20 :color_r 255 :color_g 0 :color_b 255) - -@example - ;; Pulsing cyan glow on bass - (effect neon_glow :glow_radius (bind bass :range [5 30] :transform sqrt)) -""" - -import numpy as np -import cv2 - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply neon glow effect to a video frame. 
- - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - glow_radius: blur radius (default 15) - - glow_intensity: brightness (default 2.0) - - edge_low: canny low threshold (default 50) - - edge_high: canny high threshold (default 150) - - color_r/g/b: glow color (default cyan 0,255,255) - - background: original visibility (default 0.3) - state: Persistent state dict (unused) - - Returns: - Tuple of (processed_frame, new_state) - """ - glow_radius = int(params.get("glow_radius", 15)) - glow_intensity = params.get("glow_intensity", 2.0) - edge_low = int(params.get("edge_low", 50)) - edge_high = int(params.get("edge_high", 150)) - color_r = int(params.get("color_r", 0)) - color_g = int(params.get("color_g", 255)) - color_b = int(params.get("color_b", 255)) - background = params.get("background", 0.3) - - h, w = frame.shape[:2] - color = np.array([color_r, color_g, color_b], dtype=np.float32) - - # Edge detection - gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) - blurred = cv2.GaussianBlur(gray, (5, 5), 0) - edges = cv2.Canny(blurred, edge_low, edge_high) - - # Create colored edge image - glow_base = np.zeros((h, w, 3), dtype=np.float32) - for c in range(3): - glow_base[:, :, c] = edges.astype(np.float32) * (color[c] / 255.0) - - # Apply blur for glow - if glow_radius > 0: - ksize = glow_radius * 2 + 1 - glow = cv2.GaussianBlur(glow_base, (ksize, ksize), 0) - else: - glow = glow_base - - # Intensify - glow = glow * glow_intensity - - # Add sharp edges on top - edge_layer = np.zeros((h, w, 3), dtype=np.float32) - for c in range(3): - edge_layer[:, :, c] = edges.astype(np.float32) * (color[c] / 255.0) * 255 - glow = np.maximum(glow, edge_layer) - - # Blend with original - if background > 0: - a = frame.astype(np.float32) / 255.0 * background - b = glow / 255.0 - result = (1 - (1 - a) * (1 - b)) * 255 # Screen blend - else: - result = glow - - return np.clip(result, 0, 255).astype(np.uint8), state diff --git 
a/effects/neon_glow.sexp b/effects/neon_glow.sexp deleted file mode 100644 index fcdb4c8..0000000 --- a/effects/neon_glow.sexp +++ /dev/null @@ -1,21 +0,0 @@ -;; Neon glow effect - glowing edges on dark background -;; Usage: python3 plan.py effects/neon_glow.sexp -p glow_radius=20 -p glow_intensity=2 | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; glow_radius: glow spread in pixels (5 to 50, default: 20) -;; glow_intensity: glow brightness (0.5 to 5, default: 2) - -(recipe "neon_glow" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect neon_glow :path "sexp_effects/effects/neon_glow.sexp") - - ;; Default parameters (overridden by -p glow_radius=N -p glow_intensity=N) - (def glow_radius 20) - (def glow_intensity 2) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect neon_glow :glow_radius glow_radius :glow_intensity glow_intensity))) - result) diff --git a/effects/noise.py b/effects/noise.py deleted file mode 100644 index 764c8e6..0000000 --- a/effects/noise.py +++ /dev/null @@ -1,137 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy"] -# /// -""" -@effect noise -@version 1.0.0 -@author artdag - -@description -Noise effect. Adds various types of noise to the image including -static, gaussian, salt & pepper, and more. - -@param intensity float - @range 0 1 - @default 0.2 - Noise intensity. - -@param mode string - @enum gaussian uniform salt_pepper scanline - @default gaussian - Type of noise: - - gaussian: smooth normal distribution - - uniform: flat random noise - - salt_pepper: random black/white pixels - - scanline: horizontal line noise - -@param colored bool - @default false - Use colored noise instead of monochrome. - -@param animate bool - @default true - Different noise each frame. - -@param seed int - @default 42 - Random seed for reproducible noise. 
- -@state rng DeterministicRNG - Random number generator. - -@example - (effect noise :intensity 0.3 :mode "gaussian") - -@example - ;; Static TV noise - (effect noise :intensity 0.5 :mode "uniform" :animate true) -""" - -import numpy as np -from pathlib import Path -import sys - -# Import DeterministicRNG from same directory -_effects_dir = Path(__file__).parent -if str(_effects_dir) not in sys.path: - sys.path.insert(0, str(_effects_dir)) -from random import DeterministicRNG - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply noise effect to a video frame. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - state: Persistent state dict - - Returns: - Tuple of (processed_frame, new_state) - """ - intensity = params.get("intensity", 0.2) - mode = params.get("mode", "gaussian") - colored = params.get("colored", False) - animate = params.get("animate", True) - seed = int(params.get("seed", 42)) - - if state is None: - state = {} - - if intensity <= 0: - return frame, state - - # Initialize RNG - if "rng" not in state: - state["rng"] = DeterministicRNG(seed) - rng = state["rng"] - - h, w = frame.shape[:2] - result = frame.astype(np.float32) - - if mode == "gaussian": - # Gaussian noise - if colored: - noise = np.array([[[rng.gaussian(0, intensity * 50) for _ in range(3)] - for _ in range(w)] - for _ in range(h)]) - else: - noise_2d = np.array([[rng.gaussian(0, intensity * 50) - for _ in range(w)] - for _ in range(h)]) - noise = np.stack([noise_2d, noise_2d, noise_2d], axis=-1) - result = result + noise - - elif mode == "uniform": - # Uniform random noise - if colored: - noise = np.array([[[rng.uniform(-intensity * 100, intensity * 100) for _ in range(3)] - for _ in range(w)] - for _ in range(h)]) - else: - noise_2d = np.array([[rng.uniform(-intensity * 100, intensity * 100) - for _ in range(w)] - for _ in range(h)]) - noise = np.stack([noise_2d, noise_2d, noise_2d], axis=-1) - result 
= result + noise - - elif mode == "salt_pepper": - # Salt and pepper noise - for y in range(h): - for x in range(w): - if rng.uniform() < intensity * 0.1: - if rng.uniform() < 0.5: - result[y, x] = [0, 0, 0] - else: - result[y, x] = [255, 255, 255] - - elif mode == "scanline": - # Horizontal scanline noise - for y in range(h): - if rng.uniform() < intensity * 0.2: - noise_val = rng.uniform(-intensity * 100, intensity * 100) - result[y] = result[y] + noise_val - - return np.clip(result, 0, 255).astype(np.uint8), state diff --git a/effects/noise.sexp b/effects/noise.sexp deleted file mode 100644 index 908b751..0000000 --- a/effects/noise.sexp +++ /dev/null @@ -1,19 +0,0 @@ -;; Noise effect - random pixel noise -;; Usage: python3 plan.py effects/noise.sexp -p amount=40 | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; amount: noise intensity (5 to 100, default: 40) - -(recipe "noise" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect noise :path "sexp_effects/effects/noise.sexp") - - ;; Default parameter (overridden by -p amount=N) - (def amount 40) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect noise :amount amount))) - result) diff --git a/effects/outline.py b/effects/outline.py deleted file mode 100644 index 8690af1..0000000 --- a/effects/outline.py +++ /dev/null @@ -1,111 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect outline -@version 1.0.0 -@author artdag - -@description -Outline / Toon Edges effect. Extracts and displays edges as outlines, -optionally with fill. Great for cartoon/comic book aesthetics. - -@param edge_thickness int - @range 1 10 - @default 2 - Thickness of outlines in pixels. - -@param threshold float - @range 20 300 - @default 100 - Edge detection sensitivity. 
- -@param outline_color list - @default [0, 0, 0] - RGB color for outlines (default black). - -@param fill_mode string - @enum original solid transparent - @default original - What to show in non-edge areas: - - original: keep source image - - solid: fill with solid color - - transparent: black background - -@param fill_color list - @default [255, 255, 255] - RGB color for solid fill mode. - -@example - (effect outline :edge_thickness 3 :threshold 80) - -@example - ;; White outlines on black - (effect outline :outline_color [255 255 255] :fill_mode "transparent") -""" - -import numpy as np -import cv2 - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply outline effect to a video frame. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - edge_thickness: outline width (default 2) - - threshold: edge sensitivity (default 100) - - outline_color: RGB tuple (default [0,0,0]) - - fill_mode: original/solid/transparent (default original) - - fill_color: RGB tuple for solid fill (default [255,255,255]) - state: Persistent state dict - - Returns: - Tuple of (processed_frame, new_state) - """ - thickness = max(1, min(int(params.get("edge_thickness", 2)), 10)) - threshold = params.get("threshold", 100) - outline_color = params.get("outline_color", [0, 0, 0]) - fill_mode = params.get("fill_mode", "original") - fill_color = params.get("fill_color", [255, 255, 255]) - - if state is None: - state = {} - - h, w = frame.shape[:2] - - # Convert to grayscale - gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) - - # Apply edge detection - edges = cv2.Canny(gray, int(threshold * 0.5), int(threshold)) - - # Dilate edges for thickness - if thickness > 1: - kernel = np.ones((thickness, thickness), np.uint8) - edges = cv2.dilate(edges, kernel, iterations=1) - - # Create result based on fill mode - if fill_mode == "original": - result = frame.copy() - elif fill_mode == "solid": - if isinstance(fill_color, 
(list, tuple)) and len(fill_color) >= 3: - result = np.full((h, w, 3), fill_color[:3], dtype=np.uint8) - else: - result = np.full((h, w, 3), 255, dtype=np.uint8) - else: # transparent/none - result = np.zeros((h, w, 3), dtype=np.uint8) - - # Apply outline color where edges exist - if isinstance(outline_color, (list, tuple)) and len(outline_color) >= 3: - color = np.array(outline_color[:3], dtype=np.uint8) - else: - color = np.array([0, 0, 0], dtype=np.uint8) - - edge_mask = edges > 0 - result[edge_mask] = color - - return result, state diff --git a/effects/outline.sexp b/effects/outline.sexp deleted file mode 100644 index a78bcf4..0000000 --- a/effects/outline.sexp +++ /dev/null @@ -1,19 +0,0 @@ -;; Outline effect - draw edges as lines -;; Usage: python3 plan.py effects/outline.sexp -p thickness=2 | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; thickness: outline width in pixels (1 to 5, default: 2) - -(recipe "outline" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect outline :path "sexp_effects/effects/outline.sexp") - - ;; Default parameter (overridden by -p thickness=N) - (def thickness 2) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect outline :thickness thickness))) - result) diff --git a/effects/pixelate.py b/effects/pixelate.py deleted file mode 100644 index 3194964..0000000 --- a/effects/pixelate.py +++ /dev/null @@ -1,72 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect pixelate -@version 1.0.0 -@author artdag - -@description -Pixelate effect. Reduces resolution to create blocky, retro pixel art -look. Great for 8-bit aesthetics. - -@param block_size int - @range 2 64 - @default 8 - Size of pixel blocks. Larger = more pixelated. - -@param maintain_edges bool - @default false - Try to preserve edges while pixelating. 
- -@example - (effect pixelate :block_size 16) - -@example - ;; Beat-reactive pixelation - (effect pixelate :block_size (bind bass :range [4 32])) -""" - -import numpy as np -import cv2 - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply pixelate effect to a video frame. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - block_size: pixel block size (default 8) - - maintain_edges: preserve edges (default False) - state: Persistent state dict - - Returns: - Tuple of (processed_frame, new_state) - """ - block_size = max(2, min(int(params.get("block_size", 8)), 64)) - maintain_edges = params.get("maintain_edges", False) - - if state is None: - state = {} - - h, w = frame.shape[:2] - - # Scale down then up to create pixelation - small_h = max(1, h // block_size) - small_w = max(1, w // block_size) - - small = cv2.resize(frame, (small_w, small_h), interpolation=cv2.INTER_AREA) - result = cv2.resize(small, (w, h), interpolation=cv2.INTER_NEAREST) - - if maintain_edges: - # Detect edges in original and overlay - gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) - edges = cv2.Canny(gray, 50, 150) - edges_dilated = cv2.dilate(edges, np.ones((2, 2), np.uint8)) - edge_mask = edges_dilated > 0 - result[edge_mask] = frame[edge_mask] - - return result, state diff --git a/effects/pixelate.sexp b/effects/pixelate.sexp deleted file mode 100644 index 7b627d2..0000000 --- a/effects/pixelate.sexp +++ /dev/null @@ -1,20 +0,0 @@ -;; Pixelate effect - reduce resolution with blocky pixels -;; Usage: python3 plan.py effects/pixelate.sexp -p block_size=16 | python3 execute.py - -d . 
-o output.mp4 -;; -;; Parameters: -;; block_size: pixel block size (2 to 64, default: 16) -;; higher = more pixelated - -(recipe "pixelate" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect pixelate :path "sexp_effects/effects/pixelate.sexp") - - ;; Default parameter (overridden by -p block_size=N) - (def block_size 16) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect pixelate :block_size block_size))) - result) diff --git a/effects/pixelsort.py b/effects/pixelsort.py deleted file mode 100644 index 6c569af..0000000 --- a/effects/pixelsort.py +++ /dev/null @@ -1,152 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect pixelsort -@version 1.0.0 -@author artdag - -@description -Pixel sorting glitch art effect. Sorts pixels within rows by brightness, -hue, or other properties. Creates distinctive streaked/melted aesthetics. - -@param sort_by string - @enum lightness hue saturation red green blue - @default lightness - Property to sort pixels by. - -@param threshold_low float - @range 0 255 - @default 50 - Pixels darker than this are not sorted. - -@param threshold_high float - @range 0 255 - @default 200 - Pixels brighter than this are not sorted. - -@param angle float - @range 0 180 - @default 0 - Sort direction: 0 = horizontal, 90 = vertical. - -@param reverse bool - @default false - Reverse the sort order. - -@example - (effect pixelsort) - -@example - ;; Vertical pixel sort - (effect pixelsort :angle 90) - -@example - ;; Sort by hue for rainbow streaks - (effect pixelsort :sort_by "hue" :threshold_low 20 :threshold_high 240) -""" - -import numpy as np -import cv2 - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply pixel sorting to a video frame. 
- - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - sort_by: property to sort by (default "lightness") - - threshold_low: min brightness to sort (default 50) - - threshold_high: max brightness to sort (default 200) - - angle: 0 = horizontal, 90 = vertical (default 0) - - reverse: reverse sort order (default False) - state: Persistent state dict (unused) - - Returns: - Tuple of (processed_frame, new_state) - """ - sort_by = params.get("sort_by", "lightness") - threshold_low = params.get("threshold_low", 50) - threshold_high = params.get("threshold_high", 200) - angle = params.get("angle", 0) - reverse = params.get("reverse", False) - - h, w = frame.shape[:2] - - # Rotate for non-horizontal sorting - if 45 <= (angle % 180) <= 135: - frame = np.transpose(frame, (1, 0, 2)) - h, w = frame.shape[:2] - rotated = True - else: - rotated = False - - result = frame.copy() - - # Get sort values - sort_values = _get_sort_values(frame, sort_by) - - # Create mask of pixels to sort - mask = (sort_values >= threshold_low) & (sort_values <= threshold_high) - - # Sort each row - for y in range(h): - row = result[y].copy() - row_mask = mask[y] - row_values = sort_values[y] - - # Find contiguous segments to sort - segments = _find_segments(row_mask) - - for start, end in segments: - if end - start > 1: - segment_values = row_values[start:end] - sort_indices = np.argsort(segment_values) - if reverse: - sort_indices = sort_indices[::-1] - row[start:end] = row[start:end][sort_indices] - - result[y] = row - - # Rotate back if needed - if rotated: - result = np.transpose(result, (1, 0, 2)) - - return np.ascontiguousarray(result), state - - -def _get_sort_values(frame, sort_by): - """Get values to sort pixels by.""" - if sort_by == "lightness": - return cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY).astype(np.float32) - elif sort_by == "hue": - hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV) - return hsv[:, :, 0].astype(np.float32) - elif sort_by == 
"saturation": - hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV) - return hsv[:, :, 1].astype(np.float32) - elif sort_by == "red": - return frame[:, :, 0].astype(np.float32) - elif sort_by == "green": - return frame[:, :, 1].astype(np.float32) - elif sort_by == "blue": - return frame[:, :, 2].astype(np.float32) - return cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY).astype(np.float32) - - -def _find_segments(mask): - """Find contiguous True segments in mask.""" - segments = [] - start = None - for i, val in enumerate(mask): - if val and start is None: - start = i - elif not val and start is not None: - segments.append((start, i)) - start = None - if start is not None: - segments.append((start, len(mask))) - return segments diff --git a/effects/pixelsort.sexp b/effects/pixelsort.sexp deleted file mode 100644 index 6b9877d..0000000 --- a/effects/pixelsort.sexp +++ /dev/null @@ -1,22 +0,0 @@ -;; Pixelsort effect - glitch art pixel sorting -;; Usage: python3 plan.py effects/pixelsort.sexp -p threshold_low=30 -p threshold_high=220 | python3 execute.py - -d . 
-o output.mp4 -;; -;; Parameters: -;; threshold_low: lower brightness threshold (0 to 128, default: 30) -;; threshold_high: upper brightness threshold (128 to 255, default: 220) -;; pixels between thresholds get sorted - -(recipe "pixelsort" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect pixelsort :path "sexp_effects/effects/pixelsort.sexp") - - ;; Default parameters (overridden by -p threshold_low=N -p threshold_high=N) - (def threshold_low 30) - (def threshold_high 220) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect pixelsort :threshold_low threshold_low :threshold_high threshold_high))) - result) diff --git a/effects/posterize.py b/effects/posterize.py deleted file mode 100644 index b0767b8..0000000 --- a/effects/posterize.py +++ /dev/null @@ -1,79 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy"] -# /// -""" -@effect posterize -@version 1.0.0 -@author artdag - -@description -Posterize / Color Quantization effect. Reduces the number of colors -to create a poster/cartoon look. Great for stylized visuals. - -@param levels int - @range 2 32 - @default 8 - Number of color levels per channel. Lower = more stylized. - -@param dither bool - @default false - Apply dithering to reduce color banding. - -@example - (effect posterize :levels 4) - -@example - ;; Beat-reactive posterization - (effect posterize :levels (bind bass :range [2 16])) -""" - -import numpy as np - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply posterize effect to a video frame. 
- - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - levels: color levels per channel (default 8) - - dither: apply dithering (default False) - state: Persistent state dict - - Returns: - Tuple of (processed_frame, new_state) - """ - levels = max(2, min(int(params.get("levels", 8)), 256)) - dither = params.get("dither", False) - - if state is None: - state = {} - - step = 256 // levels - - if dither: - # Floyd-Steinberg dithering - result = frame.astype(np.float32).copy() - h, w = result.shape[:2] - - for y in range(h - 1): - for x in range(1, w - 1): - for c in range(3): - old_val = result[y, x, c] - new_val = np.round(old_val / step) * step - result[y, x, c] = new_val - error = old_val - new_val - - # Distribute error to neighbors - result[y, x + 1, c] += error * 7 / 16 - result[y + 1, x - 1, c] += error * 3 / 16 - result[y + 1, x, c] += error * 5 / 16 - result[y + 1, x + 1, c] += error * 1 / 16 - - return np.clip(result, 0, 255).astype(np.uint8), state - else: - # Simple quantization - quantized = (frame // step) * step - return quantized.astype(np.uint8), state diff --git a/effects/posterize.sexp b/effects/posterize.sexp deleted file mode 100644 index d26a9f7..0000000 --- a/effects/posterize.sexp +++ /dev/null @@ -1,20 +0,0 @@ -;; Posterize effect - reduce color levels for poster-like look -;; Usage: python3 plan.py effects/posterize.sexp -p levels=6 | python3 execute.py - -d . 
-o output.mp4 -;; -;; Parameters: -;; levels: number of color levels per channel (2 to 32, default: 6) -;; lower = more posterized, higher = more gradual - -(recipe "posterize" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect posterize :path "sexp_effects/effects/posterize.sexp") - - ;; Default parameter (overridden by -p levels=N) - (def levels 6) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect posterize :levels levels))) - result) diff --git a/effects/quick_test.sexp b/effects/quick_test.sexp deleted file mode 100644 index dacfc97..0000000 --- a/effects/quick_test.sexp +++ /dev/null @@ -1,178 +0,0 @@ -;; Quick Test Recipe -;; -;; Cycles between three video pairs (monday, duel, ecstacy) with smooth zoom-based crossfade. -;; Each pair is two copies of the same source with opposite rotations. -;; Each pair rotates in its own direction (per-pair rotation via template). -;; Cycle: active pair plays -> crossfade -> new pair plays -> advance and repeat. -;; Ripple drops on the final combined output only. 
- -(recipe "quick_test" - :version "1.0" - :description "Cycling crossfade between three video pairs" - :minimal-primitives true - :encoding (:codec "libx264" :crf 23 :preset "ultrafast" :audio-codec "aac" :fps 30) - - :params ( - (audio_start :type float :default 60 :range [0 300] - :desc "Audio start time in seconds") - (audio_duration :type float :default nil - :desc "Audio duration (nil = full remaining)") - (blend_opacity :type float :default 0.5 :range [0 1] - :desc "Blend opacity within each pair") - (seed :type int :default 42 :desc "Master random seed") - ) - - ;; Registry - (effect rotate :path "../sexp_effects/effects/rotate.sexp") - (effect zoom :path "../sexp_effects/effects/zoom.sexp") - (effect blend :path "../sexp_effects/effects/blend.sexp") - (effect invert :path "../sexp_effects/effects/invert.sexp") - (effect hue_shift :path "../sexp_effects/effects/hue_shift.sexp") - (effect ascii_art :path "../sexp_effects/effects/ascii_art.sexp") - (effect ripple :path "../sexp_effects/effects/ripple.sexp") - (effect blend_multi :path "../sexp_effects/effects/blend_multi.sexp") - (analyzer energy :path "../../artdag-analyzers/energy/analyzer.py") - (analyzer beats :path "../../artdag-analyzers/beats/analyzer.py") - - ;; Sources - - (def video-1 (source :path "../1.mp4")) - (def video-2 (source :path "../2.webm")) - (def video-4 (source :path "../4.mp4")) - (def video-5 (source :path "../5.mp4")) - (def video-a (source :path "../monday.webm")) - (def video-b (source :path "../escher.webm")) - (def video-c (source :path "../dopple.webm")) - (def video-d (source :path "../disruptors.webm")) - (def video-e (source :path "../ecstacy.mp4")) - (def audio (source :path "../dizzy.mp3")) - - ;; Templates: reusable video-pair processor and cycle-crossfade - (include :path "../templates/process-pair.sexp") - (include :path "../templates/cycle-crossfade.sexp") - - ;; Unified RNG: auto-derives unique seeds for all scans - (def rng (make-rng seed)) - - ;; Stage 1: Analysis 
- energy, beats, and global-level scans - (stage :analyze - :outputs [energy-data beat-data whole-spin - ripple-gate ripple-cx ripple-cy] - (def audio-clip (-> audio (segment :start audio_start :duration audio_duration))) - (def energy-data (-> audio-clip (analyze energy))) - (def beat-data (-> audio-clip (analyze beats))) - - ;; --- Whole-video continuous spin: cumulative rotation that reverses direction periodically --- - (def whole-spin (scan beat-data :rng rng - :init (dict :beat 0 :clen 25 :dir 1 :angle 0) - :step (if (< (+ beat 1) clen) - (dict :beat (+ beat 1) :clen clen :dir dir - :angle (+ angle (* dir (/ 360 clen)))) - (dict :beat 0 :clen (rand-int 20 30) :dir (* dir -1) - :angle angle)) - :emit angle)) - - ;; --- Ripple drops on final output --- - (def ripple (scan beat-data :rng rng - :init (dict :rem 0 :cx 0.5 :cy 0.5) - :step (if (> rem 0) - (dict :rem (- rem 1) :cx cx :cy cy) - (if (< (rand) 0.05) - (dict :rem (rand-int 1 20) :cx (rand-range 0.1 0.9) :cy (rand-range 0.1 0.9)) - (dict :rem 0 :cx 0.5 :cy 0.5))) - :emit {:gate (if (> rem 0) 1 0) :cx cx :cy cy}))) - - ;; Stage 2: Process videos via template - ;; Per-pair scans (inv/hue/ascii triggers, pair-mix, pair-rot) are now - ;; defined inside the process-pair template using seed offsets. 
- (stage :process - :requires [:analyze] - :inputs [energy-data beat-data whole-spin - ripple-gate ripple-cx ripple-cy] - :outputs [final-video audio-clip] - - ;; Re-segment audio for final mux - (def audio-clip (-> audio (segment :start audio_start :duration audio_duration))) - - ;; --- Process each pair via template --- - - - - - (def monday-blend (process-pair - :video video-a :energy energy-data :beat-data beat-data - :rng rng :rot-dir -1 - :rot-a [0 45] :rot-b [0 -45] - :zoom-a [1 1.5] :zoom-b [1 0.5])) - - - - (def escher-blend (process-pair - :video video-b :energy energy-data :beat-data beat-data - :rng rng :rot-dir 1 - :rot-a [0 45] :rot-b [0 -45] - :zoom-a [1 1.5] :zoom-b [1 0.5])) - - (def duel-blend (process-pair - :video video-d :energy energy-data :beat-data beat-data - :rng rng :rot-dir -1 - :rot-a [0 -45] :rot-b [0 45] - :zoom-a [1 0.5] :zoom-b [1 1.5])) - - (def blend-2 (process-pair - :video video-2 :energy energy-data :beat-data beat-data - :rng rng :rot-dir 1 - :rot-a [0 45] :rot-b [0 -45] - :zoom-a [1 1.5] :zoom-b [1 0.5])) - - - - (def dopple-blend (process-pair - :video video-c :energy energy-data :beat-data beat-data - :rng rng :rot-dir -1 - :rot-a [0 -45] :rot-b [0 45] - :zoom-a [1 0.5] :zoom-b [1 1.5])) - - (def blend-4 (process-pair - :video video-4 :energy energy-data :beat-data beat-data - :rng rng :rot-dir -1 - :rot-a [0 45] :rot-b [0 -45] - :zoom-a [1 1.5] :zoom-b [1 0.5])) - - - - (def ext-blend (process-pair - :video video-e :energy energy-data :beat-data beat-data - :rng rng :rot-dir 1 - :rot-a [0 30] :rot-b [0 -30] - :zoom-a [1 1.3] :zoom-b [1 0.7])) - - (def blend-5 (process-pair - :video video-5 :energy energy-data :beat-data beat-data - :rng rng :rot-dir 1 - :rot-a [0 45] :rot-b [0 -45] - :zoom-a [1 1.5] :zoom-b [1 0.5])) - - ;; --- Cycle zoom + crossfade via template --- - - (def combined (cycle-crossfade - :beat-data beat-data - :input-videos [monday-blend escher-blend blend-2 duel-blend blend-4 ext-blend dopple-blend 
blend-5] - :init-clen 60)) - - ;; --- Final output: sporadic spin + ripple --- - (def final-video (-> combined - (effect rotate :angle (bind whole-spin values)) - (effect ripple - :amplitude (* (bind ripple-gate values) (bind energy-data values :range [5 50])) - :center_x (bind ripple-cx values) - :center_y (bind ripple-cy values) - :frequency 8 - :decay 2 - :speed 5)))) - - ;; Stage 3: Output - (stage :output - :requires [:process] - :inputs [final-video audio-clip] - (mux final-video audio-clip))) diff --git a/effects/quick_test_explicit.sexp b/effects/quick_test_explicit.sexp new file mode 100644 index 0000000..0a3698b --- /dev/null +++ b/effects/quick_test_explicit.sexp @@ -0,0 +1,150 @@ +;; Quick Test - Fully Explicit Streaming Version +;; +;; The interpreter is completely generic - knows nothing about video/audio. +;; All domain logic is explicit via primitives. +;; +;; Run with built-in sources/audio: +;; python3 -m streaming.stream_sexp_generic effects/quick_test_explicit.sexp --fps 30 +;; +;; Run with external config files: +;; python3 -m streaming.stream_sexp_generic effects/quick_test_explicit.sexp \ +;; --sources configs/sources-default.sexp \ +;; --audio configs/audio-dizzy.sexp \ +;; --fps 30 + +(stream "quick_test_explicit" + :fps 30 + :width 1920 + :height 1080 + :seed 42 + + ;; Load standard primitives and effects + (include :path "../templates/standard-primitives.sexp") + (include :path "../templates/standard-effects.sexp") + + ;; Load reusable templates + (include :path "../templates/stream-process-pair.sexp") + (include :path "../templates/crossfade-zoom.sexp") + + ;; === SOURCES AS ARRAY === + (def sources [ + (streaming:make-video-source "monday.webm" 30) + (streaming:make-video-source "escher.webm" 30) + (streaming:make-video-source "2.webm" 30) + (streaming:make-video-source "disruptors.webm" 30) + (streaming:make-video-source "4.mp4" 30) + (streaming:make-video-source "ecstacy.mp4" 30) + (streaming:make-video-source "dopple.webm" 30) + 
(streaming:make-video-source "5.mp4" 30) + ]) + + ;; Per-pair config: [rot-dir, rot-a-max, rot-b-max, zoom-a-max, zoom-b-max] + ;; Pairs 3,6: reversed (negative rot-a, positive rot-b, shrink zoom-a, grow zoom-b) + ;; Pair 5: smaller ranges + (def pair-configs [ + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 0: monday + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 1: escher + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 2: vid2 + {:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 3: disruptors (reversed) + {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 4: vid4 + {:dir 1 :rot-a 30 :rot-b -30 :zoom-a 1.3 :zoom-b 0.7} ;; 5: ecstacy (smaller) + {:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 6: dopple (reversed) + {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 7: vid5 + ]) + + ;; Audio analyzer + (def music (streaming:make-audio-analyzer "dizzy.mp3")) + + ;; Audio playback + (audio-playback "../dizzy.mp3") + + ;; === GLOBAL SCANS === + + ;; Cycle state: which source is active (recipe-specific) + ;; clen = beats per source (8-24 beats = ~4-12 seconds) + (scan cycle (streaming:audio-beat music t) + :init {:active 0 :beat 0 :clen 16} + :step (if (< (+ beat 1) clen) + (dict :active active :beat (+ beat 1) :clen clen) + (dict :active (mod (+ active 1) (len sources)) :beat 0 + :clen (+ 8 (mod (* (streaming:audio-beat-count music t) 7) 17))))) + + ;; Reusable scans from templates (require 'music' to be defined) + (include :path "../templates/scan-oscillating-spin.sexp") + (include :path "../templates/scan-ripple-drops.sexp") + + ;; === PER-PAIR STATE (dynamically sized based on sources) === + ;; Each pair has: inv-a, inv-b, hue-a, hue-b, mix, rot-angle + (scan pairs (streaming:audio-beat music t) + :init {:states (map (core:range (len sources)) (lambda (_) + {:inv-a 0 :inv-b 0 :hue-a 0 :hue-b 0 :hue-a-val 0 :hue-b-val 0 :mix 0.5 :mix-rem 5 :angle 0 :rot-beat 0 :rot-clen 25}))} + :step (dict :states (map 
states (lambda (p) + (let [;; Invert toggles (10% chance, lasts 1-4 beats) + new-inv-a (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- (get p :inv-a) 1))) + new-inv-b (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- (get p :inv-b) 1))) + ;; Hue shifts (10% chance, lasts 1-4 beats) - use countdown like invert + old-hue-a (get p :hue-a) + old-hue-b (get p :hue-b) + new-hue-a (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- old-hue-a 1))) + new-hue-b (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- old-hue-b 1))) + ;; Pick random hue value when triggering (stored separately) + new-hue-a-val (if (> new-hue-a old-hue-a) (+ 30 (* (core:rand) 300)) (get p :hue-a-val)) + new-hue-b-val (if (> new-hue-b old-hue-b) (+ 30 (* (core:rand) 300)) (get p :hue-b-val)) + ;; Mix (holds for 1-10 beats, then picks 0, 0.5, or 1) + mix-rem (get p :mix-rem) + old-mix (get p :mix) + new-mix-rem (if (> mix-rem 0) (- mix-rem 1) (+ 1 (core:rand-int 1 10))) + new-mix (if (> mix-rem 0) old-mix (* (core:rand-int 0 2) 0.5)) + ;; Rotation (accumulates, reverses direction when cycle completes) + rot-beat (get p :rot-beat) + rot-clen (get p :rot-clen) + old-angle (get p :angle) + ;; Note: dir comes from pair-configs, but we store rotation state here + new-rot-beat (if (< (+ rot-beat 1) rot-clen) (+ rot-beat 1) 0) + new-rot-clen (if (< (+ rot-beat 1) rot-clen) rot-clen (+ 20 (core:rand-int 0 10))) + new-angle (+ old-angle (/ 360 rot-clen))] + (dict :inv-a new-inv-a :inv-b new-inv-b + :hue-a new-hue-a :hue-b new-hue-b + :hue-a-val new-hue-a-val :hue-b-val new-hue-b-val + :mix new-mix :mix-rem new-mix-rem + :angle new-angle :rot-beat new-rot-beat :rot-clen new-rot-clen)))))) + + ;; === FRAME PIPELINE === + (frame + (let [now t + e (streaming:audio-energy music now) + + ;; Get cycle state + active (bind cycle :active) + beat-pos (bind cycle :beat) + clen (bind cycle :clen) + + ;; Transition logic: last third of cycle crossfades to next + 
phase3 (* beat-pos 3) + fading (and (>= phase3 (* clen 2)) (< phase3 (* clen 3))) + fade-amt (if fading (/ (- phase3 (* clen 2)) clen) 0) + next-idx (mod (+ active 1) (len sources)) + + ;; Get pair states array (required by process-pair macro) + pair-states (bind pairs :states) + + ;; Process active pair using macro from template + active-frame (process-pair active) + + ;; Crossfade with zoom during transition (using macro) + result (if fading + (crossfade-zoom active-frame (process-pair next-idx) fade-amt) + active-frame) + + ;; Final: global spin + ripple + spun (rotate result :angle (bind spin :angle)) + rip-gate (bind ripple-state :gate) + rip-amp (* rip-gate (core:map-range e 0 1 5 50))] + + (ripple spun + :amplitude rip-amp + :center_x (bind ripple-state :cx) + :center_y (bind ripple-state :cy) + :frequency 8 + :decay 2 + :speed 5)))) diff --git a/effects/random.py b/effects/random.py deleted file mode 100644 index b6d866d..0000000 --- a/effects/random.py +++ /dev/null @@ -1,217 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy"] -# /// -""" -@effect random -@version 1.0.0 -@author artdag - -@description -Deterministic random number generator for use in recipes and effects. -Given the same seed, produces the same sequence of values every time. - -This effect doesn't modify the frame - it provides random values that -can be bound to other effect parameters. The random state persists -across frames for consistent sequences. - -@param seed int - @default 42 - Random seed for reproducibility. Same seed = same sequence. - -@param min float - @default 0 - Minimum output value. - -@param max float - @default 1 - Maximum output value. 
- -@param mode string - @enum uniform gaussian integer choice - @default uniform - Distribution type: - - uniform: even distribution between min and max - - gaussian: normal distribution (min=mean, max=stddev) - - integer: random integers between min and max (inclusive) - - choice: randomly pick from a list (use choices param) - -@param choices list - @default [] - List of values to choose from (for mode=choice). - -@param step_every int - @default 1 - Only generate new value every N frames (1 = every frame). - -@state rng RandomState - Numpy random state for deterministic sequence. - -@state frame_count int - Tracks frames for step_every. - -@state current_value float - Current random value (persists between steps). - -@example - ;; Random value 0-1 each frame - (bind (random :seed 123)) - -@example - ;; Random integer 1-10, changes every 5 frames - (random :seed 42 :mode "integer" :min 1 :max 10 :step_every 5) - -@example - ;; Gaussian noise around 0.5 - (random :mode "gaussian" :min 0.5 :max 0.1) -""" - -import numpy as np - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Generate deterministic random values. - - This effect passes through the frame unchanged but updates state - with random values that can be used by the recipe/executor. 
- - Args: - frame: Input frame (passed through unchanged) - params: Effect parameters - - seed: random seed (default 42) - - min: minimum value (default 0) - - max: maximum value (default 1) - - mode: uniform/gaussian/integer/choice (default uniform) - - choices: list for choice mode - - step_every: frames between new values (default 1) - state: Persistent state dict - - rng: numpy RandomState - - frame_count: frame counter - - current_value: last generated value - - Returns: - Tuple of (frame, state_with_random_value) - """ - seed = int(params.get("seed", 42)) - min_val = params.get("min", 0) - max_val = params.get("max", 1) - mode = params.get("mode", "uniform") - choices = params.get("choices", []) - step_every = max(1, int(params.get("step_every", 1))) - - if state is None: - state = {} - - # Initialize RNG on first call - if "rng" not in state: - state["rng"] = np.random.RandomState(seed) - state["frame_count"] = 0 - state["current_value"] = None - - rng = state["rng"] - frame_count = state["frame_count"] - - # Generate new value if needed - if frame_count % step_every == 0 or state["current_value"] is None: - if mode == "uniform": - value = rng.uniform(min_val, max_val) - elif mode == "gaussian": - # min = mean, max = stddev - value = rng.normal(min_val, max_val) - elif mode == "integer": - value = rng.randint(int(min_val), int(max_val) + 1) - elif mode == "choice" and choices: - value = choices[rng.randint(0, len(choices))] - else: - value = rng.uniform(min_val, max_val) - - state["current_value"] = value - - state["frame_count"] = frame_count + 1 - - # Store value in state for recipe access - state["value"] = state["current_value"] - - return frame, state - - -# Standalone RNG class for use in other effects -class DeterministicRNG: - """ - Deterministic random number generator for use in effects. 
- - Usage in effects: - from effects.random import DeterministicRNG - - def process_frame(frame, params, state): - if "rng" not in state: - state["rng"] = DeterministicRNG(params.get("seed", 42)) - rng = state["rng"] - - value = rng.uniform(0, 1) - integer = rng.randint(0, 10) - choice = rng.choice(["a", "b", "c"]) - """ - - def __init__(self, seed: int = 42): - """Initialize with seed for reproducibility.""" - self._rng = np.random.RandomState(seed) - self._seed = seed - - def seed(self, seed: int): - """Reset with new seed.""" - self._rng = np.random.RandomState(seed) - self._seed = seed - - def uniform(self, low: float = 0, high: float = 1) -> float: - """Random float in [low, high).""" - return self._rng.uniform(low, high) - - def randint(self, low: int, high: int) -> int: - """Random integer in [low, high].""" - return self._rng.randint(low, high + 1) - - def gaussian(self, mean: float = 0, stddev: float = 1) -> float: - """Random float from normal distribution.""" - return self._rng.normal(mean, stddev) - - def choice(self, items: list): - """Random choice from list.""" - if not items: - return None - return items[self._rng.randint(0, len(items))] - - def shuffle(self, items: list) -> list: - """Return shuffled copy of list.""" - result = list(items) - self._rng.shuffle(result) - return result - - def sample(self, items: list, n: int) -> list: - """Random sample of n items without replacement.""" - if n >= len(items): - return self.shuffle(items) - indices = self._rng.choice(len(items), n, replace=False) - return [items[i] for i in indices] - - def weighted_choice(self, items: list, weights: list): - """Random choice with weights.""" - if not items or not weights: - return None - weights = np.array(weights, dtype=float) - weights /= weights.sum() - idx = self._rng.choice(len(items), p=weights) - return items[idx] - - @property - def state(self) -> dict: - """Get RNG state for serialization.""" - return {"seed": self._seed, "state": self._rng.get_state()} - - 
@classmethod - def from_state(cls, state: dict) -> 'DeterministicRNG': - """Restore RNG from serialized state.""" - rng = cls(state["seed"]) - rng._rng.set_state(state["state"]) - return rng diff --git a/effects/rgb_split.py b/effects/rgb_split.py deleted file mode 100644 index bbee47d..0000000 --- a/effects/rgb_split.py +++ /dev/null @@ -1,106 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect rgb_split -@version 1.0.0 -@author artdag - -@description -Chromatic aberration / RGB channel separation. Offsets red and blue channels -in different directions, creating color fringing. Classic glitch aesthetic. - -@param amount float - @range 0 50 - @default 10 - Offset amount in pixels. Bind to bass for punchy glitch effect. - -@param angle float - @range 0 360 - @default 0 - Direction of split in degrees. 0 = horizontal, 90 = vertical. - -@param red_offset float - @range -50 50 - @default 0 - Override: specific red channel X offset (ignores amount/angle if set). - -@param blue_offset float - @range -50 50 - @default 0 - Override: specific blue channel X offset (ignores amount/angle if set). - -@example - (effect rgb_split :amount 15) - -@example - ;; Bass-reactive chromatic aberration - (effect rgb_split :amount (bind bass :range [0 30] :transform sqrt)) - -@example - ;; Vertical split - (effect rgb_split :amount 20 :angle 90) -""" - -import numpy as np -import cv2 -import math - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply RGB channel split to a video frame. 
- - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - amount: offset in pixels (default 10) - - angle: split direction in degrees (default 0) - - red_offset: override red X offset - - blue_offset: override blue X offset - state: Persistent state dict (unused) - - Returns: - Tuple of (processed_frame, new_state) - """ - amount = params.get("amount", 10) - angle = params.get("angle", 0) - red_override = params.get("red_offset") - blue_override = params.get("blue_offset") - - # Calculate offsets - if red_override is not None or blue_override is not None: - # Use explicit offsets - r_x = int(red_override or 0) - r_y = 0 - b_x = int(blue_override or 0) - b_y = 0 - else: - # Calculate from amount and angle - angle_rad = math.radians(angle) - r_x = int(amount * math.cos(angle_rad)) - r_y = int(amount * math.sin(angle_rad)) - b_x = -r_x # Blue goes opposite direction - b_y = -r_y - - if r_x == 0 and r_y == 0 and b_x == 0 and b_y == 0: - return frame, state - - h, w = frame.shape[:2] - - # Split channels - r, g, b = frame[:, :, 0], frame[:, :, 1], frame[:, :, 2] - - # Create translation matrices - M_r = np.float32([[1, 0, r_x], [0, 1, r_y]]) - M_b = np.float32([[1, 0, b_x], [0, 1, b_y]]) - - # Translate red and blue channels - r_shifted = cv2.warpAffine(r, M_r, (w, h), borderMode=cv2.BORDER_REPLICATE) - b_shifted = cv2.warpAffine(b, M_b, (w, h), borderMode=cv2.BORDER_REPLICATE) - - # Merge channels - result = np.stack([r_shifted, g, b_shifted], axis=-1) - - return result, state diff --git a/effects/rgb_split.sexp b/effects/rgb_split.sexp deleted file mode 100644 index 3708bac..0000000 --- a/effects/rgb_split.sexp +++ /dev/null @@ -1,19 +0,0 @@ -;; RGB split effect - chromatic aberration -;; Usage: python3 plan.py effects/rgb_split.sexp -p offset_x=20 | python3 execute.py - -d . 
-o output.mp4 -;; -;; Parameters: -;; offset_x: horizontal color channel offset in pixels (2 to 50, default: 20) - -(recipe "rgb_split" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect rgb_split :path "sexp_effects/effects/rgb_split.sexp") - - ;; Default parameter (overridden by -p offset_x=N) - (def offset_x 20) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect rgb_split :offset_x offset_x))) - result) diff --git a/effects/ripple.sexp b/effects/ripple.sexp deleted file mode 100644 index 865776d..0000000 --- a/effects/ripple.sexp +++ /dev/null @@ -1,21 +0,0 @@ -;; Ripple effect - concentric wave distortion from center -;; Usage: python3 plan.py effects/ripple.sexp -p amplitude=20 -p frequency=6 | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; amplitude: ripple strength in pixels (5 to 50, default: 20) -;; frequency: number of ripples (1 to 20, default: 6) - -(recipe "ripple" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect ripple :path "sexp_effects/effects/ripple.sexp") - - ;; Default parameters (overridden by -p amplitude=N -p frequency=N) - (def amplitude 20) - (def frequency 6) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect ripple :amplitude amplitude :frequency frequency))) - result) diff --git a/effects/rotate.py b/effects/rotate.py deleted file mode 100644 index c0b1c28..0000000 --- a/effects/rotate.py +++ /dev/null @@ -1,110 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect rotate -@version 1.0.0 -@author artdag - -@description -Rotates the frame by a specified angle. 
Supports two modes: -- Static: rotate by fixed angle -- Continuous: rotate by speed (degrees per frame), accumulating over time - -The effect uses state to track cumulative rotation for smooth continuous motion. - -@param angle float - @range -360 360 - @default 0 - Rotation angle in degrees. Positive = counter-clockwise. - -@param speed float - @range -30 30 - @default 0 - Rotation speed in degrees per frame (for continuous rotation). - When non-zero, angle is ignored and rotation accumulates. - -@param auto_zoom bool - @default false - If true, automatically zooms to crop out black corners. - -@state cumulative_angle float - Tracks total rotation for continuous mode. Persists across frames. - -@example - (effect rotate :angle 45) - -@example - ;; Continuous spin at 5 degrees per frame - (effect rotate :speed 5) - -@example - ;; Beat-reactive rotation - (effect rotate :angle (bind bass :range [0 90])) -""" - -import numpy as np -import cv2 -import math - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Rotate a video frame. 
- - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - angle: rotation angle in degrees (default 0) - - speed: rotation speed per frame for continuous mode (default 0) - - auto_zoom: zoom to hide black corners (default False) - state: Persistent state dict - - cumulative_angle: total rotation for continuous mode - - Returns: - Tuple of (processed_frame, new_state) - """ - angle = params.get("angle", 0) - speed = params.get("speed", 0) - auto_zoom = params.get("auto_zoom", False) - - # Initialize state - if state is None: - state = {} - - # Handle continuous rotation mode - if speed != 0: - cumulative = state.get("cumulative_angle", 0) - cumulative += speed - state["cumulative_angle"] = cumulative - angle = cumulative - - if angle == 0: - return frame, state - - h, w = frame.shape[:2] - center = (w / 2, h / 2) - - # Calculate zoom factor to hide black corners - scale = 1.0 - if auto_zoom: - angle_rad = math.radians(abs(angle) % 90) - if w >= h: - scale = math.cos(angle_rad) + (w / h) * math.sin(angle_rad) - else: - scale = math.cos(angle_rad) + (h / w) * math.sin(angle_rad) - scale = max(1.0, scale) - - # Get rotation matrix - rotation_matrix = cv2.getRotationMatrix2D(center, angle, scale) - - # Apply rotation - result = cv2.warpAffine( - frame, rotation_matrix, (w, h), - flags=cv2.INTER_LINEAR, - borderMode=cv2.BORDER_CONSTANT, - borderValue=(0, 0, 0) - ) - - return result, state diff --git a/effects/rotate.sexp b/effects/rotate.sexp deleted file mode 100644 index 0999765..0000000 --- a/effects/rotate.sexp +++ /dev/null @@ -1,20 +0,0 @@ -;; Rotate effect - rotate image around center -;; Usage: python3 plan.py effects/rotate.sexp -p angle=15 | python3 execute.py - -d . 
-o output.mp4 -;; -;; Parameters: -;; angle: rotation in degrees (-180 to 180, default: 15) -;; positive = clockwise - -(recipe "rotate" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect rotate :path "sexp_effects/effects/rotate.sexp") - - ;; Default parameter (overridden by -p angle=N) - (def angle 15) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect rotate :angle angle))) - result) diff --git a/effects/saturation.py b/effects/saturation.py deleted file mode 100644 index 7ab835e..0000000 --- a/effects/saturation.py +++ /dev/null @@ -1,61 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect saturation -@version 1.0.0 -@author artdag - -@description -Adjusts color saturation. 0 = grayscale, 1 = original, 2 = oversaturated. -Uses HSV color space for accurate saturation control. - -@param factor float - @range 0 3 - @default 1.0 - Saturation multiplier. 0 = grayscale, 1 = unchanged, 2 = double saturation. - -@example - (effect saturation :factor 0) ; grayscale - -@example - (effect saturation :factor 1.5) ; vibrant colors - -@example - ;; Desaturate on quiet parts, saturate on loud - (effect saturation :factor (bind energy :range [0.5 2.0])) -""" - -import numpy as np -import cv2 - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Adjust saturation of a video frame. 
- - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - factor: saturation multiplier (default 1.0) - state: Persistent state dict (unused) - - Returns: - Tuple of (processed_frame, new_state) - """ - factor = params.get("factor", 1.0) - - if factor == 1.0: - return frame, state - - # Convert RGB to HSV - hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV).astype(np.float32) - - # Scale saturation channel (index 1) - hsv[:, :, 1] = np.clip(hsv[:, :, 1] * factor, 0, 255) - - # Convert back to RGB - result = cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2RGB) - - return result, state diff --git a/effects/saturation.sexp b/effects/saturation.sexp deleted file mode 100644 index af70925..0000000 --- a/effects/saturation.sexp +++ /dev/null @@ -1,20 +0,0 @@ -;; Saturation effect - adjust color saturation -;; Usage: python3 plan.py effects/saturation.sexp -p amount=2.0 | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; amount: saturation multiplier (0.0 to 3.0, default: 2.0) -;; 0 = grayscale, 1 = normal, >1 = vivid colors - -(recipe "saturation" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect saturation :path "sexp_effects/effects/saturation.sexp") - - ;; Default parameter (overridden by -p amount=N) - (def amount 2.0) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect saturation :amount amount))) - result) diff --git a/effects/scanlines.py b/effects/scanlines.py deleted file mode 100644 index 6a53735..0000000 --- a/effects/scanlines.py +++ /dev/null @@ -1,116 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy"] -# /// -""" -@effect scanlines -@version 1.0.0 -@author artdag - -@description -VHS/CRT-style scan line shift. Horizontally displaces alternating lines -to create analog video distortion. Great for retro/glitch aesthetics. 
- -@param amplitude float - @range 0 100 - @default 10 - Maximum shift amount in pixels. Bind to bass for reactive glitch. - -@param frequency float - @range 1 100 - @default 10 - Lines per cycle (affects pattern density). - -@param randomness float - @range 0 1 - @default 0.5 - 0 = regular sine pattern, 1 = fully random shifts. - -@param line_gap int - @range 1 20 - @default 1 - Only shift every Nth line (1 = all lines). - -@param seed int - @default 42 - Random seed for deterministic patterns. - -@state rng DeterministicRNG - Random number generator for reproducible results. - -@example - (effect scanlines :amplitude 20) - -@example - ;; Heavy glitch on bass - (effect scanlines :amplitude (bind bass :range [0 50]) :randomness 0.8) - -@example - ;; Reproducible scanlines - (effect scanlines :amplitude 30 :randomness 0.7 :seed 999) -""" - -import numpy as np -from pathlib import Path -import sys - -# Import DeterministicRNG from same directory -_effects_dir = Path(__file__).parent -if str(_effects_dir) not in sys.path: - sys.path.insert(0, str(_effects_dir)) -from random import DeterministicRNG - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply scan line shift to a video frame. 
- - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - amplitude: max shift in pixels (default 10) - - frequency: lines per cycle (default 10) - - randomness: 0-1 random vs sine (default 0.5) - - line_gap: shift every Nth line (default 1) - - seed: random seed (default 42) - state: Persistent state dict - - rng: DeterministicRNG instance - - Returns: - Tuple of (processed_frame, new_state) - """ - amplitude = params.get("amplitude", 10) - frequency = params.get("frequency", 10) or 1 - randomness = params.get("randomness", 0.5) - line_gap = max(1, int(params.get("line_gap", 1))) - seed = int(params.get("seed", 42)) - - if state is None: - state = {} - - # Initialize RNG - if "rng" not in state: - state["rng"] = DeterministicRNG(seed) - rng = state["rng"] - - if amplitude == 0: - return frame, state - - h, w = frame.shape[:2] - result = frame.copy() - - for y in range(0, h, line_gap): - # Calculate shift amount - if randomness >= 1.0: - shift = int(rng.uniform(-amplitude, amplitude)) - elif randomness <= 0: - shift = int(amplitude * np.sin(2 * np.pi * y / frequency)) - else: - sine_shift = amplitude * np.sin(2 * np.pi * y / frequency) - rand_shift = rng.uniform(-amplitude, amplitude) - shift = int(sine_shift * (1 - randomness) + rand_shift * randomness) - - if shift != 0: - result[y] = np.roll(result[y], shift, axis=0) - - return result, state diff --git a/effects/scanlines.sexp b/effects/scanlines.sexp deleted file mode 100644 index 5736927..0000000 --- a/effects/scanlines.sexp +++ /dev/null @@ -1,21 +0,0 @@ -;; Scanlines effect - horizontal line overlay -;; Usage: python3 plan.py effects/scanlines.sexp -p spacing=3 -p intensity=0.4 | python3 execute.py - -d . 
-o output.mp4 -;; -;; Parameters: -;; spacing: pixels between lines (1 to 8, default: 3) -;; intensity: line darkness (0 to 1, default: 0.4) - -(recipe "scanlines" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect scanlines :path "sexp_effects/effects/scanlines.sexp") - - ;; Default parameters (overridden by -p spacing=N -p intensity=N) - (def spacing 3) - (def intensity 0.4) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect scanlines :spacing spacing :intensity intensity))) - result) diff --git a/effects/scatter.py b/effects/scatter.py deleted file mode 100644 index 9886b22..0000000 --- a/effects/scatter.py +++ /dev/null @@ -1,142 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "scipy"] -# /// -""" -@effect scatter -@version 1.0.0 -@author artdag - -@description -Scatter effect. Randomly redistributes pixels in local area creating -a dissolving, dispersed look. - -@param amount float - @range 0 100 - @default 10 - Scatter radius in pixels. Bind to energy for reactive dissolve. - -@param randomize_per_frame bool - @default false - Different scatter pattern each frame (vs. static pattern). - -@param grain float - @range 0 1 - @default 0 - Add film grain to scattered result. - -@param seed int - @default 42 - Random seed for reproducible patterns. - -@state rng DeterministicRNG - Random number generator for displacement. - -@state displacement_map ndarray - Cached displacement map for static mode. 
- -@example - (effect scatter :amount 20) - -@example - ;; Reactive dissolve - (effect scatter :amount (bind energy :range [5 50]) :randomize_per_frame true) -""" - -import numpy as np -from scipy import ndimage -from pathlib import Path -import sys - -# Import DeterministicRNG from same directory -_effects_dir = Path(__file__).parent -if str(_effects_dir) not in sys.path: - sys.path.insert(0, str(_effects_dir)) -from random import DeterministicRNG - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply scatter effect to a video frame. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - amount: scatter radius (default 10) - - randomize_per_frame: animate pattern (default False) - - grain: add film grain (default 0) - - seed: random seed (default 42) - state: Persistent state dict - - Returns: - Tuple of (processed_frame, new_state) - """ - amount = int(np.clip(params.get("amount", 10), 0, 100)) - randomize = params.get("randomize_per_frame", False) - grain = params.get("grain", 0) - seed = int(params.get("seed", 42)) - - if state is None: - state = {} - - if amount < 1: - return frame, state - - # Initialize RNG - if "rng" not in state: - state["rng"] = DeterministicRNG(seed) - rng = state["rng"] - - h, w = frame.shape[:2] - - # Generate or reuse displacement map - last_size = state.get("last_size") - if randomize or "displacement_map" not in state or last_size != (h, w): - # Generate new displacement map - displacement = np.zeros((h, w, 2), dtype=np.float32) - for y in range(h): - for x in range(w): - displacement[y, x, 0] = rng.uniform(-amount, amount) - displacement[y, x, 1] = rng.uniform(-amount, amount) - state["displacement_map"] = displacement - state["last_size"] = (h, w) - - displacement_map = state["displacement_map"] - - # Create coordinate grids - y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) - - # Apply displacement - new_y = y_coords + displacement_map[:, 
:, 0] - new_x = x_coords + displacement_map[:, :, 1] - - # Clip to valid range - new_y = np.clip(new_y, 0, h - 1) - new_x = np.clip(new_x, 0, w - 1) - - # Sample from displaced positions - result = np.zeros_like(frame) - for c in range(frame.shape[2] if len(frame.shape) > 2 else 1): - if len(frame.shape) > 2: - result[:, :, c] = ndimage.map_coordinates( - frame[:, :, c], - [new_y, new_x], - order=1, - mode='reflect' - ) - else: - result = ndimage.map_coordinates( - frame, - [new_y, new_x], - order=1, - mode='reflect' - ) - - # Add grain - if grain > 0: - noise = np.array([[[rng.uniform(-grain * 20, grain * 20) for _ in range(3)] - for _ in range(w)] - for _ in range(h)]) - result = np.clip(result.astype(np.float32) + noise, 0, 255).astype(np.uint8) - - return result, state diff --git a/effects/sepia.sexp b/effects/sepia.sexp deleted file mode 100644 index 47db6f5..0000000 --- a/effects/sepia.sexp +++ /dev/null @@ -1,14 +0,0 @@ -;; Sepia effect - vintage brown tone -;; Usage: python3 plan.py effects/sepia.sexp | python3 execute.py - -d . -o output.mp4 -;; No parameters - -(recipe "sepia" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect sepia :path "sexp_effects/effects/sepia.sexp") - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect sepia))) - result) diff --git a/effects/sharpen.py b/effects/sharpen.py deleted file mode 100644 index 6959d3b..0000000 --- a/effects/sharpen.py +++ /dev/null @@ -1,63 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect sharpen -@version 1.0.0 -@author artdag - -@description -Sharpening effect using unsharp mask technique. Enhances edges and detail. -Great for making footage pop on beats. - -@param amount float - @range 0 5 - @default 1.0 - Sharpening intensity. 0 = no change, 1 = normal, 2+ = aggressive. 
- -@param radius float - @range 0 10 - @default 1 - Radius of sharpening (affects edge thickness). - -@example - (effect sharpen :amount 1.5) - -@example - ;; Sharpen on beats - (effect sharpen :amount (bind bass :range [0.5 2.0])) -""" - -import numpy as np -import cv2 - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply sharpening to a video frame. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - amount: sharpening intensity (default 1.0) - - radius: edge radius (default 1) - state: Persistent state dict (unused) - - Returns: - Tuple of (processed_frame, new_state) - """ - amount = params.get("amount", 1.0) - radius = params.get("radius", 1) - - if amount <= 0: - return frame, state - - # Create blurred version - ksize = max(1, int(radius)) * 2 + 1 - blurred = cv2.GaussianBlur(frame, (ksize, ksize), 0) - - # Unsharp mask: original + amount * (original - blurred) - result = frame.astype(np.float32) + amount * (frame.astype(np.float32) - blurred.astype(np.float32)) - - return np.clip(result, 0, 255).astype(np.uint8), state diff --git a/effects/sharpen.sexp b/effects/sharpen.sexp deleted file mode 100644 index 799c62e..0000000 --- a/effects/sharpen.sexp +++ /dev/null @@ -1,20 +0,0 @@ -;; Sharpen effect - enhance edge detail -;; Usage: python3 plan.py effects/sharpen.sexp -p amount=2 | python3 execute.py - -d . 
-o output.mp4 -;; -;; Parameters: -;; amount: sharpening strength (0.5 to 5, default: 2) -;; higher = sharper edges - -(recipe "sharpen" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect sharpen :path "sexp_effects/effects/sharpen.sexp") - - ;; Default parameter (overridden by -p amount=N) - (def amount 2) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect sharpen :amount amount))) - result) diff --git a/effects/shatter.py b/effects/shatter.py deleted file mode 100644 index 8fb690d..0000000 --- a/effects/shatter.py +++ /dev/null @@ -1,233 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "scipy"] -# /// -""" -@effect shatter -@version 1.0.0 -@author artdag - -@description -Shatter effect. Explodes the image into flying pieces that move -outward from a center point. Great for beat drops and transitions. - -@param intensity float - @range 0 1 - @default 0.5 - Explosion force (0 = no effect, 1 = full explosion). - -@param num_pieces int - @range 10 200 - @default 50 - Number of shatter pieces. - -@param center_x float - @range 0 1 - @default 0.5 - Explosion center X position. - -@param center_y float - @range 0 1 - @default 0.5 - Explosion center Y position. - -@param rotation_speed float - @range 0 5 - @default 1.0 - How fast pieces rotate as they fly. - -@param gravity float - @range 0 2 - @default 0.3 - Downward pull on pieces. - -@param fade_out bool - @default true - Fade pieces as they fly away. - -@param seed int - @default 42 - Random seed for piece positions. - -@state pieces list - List of piece positions and velocities. 
- -@example - (effect shatter :intensity 0.7 :num_pieces 80) - -@example - ;; Beat-reactive explosion - (effect shatter :intensity (bind onset :range [0 1]) :gravity 0.5) -""" - -import numpy as np -from scipy import ndimage -from pathlib import Path -import sys - -# Import DeterministicRNG from same directory -_effects_dir = Path(__file__).parent -if str(_effects_dir) not in sys.path: - sys.path.insert(0, str(_effects_dir)) -from random import DeterministicRNG - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply shatter effect to a video frame. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - state: Persistent state dict - - Returns: - Tuple of (processed_frame, new_state) - """ - intensity = np.clip(params.get("intensity", 0.5), 0, 1) - num_pieces = max(10, min(int(params.get("num_pieces", 50)), 200)) - center_x = params.get("center_x", 0.5) - center_y = params.get("center_y", 0.5) - rotation_speed = params.get("rotation_speed", 1.0) - gravity = params.get("gravity", 0.3) - fade_out = params.get("fade_out", True) - seed = int(params.get("seed", 42)) - t = params.get("_time", 0) - - if state is None: - state = {} - - if intensity < 0.01: - return frame, state - - h, w = frame.shape[:2] - cx, cy = int(center_x * w), int(center_y * h) - - # Initialize pieces - if "pieces" not in state or len(state["pieces"]) != num_pieces: - state["pieces"] = _init_pieces(w, h, num_pieces, cx, cy, seed) - state["start_time"] = t - - pieces = state["pieces"] - start_time = state.get("start_time", t) - effect_t = t - start_time - - # Create output with black background - result = np.zeros_like(frame) - - time_factor = effect_t * intensity * 3.0 - - # Draw each piece - for piece in pieces: - px, py, pw, ph, vx, vy, rot = piece - - if pw <= 0 or ph <= 0: - continue - - # Calculate current position with physics - curr_x = px + vx * time_factor * w * 0.5 - curr_y = py + vy * time_factor * h * 0.5 + 
gravity * time_factor ** 2 * h * 0.2 - curr_rot = rot * rotation_speed * time_factor * 180 - - # Calculate alpha (fade out over distance) - distance = np.sqrt((curr_x - px)**2 + (curr_y - py)**2) - alpha = 1.0 - (distance / max(w, h)) if fade_out else 1.0 - alpha = max(0, min(1, alpha)) - - if alpha < 0.05: - continue - - # Extract piece from original frame - px1, py1 = max(0, int(px)), max(0, int(py)) - px2, py2 = min(w, int(px + pw)), min(h, int(py + ph)) - - if px2 <= px1 or py2 <= py1: - continue - - piece_img = frame[py1:py2, px1:px2].copy() - - # Rotate piece - if abs(curr_rot) > 1: - piece_img = ndimage.rotate(piece_img, curr_rot, reshape=False, mode='constant', cval=0) - - # Calculate destination - dest_x = int(curr_x) - dest_y = int(curr_y) - piece_h, piece_w = piece_img.shape[:2] - - # Clip to frame bounds - src_x1, src_y1 = 0, 0 - src_x2, src_y2 = piece_w, piece_h - dst_x1, dst_y1 = dest_x, dest_y - dst_x2, dst_y2 = dest_x + piece_w, dest_y + piece_h - - if dst_x1 < 0: - src_x1 = -dst_x1 - dst_x1 = 0 - if dst_y1 < 0: - src_y1 = -dst_y1 - dst_y1 = 0 - if dst_x2 > w: - src_x2 -= (dst_x2 - w) - dst_x2 = w - if dst_y2 > h: - src_y2 -= (dst_y2 - h) - dst_y2 = h - - if dst_x2 <= dst_x1 or dst_y2 <= dst_y1: - continue - if src_x2 <= src_x1 or src_y2 <= src_y1: - continue - - # Blend piece onto result - piece_region = piece_img[src_y1:src_y2, src_x1:src_x2] - if piece_region.size == 0: - continue - - result_region = result[dst_y1:dst_y2, dst_x1:dst_x2] - if result_region.shape != piece_region.shape: - continue - - result[dst_y1:dst_y2, dst_x1:dst_x2] = ( - result_region * (1 - alpha) + piece_region * alpha - ).astype(np.uint8) - - # Blend with original based on intensity - final = ((1 - intensity) * frame + intensity * result).astype(np.uint8) - - return final, state - - -def _init_pieces(w: int, h: int, num_pieces: int, cx: int, cy: int, seed: int) -> list: - """Initialize shatter pieces with random positions and velocities.""" - rng = DeterministicRNG(seed) - 
- pieces = [] - - # Create grid of pieces - cols = max(1, int(np.sqrt(num_pieces * w / h))) - rows = max(1, int(num_pieces / cols)) - piece_w = w // cols - piece_h = h // rows - - for row in range(rows): - for col in range(cols): - px = col * piece_w - py = row * piece_h - pw = piece_w + (w % cols if col == cols - 1 else 0) - ph = piece_h + (h % rows if row == rows - 1 else 0) - - # Velocity away from center - piece_cx = px + pw // 2 - piece_cy = py + ph // 2 - dx = piece_cx - cx - dy = piece_cy - cy - dist = max(1, np.sqrt(dx*dx + dy*dy)) - - vx = dx / dist + rng.uniform(-0.3, 0.3) - vy = dy / dist + rng.uniform(-0.3, 0.3) - rot = rng.uniform(-2, 2) - - pieces.append((px, py, pw, ph, vx, vy, rot)) - - return pieces diff --git a/effects/strobe.py b/effects/strobe.py deleted file mode 100644 index 3933fb7..0000000 --- a/effects/strobe.py +++ /dev/null @@ -1,90 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy"] -# /// -""" -@effect strobe -@version 1.0.0 -@author artdag - -@description -Strobe / Posterize Time effect. Locks video to a reduced frame rate, -creating a choppy, stop-motion look. Also known as frame hold. - -@param frame_rate float - @range 1 60 - @default 12 - Target frame rate in fps. Lower = choppier. - -@param sync_to_beat bool - @default false - If true, hold frames until next beat (overrides frame_rate). - -@param beat_divisor int - @range 1 8 - @default 1 - Hold for 1/N beats when sync_to_beat is true. - -@state held_frame ndarray - Currently held frame. - -@state held_until float - Time until which to hold the frame. - -@example - (effect strobe :frame_rate 8) - -@example - ;; Very choppy at 4 fps - (effect strobe :frame_rate 4) - -@example - ;; Beat-synced frame hold - (effect strobe :sync_to_beat true :beat_divisor 2) -""" - -import numpy as np - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply strobe/posterize time effect to a video frame. 
- - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - frame_rate: target fps 1-60 (default 12) - - sync_to_beat: use beat timing (default False) - - beat_divisor: beat fraction (default 1) - state: Persistent state dict - - held_frame: currently held frame - - held_until: hold expiry time - - Returns: - Tuple of (processed_frame, new_state) - """ - target_fps = max(1, min(params.get("frame_rate", 12), 60)) - sync_to_beat = params.get("sync_to_beat", False) - beat_divisor = max(1, int(params.get("beat_divisor", 1))) - - # Get current time from params (executor should provide this) - t = params.get("_time", 0) - - if state is None: - state = {} - - # Initialize state - if "held_frame" not in state: - state["held_frame"] = None - state["held_until"] = 0.0 - state["last_beat"] = -1 - - # Frame rate based hold - frame_duration = 1.0 / target_fps - - if t >= state["held_until"]: - # Time for new frame - state["held_frame"] = frame.copy() - state["held_until"] = t + frame_duration - - return state["held_frame"] if state["held_frame"] is not None else frame, state diff --git a/effects/strobe.sexp b/effects/strobe.sexp deleted file mode 100644 index 44c1c98..0000000 --- a/effects/strobe.sexp +++ /dev/null @@ -1,20 +0,0 @@ -;; Strobe effect - flashing/blinking -;; Usage: python3 plan.py effects/strobe.sexp -p frequency=4 | python3 execute.py - -d . 
-o output.mp4 -;; -;; Parameters: -;; frequency: flashes per second (1 to 15, default: 4) -;; WARNING: high values may cause discomfort - -(recipe "strobe" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect strobe :path "sexp_effects/effects/strobe.sexp") - - ;; Default parameter (overridden by -p frequency=N) - (def frequency 4) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect strobe :frequency frequency))) - result) diff --git a/effects/swirl.py b/effects/swirl.py deleted file mode 100644 index 39323b8..0000000 --- a/effects/swirl.py +++ /dev/null @@ -1,121 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect swirl -@version 1.0.0 -@author artdag - -@description -Spiral/vortex distortion that twists the image around a center point. -Creates whirlpool-like effects. Great for psychedelic/hypnotic visuals. - -@param strength float - @range -10 10 - @default 1.0 - Swirl strength in radians. Positive = counter-clockwise, negative = clockwise. - -@param radius float - @range 0.1 2 - @default 0.5 - Effect radius as fraction of image size. Larger = wider swirl. - -@param center_x float - @range 0 1 - @default 0.5 - Horizontal center of swirl (0 = left, 1 = right). - -@param center_y float - @range 0 1 - @default 0.5 - Vertical center of swirl (0 = top, 1 = bottom). 
- -@param falloff string - @enum linear quadratic gaussian - @default quadratic - How swirl strength decreases from center: - - linear: constant decrease - - quadratic: sharper center, softer edges - - gaussian: smooth bell curve - -@example - (effect swirl :strength 2) - -@example - ;; Reactive swirl - (effect swirl :strength (bind energy :range [0 5])) -""" - -import numpy as np -import cv2 - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply swirl distortion to a video frame. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - strength: swirl amount in radians (default 1.0) - - radius: effect radius as fraction (default 0.5) - - center_x: horizontal center 0-1 (default 0.5) - - center_y: vertical center 0-1 (default 0.5) - - falloff: linear/quadratic/gaussian (default quadratic) - state: Persistent state dict (unused) - - Returns: - Tuple of (processed_frame, new_state) - """ - strength = params.get("strength", 1.0) - radius_frac = params.get("radius", 0.5) - center_x = params.get("center_x", 0.5) - center_y = params.get("center_y", 0.5) - falloff = params.get("falloff", "quadratic") - - if strength == 0: - return frame, state - - h, w = frame.shape[:2] - - # Calculate center and radius in pixels - cx = w * center_x - cy = h * center_y - radius = max(w, h) * radius_frac - - # Create coordinate grids - y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float64) - - # Calculate distance and angle from center - dx = x_coords - cx - dy = y_coords - cy - dist = np.sqrt(dx**2 + dy**2) - angle = np.arctan2(dy, dx) - - # Normalized distance for falloff - norm_dist = dist / radius - - # Calculate falloff factor - if falloff == "linear": - factor = np.maximum(0, 1 - norm_dist) - elif falloff == "gaussian": - factor = np.exp(-norm_dist**2 * 2) - else: # quadratic - factor = np.maximum(0, 1 - norm_dist**2) - - # Apply swirl rotation - new_angle = angle + strength * factor - - # Calculate 
new coordinates - new_x = (cx + dist * np.cos(new_angle)).astype(np.float32) - new_y = (cy + dist * np.sin(new_angle)).astype(np.float32) - - # Remap - result = cv2.remap( - frame, new_x, new_y, - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_REFLECT - ) - - return result, state diff --git a/effects/swirl.sexp b/effects/swirl.sexp deleted file mode 100644 index 2155e05..0000000 --- a/effects/swirl.sexp +++ /dev/null @@ -1,20 +0,0 @@ -;; Swirl effect - spiral distortion from center -;; Usage: python3 plan.py effects/swirl.sexp -p strength=3 | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; strength: swirl intensity (-10 to 10, default: 3) -;; positive = clockwise, negative = counter-clockwise - -(recipe "swirl" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect swirl :path "sexp_effects/effects/swirl.sexp") - - ;; Default parameter (overridden by -p strength=N) - (def strength 3) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect swirl :strength strength))) - result) diff --git a/effects/threshold.py b/effects/threshold.py deleted file mode 100644 index 9f0ece6..0000000 --- a/effects/threshold.py +++ /dev/null @@ -1,85 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy"] -# /// -""" -@effect threshold -@version 1.0.0 -@author artdag - -@description -Threshold effect. Converts to high-contrast black and white. -Creates stark, graphic look by converting grayscale to pure -black/white based on a threshold value. - -@param level int - @range 0 255 - @default 128 - Threshold level. Pixels above = white, below = black. - -@param invert bool - @default false - Swap black and white. 
- -@param color_mode string - @enum bw color - @default bw - Output mode: - - bw: pure black and white - - color: keep original colors where above threshold - -@example - (effect threshold :level 100) - -@example - ;; Beat-reactive threshold - (effect threshold :level (bind bass :range [80 180]) :invert true) -""" - -import numpy as np - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply threshold effect to a video frame. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - level: threshold 0-255 (default 128) - - invert: swap black/white (default False) - - color_mode: bw or color (default bw) - state: Persistent state dict - - Returns: - Tuple of (processed_frame, new_state) - """ - level = int(np.clip(params.get("level", 128), 0, 255)) - invert = params.get("invert", False) - color_mode = params.get("color_mode", "bw") - - if state is None: - state = {} - - # Convert to grayscale for threshold comparison - if len(frame.shape) == 3: - gray = np.mean(frame, axis=2) - else: - gray = frame - - # Apply threshold - mask = gray > level - - if invert: - mask = ~mask - - if color_mode == "bw": - # Pure black and white - result = np.where(mask[:, :, np.newaxis], 255, 0).astype(np.uint8) - if len(frame.shape) == 3: - result = np.repeat(result, frame.shape[2], axis=2) - else: - # Keep original colors where above threshold - result = np.where(mask[:, :, np.newaxis], frame, 0).astype(np.uint8) - - return result, state diff --git a/effects/threshold.sexp b/effects/threshold.sexp deleted file mode 100644 index 0960a32..0000000 --- a/effects/threshold.sexp +++ /dev/null @@ -1,20 +0,0 @@ -;; Threshold effect - convert to pure black and white -;; Usage: python3 plan.py effects/threshold.sexp -p level=128 | python3 execute.py - -d . 
-o output.mp4 -;; -;; Parameters: -;; level: threshold cutoff (0 to 255, default: 128) -;; pixels below = black, above = white - -(recipe "threshold" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect threshold :path "sexp_effects/effects/threshold.sexp") - - ;; Default parameter (overridden by -p level=N) - (def level 128) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect threshold :level level))) - result) diff --git a/effects/tile_grid.py b/effects/tile_grid.py deleted file mode 100644 index 6a0e63e..0000000 --- a/effects/tile_grid.py +++ /dev/null @@ -1,134 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect tile_grid -@version 1.0.0 -@author artdag - -@description -Tile Grid effect. Repeats the frame in a grid pattern creating -a mosaic by tiling scaled-down copies. Great for psychedelic visuals. - -@param rows int - @range 1 10 - @default 2 - Number of rows in grid. - -@param cols int - @range 1 10 - @default 2 - Number of columns in grid. - -@param gap int - @range 0 50 - @default 0 - Gap between tiles in pixels. - -@param gap_color list - @default [0, 0, 0] - RGB color for gaps. - -@param rotation_per_tile float - @range -180 180 - @default 0 - Rotation increment per tile in degrees. - -@param alternate_flip bool - @default false - Flip alternating tiles horizontally. - -@example - (effect tile_grid :rows 3 :cols 3) - -@example - ;; Rotating tiles - (effect tile_grid :rows 2 :cols 2 :rotation_per_tile 90) - -@example - ;; Beat-reactive grid - (effect tile_grid :rows (bind bass :range [2 6]) :cols (bind bass :range [2 6])) -""" - -import numpy as np -import cv2 - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply tile grid effect to a video frame. 
- - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - rows: number of rows (default 2) - - cols: number of columns (default 2) - - gap: gap between tiles (default 0) - - gap_color: RGB tuple (default [0,0,0]) - - rotation_per_tile: rotation increment (default 0) - - alternate_flip: flip alternating tiles (default False) - state: Persistent state dict - - Returns: - Tuple of (processed_frame, new_state) - """ - rows = max(1, min(int(params.get("rows", 2)), 10)) - cols = max(1, min(int(params.get("cols", 2)), 10)) - gap = max(0, int(params.get("gap", 0))) - gap_color = params.get("gap_color", [0, 0, 0]) - rotation_per_tile = params.get("rotation_per_tile", 0) - alternate_flip = params.get("alternate_flip", False) - - if state is None: - state = {} - - h, w = frame.shape[:2] - - # Calculate tile size - tile_w = (w - gap * (cols - 1)) // cols - tile_h = (h - gap * (rows - 1)) // rows - - if tile_w <= 0 or tile_h <= 0: - return frame, state - - # Scale down the original frame to tile size - tile = cv2.resize(frame, (tile_w, tile_h), interpolation=cv2.INTER_LINEAR) - - # Create result with gap color - if isinstance(gap_color, (list, tuple)) and len(gap_color) >= 3: - result = np.full((h, w, 3), gap_color[:3], dtype=np.uint8) - else: - result = np.zeros((h, w, 3), dtype=np.uint8) - - # Place tiles - tile_idx = 0 - for row in range(rows): - for col in range(cols): - y = row * (tile_h + gap) - x = col * (tile_w + gap) - - current_tile = tile.copy() - - # Apply rotation if specified - if rotation_per_tile != 0: - angle = rotation_per_tile * tile_idx - center = (tile_w // 2, tile_h // 2) - M = cv2.getRotationMatrix2D(center, angle, 1.0) - current_tile = cv2.warpAffine(current_tile, M, (tile_w, tile_h), - borderMode=cv2.BORDER_REFLECT) - - # Apply flip for alternating tiles - if alternate_flip and (row + col) % 2 == 1: - current_tile = cv2.flip(current_tile, 1) - - # Place tile - y_end = min(y + tile_h, h) - x_end = min(x + tile_w, 
w) - tile_crop_h = y_end - y - tile_crop_w = x_end - x - result[y:y_end, x:x_end] = current_tile[:tile_crop_h, :tile_crop_w] - - tile_idx += 1 - - return result, state diff --git a/effects/tile_grid.sexp b/effects/tile_grid.sexp deleted file mode 100644 index f368263..0000000 --- a/effects/tile_grid.sexp +++ /dev/null @@ -1,21 +0,0 @@ -;; Tile grid effect - repeat image in grid -;; Usage: python3 plan.py effects/tile_grid.sexp -p rows=2 -p cols=2 | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; rows: number of rows (1 to 8, default: 2) -;; cols: number of columns (1 to 8, default: 2) - -(recipe "tile_grid" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect tile_grid :path "sexp_effects/effects/tile_grid.sexp") - - ;; Default parameters (overridden by -p rows=N -p cols=N) - (def rows 2) - (def cols 2) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect tile_grid :rows rows :cols cols))) - result) diff --git a/effects/trails.py b/effects/trails.py deleted file mode 100644 index 472d071..0000000 --- a/effects/trails.py +++ /dev/null @@ -1,95 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy"] -# /// -""" -@effect trails -@version 1.0.0 -@author artdag - -@description -Trails effect. Creates persistent motion trails by blending current -frame with previous frames. Like echo but with configurable blend. - -@param persistence float - @range 0 0.99 - @default 0.8 - How much of previous frame remains (0 = none, 0.99 = very long trails). - -@param blend_mode string - @enum blend add screen lighten darken - @default blend - How to combine frames. - -@param fade_color list - @default [0, 0, 0] - Color to fade toward. - -@state trail_buffer ndarray - Accumulated trail buffer. 
- -@example - (effect trails :persistence 0.85) - -@example - ;; Long bright trails - (effect trails :persistence 0.9 :blend_mode "add") -""" - -import numpy as np - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply trails effect to a video frame. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - persistence: trail length 0-0.99 (default 0.8) - - blend_mode: how to combine (default blend) - - fade_color: color to fade to (default black) - state: Persistent state dict - - Returns: - Tuple of (processed_frame, new_state) - """ - persistence = max(0, min(params.get("persistence", 0.8), 0.99)) - blend_mode = params.get("blend_mode", "blend") - fade_color = params.get("fade_color", [0, 0, 0]) - - if state is None: - state = {} - - # Initialize trail buffer - if "trail_buffer" not in state or state["trail_buffer"].shape != frame.shape: - state["trail_buffer"] = frame.astype(np.float32) - - buffer = state["trail_buffer"] - current = frame.astype(np.float32) - - # Get fade color - if isinstance(fade_color, (list, tuple)) and len(fade_color) >= 3: - fade = np.array(fade_color[:3], dtype=np.float32) - else: - fade = np.array([0, 0, 0], dtype=np.float32) - - # Blend buffer toward fade color - faded_buffer = buffer * persistence + fade * (1 - persistence) - - # Combine with current frame based on blend mode - if blend_mode == "add": - result = faded_buffer + current - elif blend_mode == "screen": - result = 255 - ((255 - faded_buffer) * (255 - current) / 255) - elif blend_mode == "lighten": - result = np.maximum(faded_buffer, current) - elif blend_mode == "darken": - result = np.minimum(faded_buffer, current) - else: # blend - result = faded_buffer + current * (1 - persistence) - - # Update buffer - state["trail_buffer"] = np.clip(result, 0, 255) - - return np.clip(result, 0, 255).astype(np.uint8), state diff --git a/effects/trails.sexp b/effects/trails.sexp deleted file mode 100644 index 
eaf1456..0000000 --- a/effects/trails.sexp +++ /dev/null @@ -1,20 +0,0 @@ -;; Trails effect - motion blur/persistence -;; Usage: python3 plan.py effects/trails.sexp -p persistence=0.7 | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; persistence: trail strength (0.3 to 0.95, default: 0.7) -;; higher = longer trails - -(recipe "trails" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect trails :path "sexp_effects/effects/trails.sexp") - - ;; Default parameter (overridden by -p persistence=N) - (def persistence 0.7) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect trails :persistence persistence))) - result) diff --git a/effects/vhs.py b/effects/vhs.py deleted file mode 100644 index a91ed21..0000000 --- a/effects/vhs.py +++ /dev/null @@ -1,163 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect vhs -@version 1.0.0 -@author artdag - -@description -VHS / Analog Video effect. Complete VHS tape simulation combining -tracking errors, color bleeding, noise, and scan line distortion. - -@param tracking_error float - @range 0 50 - @default 5 - Horizontal displacement amount. Sync to onset for glitches. - -@param color_bleed int - @range 0 20 - @default 3 - Horizontal color smearing (typical VHS artifact). - -@param noise_intensity float - @range 0 1 - @default 0.2 - Static noise amount. - -@param chroma_shift int - @range 0 15 - @default 2 - Color channel offset (VHS color alignment issues). - -@param head_switching bool - @default true - Bottom-of-frame distortion. - -@param tape_crease_prob float - @range 0 0.5 - @default 0.05 - Probability of random tape crease distortion per frame. - -@param blur_amount float - @range 0 5 - @default 1 - VHS softness blur. - -@param seed int - @default 42 - Random seed for deterministic artifacts. 
- -@state rng DeterministicRNG - Random number generator for artifacts. - -@example - (effect vhs :tracking_error 10 :color_bleed 5) - -@example - ;; Reactive VHS glitch - (effect vhs :tracking_error (bind onset :range [0 30]) :tape_crease_prob 0.1) -""" - -import numpy as np -import cv2 -from pathlib import Path -import sys - -# Import DeterministicRNG from same directory -_effects_dir = Path(__file__).parent -if str(_effects_dir) not in sys.path: - sys.path.insert(0, str(_effects_dir)) -from random import DeterministicRNG - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply VHS effect to a video frame. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - state: Persistent state dict - - Returns: - Tuple of (processed_frame, new_state) - """ - tracking_error = params.get("tracking_error", 5) - color_bleed = int(params.get("color_bleed", 3)) - noise_intensity = params.get("noise_intensity", 0.2) - chroma_shift = int(params.get("chroma_shift", 2)) - head_switching = params.get("head_switching", True) - tape_crease_prob = params.get("tape_crease_prob", 0.05) - blur_amount = params.get("blur_amount", 1) - seed = int(params.get("seed", 42)) - - if state is None: - state = {} - - # Initialize RNG - if "rng" not in state: - state["rng"] = DeterministicRNG(seed) - rng = state["rng"] - - h, w = frame.shape[:2] - result = frame.astype(np.float32).copy() - - # Apply slight blur (VHS softness) - if blur_amount > 0: - ksize = int(blur_amount * 2) * 2 + 1 - result = cv2.GaussianBlur(result, (ksize, 1), 0) - - # Apply color bleed (horizontal color smearing) - if color_bleed > 0: - ksize = color_bleed * 2 + 1 - result[:, :, 0] = cv2.blur(result[:, :, 0], (ksize, 1)) - result[:, :, 2] = cv2.blur(result[:, :, 2], (ksize, 1)) - - # Apply chroma shift (color channel misalignment) - if chroma_shift > 0: - shifted = np.zeros_like(result) - M_r = np.float32([[1, 0, chroma_shift], [0, 1, 0]]) - M_b = 
np.float32([[1, 0, -chroma_shift], [0, 1, 0]]) - shifted[:, :, 0] = cv2.warpAffine(result[:, :, 0], M_r, (w, h), borderMode=cv2.BORDER_REPLICATE) - shifted[:, :, 1] = result[:, :, 1] - shifted[:, :, 2] = cv2.warpAffine(result[:, :, 2], M_b, (w, h), borderMode=cv2.BORDER_REPLICATE) - result = shifted - - # Apply tracking error (horizontal line displacement) - if tracking_error > 0: - for y in range(h): - sine_shift = np.sin(y * 0.05) - rand_shift = rng.uniform(-0.3, 0.3) - displacement = int(tracking_error * (sine_shift + rand_shift)) - if displacement != 0: - result[y] = np.roll(result[y], displacement, axis=0) - - # Apply tape crease (random distortion bands) - if tape_crease_prob > 0 and rng.uniform() < tape_crease_prob: - band_start = rng.randint(0, max(1, h - 20)) - band_height = rng.randint(5, 20) - for y in range(band_start, min(band_start + band_height, h)): - displacement = rng.randint(-50, 50) - result[y] = np.roll(result[y], displacement, axis=0) - result[y] = result[y] * rng.uniform(0.5, 1.5) - - # Apply head switching noise (bottom of frame distortion) - if head_switching: - switch_height = rng.randint(5, 15) - for y in range(h - switch_height, h): - factor = (y - (h - switch_height)) / switch_height - displacement = int(factor * 30 * rng.uniform(0.5, 1.5)) - result[y] = np.roll(result[y], displacement, axis=0) - noise = np.array([[rng.gaussian(0, 20 * factor) for _ in range(3)] for _ in range(w)]) - result[y] = result[y] + noise - - # Apply static noise - if noise_intensity > 0: - noise = np.array([[[rng.gaussian(0, noise_intensity * 30) for _ in range(3)] - for _ in range(w)] - for _ in range(h)]) - result = result + noise - - return np.clip(result, 0, 255).astype(np.uint8), state diff --git a/effects/vignette.py b/effects/vignette.py deleted file mode 100644 index dd7cdfa..0000000 --- a/effects/vignette.py +++ /dev/null @@ -1,116 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy"] -# /// -""" -@effect vignette -@version 
1.0.0 -@author artdag - -@description -Vignette effect. Darkens the corners of the frame, drawing focus -to the center. Classic cinematic look. - -@param strength float - @range 0 1 - @default 0.5 - How dark the corners get (0 = none, 1 = black corners). - -@param radius float - @range 0.5 2 - @default 1.0 - Size of the bright center area. Smaller = more vignette. - -@param softness float - @range 0.1 1 - @default 0.5 - How gradual the falloff is. - -@param center_x float - @range 0 1 - @default 0.5 - Center X position. - -@param center_y float - @range 0 1 - @default 0.5 - Center Y position. - -@param color list - @default [0, 0, 0] - Vignette color (default black). - -@example - (effect vignette :strength 0.6) - -@example - ;; Off-center vignette - (effect vignette :center_x 0.3 :center_y 0.3 :strength 0.7) -""" - -import numpy as np - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply vignette effect to a video frame. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - strength: darkness 0-1 (default 0.5) - - radius: center size 0.5-2 (default 1.0) - - softness: falloff gradient (default 0.5) - - center_x: center X 0-1 (default 0.5) - - center_y: center Y 0-1 (default 0.5) - - color: RGB tuple (default [0,0,0]) - state: Persistent state dict - - Returns: - Tuple of (processed_frame, new_state) - """ - strength = np.clip(params.get("strength", 0.5), 0, 1) - radius = max(0.5, min(params.get("radius", 1.0), 2)) - softness = max(0.1, min(params.get("softness", 0.5), 1)) - center_x = params.get("center_x", 0.5) - center_y = params.get("center_y", 0.5) - color = params.get("color", [0, 0, 0]) - - if state is None: - state = {} - - if strength <= 0: - return frame, state - - h, w = frame.shape[:2] - - # Calculate center in pixels - cx = w * center_x - cy = h * center_y - - # Create distance map from center - y_coords, x_coords = np.ogrid[:h, :w] - dist = np.sqrt((x_coords - cx)**2 + 
(y_coords - cy)**2) - - # Normalize distance - max_dist = np.sqrt(cx**2 + cy**2) * radius - - # Create vignette mask - normalized_dist = dist / max_dist - - # Apply softness to the falloff - vignette_mask = 1 - np.clip((normalized_dist - (1 - softness)) / softness, 0, 1) * strength - - # Apply vignette - if isinstance(color, (list, tuple)) and len(color) >= 3: - vignette_color = np.array(color[:3], dtype=np.float32) - else: - vignette_color = np.array([0, 0, 0], dtype=np.float32) - - result = frame.astype(np.float32) - - # Blend toward vignette color based on mask - for c in range(3): - result[:, :, c] = result[:, :, c] * vignette_mask + vignette_color[c] * (1 - vignette_mask) - - return np.clip(result, 0, 255).astype(np.uint8), state diff --git a/effects/vignette.sexp b/effects/vignette.sexp deleted file mode 100644 index 123830f..0000000 --- a/effects/vignette.sexp +++ /dev/null @@ -1,19 +0,0 @@ -;; Vignette effect - darken edges -;; Usage: python3 plan.py effects/vignette.sexp -p strength=0.6 | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; strength: edge darkening amount (0.1 to 1.0, default: 0.6) - -(recipe "vignette" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect vignette :path "sexp_effects/effects/vignette.sexp") - - ;; Default parameter (overridden by -p strength=N) - (def strength 0.6) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect vignette :strength strength))) - result) diff --git a/effects/wave.py b/effects/wave.py deleted file mode 100644 index dbd27f4..0000000 --- a/effects/wave.py +++ /dev/null @@ -1,108 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect wave -@version 1.0.0 -@author artdag - -@description -Sine wave displacement distortion. Creates wavy, liquid-like warping. 
-Great for psychedelic and underwater effects synced to music. - -@param amplitude float - @range 0 100 - @default 10 - Wave height in pixels. Bind to bass for punchy distortion. - -@param wavelength float - @range 10 500 - @default 50 - Distance between wave peaks in pixels. - -@param speed float - @range 0 10 - @default 1 - Wave animation speed. Uses state to track phase over time. - -@param direction string - @enum horizontal vertical both - @default horizontal - Wave direction: - - horizontal: waves move left-right - - vertical: waves move up-down - - both: waves in both directions - -@state phase float - Tracks wave animation phase across frames. - -@example - (effect wave :amplitude 20 :wavelength 100) - -@example - ;; Bass-reactive waves - (effect wave :amplitude (bind bass :range [0 50] :transform sqrt)) -""" - -import numpy as np -import cv2 - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Apply wave distortion to a video frame. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - amplitude: wave height in pixels (default 10) - - wavelength: distance between peaks (default 50) - - speed: animation speed (default 1) - - direction: horizontal/vertical/both (default horizontal) - state: Persistent state dict - - phase: current wave phase - - Returns: - Tuple of (processed_frame, new_state) - """ - amplitude = params.get("amplitude", 10) - wavelength = params.get("wavelength", 50) - speed = params.get("speed", 1) - direction = params.get("direction", "horizontal") - - if state is None: - state = {} - - if amplitude == 0: - return frame, state - - h, w = frame.shape[:2] - - # Update phase for animation - phase = state.get("phase", 0) - phase += speed * 0.1 - state["phase"] = phase - - # Create coordinate maps - map_x = np.tile(np.arange(w, dtype=np.float32), (h, 1)) - map_y = np.tile(np.arange(h, dtype=np.float32).reshape(-1, 1), (1, w)) - - if direction in ("horizontal", 
"both"): - # Horizontal waves: displace X based on Y - wave = np.sin(2 * np.pi * map_y / wavelength + phase) * amplitude - map_x = map_x + wave - - if direction in ("vertical", "both"): - # Vertical waves: displace Y based on X - wave = np.sin(2 * np.pi * map_x / wavelength + phase) * amplitude - map_y = map_y + wave - - # Apply distortion - result = cv2.remap( - frame, map_x, map_y, - cv2.INTER_LINEAR, - borderMode=cv2.BORDER_REFLECT - ) - - return result, state diff --git a/effects/wave.sexp b/effects/wave.sexp deleted file mode 100644 index 90a79d9..0000000 --- a/effects/wave.sexp +++ /dev/null @@ -1,21 +0,0 @@ -;; Wave effect - sinusoidal distortion -;; Usage: python3 plan.py effects/wave.sexp -p amplitude=30 -p wavelength=60 | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; amplitude: wave height in pixels (5 to 100, default: 30) -;; wavelength: wave period in pixels (20 to 200, default: 60) - -(recipe "wave" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect wave :path "sexp_effects/effects/wave.sexp") - - ;; Default parameters (overridden by -p amplitude=N -p wavelength=N) - (def amplitude 30) - (def wavelength 60) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect wave :amplitude amplitude :wavelength wavelength))) - result) diff --git a/effects/zoom.py b/effects/zoom.py deleted file mode 100644 index cc0d50c..0000000 --- a/effects/zoom.py +++ /dev/null @@ -1,91 +0,0 @@ -# /// script -# requires-python = ">=3.10" -# dependencies = ["numpy", "opencv-python"] -# /// -""" -@effect zoom -@version 1.0.0 -@author artdag - -@description -Zooms into the center of the frame. Values > 1 zoom in (magnify), -values < 1 zoom out (shrink with black borders). - -@param factor float - @range 0.1 5 - @default 1.0 - Zoom factor. 1 = unchanged, 2 = 2x magnification, 0.5 = half size. 
- -@param center_x float - @range 0 1 - @default 0.5 - Horizontal center of zoom (0 = left, 1 = right). - -@param center_y float - @range 0 1 - @default 0.5 - Vertical center of zoom (0 = top, 1 = bottom). - -@example - (effect zoom :factor 1.5) - -@example - ;; Pulse zoom on bass - (effect zoom :factor (bind bass :range [1.0 1.5] :transform sqrt)) - -@example - ;; Zoom to corner - (effect zoom :factor 2 :center_x 0 :center_y 0) -""" - -import numpy as np -import cv2 - - -def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: - """ - Zoom a video frame. - - Args: - frame: Input frame as numpy array (H, W, 3) RGB uint8 - params: Effect parameters - - factor: zoom multiplier (default 1.0) - - center_x: horizontal center 0-1 (default 0.5) - - center_y: vertical center 0-1 (default 0.5) - state: Persistent state dict (unused) - - Returns: - Tuple of (processed_frame, new_state) - """ - factor = params.get("factor", 1.0) - center_x = params.get("center_x", 0.5) - center_y = params.get("center_y", 0.5) - - if factor is None or factor <= 0.01: - factor = 1.0 - - if factor == 1.0: - return frame, state - - h, w = frame.shape[:2] - - # Calculate crop region for zoom in - new_w = int(w / factor) - new_h = int(h / factor) - - if new_w <= 0 or new_h <= 0: - return frame, state - - # Calculate crop offset based on center - x_start = int((w - new_w) * center_x) - y_start = int((h - new_h) * center_y) - - # Clamp to valid range - x_start = max(0, min(x_start, w - new_w)) - y_start = max(0, min(y_start, h - new_h)) - - # Crop and resize back to original dimensions - cropped = frame[y_start:y_start + new_h, x_start:x_start + new_w] - result = cv2.resize(cropped, (w, h), interpolation=cv2.INTER_LINEAR) - - return result, state diff --git a/effects/zoom.sexp b/effects/zoom.sexp deleted file mode 100644 index 8bfdffd..0000000 --- a/effects/zoom.sexp +++ /dev/null @@ -1,20 +0,0 @@ -;; Zoom effect - scale from center -;; Usage: python3 plan.py effects/zoom.sexp -p 
factor=1.2 | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters: -;; factor: zoom multiplier (0.5 to 3.0, default: 1.2) -;; <1 = zoom out, >1 = zoom in - -(recipe "zoom" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - (effect zoom :path "sexp_effects/effects/zoom.sexp") - - ;; Default parameter (overridden by -p factor=N) - (def factor 1.2) - - (def video (source :path "monday.webm")) - (def clip (-> video (segment :start 0 :duration 10))) - (def result (-> clip (effect zoom :factor factor))) - result) diff --git a/libs/all-effects.sexp b/libs/all-effects.sexp deleted file mode 100644 index 54364f2..0000000 --- a/libs/all-effects.sexp +++ /dev/null @@ -1,63 +0,0 @@ -;; All 42 Sexp Effects -;; Include with: (include :path "libs/all-effects.sexp") -;; Or from cache: (include :cid "bafy...") - -;; Color effects -(effect invert :path "sexp_effects/effects/invert.sexp") -(effect grayscale :path "sexp_effects/effects/grayscale.sexp") -(effect sepia :path "sexp_effects/effects/sepia.sexp") -(effect brightness :path "sexp_effects/effects/brightness.sexp") -(effect contrast :path "sexp_effects/effects/contrast.sexp") -(effect saturation :path "sexp_effects/effects/saturation.sexp") -(effect hue_shift :path "sexp_effects/effects/hue_shift.sexp") -(effect color_cycle :path "sexp_effects/effects/color_cycle.sexp") -(effect threshold :path "sexp_effects/effects/threshold.sexp") -(effect posterize :path "sexp_effects/effects/posterize.sexp") - -;; Blur/sharpen -(effect blur :path "sexp_effects/effects/blur.sexp") -(effect sharpen :path "sexp_effects/effects/sharpen.sexp") -(effect bloom :path "sexp_effects/effects/bloom.sexp") -(effect color-adjust :path "sexp_effects/effects/color-adjust.sexp") - -;; Distortion -(effect swirl :path "sexp_effects/effects/swirl.sexp") -(effect fisheye :path "sexp_effects/effects/fisheye.sexp") -(effect wave :path "sexp_effects/effects/wave.sexp") -(effect ripple :path 
"sexp_effects/effects/ripple.sexp") -(effect kaleidoscope :path "sexp_effects/effects/kaleidoscope.sexp") -(effect zoom :path "sexp_effects/effects/zoom.sexp") -(effect rotate :path "sexp_effects/effects/rotate.sexp") -(effect mirror :path "sexp_effects/effects/mirror.sexp") - -;; Stylization -(effect pixelate :path "sexp_effects/effects/pixelate.sexp") -(effect ascii_art :path "sexp_effects/effects/ascii_art.sexp") -(effect ascii_zones :path "sexp_effects/effects/ascii_zones.sexp") -(effect edge_detect :path "sexp_effects/effects/edge_detect.sexp") -(effect emboss :path "sexp_effects/effects/emboss.sexp") -(effect outline :path "sexp_effects/effects/outline.sexp") -(effect neon_glow :path "sexp_effects/effects/neon_glow.sexp") - -;; Retro/film -(effect crt :path "sexp_effects/effects/crt.sexp") -(effect scanlines :path "sexp_effects/effects/scanlines.sexp") -(effect film_grain :path "sexp_effects/effects/film_grain.sexp") -(effect vignette :path "sexp_effects/effects/vignette.sexp") -(effect noise :path "sexp_effects/effects/noise.sexp") - -;; Chromatic -(effect rgb_split :path "sexp_effects/effects/rgb_split.sexp") - -;; Temporal -(effect echo :path "sexp_effects/effects/echo.sexp") -(effect trails :path "sexp_effects/effects/trails.sexp") -(effect strobe :path "sexp_effects/effects/strobe.sexp") - -;; Geometric -(effect flip :path "sexp_effects/effects/flip.sexp") -(effect tile_grid :path "sexp_effects/effects/tile_grid.sexp") - -;; Glitch -(effect pixelsort :path "sexp_effects/effects/pixelsort.sexp") -(effect datamosh :path "sexp_effects/effects/datamosh.sexp") diff --git a/libs/plan b/libs/plan deleted file mode 100644 index 07535ef..0000000 --- a/libs/plan +++ /dev/null @@ -1,68 +0,0 @@ -Exactly. You're describing a DAG of pipelines that can branch and merge: - - audio-a ─→ analyze ─→ plan-a ─┐ - ├─→ combine ─→ final - audio-b ─→ analyze ─→ plan-b ─┘ - - videos ─→ analyze ─────────────┴─→ (shared by both plans) - - Each node is independently cacheable. 
Parallel branches run in tandem. - - A clean syntax might be: - - (recipe "multi-track-video" - :encoding (...) - - ;; Sources (stage 0 - always available) - (def audio-a (source "track1.mp3")) - (def audio-b (source "track2.mp3")) - (def videos (source-glob "videos/*.mp4")) - - ;; Analysis stages (run in parallel, cached by input hash) - (stage :analyze-a - (def beats-a (-> audio-a (analyze beats)))) - - (stage :analyze-b - (def beats-b (-> audio-b (analyze beats)))) - - (stage :analyze-videos - (def video-infos (-> videos (analyze-each video-info)))) - - ;; Planning stages (depend on analysis, explicit deps) - (stage :plan-a :requires [:analyze-a :analyze-videos] - (def segments-a (make-segments :beats beats-a :video-infos video-infos))) - - (stage :plan-b :requires [:analyze-b :analyze-videos] - (def segments-b (make-segments :beats beats-b :video-infos video-infos))) - - ;; Render stages (can parallelize) - (stage :render-a :requires [:plan-a] - (def rendered-a (-> segments-a (sequence)))) - - (stage :render-b :requires [:plan-b] - (def rendered-b (-> segments-b (sequence)))) - - ;; Final combine - (stage :output :requires [:render-a :render-b] - (-> (list rendered-a rendered-b) - (concat) - (crossfade :duration 2) - (mux audio-a audio-b)))) - - What this gives you: - - 1. Explicit data availability - :requires declares what's available - 2. Parallel execution - :analyze-a and :analyze-b run simultaneously - 3. Granular caching - each stage output cached by its inputs' hashes - 4. Flexible composition - add more tracks, branches, merge points as needed - 5. Clear errors - referencing beats-a before :analyze-a is a compile error - - Changes needed to sexp system: - - 1. stage form with :requires dependency declaration - 2. Stage scheduler that builds execution DAG - 3. Cache layer keyed by stage + input hashes - 4. Dict iteration (keys, for-each) for generic constructs - - Want to prototype this direction? 
- \ No newline at end of file diff --git a/libs/standard-analyzers.sexp b/libs/standard-analyzers.sexp deleted file mode 100644 index 1544a86..0000000 --- a/libs/standard-analyzers.sexp +++ /dev/null @@ -1,11 +0,0 @@ -;; Standard Analyzers (Audio + Video) -;; Include with: (include :path "libs/standard-analyzers.sexp") -;; Or from cache: (include :cid "bafy...") - -;; Audio analyzers -(analyzer beats :path "../artdag-analyzers/beats/analyzer.py") -(analyzer bass :path "../artdag-analyzers/bass/analyzer.py") -(analyzer energy :path "../artdag-analyzers/energy/analyzer.py") - -;; Video analyzers -(analyzer video-info :path "../artdag-analyzers/video-info/analyzer.py") diff --git a/libs/standard-constructs.sexp b/libs/standard-constructs.sexp deleted file mode 100644 index ed492c7..0000000 --- a/libs/standard-constructs.sexp +++ /dev/null @@ -1,6 +0,0 @@ -;; Standard Constructs -;; Include with: (include :path "libs/standard-constructs.sexp") -;; Or from cache: (include :cid "bafy...") - -(construct slice-every-n :path "constructs/slice-every-n.sexp") -(construct cycle-effects-preset :path "constructs/cycle-effects-preset.sexp") diff --git a/recipe-1.sexp b/recipe-1.sexp deleted file mode 100644 index b122815..0000000 --- a/recipe-1.sexp +++ /dev/null @@ -1,16 +0,0 @@ -; test recipe for language development -(recipe "test" - :version "1.0" - :encoding (:codec "libx264" :crf 22 :preset "medium" :audio-codec "aac") - - (effect invert - :path "../artdag-effects/invert/effect.py") - - ; User-supplied input video - (def input-video - (-> (source :path "new.webm" :description "Input video") - (segment :start 0 :duration 3))) - - (def branch-01 (-> input-video (fx invert :intensity 1.0))) - - (sequence input-video branch-01)) diff --git a/recipe-all-effects-modular.sexp b/recipe-all-effects-modular.sexp deleted file mode 100644 index 1fe9574..0000000 --- a/recipe-all-effects-modular.sexp +++ /dev/null @@ -1,55 +0,0 @@ -;; All Effects Showcase (Modular Version) -;; Uses 
data-driven effect presets for easy customization -(recipe "all-effects-showcase-modular" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - ;; Standard libraries - analyzers, effects, constructs - (include :path "libs/standard-analyzers.sexp") - (include :path "libs/all-effects.sexp") - (include :path "libs/standard-constructs.sexp") - - ;; Effect preset - SWAP THIS to change the entire effect palette - (include all-42-preset :path "effect-presets/all-42.sexp") - - ;; Sources with auto-detected info - (def video-a (source :path "monday.webm")) - (def video-b (source :path "new.webm")) - (def video-c (source :path "ecstacy.mp4")) - - ;; Video info analysis (duration, resolution, fps, etc.) - (def video-a-info (-> video-a (analyze video-info))) - (def video-b-info (-> video-b (analyze video-info))) - (def video-c-info (-> video-c (analyze video-info))) - - (def videos (list video-a video-b video-c)) - (def video-infos (list video-a-info video-b-info video-c-info)) - - ;; Audio - (def audio (source :path "dizzy.mp3")) - - ;; Analysis - (def beats-data (-> audio (analyze beats))) - (def bass-data (-> audio (analyze bass))) - (def energy-data (-> audio (analyze energy))) - - ;; Analyzers dict for binding resolution (use dict fn, not literal, for symbol resolution) - (def analyzers (dict :bass bass-data :energy energy-data)) - - ;; Segments - now just one clean call! 
- ;; Note: kwargs use underscores (planner converts - to _) - (def segments (cycle-effects-preset - :beats beats-data - :videos videos - :video_infos video-infos - :preset all-42-preset - :analyzers analyzers - :beats_per_segment 21)) - - (assert (> (len segments) 0) "No segments created - all videos too short") - - ;; Sequence and output - (def showcase (-> segments - (sequence :resize-mode :fit :priority :width))) - - (mux showcase audio)) diff --git a/recipe-all-effects.sexp b/recipe-all-effects.sexp deleted file mode 100644 index fc2151d..0000000 --- a/recipe-all-effects.sexp +++ /dev/null @@ -1,172 +0,0 @@ -;; All Effects Showcase -;; Cycles through every sexp effect on beat, using entire audio -(recipe "all-effects-showcase" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - ;; Analyzers - (analyzer beats :path "../artdag-analyzers/beats/analyzer.py") - (analyzer bass :path "../artdag-analyzers/bass/analyzer.py") - (analyzer energy :path "../artdag-analyzers/energy/analyzer.py") - - ;; All sexp effects - (effect ascii_art :path "sexp_effects/effects/ascii_art.sexp") - (effect ascii_zones :path "sexp_effects/effects/ascii_zones.sexp") - (effect datamosh :path "sexp_effects/effects/datamosh.sexp") - (effect pixelsort :path "sexp_effects/effects/pixelsort.sexp") - (effect bloom :path "sexp_effects/effects/bloom.sexp") - (effect blur :path "sexp_effects/effects/blur.sexp") - (effect brightness :path "sexp_effects/effects/brightness.sexp") - (effect color-adjust :path "sexp_effects/effects/color-adjust.sexp") - (effect color_cycle :path "sexp_effects/effects/color_cycle.sexp") - (effect contrast :path "sexp_effects/effects/contrast.sexp") - (effect crt :path "sexp_effects/effects/crt.sexp") - (effect echo :path "sexp_effects/effects/echo.sexp") - (effect edge_detect :path "sexp_effects/effects/edge_detect.sexp") - (effect emboss :path "sexp_effects/effects/emboss.sexp") - (effect film_grain :path 
"sexp_effects/effects/film_grain.sexp") - (effect fisheye :path "sexp_effects/effects/fisheye.sexp") - (effect flip :path "sexp_effects/effects/flip.sexp") - (effect grayscale :path "sexp_effects/effects/grayscale.sexp") - (effect hue_shift :path "sexp_effects/effects/hue_shift.sexp") - (effect invert :path "sexp_effects/effects/invert.sexp") - (effect kaleidoscope :path "sexp_effects/effects/kaleidoscope.sexp") - (effect mirror :path "sexp_effects/effects/mirror.sexp") - (effect neon_glow :path "sexp_effects/effects/neon_glow.sexp") - (effect noise :path "sexp_effects/effects/noise.sexp") - (effect outline :path "sexp_effects/effects/outline.sexp") - (effect pixelate :path "sexp_effects/effects/pixelate.sexp") - (effect posterize :path "sexp_effects/effects/posterize.sexp") - (effect rgb_split :path "sexp_effects/effects/rgb_split.sexp") - (effect ripple :path "sexp_effects/effects/ripple.sexp") - (effect rotate :path "sexp_effects/effects/rotate.sexp") - (effect saturation :path "sexp_effects/effects/saturation.sexp") - (effect scanlines :path "sexp_effects/effects/scanlines.sexp") - (effect sepia :path "sexp_effects/effects/sepia.sexp") - (effect sharpen :path "sexp_effects/effects/sharpen.sexp") - (effect strobe :path "sexp_effects/effects/strobe.sexp") - (effect swirl :path "sexp_effects/effects/swirl.sexp") - (effect threshold :path "sexp_effects/effects/threshold.sexp") - (effect tile_grid :path "sexp_effects/effects/tile_grid.sexp") - (effect trails :path "sexp_effects/effects/trails.sexp") - (effect vignette :path "sexp_effects/effects/vignette.sexp") - (effect wave :path "sexp_effects/effects/wave.sexp") - (effect zoom :path "sexp_effects/effects/zoom.sexp") - - ;; Constructs - (construct slice-every-n :path "constructs/slice-every-n.sexp") - - ;; Sources with durations (seconds) - (def video-a (source :path "monday.webm")) - (def video-a-duration 30) ;; adjust to actual duration - (def video-b (source :path "new.webm")) - (def video-b-duration 60) ;; 
adjust to actual duration - (def video-c (source :path "ecstacy.mp4")) - (def video-c-duration 45) ;; adjust to actual duration - - ;; Video list with durations for easy lookup - (def videos (list video-a video-b video-c)) - (def video-durations (list video-a-duration video-b-duration video-c-duration)) - - ;; Audio - entire file - (def audio (source :path "dizzy.mp3")) - - ;; Analysis - (def beats-data (-> audio (analyze beats))) - (def bass-data (-> audio (analyze bass))) - (def energy-data (-> audio (analyze energy))) - - ;; Group every 21 beats into one segment (~42 segments for this track) - (def beats-per-seg 21) - - ;; Slice into segments, one effect each - ;; Wraps video start time; skips if segment longer than all videos - (def segments (slice-every-n beats-data beats-per-seg - :init 0 - :reducer (fn [acc i start end] - (let [seg-duration (- end start) - ;; Try preferred video first, then others - vid-idx (mod acc 3) - ;; Find a video long enough for this segment - valid-vid-idx (cond - (>= (nth video-durations vid-idx) seg-duration) vid-idx - (>= (nth video-durations (mod (+ vid-idx 1) 3)) seg-duration) (mod (+ vid-idx 1) 3) - (>= (nth video-durations (mod (+ vid-idx 2) 3)) seg-duration) (mod (+ vid-idx 2) 3) - :else nil)] - ;; Skip if no video is long enough - (if (= valid-vid-idx nil) - {:skip true :acc (inc acc)} - (let [src (nth videos valid-vid-idx) - src-duration (nth video-durations valid-vid-idx) - ;; Wrap start time within video duration - wrapped-start (mod start src-duration) - effect-idx (mod acc 42) - fx (cond - ;; Color effects 0-9 - DRAMATIC ranges for visible music reactivity - (= effect-idx 0) {:effect invert} - (= effect-idx 1) {:effect grayscale} - (= effect-idx 2) {:effect sepia} - (= effect-idx 3) {:effect brightness :amount (bind bass values :range [-80 80])} - (= effect-idx 4) {:effect contrast :amount (bind energy values :range [0.5 2.5])} - (= effect-idx 5) {:effect saturation :amount (bind bass values :range [0.2 3.0])} - (= 
effect-idx 6) {:effect hue_shift :degrees (bind energy values :range [0 360])} - (= effect-idx 7) {:effect color_cycle :speed 2} - (= effect-idx 8) {:effect threshold :level 128} - (= effect-idx 9) {:effect posterize :levels 6} - ;; Blur/sharpen 10-13 - wider ranges - (= effect-idx 10) {:effect blur :radius (bind bass values :range [1 30])} - (= effect-idx 11) {:effect sharpen :amount (bind energy values :range [0.5 4])} - (= effect-idx 12) {:effect bloom :intensity 0.6 :radius 20} - (= effect-idx 13) {:effect color-adjust :brightness 20 :contrast 1.2} - ;; Distortion 14-21 - much more dramatic - (= effect-idx 14) {:effect swirl :strength (bind bass values :range [-6 6])} - (= effect-idx 15) {:effect fisheye :strength (bind bass values :range [-0.5 0.8])} - (= effect-idx 16) {:effect wave :amplitude (bind bass values :range [10 60]) :wavelength 60} - (= effect-idx 17) {:effect ripple :amplitude (bind bass values :range [10 40]) :frequency 6} - (= effect-idx 18) {:effect kaleidoscope :segments 6 :rotation_speed 30} - (= effect-idx 19) {:effect zoom :factor (bind bass values :range [0.8 1.5])} - (= effect-idx 20) {:effect rotate :angle (bind energy values :range [-30 30])} - (= effect-idx 21) {:effect mirror :direction "horizontal"} - ;; Stylization 22-28 - more variation - (= effect-idx 22) {:effect pixelate :block_size (bind bass values :range [4 32])} - (= effect-idx 23) {:effect ascii_art :char_size 8 :color_mode "color"} - (= effect-idx 24) {:effect ascii_zones :char_size 10} - (= effect-idx 25) {:effect edge_detect :low 50 :high 150} - (= effect-idx 26) {:effect emboss :strength 1.5} - (= effect-idx 27) {:effect outline :thickness 2} - (= effect-idx 28) {:effect neon_glow :glow_radius 20 :glow_intensity 2} - ;; Retro/film 29-33 - (= effect-idx 29) {:effect crt :line_spacing 3 :vignette_amount 0.3} - (= effect-idx 30) {:effect scanlines :spacing 3 :intensity 0.4} - (= effect-idx 31) {:effect film_grain :intensity 0.25} - (= effect-idx 32) {:effect vignette 
:strength 0.6} - (= effect-idx 33) {:effect noise :amount (bind bass values :range [10 80])} - ;; Chromatic 34 - bigger split - (= effect-idx 34) {:effect rgb_split :offset_x (bind bass values :range [5 40])} - ;; Temporal 35-37 - (= effect-idx 35) {:effect echo :num_echoes 4 :decay 0.5} - (= effect-idx 36) {:effect trails :persistence 0.7} - (= effect-idx 37) {:effect strobe :frequency 4} - ;; Geometric 38-39 - (= effect-idx 38) {:effect flip :direction "horizontal"} - (= effect-idx 39) {:effect tile_grid :rows 2 :cols 2} - ;; Glitch 40-41 - more glitchy - (= effect-idx 40) {:effect pixelsort :threshold_low 30 :threshold_high 220} - (= effect-idx 41) {:effect datamosh :corruption (bind bass values :range [0.2 0.8]) :block_size 24} - ;; Default fallback - :else {:effect invert})] - {:source src - :start wrapped-start - :duration seg-duration - :effects (list fx) - :acc (inc acc)})))))) - - ;; Error if no segments were created (all videos too short) - (assert (> (len segments) 0) "No segments created - all videos too short for segment durations") - - ;; Sequence all segments - (def showcase (-> segments - (sequence :resize-mode :fit :priority :width))) - - ;; Output with original audio - (mux showcase audio)) diff --git a/recipe-bound copy.sexp b/recipe-bound copy.sexp deleted file mode 100644 index c1756e8..0000000 --- a/recipe-bound copy.sexp +++ /dev/null @@ -1,45 +0,0 @@ -; Beat-synced video with full lambda control -(recipe "bound-effects" - :version "1.0" - :encoding (:codec "libx264" :crf 22 :preset "fast" :audio-codec "aac" :fps 30) - - (analyzer beats :path "../artdag-analyzers/beats/analyzer.py") - (analyzer bass :path "../artdag-analyzers/bass/analyzer.py") - (effect invert :path "../artdag-effects/invert/effect.py") - (effect sepia :path "sexp_effects/effects/sepia.sexp") - - (construct slice-on :path "constructs/slice-on.sexp") - - - (def video-a (source :path "monday.webm" :description "Video A")) - (def video-b (source :path "new.webm" :description 
"Video B")) - - (def video-c (source :path "ecstacy.mp4" :description "Video c")) - - (def audio (-> (source :path "dizzy.mp3" :description "Audio input") - (segment :start 0 :duration 10))) - - (def beats-data (-> audio (analyze beats))) - (def bass-data (-> audio (analyze bass))) - - ; Alternate sources and bind invert intensity to bass - ; slice-on produces a list of segments, then we explicitly concat with normalization - ; video-b always gets sepia effect applied - (def segments (slice-on beats-data - :init 0 - :reducer (fn [acc i start end] - (let [idx (mod acc 3) - src (nth (list video-a video-b video-c) idx) - base-effects (list {:effect invert :intensity (bind bass-data :range [0 100])}) - effects (if (= idx 1) - (concat base-effects (list {:effect sepia})) - base-effects)] - {:source src - :effects effects - :acc (inc acc)})))) - - ; Concatenate with resize normalization (fit to width, pad height) - (def synced-video (-> segments - (sequence :resize-mode :fit :priority :width))) - - (mux synced-video audio)) diff --git a/recipe-bound.sexp b/recipe-bound.sexp deleted file mode 100644 index 17c73e5..0000000 --- a/recipe-bound.sexp +++ /dev/null @@ -1,67 +0,0 @@ -; Beat-synced video demonstrating all node types -; Uses: source, segment, effect, resize, blend, layer, sequence, mux, analyze -(recipe "bound-effects" - :version "1.0" - :encoding (:codec "libx264" :crf 22 :preset "fast" :audio-codec "aac" :fps 30) - - ;; Analyzers - (analyzer beats :path "../artdag-analyzers/beats/analyzer.py") - (analyzer bass :path "../artdag-analyzers/bass/analyzer.py") - - ;; Effects (all sexp-based now) - (effect invert :path "../artdag-effects/invert/effect.py") - (effect sepia :path "sexp_effects/effects/sepia.sexp") - (effect color-adjust :path "sexp_effects/effects/color-adjust.sexp") - (effect resize-frame :path "sexp_effects/effects/resize-frame.sexp") - (effect blend :path "sexp_effects/effects/blend.sexp") - (effect layer :path "sexp_effects/effects/layer.sexp") - - 
;; Construct - (construct slice-on :path "constructs/slice-on.sexp") - - ;; Sources - (def video-a (source :path "monday.webm" :description "Video A")) - (def video-b (source :path "new.webm" :description "Video B")) - (def video-c (source :path "ecstacy.mp4" :description "Video C")) - - ;; Audio with segment - (def audio (-> (source :path "dizzy.mp3" :description "Audio input") - (segment :start 0 :duration 10))) - - ;; Analysis - (def beats-data (-> audio (analyze beats))) - (def bass-data (-> audio (analyze bass))) - - ;; Create an overlay track: video-c resized small, with color adjustment - (def overlay-track (-> video-c - (segment :start 0 :duration 10) - (resize 320 180 :mode "linear") - (effect color-adjust :brightness 20 :contrast 1.2 :saturation 1.5))) - - ;; Main sliced video with effects - (def segments (slice-on beats-data - :init 0 - :reducer (fn [acc i start end] - (let [idx (mod acc 3) - src (nth (list video-a video-b video-c) idx) - base-effects (list {:effect invert :intensity (bind bass-data :range [0 100])}) - effects (if (= idx 1) - (concat base-effects (list {:effect sepia})) - base-effects)] - {:source src - :effects effects - :acc (inc acc)})))) - - ;; Sequence the beat-sliced segments - (def synced-video (-> segments - (sequence :resize-mode :fit :priority :width))) - - ;; Blend overlay with main video (picture-in-picture style) - (def with-overlay (layer synced-video overlay-track :x 20 :y 20 :opacity 0.8)) - - ;; Final blend with original video-a for ghosting effect - (def final-video (-> video-a - (segment :start 0 :duration 10) - (blend with-overlay :mode "screen" :opacity 0.15))) - - (mux final-video audio)) diff --git a/recipe-construct.sexp b/recipe-construct.sexp deleted file mode 100644 index 82aac23..0000000 --- a/recipe-construct.sexp +++ /dev/null @@ -1,24 +0,0 @@ -; Test recipe using user-defined construct -(recipe "construct-test" - :version "1.0" - :encoding (:codec "libx264" :crf 22 :preset "fast" :audio-codec "aac" :fps 30) 
- - (analyzer beats :path "../artdag-analyzers/beats/analyzer.py") - (construct beat-alternate :path "constructs/beat-alternate.sexp") - - (def video-a (source :path "monday.webm" :description "Video A")) - (def video-b (source :path "new.webm" :description "Video B")) - - (def audio (-> (source :path "dizzy.mp3" :description "Audio input") - (segment :start 0 :duration 10))) - - (def beats-data (-> audio (analyze beats))) - - ;; Use the beat-alternate construct instead of slice-on - (def segments (beat-alternate beats-data (list video-a video-b))) - - ;; Concatenate with resize normalization - (def synced-video (-> segments - (sequence :resize-mode :fit :priority :width))) - - (mux synced-video audio)) diff --git a/recipe-lambda.sexp b/recipe-lambda.sexp deleted file mode 100644 index dd6db52..0000000 --- a/recipe-lambda.sexp +++ /dev/null @@ -1,28 +0,0 @@ -; Beat-synced video recipe with lambda reducer -(recipe "beat-sync-lambda" - :version "1.0" - :encoding (:codec "libx264" :crf 22 :preset "fast" :audio-codec "aac") - - ; Declare analyzers and effects - (analyzer beats :path "../artdag-analyzers/beats/analyzer.py") - (effect invert :path "../artdag-effects/invert/effect.py") - - ; Sources - (def video (source :path "new.webm" :description "Video input")) - (def audio (-> (source :path "dizzy.mp3" :description "Audio input") - (segment :start 0 :duration 10))) - - ; Analyze audio for beats - (def analysis (-> audio (analyze beats))) - - ; Slice video at beat times using lambda reducer - (def synced-video (slice-on video analysis - :times times - :init 0 - :fn (fn [acc i start end] - {:source video - :effects (if (odd? 
i) (list invert) (list)) - :acc (inc acc)}))) - - ; Mux video with audio - (mux synced-video audio)) diff --git a/recipe-parametric.sexp b/recipe-parametric.sexp deleted file mode 100644 index 14f12f9..0000000 --- a/recipe-parametric.sexp +++ /dev/null @@ -1,33 +0,0 @@ -;; Parametric Effect Test -;; Demonstrates using command-line parameters for effect values -;; -;; Usage: -;; python3 plan.py recipe-parametric.sexp -p strength=3 -p amount=50 | python3 execute.py - -d . -o output.mp4 -;; -;; Parameters (with defaults): -;; strength - swirl strength (default: 2) -;; amount - brightness amount (default: 0) - -(recipe "parametric-test" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - ;; Effects - (effect swirl :path "sexp_effects/effects/swirl.sexp") - (effect brightness :path "sexp_effects/effects/brightness.sexp") - - ;; Source - (def video (source :path "monday.webm")) - (def audio (source :path "dizzy.mp3")) - - ;; 10 second segments - (def clip (-> video (segment :start 0 :duration 10))) - (def audio-clip (-> audio (segment :start 0 :duration 10))) - - ;; Apply effects with parameters (passed via -p flag) - ;; Parameters are injected as bindings before compilation - (def result (-> clip - (effect swirl :strength strength) - (effect brightness :amount amount))) - - (mux result audio-clip)) diff --git a/recipe-sexp-slice.sexp b/recipe-sexp-slice.sexp deleted file mode 100644 index 62243c0..0000000 --- a/recipe-sexp-slice.sexp +++ /dev/null @@ -1,30 +0,0 @@ -; Test recipe using S-expression slice-on construct -(recipe "sexp-slice-test" - :version "1.0" - :encoding (:codec "libx264" :crf 22 :preset "fast" :audio-codec "aac" :fps 30) - - (analyzer beats :path "../artdag-analyzers/beats/analyzer.py") - - ;; Import slice-on as a user-defined construct (not the built-in!) 
- (construct slice-on :path "constructs/slice-on.sexp") - - (def video-a (source :path "monday.webm" :description "Video A")) - (def video-b (source :path "new.webm" :description "Video B")) - - (def audio (-> (source :path "dizzy.mp3" :description "Audio input") - (segment :start 0 :duration 10))) - - (def beats-data (-> audio (analyze beats))) - - ;; Use the S-expression slice-on with a reducer function - (def segments (slice-on beats-data - :init 0 - :fn (fn [acc i start end] - {:source (nth (list video-a video-b) (mod acc 2)) - :effects (list) - :acc (inc acc)}))) - - (def synced-video (-> segments - (sequence :resize-mode :fit :priority :width))) - - (mux synced-video audio)) diff --git a/recipe-single-effect.sexp b/recipe-single-effect.sexp deleted file mode 100644 index 1892c95..0000000 --- a/recipe-single-effect.sexp +++ /dev/null @@ -1,94 +0,0 @@ -;; Single Effect Test - Static Effects -;; Change the effect line to test different effects -(recipe "single-effect-test" - :version "1.0" - :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) - - ;; Effects - (effect ascii_art :path "sexp_effects/effects/ascii_art.sexp") - (effect ascii_zones :path "sexp_effects/effects/ascii_zones.sexp") - (effect bloom :path "sexp_effects/effects/bloom.sexp") - (effect blur :path "sexp_effects/effects/blur.sexp") - (effect brightness :path "sexp_effects/effects/brightness.sexp") - (effect color-adjust :path "sexp_effects/effects/color-adjust.sexp") - (effect color_cycle :path "sexp_effects/effects/color_cycle.sexp") - (effect contrast :path "sexp_effects/effects/contrast.sexp") - (effect crt :path "sexp_effects/effects/crt.sexp") - (effect datamosh :path "sexp_effects/effects/datamosh.sexp") - (effect echo :path "sexp_effects/effects/echo.sexp") - (effect edge_detect :path "sexp_effects/effects/edge_detect.sexp") - (effect emboss :path "sexp_effects/effects/emboss.sexp") - (effect film_grain :path "sexp_effects/effects/film_grain.sexp") - (effect 
fisheye :path "sexp_effects/effects/fisheye.sexp") - (effect flip :path "sexp_effects/effects/flip.sexp") - (effect grayscale :path "sexp_effects/effects/grayscale.sexp") - (effect hue_shift :path "sexp_effects/effects/hue_shift.sexp") - (effect invert :path "sexp_effects/effects/invert.sexp") - (effect kaleidoscope :path "sexp_effects/effects/kaleidoscope.sexp") - (effect mirror :path "sexp_effects/effects/mirror.sexp") - (effect neon_glow :path "sexp_effects/effects/neon_glow.sexp") - (effect noise :path "sexp_effects/effects/noise.sexp") - (effect outline :path "sexp_effects/effects/outline.sexp") - (effect pixelate :path "sexp_effects/effects/pixelate.sexp") - (effect pixelsort :path "sexp_effects/effects/pixelsort.sexp") - (effect posterize :path "sexp_effects/effects/posterize.sexp") - (effect rgb_split :path "sexp_effects/effects/rgb_split.sexp") - (effect ripple :path "sexp_effects/effects/ripple.sexp") - (effect rotate :path "sexp_effects/effects/rotate.sexp") - (effect saturation :path "sexp_effects/effects/saturation.sexp") - (effect scanlines :path "sexp_effects/effects/scanlines.sexp") - (effect sepia :path "sexp_effects/effects/sepia.sexp") - (effect sharpen :path "sexp_effects/effects/sharpen.sexp") - (effect strobe :path "sexp_effects/effects/strobe.sexp") - (effect swirl :path "sexp_effects/effects/swirl.sexp") - (effect threshold :path "sexp_effects/effects/threshold.sexp") - (effect tile_grid :path "sexp_effects/effects/tile_grid.sexp") - (effect trails :path "sexp_effects/effects/trails.sexp") - (effect vignette :path "sexp_effects/effects/vignette.sexp") - (effect wave :path "sexp_effects/effects/wave.sexp") - (effect zoom :path "sexp_effects/effects/zoom.sexp") - - ;; Source - (def video (source :path "monday.webm")) - (def audio (source :path "dizzy.mp3")) - - ;; 10 second segments - (def clip (-> video (segment :start 0 :duration 10))) - (def audio-clip (-> audio (segment :start 0 :duration 10))) - - ;; 
============================================================ - ;; CHANGE THE EFFECT LINE BELOW: - ;; (effect invert) (effect grayscale) (effect sepia) - ;; (effect brightness :amount 30) (effect contrast :amount 1.5) - ;; (effect saturation :amount 2.0) (effect hue_shift :degrees 90) - ;; (effect color_cycle :speed 2) (effect threshold :level 128) - ;; (effect posterize :levels 6) (effect blur :radius 10) - ;; (effect sharpen :amount 2) (effect bloom :intensity 0.6 :radius 20) - ;; (effect color-adjust :brightness 20 :contrast 1.2) - ;; (effect swirl :strength 3) (effect fisheye :strength 0.5) - ;; (effect wave :amplitude 30 :wavelength 60) - ;; (effect ripple :amplitude 20 :frequency 6) - ;; (effect kaleidoscope :segments 6 :rotation_speed 30) - ;; (effect zoom :factor 1.2) (effect rotate :angle 15) - ;; (effect mirror :direction "horizontal") - ;; (effect pixelate :block_size 16) - ;; (effect ascii_art :char_size 8 :color_mode "color") - ;; (effect ascii_zones :char_size 10) - ;; (effect edge_detect :low 50 :high 150) - ;; (effect emboss :strength 1.5) (effect outline :thickness 2) - ;; (effect neon_glow :glow_radius 20 :glow_intensity 2) - ;; (effect crt :line_spacing 3 :vignette_amount 0.3) - ;; (effect scanlines :spacing 3 :intensity 0.4) - ;; (effect film_grain :intensity 0.25) (effect vignette :strength 0.6) - ;; (effect noise :amount 40) (effect rgb_split :offset_x 20) - ;; (effect echo :num_echoes 4 :decay 0.5) - ;; (effect trails :persistence 0.7) (effect strobe :frequency 4) - ;; (effect flip :direction "horizontal") - ;; (effect tile_grid :rows 2 :cols 2) - ;; (effect pixelsort :threshold_low 30 :threshold_high 220) - ;; (effect datamosh :corruption 0.5 :block_size 24) - ;; ============================================================ - (def result (-> clip - (effect swirl :strength 3))) - - (mux result audio-clip)) diff --git a/recipe.sexp b/recipe.sexp deleted file mode 100644 index 0878bdc..0000000 --- a/recipe.sexp +++ /dev/null @@ -1,25 +0,0 @@ -; 
Beat-synced video recipe -(recipe "beat-sync" - :version "1.0" - :encoding (:codec "libx264" :crf 22 :preset "fast" :audio-codec "aac") - - ; Declare analyzers and effects - (analyzer beats :path "../artdag-analyzers/beats/analyzer.py") - (effect invert :path "../artdag-effects/invert/effect.py") - - ; Sources - (def video (source :path "new.webm" :description "Video input")) - (def audio (-> (source :path "dizzy.mp3" :description "Audio input") - (segment :start 0 :duration 10))) - - ; Analyze audio for beats - (def analysis (-> audio (analyze beats))) - - ; Slice video at beat times, invert on odd beats - (def synced-video (slice-on video analysis - :times times - :effect invert - :pattern alternate)) - - ; Mux video with audio - (mux synced-video audio)) diff --git a/sexp_effects/effects/blend.sexp b/sexp_effects/effects/blend.sexp index ba5296d..bf7fefd 100644 --- a/sexp_effects/effects/blend.sexp +++ b/sexp_effects/effects/blend.sexp @@ -1,56 +1,31 @@ -;; Blend effect - combines two video streams -;; Multi-input effect: uses frame-a and frame-b +;; Blend effect - combines two video frames +;; Streaming-compatible: frame is background, overlay is second frame +;; Usage: (blend background overlay :opacity 0.5 :mode "alpha") +;; ;; Params: ;; mode - blend mode (add, multiply, screen, overlay, difference, lighten, darken, alpha) ;; opacity - blend amount (0-1) -;; resize_mode - how to resize frame-b to match frame-a (fit, crop, stretch) -;; priority - which dimension takes priority (width, height) -;; pad_color - color for padding in fit mode [r g b] -(require-primitives "image" "blending") +(require-primitives "image" "blending" "core") (define-effect blend :params ( - (mode :type string :default "overlay") + (overlay :type frame :default nil) + (mode :type string :default "alpha") (opacity :type float :default 0.5) - (resize_mode :type string :default "fit") - (priority :type string :default "width") - (pad_color :type list :default (quote [0 0 0])) ) - (let [a 
frame-a - a-w (width a) - a-h (height a) - b-raw frame-b - b-w (width b-raw) - b-h (height b-raw) - ;; Calculate scale based on resize mode and priority - scale-w (/ a-w b-w) - scale-h (/ a-h b-h) - scale (if (= resize_mode "stretch") - 1 ;; Will use explicit dimensions - (if (= resize_mode "crop") - (max scale-w scale-h) ;; Scale to cover, then crop - (if (= priority "width") - scale-w - scale-h))) - ;; For stretch, use target dimensions directly - new-w (if (= resize_mode "stretch") a-w (round (* b-w scale))) - new-h (if (= resize_mode "stretch") a-h (round (* b-h scale))) - ;; Resize b - b-resized (resize b-raw new-w new-h "linear") - ;; Handle fit (pad) or crop to exact size - b (if (= resize_mode "crop") - ;; Crop to center - (let [cx (/ (- new-w a-w) 2) - cy (/ (- new-h a-h) 2)] - (crop b-resized cx cy a-w a-h)) - (if (and (= resize_mode "fit") (or (!= new-w a-w) (!= new-h a-h))) - ;; Pad to center - (let [pad-x (/ (- a-w new-w) 2) - pad-y (/ (- a-h new-h) 2) - canvas (make-image a-w a-h pad_color)] - (paste canvas b-resized pad-x pad-y)) - b-resized))] - (if (= mode "alpha") - (blend-images a b opacity) - (blend-images a (blend-mode a b mode) opacity)))) + (if (core:is-nil overlay) + frame + (let [a frame + b overlay + a-h (image:height a) + a-w (image:width a) + b-h (image:height b) + b-w (image:width b) + ;; Resize b to match a if needed + b-sized (if (and (= a-w b-w) (= a-h b-h)) + b + (image:resize b a-w a-h "linear"))] + (if (= mode "alpha") + (blending:blend-images a b-sized opacity) + (blending:blend-images a (blending:blend-mode a b-sized mode) opacity))))) diff --git a/sexp_effects/effects/blend_multi.sexp b/sexp_effects/effects/blend_multi.sexp index ece540e..1ee160f 100644 --- a/sexp_effects/effects/blend_multi.sexp +++ b/sexp_effects/effects/blend_multi.sexp @@ -1,9 +1,9 @@ ;; N-way weighted blend effect -;; -;; Takes N input frames via `inputs` and N per-frame weights. -;; Produces a single frame: the normalised weighted composite. 
+;; Streaming-compatible: pass inputs as a list of frames +;; Usage: (blend_multi :inputs [(read a) (read b) (read c)] :weights [0.3 0.4 0.3]) ;; ;; Parameters: +;; inputs - list of N frames to blend ;; weights - list of N floats, one per input (resolved per-frame) ;; mode - blend mode applied when folding each frame in: ;; "alpha" — pure weighted average (default) @@ -30,14 +30,15 @@ (define-effect blend_multi :params ( - (weights :type list :default (quote [])) + (inputs :type list :default []) + (weights :type list :default []) (mode :type string :default "alpha") (resize_mode :type string :default "fit") ) (let [n (len inputs) ;; Target dimensions from first frame - target-w (width (nth inputs 0)) - target-h (height (nth inputs 0)) + target-w (image:width (nth inputs 0)) + target-h (image:height (nth inputs 0)) ;; Fold over indices 1..n-1 ;; Accumulator is (list blended-frame running-weight-sum) seed (list (nth inputs 0) (nth weights 0)) @@ -48,10 +49,10 @@ w (nth weights i) new-running (+ running w) opacity (/ w (max new-running 0.001)) - f (resize (nth inputs i) target-w target-h "linear") + f (image:resize (nth inputs i) target-w target-h "linear") ;; Apply blend mode then mix with opacity blended (if (= mode "alpha") - (blend-images acc f opacity) - (blend-images acc (blend-mode acc f mode) opacity))] + (blending:blend-images acc f opacity) + (blending:blend-images acc (blending:blend-mode acc f mode) opacity))] (list blended new-running))))] (nth result 0))) diff --git a/sexp_effects/effects/bloom.sexp b/sexp_effects/effects/bloom.sexp index 5a4b020..3524d01 100644 --- a/sexp_effects/effects/bloom.sexp +++ b/sexp_effects/effects/bloom.sexp @@ -1,4 +1,5 @@ ;; Bloom effect - glow on bright areas +(require-primitives "image" "blending") (define-effect bloom :params ( @@ -11,5 +12,5 @@ (if (> (luminance c) threshold) c (rgb 0 0 0))))) - (blurred (blur bright radius))) - (blend-mode frame blurred "add"))) + (blurred (image:blur bright radius))) + 
(blending:blend-mode frame blurred "add"))) diff --git a/sexp_effects/effects/blur.sexp b/sexp_effects/effects/blur.sexp index 37af07d..b71a55a 100644 --- a/sexp_effects/effects/blur.sexp +++ b/sexp_effects/effects/blur.sexp @@ -1,9 +1,8 @@ ;; Blur effect - gaussian blur - -(require-primitives "filters" "math") +(require-primitives "image") (define-effect blur :params ( (radius :type int :default 5 :range [1 50]) ) - (blur frame (max 1 radius))) + (image:blur frame (max 1 radius))) diff --git a/sexp_effects/effects/brightness.sexp b/sexp_effects/effects/brightness.sexp index 239a79d..4af53a7 100644 --- a/sexp_effects/effects/brightness.sexp +++ b/sexp_effects/effects/brightness.sexp @@ -1,10 +1,9 @@ ;; Brightness effect - adjusts overall brightness ;; Uses vectorized adjust primitive for fast processing - (require-primitives "color_ops") (define-effect brightness :params ( (amount :type int :default 0 :range [-255 255]) ) - (adjust frame amount 1)) + (color_ops:adjust-brightness frame amount)) diff --git a/sexp_effects/effects/color-adjust.sexp b/sexp_effects/effects/color-adjust.sexp index 3f598b3..5318bdd 100644 --- a/sexp_effects/effects/color-adjust.sexp +++ b/sexp_effects/effects/color-adjust.sexp @@ -1,4 +1,5 @@ ;; Color adjustment effect - replaces TRANSFORM node +(require-primitives "color_ops") (define-effect color-adjust :params ( @@ -7,5 +8,6 @@ (saturation :type float :default 1 :range [0 2] :desc "Saturation multiplier") ) (-> frame - (adjust :brightness brightness :contrast contrast) - (shift-hsv :s saturation))) + (color_ops:adjust-brightness brightness) + (color_ops:adjust-contrast contrast) + (color_ops:adjust-saturation saturation))) diff --git a/sexp_effects/effects/color_cycle.sexp b/sexp_effects/effects/color_cycle.sexp index f581ac0..e08dbb6 100644 --- a/sexp_effects/effects/color_cycle.sexp +++ b/sexp_effects/effects/color_cycle.sexp @@ -1,4 +1,5 @@ ;; Color Cycle effect - animated hue rotation +(require-primitives "color_ops") (define-effect 
color_cycle :params ( diff --git a/sexp_effects/effects/contrast.sexp b/sexp_effects/effects/contrast.sexp index 47f7903..660661d 100644 --- a/sexp_effects/effects/contrast.sexp +++ b/sexp_effects/effects/contrast.sexp @@ -1,10 +1,9 @@ ;; Contrast effect - adjusts image contrast ;; Uses vectorized adjust primitive for fast processing - (require-primitives "color_ops") (define-effect contrast :params ( (amount :type int :default 1 :range [0.5 3]) ) - (adjust frame 0 amount)) + (color_ops:adjust-contrast frame amount)) diff --git a/sexp_effects/effects/crt.sexp b/sexp_effects/effects/crt.sexp index 861d8a8..097eaf9 100644 --- a/sexp_effects/effects/crt.sexp +++ b/sexp_effects/effects/crt.sexp @@ -1,4 +1,5 @@ ;; CRT effect - old monitor simulation +(require-primitives "image") (define-effect crt :params ( @@ -6,8 +7,8 @@ (line_opacity :type float :default 0.3 :range [0 1]) (vignette_amount :type float :default 0.2) ) - (let* ((w (width frame)) - (h (height frame)) + (let* ((w (image:width frame)) + (h (image:height frame)) (cx (/ w 2)) (cy (/ h 2)) (max-dist (sqrt (+ (* cx cx) (* cy cy))))) diff --git a/sexp_effects/effects/echo.sexp b/sexp_effects/effects/echo.sexp index 0528ab2..2aa2287 100644 --- a/sexp_effects/effects/echo.sexp +++ b/sexp_effects/effects/echo.sexp @@ -1,4 +1,5 @@ ;; Echo effect - motion trails using frame buffer +(require-primitives "blending") (define-effect echo :params ( @@ -15,4 +16,4 @@ (let ((result (copy frame))) ;; Simple blend of first two frames for now ;; Full version would fold over all frames - (blend-images frame (nth new-buffer 1) (* decay 0.5))))))) + (blending:blend-images frame (nth new-buffer 1) (* decay 0.5))))))) diff --git a/sexp_effects/effects/edge_detect.sexp b/sexp_effects/effects/edge_detect.sexp index aacc350..170befb 100644 --- a/sexp_effects/effects/edge_detect.sexp +++ b/sexp_effects/effects/edge_detect.sexp @@ -1,8 +1,9 @@ ;; Edge detection effect - highlights edges +(require-primitives "image") (define-effect 
edge_detect :params ( (low :type int :default 50 :range [10 100]) (high :type int :default 150 :range [50 300]) ) - (edges frame low high)) + (image:edge-detect frame low high)) diff --git a/sexp_effects/effects/emboss.sexp b/sexp_effects/effects/emboss.sexp index 2305c24..1eac3ce 100644 --- a/sexp_effects/effects/emboss.sexp +++ b/sexp_effects/effects/emboss.sexp @@ -1,4 +1,5 @@ ;; Emboss effect - creates raised/3D appearance +(require-primitives "blending") (define-effect emboss :params ( @@ -9,4 +10,4 @@ (list (- strength) 1 strength) (list 0 strength strength))) (embossed (convolve frame kernel))) - (blend-images embossed frame blend))) + (blending:blend-images embossed frame blend))) diff --git a/sexp_effects/effects/film_grain.sexp b/sexp_effects/effects/film_grain.sexp index 1f7b38b..29bdd75 100644 --- a/sexp_effects/effects/film_grain.sexp +++ b/sexp_effects/effects/film_grain.sexp @@ -1,4 +1,5 @@ ;; Film Grain effect - adds film grain texture +(require-primitives "core") (define-effect film_grain :params ( diff --git a/sexp_effects/effects/fisheye.sexp b/sexp_effects/effects/fisheye.sexp index d31935d..37750a7 100644 --- a/sexp_effects/effects/fisheye.sexp +++ b/sexp_effects/effects/fisheye.sexp @@ -1,4 +1,5 @@ ;; Fisheye effect - barrel/pincushion lens distortion +(require-primitives "geometry" "image") (define-effect fisheye :params ( @@ -7,9 +8,9 @@ (center_y :type float :default 0.5 :range [0 1]) (zoom_correct :type bool :default true) ) - (let* ((w (width frame)) - (h (height frame)) + (let* ((w (image:width frame)) + (h (image:height frame)) (cx (* w center_x)) (cy (* h center_y)) - (coords (fisheye-displace w h strength cx cy zoom_correct))) - (remap frame (coords-x coords) (coords-y coords)))) + (coords (geometry:fisheye-coords w h strength cx cy zoom_correct))) + (geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords)))) diff --git a/sexp_effects/effects/flip.sexp b/sexp_effects/effects/flip.sexp index c527113..977e1e1 100644 
--- a/sexp_effects/effects/flip.sexp +++ b/sexp_effects/effects/flip.sexp @@ -1,4 +1,5 @@ ;; Flip effect - flips image horizontally or vertically +(require-primitives "geometry") (define-effect flip :params ( @@ -7,9 +8,9 @@ ) (let ((result frame)) (if horizontal - (set! result (flip-h result)) + (set! result (geometry:flip-img result "horizontal")) nil) (if vertical - (set! result (flip-v result)) + (set! result (geometry:flip-img result "vertical")) nil) result)) diff --git a/sexp_effects/effects/grayscale.sexp b/sexp_effects/effects/grayscale.sexp index 48c59ee..848f8a7 100644 --- a/sexp_effects/effects/grayscale.sexp +++ b/sexp_effects/effects/grayscale.sexp @@ -1,6 +1,7 @@ ;; Grayscale effect - converts to grayscale ;; Uses vectorized mix-gray primitive for fast processing +(require-primitives "image") (define-effect grayscale :params () - (mix-gray frame 1)) + (image:grayscale frame)) diff --git a/sexp_effects/effects/hue_shift.sexp b/sexp_effects/effects/hue_shift.sexp index fb6aa79..ab61bd6 100644 --- a/sexp_effects/effects/hue_shift.sexp +++ b/sexp_effects/effects/hue_shift.sexp @@ -9,4 +9,4 @@ (speed :type int :default 0 :desc "rotation per second") ) (let ((shift (+ degrees (* speed t)))) - (shift-hsv frame shift 1 1))) + (color_ops:shift-hsv frame shift 1 1))) diff --git a/sexp_effects/effects/invert.sexp b/sexp_effects/effects/invert.sexp index 8fd6b81..34936da 100644 --- a/sexp_effects/effects/invert.sexp +++ b/sexp_effects/effects/invert.sexp @@ -6,4 +6,4 @@ (define-effect invert :params ((amount :type float :default 1 :range [0 1])) - (if (> amount 0.5) (invert-img frame) frame)) + (if (> amount 0.5) (color_ops:invert-img frame) frame)) diff --git a/sexp_effects/effects/kaleidoscope.sexp b/sexp_effects/effects/kaleidoscope.sexp index 8a79937..9487ae2 100644 --- a/sexp_effects/effects/kaleidoscope.sexp +++ b/sexp_effects/effects/kaleidoscope.sexp @@ -1,4 +1,5 @@ ;; Kaleidoscope effect - mandala-like symmetry patterns +(require-primitives "geometry" 
"image") (define-effect kaleidoscope :params ( @@ -9,11 +10,11 @@ (center_y :type float :default 0.5 :range [0 1]) (zoom :type int :default 1 :range [0.5 3]) ) - (let* ((w (width frame)) - (h (height frame)) + (let* ((w (image:width frame)) + (h (image:height frame)) (cx (* w center_x)) (cy (* h center_y)) ;; Total rotation including time-based animation (total_rot (+ rotation (* rotation_speed (or _time 0)))) - (coords (kaleidoscope-displace w h segments total_rot cx cy zoom))) - (remap frame (coords-x coords) (coords-y coords)))) + (coords (geometry:kaleidoscope-coords w h segments total_rot cx cy zoom))) + (geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords)))) diff --git a/sexp_effects/effects/layer.sexp b/sexp_effects/effects/layer.sexp index 30ba927..e57d627 100644 --- a/sexp_effects/effects/layer.sexp +++ b/sexp_effects/effects/layer.sexp @@ -1,28 +1,36 @@ -;; Layer effect - composite frame-b over frame-a at position -;; Multi-input effect: uses frame-a (background) and frame-b (overlay) -;; Params: x, y (position), opacity (0-1), mode (blend mode) +;; Layer effect - composite overlay over background at position +;; Streaming-compatible: frame is background, overlay is foreground +;; Usage: (layer background overlay :x 10 :y 20 :opacity 0.8) +;; +;; Params: +;; overlay - frame to composite on top +;; x, y - position to place overlay +;; opacity - blend amount (0-1) +;; mode - blend mode (alpha, multiply, screen, etc.) 
-(require-primitives "image" "blending") +(require-primitives "image" "blending" "core") (define-effect layer :params ( + (overlay :type frame :default nil) (x :type int :default 0) (y :type int :default 0) (opacity :type float :default 1.0) (mode :type string :default "alpha") ) - (let [bg (copy frame-a) - fg frame-b - ;; Resize fg if needed to fit - fg-w (width fg) - fg-h (height fg)] - (if (= opacity 1.0) - ;; Simple paste - (paste bg fg x y) - ;; Blend with opacity - (let [blended (if (= mode "alpha") - (blend-images (crop bg x y fg-w fg-h) fg opacity) - (blend-images (crop bg x y fg-w fg-h) - (blend-mode (crop bg x y fg-w fg-h) fg mode) - opacity))] - (paste bg blended x y))))) + (if (core:is-nil overlay) + frame + (let [bg (copy frame) + fg overlay + fg-w (image:width fg) + fg-h (image:height fg)] + (if (= opacity 1.0) + ;; Simple paste + (paste bg fg x y) + ;; Blend with opacity + (let [blended (if (= mode "alpha") + (blending:blend-images (image:crop bg x y fg-w fg-h) fg opacity) + (blending:blend-images (image:crop bg x y fg-w fg-h) + (blending:blend-mode (image:crop bg x y fg-w fg-h) fg mode) + opacity))] + (paste bg blended x y)))))) diff --git a/sexp_effects/effects/mirror.sexp b/sexp_effects/effects/mirror.sexp index f1f4c5a..a450cb6 100644 --- a/sexp_effects/effects/mirror.sexp +++ b/sexp_effects/effects/mirror.sexp @@ -1,32 +1,33 @@ ;; Mirror effect - mirrors half of image +(require-primitives "geometry" "image") (define-effect mirror :params ( (mode :type string :default "left_right") ) - (let* ((w (width frame)) - (h (height frame)) + (let* ((w (image:width frame)) + (h (image:height frame)) (hw (floor (/ w 2))) (hh (floor (/ h 2)))) (cond ((= mode "left_right") - (let ((left (crop frame 0 0 hw h)) + (let ((left (image:crop frame 0 0 hw h)) (result (copy frame))) - (paste result (flip-h left) hw 0))) + (paste result (geometry:flip-img left "horizontal") hw 0))) ((= mode "right_left") - (let ((right (crop frame hw 0 hw h)) + (let ((right (image:crop 
frame hw 0 hw h)) (result (copy frame))) - (paste result (flip-h right) 0 0))) + (paste result (geometry:flip-img right "horizontal") 0 0))) ((= mode "top_bottom") - (let ((top (crop frame 0 0 w hh)) + (let ((top (image:crop frame 0 0 w hh)) (result (copy frame))) - (paste result (flip-v top) 0 hh))) + (paste result (geometry:flip-img top "vertical") 0 hh))) ((= mode "bottom_top") - (let ((bottom (crop frame 0 hh w hh)) + (let ((bottom (image:crop frame 0 hh w hh)) (result (copy frame))) - (paste result (flip-v bottom) 0 0))) + (paste result (geometry:flip-img bottom "vertical") 0 0))) (else frame)))) diff --git a/sexp_effects/effects/neon_glow.sexp b/sexp_effects/effects/neon_glow.sexp index 26e5462..39245ab 100644 --- a/sexp_effects/effects/neon_glow.sexp +++ b/sexp_effects/effects/neon_glow.sexp @@ -1,4 +1,5 @@ ;; Neon Glow effect - glowing edge effect +(require-primitives "image" "blending") (define-effect neon_glow :params ( @@ -8,15 +9,15 @@ (glow_intensity :type int :default 2 :range [0.5 5]) (background :type float :default 0.3 :range [0 1]) ) - (let* ((edge-img (edges frame edge_low edge_high)) - (glow (blur edge-img glow_radius)) + (let* ((edge-img (image:edge-detect frame edge_low edge_high)) + (glow (image:blur edge-img glow_radius)) ;; Intensify the glow (bright-glow (map-pixels glow (lambda (x y c) (rgb (clamp (* (red c) glow_intensity) 0 255) (clamp (* (green c) glow_intensity) 0 255) (clamp (* (blue c) glow_intensity) 0 255)))))) - (blend-mode (blend-images frame (make-image (width frame) (height frame) (list 0 0 0)) + (blending:blend-mode (blending:blend-images frame (make-image (image:width frame) (image:height frame) (list 0 0 0)) (- 1 background)) bright-glow "screen"))) diff --git a/sexp_effects/effects/outline.sexp b/sexp_effects/effects/outline.sexp index b9127e0..276f891 100644 --- a/sexp_effects/effects/outline.sexp +++ b/sexp_effects/effects/outline.sexp @@ -1,4 +1,5 @@ ;; Outline effect - shows only edges +(require-primitives "image") 
(define-effect outline :params ( @@ -7,14 +8,14 @@ (color :type list :default (list 0 0 0) ) (fill_mode "original")) - (let* ((edge-img (edges frame (/ threshold 2) threshold)) + (let* ((edge-img (image:edge-detect frame (/ threshold 2) threshold)) (dilated (if (> thickness 1) (dilate edge-img thickness) edge-img)) (base (cond ((= fill_mode "original") (copy frame)) - ((= fill_mode "white") (make-image (width frame) (height frame) (list 255 255 255))) - (else (make-image (width frame) (height frame) (list 0 0 0)))))) + ((= fill_mode "white") (make-image (image:width frame) (image:height frame) (list 255 255 255))) + (else (make-image (image:width frame) (image:height frame) (list 0 0 0)))))) (map-pixels base (lambda (x y c) (let ((edge-val (luminance (pixel dilated x y)))) diff --git a/sexp_effects/effects/pixelate.sexp b/sexp_effects/effects/pixelate.sexp index 0abacdf..3d28ce1 100644 --- a/sexp_effects/effects/pixelate.sexp +++ b/sexp_effects/effects/pixelate.sexp @@ -1,12 +1,13 @@ ;; Pixelate effect - creates blocky pixels +(require-primitives "image") (define-effect pixelate :params ( (block_size :type int :default 8 :range [2 64]) ) - (let* ((w (width frame)) - (h (height frame)) + (let* ((w (image:width frame)) + (h (image:height frame)) (small-w (max 1 (floor (/ w block_size)))) (small-h (max 1 (floor (/ h block_size)))) - (small (resize frame small-w small-h "area"))) - (resize small w h "nearest"))) + (small (image:resize frame small-w small-h "area"))) + (image:resize small w h "nearest"))) diff --git a/sexp_effects/effects/posterize.sexp b/sexp_effects/effects/posterize.sexp index 1063e80..7052ed3 100644 --- a/sexp_effects/effects/posterize.sexp +++ b/sexp_effects/effects/posterize.sexp @@ -1,12 +1,8 @@ ;; Posterize effect - reduces color levels +(require-primitives "color_ops") (define-effect posterize :params ( (levels :type int :default 8 :range [2 32]) ) - (let ((step (floor (/ 256 levels)))) - (map-pixels frame - (lambda (x y c) - (rgb (* (floor (/ 
(red c) step)) step) - (* (floor (/ (green c) step)) step) - (* (floor (/ (blue c) step)) step)))))) + (color_ops:posterize frame levels)) diff --git a/sexp_effects/effects/resize-frame.sexp b/sexp_effects/effects/resize-frame.sexp index a337865..a1cce27 100644 --- a/sexp_effects/effects/resize-frame.sexp +++ b/sexp_effects/effects/resize-frame.sexp @@ -1,5 +1,6 @@ ;; Resize effect - replaces RESIZE node ;; Note: uses target-w/target-h to avoid conflict with width/height primitives +(require-primitives "image") (define-effect resize-frame :params ( @@ -7,4 +8,4 @@ (target-h :type int :default 480 :desc "Target height in pixels") (mode :type string :default "linear" :choices [linear nearest area] :desc "Interpolation mode") ) - (resize frame target-w target-h mode)) + (image:resize frame target-w target-h mode)) diff --git a/sexp_effects/effects/ripple.sexp b/sexp_effects/effects/ripple.sexp index 9d54789..0bb7a8d 100644 --- a/sexp_effects/effects/ripple.sexp +++ b/sexp_effects/effects/ripple.sexp @@ -10,10 +10,10 @@ (decay :type int :default 1 :range [0 5]) (speed :type int :default 1 :range [0 10]) ) - (let* ((w (width frame)) - (h (height frame)) + (let* ((w (image:width frame)) + (h (image:height frame)) (cx (* w center_x)) (cy (* h center_y)) - (phase (* (or _time 0) speed 2 pi)) - (coords (ripple-displace w h frequency amplitude cx cy decay phase))) - (remap frame (coords-x coords) (coords-y coords)))) + (phase (* (or t 0) speed 2 pi)) + (coords (geometry:ripple-displace w h frequency amplitude cx cy decay phase))) + (geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords)))) diff --git a/sexp_effects/effects/rotate.sexp b/sexp_effects/effects/rotate.sexp index d886c5a..d06c2f7 100644 --- a/sexp_effects/effects/rotate.sexp +++ b/sexp_effects/effects/rotate.sexp @@ -8,4 +8,4 @@ (speed :type int :default 0 :desc "rotation per second") ) (let ((total-angle (+ angle (* speed t)))) - (rotate-img frame total-angle))) + (geometry:rotate-img frame 
total-angle))) diff --git a/sexp_effects/effects/saturation.sexp b/sexp_effects/effects/saturation.sexp index 452d581..9852dc7 100644 --- a/sexp_effects/effects/saturation.sexp +++ b/sexp_effects/effects/saturation.sexp @@ -1,10 +1,9 @@ ;; Saturation effect - adjusts color saturation ;; Uses vectorized shift-hsv primitive for fast processing - (require-primitives "color_ops") (define-effect saturation :params ( (amount :type int :default 1 :range [0 3]) ) - (shift-hsv frame 0 amount 1)) + (color_ops:adjust-saturation frame amount)) diff --git a/sexp_effects/effects/scanlines.sexp b/sexp_effects/effects/scanlines.sexp index f46a8c3..ddfcf44 100644 --- a/sexp_effects/effects/scanlines.sexp +++ b/sexp_effects/effects/scanlines.sexp @@ -1,4 +1,5 @@ ;; Scanlines effect - VHS-style horizontal line shifting +(require-primitives "core") (define-effect scanlines :params ( @@ -9,6 +10,6 @@ (map-rows frame (lambda (y row) (let* ((sine-shift (* amplitude (sin (/ (* y 6.28) (max 1 frequency))))) - (rand-shift (random (- amplitude) amplitude)) + (rand-shift (core:rand-range (- amplitude) amplitude)) (shift (floor (lerp sine-shift rand-shift randomness)))) (roll row shift 0))))) diff --git a/sexp_effects/effects/sepia.sexp b/sexp_effects/effects/sepia.sexp index 2fd6666..e3a5875 100644 --- a/sexp_effects/effects/sepia.sexp +++ b/sexp_effects/effects/sepia.sexp @@ -1,9 +1,7 @@ ;; Sepia effect - applies sepia tone ;; Classic warm vintage look +(require-primitives "color_ops") (define-effect sepia :params () - (color-matrix frame - (list (list 0.393 0.769 0.189) - (list 0.349 0.686 0.168) - (list 0.272 0.534 0.131)))) + (color_ops:sepia frame)) diff --git a/sexp_effects/effects/sharpen.sexp b/sexp_effects/effects/sharpen.sexp index 81dc72e..538bd7f 100644 --- a/sexp_effects/effects/sharpen.sexp +++ b/sexp_effects/effects/sharpen.sexp @@ -1,10 +1,8 @@ ;; Sharpen effect - sharpens edges +(require-primitives "image") (define-effect sharpen :params ( (amount :type int :default 1 :range 
[0 5]) ) - (let ((kernel (list (list 0 (- amount) 0) - (list (- amount) (+ 1 (* 4 amount)) (- amount)) - (list 0 (- amount) 0)))) - (convolve frame kernel))) + (image:sharpen frame amount)) diff --git a/sexp_effects/effects/strobe.sexp b/sexp_effects/effects/strobe.sexp index 0825d3c..e51ba30 100644 --- a/sexp_effects/effects/strobe.sexp +++ b/sexp_effects/effects/strobe.sexp @@ -1,4 +1,5 @@ ;; Strobe effect - holds frames for choppy look +(require-primitives "core") (define-effect strobe :params ( @@ -7,7 +8,7 @@ (let* ((held (state-get 'held nil)) (held-until (state-get 'held-until 0)) (frame-duration (/ 1 frame_rate))) - (if (or (= held nil) (>= t held-until)) + (if (or (core:is-nil held) (>= t held-until)) (begin (state-set 'held (copy frame)) (state-set 'held-until (+ t frame-duration)) diff --git a/sexp_effects/effects/swirl.sexp b/sexp_effects/effects/swirl.sexp index 254f92c..ba9cf57 100644 --- a/sexp_effects/effects/swirl.sexp +++ b/sexp_effects/effects/swirl.sexp @@ -1,4 +1,5 @@ ;; Swirl effect - spiral vortex distortion +(require-primitives "geometry" "image") (define-effect swirl :params ( @@ -8,9 +9,9 @@ (center_y :type float :default 0.5 :range [0 1]) (falloff :type string :default "quadratic") ) - (let* ((w (width frame)) - (h (height frame)) + (let* ((w (image:width frame)) + (h (image:height frame)) (cx (* w center_x)) (cy (* h center_y)) - (coords (swirl-displace w h strength radius cx cy falloff))) - (remap frame (coords-x coords) (coords-y coords)))) + (coords (geometry:swirl-coords w h strength radius cx cy falloff))) + (geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords)))) diff --git a/sexp_effects/effects/threshold.sexp b/sexp_effects/effects/threshold.sexp index 4f8f115..50d3bc5 100644 --- a/sexp_effects/effects/threshold.sexp +++ b/sexp_effects/effects/threshold.sexp @@ -1,14 +1,9 @@ ;; Threshold effect - converts to black and white +(require-primitives "color_ops") (define-effect threshold :params ( (level :type int 
:default 128 :range [0 255]) (invert :type bool :default false) ) - (map-pixels frame - (lambda (x y c) - (let* ((lum (luminance c)) - (above (if invert (< lum level) (> lum level)))) - (if above - (rgb 255 255 255) - (rgb 0 0 0)))))) + (color_ops:threshold frame level invert)) diff --git a/sexp_effects/effects/tile_grid.sexp b/sexp_effects/effects/tile_grid.sexp index 95ea769..44487a9 100644 --- a/sexp_effects/effects/tile_grid.sexp +++ b/sexp_effects/effects/tile_grid.sexp @@ -1,4 +1,5 @@ ;; Tile Grid effect - tiles image in grid +(require-primitives "geometry" "image") (define-effect tile_grid :params ( @@ -6,11 +7,11 @@ (cols :type int :default 2 :range [1 10]) (gap :type int :default 0 :range [0 50]) ) - (let* ((w (width frame)) - (h (height frame)) + (let* ((w (image:width frame)) + (h (image:height frame)) (tile-w (floor (/ (- w (* gap (- cols 1))) cols))) (tile-h (floor (/ (- h (* gap (- rows 1))) rows))) - (tile (resize frame tile-w tile-h "area")) + (tile (image:resize frame tile-w tile-h "area")) (result (make-image w h (list 0 0 0)))) (begin ;; Manually place tiles using nested iteration diff --git a/sexp_effects/effects/trails.sexp b/sexp_effects/effects/trails.sexp index b0752e7..f16c302 100644 --- a/sexp_effects/effects/trails.sexp +++ b/sexp_effects/effects/trails.sexp @@ -1,4 +1,5 @@ ;; Trails effect - persistent motion trails +(require-primitives "image" "blending") (define-effect trails :params ( @@ -10,10 +11,10 @@ (begin (state-set 'buffer (copy frame)) frame) - (let* ((faded (blend-images buffer - (make-image (width frame) (height frame) (list 0 0 0)) + (let* ((faded (blending:blend-images buffer + (make-image (image:width frame) (image:height frame) (list 0 0 0)) (- 1 persistence))) - (result (blend-mode faded current "lighten"))) + (result (blending:blend-mode faded current "lighten"))) (begin (state-set 'buffer result) result))))) diff --git a/sexp_effects/effects/vignette.sexp b/sexp_effects/effects/vignette.sexp index 6c1cd02..46e63ee 
100644 --- a/sexp_effects/effects/vignette.sexp +++ b/sexp_effects/effects/vignette.sexp @@ -1,12 +1,13 @@ ;; Vignette effect - darkens corners +(require-primitives "image") (define-effect vignette :params ( (strength :type float :default 0.5 :range [0 1]) (radius :type int :default 1 :range [0.5 2]) ) - (let* ((w (width frame)) - (h (height frame)) + (let* ((w (image:width frame)) + (h (image:height frame)) (cx (/ w 2)) (cy (/ h 2)) (max-dist (* (sqrt (+ (* cx cx) (* cy cy))) radius))) diff --git a/sexp_effects/effects/wave.sexp b/sexp_effects/effects/wave.sexp index 98246e2..98b03c2 100644 --- a/sexp_effects/effects/wave.sexp +++ b/sexp_effects/effects/wave.sexp @@ -1,4 +1,5 @@ ;; Wave effect - sine wave displacement distortion +(require-primitives "geometry" "image") (define-effect wave :params ( @@ -7,8 +8,8 @@ (speed :type int :default 1 :range [0 10]) (direction :type string :default "horizontal") ) - (let* ((w (width frame)) - (h (height frame)) + (let* ((w (image:width frame)) + (h (image:height frame)) ;; Use _time for animation phase (phase (* (or _time 0) speed 2 pi)) ;; Calculate frequency: waves per dimension @@ -17,5 +18,5 @@ ((= direction "horizontal") "x") ((= direction "vertical") "y") (else "both"))) - (coords (wave-displace w h axis freq amplitude phase))) - (remap frame (coords-x coords) (coords-y coords)))) + (coords (geometry:wave-coords w h axis freq amplitude phase))) + (geometry:remap frame (geometry:coords-x coords) (geometry:coords-y coords)))) diff --git a/sexp_effects/effects/zoom.sexp b/sexp_effects/effects/zoom.sexp index 0e94234..6e4b9ff 100644 --- a/sexp_effects/effects/zoom.sexp +++ b/sexp_effects/effects/zoom.sexp @@ -5,4 +5,4 @@ :params ( (amount :type int :default 1 :range [0.1 5]) ) - (scale-img frame amount amount)) + (geometry:scale-img frame amount amount)) diff --git a/sexp_effects/primitive_libs/core.py b/sexp_effects/primitive_libs/core.py index 3220e5e..352cbd3 100644 --- a/sexp_effects/primitive_libs/core.py +++ 
b/sexp_effects/primitive_libs/core.py @@ -189,6 +189,30 @@ def prim_range(*args): return [] +# Random +import random +_rng = random.Random() + +def prim_rand(): + """Return random float in [0, 1).""" + return _rng.random() + +def prim_rand_int(lo, hi): + """Return random integer in [lo, hi].""" + return _rng.randint(int(lo), int(hi)) + +def prim_rand_range(lo, hi): + """Return random float in [lo, hi).""" + return lo + _rng.random() * (hi - lo) + +def prim_map_range(val, from_lo, from_hi, to_lo, to_hi): + """Map value from one range to another.""" + if from_hi == from_lo: + return to_lo + t = (val - from_lo) / (from_hi - from_lo) + return to_lo + t * (to_hi - to_lo) + + # Core primitives dict PRIMITIVES = { # Arithmetic @@ -231,10 +255,17 @@ PRIMITIVES = { 'list?': prim_is_list, 'dict?': prim_is_dict, 'nil?': prim_is_nil, + 'is-nil': prim_is_nil, # Higher-order / iteration 'reduce': prim_reduce, 'fold': prim_reduce, 'map': prim_map, 'range': prim_range, + + # Random + 'rand': prim_rand, + 'rand-int': prim_rand_int, + 'rand-range': prim_rand_range, + 'map-range': prim_map_range, } diff --git a/sexp_effects/primitive_libs/streaming.py b/sexp_effects/primitive_libs/streaming.py new file mode 100644 index 0000000..3e2fc51 --- /dev/null +++ b/sexp_effects/primitive_libs/streaming.py @@ -0,0 +1,304 @@ +""" +Streaming primitives for video/audio processing. + +These primitives handle video source reading and audio analysis, +keeping the interpreter completely generic. 
+""" + +import numpy as np +import subprocess +import json +from pathlib import Path + + +class VideoSource: + """Video source with persistent streaming pipe for fast sequential reads.""" + + def __init__(self, path: str, fps: float = 30): + self.path = Path(path) + self.fps = fps # Output fps for the stream + self._frame_size = None + self._duration = None + self._proc = None # Persistent ffmpeg process + self._stream_time = 0.0 # Current position in stream + self._frame_time = 1.0 / fps # Time per frame at output fps + self._last_read_time = -1 + self._cached_frame = None + + # Get video info + cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", + "-show_streams", str(self.path)] + result = subprocess.run(cmd, capture_output=True, text=True) + info = json.loads(result.stdout) + + for stream in info.get("streams", []): + if stream.get("codec_type") == "video": + self._frame_size = (stream.get("width", 720), stream.get("height", 720)) + # Try direct duration field first + if "duration" in stream: + self._duration = float(stream["duration"]) + # Fall back to tags.DURATION (webm format: "00:01:00.124000000") + elif "tags" in stream and "DURATION" in stream["tags"]: + dur_str = stream["tags"]["DURATION"] + parts = dur_str.split(":") + if len(parts) == 3: + h, m, s = parts + self._duration = int(h) * 3600 + int(m) * 60 + float(s) + break + + if not self._frame_size: + self._frame_size = (720, 720) + + def _start_stream(self, seek_time: float = 0): + """Start or restart the ffmpeg streaming process.""" + if self._proc: + self._proc.kill() + self._proc = None + + w, h = self._frame_size + cmd = [ + "ffmpeg", "-v", "quiet", + "-ss", f"{seek_time:.3f}", + "-i", str(self.path), + "-f", "rawvideo", "-pix_fmt", "rgb24", + "-s", f"{w}x{h}", + "-r", str(self.fps), # Output at specified fps + "-" + ] + self._proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) + self._stream_time = seek_time + + def _read_frame_from_stream(self) -> np.ndarray: + 
"""Read one frame from the stream.""" + w, h = self._frame_size + frame_size = w * h * 3 + + if not self._proc or self._proc.poll() is not None: + return None + + data = self._proc.stdout.read(frame_size) + if len(data) < frame_size: + return None + + return np.frombuffer(data, dtype=np.uint8).reshape((h, w, 3)).copy() + + def read(self) -> np.ndarray: + """Read frame (uses last cached or t=0).""" + if self._cached_frame is not None: + return self._cached_frame + return self.read_at(0) + + def read_at(self, t: float) -> np.ndarray: + """Read frame at specific time using streaming with smart seeking.""" + # Cache check - return same frame for same time + if t == self._last_read_time and self._cached_frame is not None: + return self._cached_frame + + w, h = self._frame_size + + # Loop time if video is shorter + seek_time = t + if self._duration and self._duration > 0: + seek_time = t % self._duration + + # Decide whether to seek or continue streaming + # Seek if: no stream, going backwards (more than 1 frame), or jumping more than 2 seconds ahead + # Allow small backward tolerance to handle floating point and timing jitter + need_seek = ( + self._proc is None or + self._proc.poll() is not None or + seek_time < self._stream_time - self._frame_time or # More than 1 frame backward + seek_time > self._stream_time + 2.0 + ) + + if need_seek: + import sys + reason = "no proc" if self._proc is None else "proc dead" if self._proc.poll() is not None else "backward" if seek_time < self._stream_time else "jump" + print(f"SEEK {self.path.name}: t={t:.4f} seek={seek_time:.4f} stream={self._stream_time:.4f} ({reason})", file=sys.stderr) + self._start_stream(seek_time) + + # Skip frames to reach target time + while self._stream_time + self._frame_time <= seek_time: + frame = self._read_frame_from_stream() + if frame is None: + # Stream ended, restart from seek point + self._start_stream(seek_time) + break + self._stream_time += self._frame_time + + # Read the target frame + frame = 
self._read_frame_from_stream() + if frame is None: + import sys + print(f"NULL FRAME {self.path.name}: t={t:.2f} seek={seek_time:.2f}", file=sys.stderr) + frame = np.zeros((h, w, 3), dtype=np.uint8) + else: + self._stream_time += self._frame_time + + self._last_read_time = t + self._cached_frame = frame + return frame + + def skip(self): + """No-op for seek-based reading.""" + pass + + @property + def size(self): + return self._frame_size + + def close(self): + if self._proc: + self._proc.kill() + self._proc = None + + +class AudioAnalyzer: + """Audio analyzer for energy and beat detection.""" + + def __init__(self, path: str, sample_rate: int = 22050): + self.path = Path(path) + self.sample_rate = sample_rate + + # Load audio via ffmpeg + cmd = ["ffmpeg", "-v", "quiet", "-i", str(self.path), + "-f", "f32le", "-ac", "1", "-ar", str(sample_rate), "-"] + result = subprocess.run(cmd, capture_output=True) + self._audio = np.frombuffer(result.stdout, dtype=np.float32) + + # Get duration + cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", + "-show_format", str(self.path)] + info = json.loads(subprocess.run(cmd, capture_output=True, text=True).stdout) + self.duration = float(info.get("format", {}).get("duration", 60)) + + # Beat detection state + self._flux_history = [] + self._last_beat_time = -1 + self._beat_count = 0 + self._last_beat_check_time = -1 + # Cache beat result for current time (so multiple scans see same result) + self._beat_cache_time = -1 + self._beat_cache_result = False + + def get_energy(self, t: float) -> float: + """Get energy level at time t (0-1).""" + idx = int(t * self.sample_rate) + start = max(0, idx - 512) + end = min(len(self._audio), idx + 512) + if start >= end: + return 0.0 + return min(1.0, np.sqrt(np.mean(self._audio[start:end] ** 2)) * 3.0) + + def get_beat(self, t: float) -> bool: + """Check if there's a beat at time t.""" + # Return cached result if same time (multiple scans query same frame) + if t == self._beat_cache_time: + 
return self._beat_cache_result + + idx = int(t * self.sample_rate) + size = 2048 + + start, end = max(0, idx - size//2), min(len(self._audio), idx + size//2) + if end - start < size/2: + self._beat_cache_time = t + self._beat_cache_result = False + return False + curr = self._audio[start:end] + + pstart, pend = max(0, start - 512), max(0, end - 512) + if pend <= pstart: + self._beat_cache_time = t + self._beat_cache_result = False + return False + prev = self._audio[pstart:pend] + + curr_spec = np.abs(np.fft.rfft(curr * np.hanning(len(curr)))) + prev_spec = np.abs(np.fft.rfft(prev * np.hanning(len(prev)))) + + n = min(len(curr_spec), len(prev_spec)) + flux = np.sum(np.maximum(0, curr_spec[:n] - prev_spec[:n])) / (n + 1) + + self._flux_history.append((t, flux)) + if len(self._flux_history) > 50: + self._flux_history = self._flux_history[-50:] + + if len(self._flux_history) < 5: + self._beat_cache_time = t + self._beat_cache_result = False + return False + + recent = [f for _, f in self._flux_history[-20:]] + threshold = np.mean(recent) + 1.5 * np.std(recent) + + is_beat = flux > threshold and (t - self._last_beat_time) > 0.1 + if is_beat: + self._last_beat_time = t + if t > self._last_beat_check_time: + self._beat_count += 1 + self._last_beat_check_time = t + + # Cache result for this time + self._beat_cache_time = t + self._beat_cache_result = is_beat + return is_beat + + def get_beat_count(self, t: float) -> int: + """Get cumulative beat count up to time t.""" + # Ensure beat detection has run up to this time + self.get_beat(t) + return self._beat_count + + +# === Primitives === + +def prim_make_video_source(path: str, fps: float = 30): + """Create a video source from a file path.""" + return VideoSource(path, fps) + + +def prim_source_read(source: VideoSource, t: float = None): + """Read a frame from a video source.""" + import sys + if t is not None: + frame = source.read_at(t) + # Debug: show source and time + if int(t * 10) % 10 == 0: # Every second + 
print(f"READ {source.path.name}: t={t:.2f} stream={source._stream_time:.2f}", file=sys.stderr) + return frame + return source.read() + + +def prim_source_skip(source: VideoSource): + """Skip a frame (keep pipe in sync).""" + source.skip() + + +def prim_source_size(source: VideoSource): + """Get (width, height) of source.""" + return source.size + + +def prim_make_audio_analyzer(path: str): + """Create an audio analyzer from a file path.""" + return AudioAnalyzer(path) + + +def prim_audio_energy(analyzer: AudioAnalyzer, t: float) -> float: + """Get energy level (0-1) at time t.""" + return analyzer.get_energy(t) + + +def prim_audio_beat(analyzer: AudioAnalyzer, t: float) -> bool: + """Check if there's a beat at time t.""" + return analyzer.get_beat(t) + + +def prim_audio_beat_count(analyzer: AudioAnalyzer, t: float) -> int: + """Get cumulative beat count up to time t.""" + return analyzer.get_beat_count(t) + + +def prim_audio_duration(analyzer: AudioAnalyzer) -> float: + """Get audio duration in seconds.""" + return analyzer.duration diff --git a/streaming/output.py b/streaming/output.py index 6b3064b..c273bd1 100644 --- a/streaming/output.py +++ b/streaming/output.py @@ -167,6 +167,8 @@ class FileOutput(Output): # Add audio input if provided if audio_source: cmd.extend(["-i", str(audio_source)]) + # Explicitly map: video from input 0 (rawvideo), audio from input 1 + cmd.extend(["-map", "0:v", "-map", "1:a"]) cmd.extend([ "-c:v", codec, @@ -181,10 +183,12 @@ class FileOutput(Output): cmd.append(str(self.path)) + import sys + print(f"FileOutput cmd: {' '.join(cmd)}", file=sys.stderr) self._process = subprocess.Popen( cmd, stdin=subprocess.PIPE, - stderr=subprocess.DEVNULL, + stderr=None, # Show errors for debugging ) def write(self, frame: np.ndarray, t: float): @@ -257,3 +261,109 @@ class NullOutput(Output): @property def is_open(self) -> bool: return self._is_open + + +class PipeOutput(Output): + """ + Pipe frames directly to mpv. 
+ + Launches mpv with rawvideo demuxer and writes frames to stdin. + """ + + def __init__(self, size: Tuple[int, int], fps: float = 30, audio_source: str = None): + self.size = size + self.fps = fps + self.audio_source = audio_source + self._is_open = True + self._process = None + self._audio_process = None + self._started = False + + def _start(self): + """Start mpv and audio on first frame.""" + if self._started: + return + self._started = True + + import sys + w, h = self.size + + # Start mpv + cmd = [ + "mpv", "--no-cache", + "--demuxer=rawvideo", + f"--demuxer-rawvideo-w={w}", + f"--demuxer-rawvideo-h={h}", + "--demuxer-rawvideo-mp-format=rgb24", + f"--demuxer-rawvideo-fps={self.fps}", + "--title=Streaming", + "-" + ] + print(f"Starting mpv: {w}x{h} @ {self.fps}fps", file=sys.stderr) + self._process = subprocess.Popen( + cmd, + stdin=subprocess.PIPE, + stderr=subprocess.DEVNULL, + ) + + # Start audio + if self.audio_source: + audio_cmd = [ + "ffplay", "-nodisp", "-autoexit", "-loglevel", "quiet", + str(self.audio_source) + ] + print(f"Starting audio: {self.audio_source}", file=sys.stderr) + self._audio_process = subprocess.Popen( + audio_cmd, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) + + def write(self, frame: np.ndarray, t: float): + """Write frame to mpv.""" + if not self._is_open: + return + + self._start() + + # Check mpv still running + if self._process.poll() is not None: + self._is_open = False + return + + # Resize if needed + if frame.shape[1] != self.size[0] or frame.shape[0] != self.size[1]: + import cv2 + frame = cv2.resize(frame, self.size) + + # Ensure correct format + if frame.dtype != np.uint8: + frame = np.clip(frame, 0, 255).astype(np.uint8) + if not frame.flags['C_CONTIGUOUS']: + frame = np.ascontiguousarray(frame) + + try: + self._process.stdin.write(frame.tobytes()) + self._process.stdin.flush() + except BrokenPipeError: + self._is_open = False + + def close(self): + """Close mpv and audio.""" + if self._process: + try: 
+ self._process.stdin.close() + except: + pass + self._process.terminate() + self._process.wait() + if self._audio_process: + self._audio_process.terminate() + self._audio_process.wait() + self._is_open = False + + @property + def is_open(self) -> bool: + if self._process and self._process.poll() is not None: + self._is_open = False + return self._is_open diff --git a/streaming/pipeline.py b/streaming/pipeline.py new file mode 100644 index 0000000..29dd7e1 --- /dev/null +++ b/streaming/pipeline.py @@ -0,0 +1,846 @@ +""" +Streaming pipeline executor. + +Directly executes compiled sexp recipes frame-by-frame. +No adapter layer - frames and analysis flow through the DAG. +""" + +import sys +import time +import numpy as np +from pathlib import Path +from typing import Dict, List, Any, Optional +from dataclasses import dataclass, field + +from .sources import VideoSource +from .audio import StreamingAudioAnalyzer +from .output import DisplayOutput, FileOutput +from .sexp_interp import SexpInterpreter + + +@dataclass +class FrameContext: + """Context passed through the pipeline for each frame.""" + t: float # Current time + energy: float = 0.0 + is_beat: bool = False + beat_count: int = 0 + analysis: Dict[str, Any] = field(default_factory=dict) + + +class StreamingPipeline: + """ + Executes a compiled sexp recipe as a streaming pipeline. + + Frames flow through the DAG directly - no adapter needed. + Each node is evaluated lazily when its output is requested. 
+ """ + + def __init__(self, compiled_recipe, recipe_dir: Path = None, fps: float = 30, seed: int = 42, + output_size: tuple = None): + self.recipe = compiled_recipe + self.recipe_dir = recipe_dir or Path(".") + self.fps = fps + self.seed = seed + + # Build node lookup + self.nodes = {n['id']: n for n in compiled_recipe.nodes} + + # Runtime state + self.sources: Dict[str, VideoSource] = {} + self.audio_analyzer: Optional[StreamingAudioAnalyzer] = None + self.audio_source_path: Optional[str] = None + + # Sexp interpreter for expressions + self.interp = SexpInterpreter() + + # Scan state (node_id -> current value) + self.scan_state: Dict[str, Any] = {} + self.scan_emit: Dict[str, Any] = {} + + # SLICE_ON state + self.slice_on_acc: Dict[str, Any] = {} + self.slice_on_result: Dict[str, Any] = {} + + # Frame cache for current timestep (cleared each frame) + self._frame_cache: Dict[str, np.ndarray] = {} + + # Context for current frame + self.ctx = FrameContext(t=0.0) + + # Output size (w, h) - set after sources are initialized + self._output_size = output_size + + # Initialize + self._init_sources() + self._init_scans() + self._init_slice_on() + + # Set output size from first source if not specified + if self._output_size is None and self.sources: + first_source = next(iter(self.sources.values())) + self._output_size = first_source._size + + def _init_sources(self): + """Initialize video and audio sources.""" + for node in self.recipe.nodes: + if node.get('type') == 'SOURCE': + config = node.get('config', {}) + path = config.get('path') + if path: + full_path = (self.recipe_dir / path).resolve() + suffix = full_path.suffix.lower() + + if suffix in ('.mp4', '.webm', '.mov', '.avi', '.mkv'): + if not full_path.exists(): + print(f"Warning: video not found: {full_path}", file=sys.stderr) + continue + self.sources[node['id']] = VideoSource( + str(full_path), + target_fps=self.fps + ) + elif suffix in ('.mp3', '.wav', '.flac', '.ogg', '.m4a', '.aac'): + if not 
full_path.exists(): + print(f"Warning: audio not found: {full_path}", file=sys.stderr) + continue + self.audio_source_path = str(full_path) + self.audio_analyzer = StreamingAudioAnalyzer(str(full_path)) + + def _init_scans(self): + """Initialize scan nodes with their initial state.""" + import random + seed_offset = 0 + + for node in self.recipe.nodes: + if node.get('type') == 'SCAN': + config = node.get('config', {}) + + # Create RNG for this scan + scan_seed = config.get('seed', self.seed + seed_offset) + rng = random.Random(scan_seed) + seed_offset += 1 + + # Evaluate initial value + init_expr = config.get('init', 0) + init_value = self.interp.eval(init_expr, {}) + + self.scan_state[node['id']] = { + 'value': init_value, + 'rng': rng, + 'config': config, + } + + # Compute initial emit + self._update_scan_emit(node['id']) + + def _update_scan_emit(self, node_id: str): + """Update the emit value for a scan.""" + state = self.scan_state[node_id] + config = state['config'] + emit_expr = config.get('emit_expr', config.get('emit', None)) + + if emit_expr is None: + # No emit expression - emit the value directly + self.scan_emit[node_id] = state['value'] + return + + # Build environment from state + env = {} + if isinstance(state['value'], dict): + env.update(state['value']) + else: + env['acc'] = state['value'] + + env['beat_count'] = self.ctx.beat_count + env['time'] = self.ctx.t + + # Set RNG for interpreter + self.interp.rng = state['rng'] + + self.scan_emit[node_id] = self.interp.eval(emit_expr, env) + + def _step_scan(self, node_id: str): + """Step a scan forward on beat.""" + state = self.scan_state[node_id] + config = state['config'] + step_expr = config.get('step_expr', config.get('step', None)) + + if step_expr is None: + return + + # Build environment + env = {} + if isinstance(state['value'], dict): + env.update(state['value']) + else: + env['acc'] = state['value'] + + env['beat_count'] = self.ctx.beat_count + env['time'] = self.ctx.t + + # Set RNG + 
self.interp.rng = state['rng'] + + # Evaluate step + new_value = self.interp.eval(step_expr, env) + state['value'] = new_value + + # Update emit + self._update_scan_emit(node_id) + + def _init_slice_on(self): + """Initialize SLICE_ON nodes.""" + for node in self.recipe.nodes: + if node.get('type') == 'SLICE_ON': + config = node.get('config', {}) + init = config.get('init', {}) + self.slice_on_acc[node['id']] = dict(init) + + # Evaluate initial state + self._eval_slice_on(node['id']) + + def _eval_slice_on(self, node_id: str): + """Evaluate a SLICE_ON node's Lambda.""" + node = self.nodes[node_id] + config = node.get('config', {}) + fn = config.get('fn') + videos = config.get('videos', []) + + if not fn: + return + + acc = self.slice_on_acc[node_id] + n_videos = len(videos) + + # Set up environment + self.interp.globals['videos'] = list(range(n_videos)) + + try: + from .sexp_interp import eval_slice_on_lambda + result = eval_slice_on_lambda( + fn, acc, self.ctx.beat_count, 0, 1, + list(range(n_videos)), self.interp + ) + self.slice_on_result[node_id] = result + + # Update accumulator + if 'acc' in result: + self.slice_on_acc[node_id] = result['acc'] + except Exception as e: + print(f"SLICE_ON eval error: {e}", file=sys.stderr) + + def _on_beat(self): + """Called when a beat is detected.""" + self.ctx.beat_count += 1 + + # Step all scans + for node_id in self.scan_state: + self._step_scan(node_id) + + # Step all SLICE_ON nodes + for node_id in self.slice_on_acc: + self._eval_slice_on(node_id) + + def _get_frame(self, node_id: str) -> Optional[np.ndarray]: + """ + Get the output frame for a node at current time. + + Recursively evaluates inputs as needed. + Results are cached for the current timestep. 
+ """ + if node_id in self._frame_cache: + return self._frame_cache[node_id] + + node = self.nodes.get(node_id) + if not node: + return None + + node_type = node.get('type') + + if node_type == 'SOURCE': + frame = self._eval_source(node) + elif node_type == 'SEGMENT': + frame = self._eval_segment(node) + elif node_type == 'EFFECT': + frame = self._eval_effect(node) + elif node_type == 'SLICE_ON': + frame = self._eval_slice_on_frame(node) + else: + # Unknown node type - try to pass through input + inputs = node.get('inputs', []) + frame = self._get_frame(inputs[0]) if inputs else None + + self._frame_cache[node_id] = frame + return frame + + def _eval_source(self, node: dict) -> Optional[np.ndarray]: + """Evaluate a SOURCE node.""" + source = self.sources.get(node['id']) + if source: + return source.read_frame(self.ctx.t) + return None + + def _eval_segment(self, node: dict) -> Optional[np.ndarray]: + """Evaluate a SEGMENT node (time segment of source).""" + inputs = node.get('inputs', []) + if not inputs: + return None + + config = node.get('config', {}) + start = config.get('start', 0) + duration = config.get('duration') + + # Resolve any bindings + if isinstance(start, dict): + start = self._resolve_binding(start) if start.get('_binding') else 0 + if isinstance(duration, dict): + duration = self._resolve_binding(duration) if duration.get('_binding') else None + + # Adjust time for segment + t_local = self.ctx.t + (start if isinstance(start, (int, float)) else 0) + if duration and isinstance(duration, (int, float)): + t_local = t_local % duration # Loop within segment + + # Get source frame at adjusted time + source_id = inputs[0] + source = self.sources.get(source_id) + if source: + return source.read_frame(t_local) + + return self._get_frame(source_id) + + def _eval_effect(self, node: dict) -> Optional[np.ndarray]: + """Evaluate an EFFECT node.""" + import cv2 + + inputs = node.get('inputs', []) + config = node.get('config', {}) + effect_name = 
config.get('effect') + + # Get input frame(s) + input_frames = [self._get_frame(inp) for inp in inputs] + input_frames = [f for f in input_frames if f is not None] + + if not input_frames: + return None + + frame = input_frames[0] + + # Resolve bindings in config + params = self._resolve_config(config) + + # Apply effect based on name + if effect_name == 'rotate': + angle = params.get('angle', 0) + if abs(angle) > 0.5: + h, w = frame.shape[:2] + center = (w // 2, h // 2) + matrix = cv2.getRotationMatrix2D(center, angle, 1.0) + frame = cv2.warpAffine(frame, matrix, (w, h)) + + elif effect_name == 'zoom': + amount = params.get('amount', 1.0) + if abs(amount - 1.0) > 0.01: + frame = self._apply_zoom(frame, amount) + + elif effect_name == 'invert': + amount = params.get('amount', 0) + if amount > 0.01: + inverted = 255 - frame + frame = cv2.addWeighted(frame, 1 - amount, inverted, amount, 0) + + elif effect_name == 'hue_shift': + degrees = params.get('degrees', 0) + if abs(degrees) > 1: + hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV) + hsv[:, :, 0] = (hsv[:, :, 0].astype(int) + int(degrees / 2)) % 180 + frame = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB) + + elif effect_name == 'blend': + if len(input_frames) >= 2: + opacity = params.get('opacity', 0.5) + frame = cv2.addWeighted(input_frames[0], 1 - opacity, + input_frames[1], opacity, 0) + + elif effect_name == 'blend_multi': + weights = params.get('weights', []) + if len(input_frames) > 1 and weights: + h, w = input_frames[0].shape[:2] + result = np.zeros((h, w, 3), dtype=np.float32) + for f, wt in zip(input_frames, weights): + if f is not None and wt > 0.001: + if f.shape[:2] != (h, w): + f = cv2.resize(f, (w, h)) + result += f.astype(np.float32) * wt + frame = np.clip(result, 0, 255).astype(np.uint8) + + elif effect_name == 'ripple': + amp = params.get('amplitude', 0) + if amp > 1: + frame = self._apply_ripple(frame, amp, + params.get('center_x', 0.5), + params.get('center_y', 0.5), + params.get('frequency', 8), + 
params.get('decay', 2), + params.get('speed', 5)) + + return frame + + def _eval_slice_on_frame(self, node: dict) -> Optional[np.ndarray]: + """Evaluate a SLICE_ON node - returns composited frame.""" + import cv2 + + config = node.get('config', {}) + video_ids = config.get('videos', []) + result = self.slice_on_result.get(node['id'], {}) + + if not result: + # No result yet - return first video + if video_ids: + return self._get_frame(video_ids[0]) + return None + + # Get layers and compose info + layers = result.get('layers', []) + compose = result.get('compose', {}) + weights = compose.get('weights', []) + + if not layers or not weights: + if video_ids: + return self._get_frame(video_ids[0]) + return None + + # Get frames for each layer + frames = [] + for i, layer in enumerate(layers): + video_idx = layer.get('video', i) + if video_idx < len(video_ids): + frame = self._get_frame(video_ids[video_idx]) + + # Apply layer effects (zoom) + effects = layer.get('effects', []) + for eff in effects: + eff_name = eff.get('effect') + if hasattr(eff_name, 'name'): + eff_name = eff_name.name + if eff_name == 'zoom': + zoom_amt = eff.get('amount', 1.0) + if frame is not None: + frame = self._apply_zoom(frame, zoom_amt) + + frames.append(frame) + else: + frames.append(None) + + # Composite with weights - use consistent output size + if self._output_size: + w, h = self._output_size + else: + # Fallback to first non-None frame size + for f in frames: + if f is not None: + h, w = f.shape[:2] + break + else: + return None + + output = np.zeros((h, w, 3), dtype=np.float32) + + for frame, weight in zip(frames, weights): + if frame is None or weight < 0.001: + continue + + # Resize to output size + if frame.shape[1] != w or frame.shape[0] != h: + frame = cv2.resize(frame, (w, h)) + + output += frame.astype(np.float32) * weight + + # Normalize weights + total_weight = sum(wt for wt in weights if wt > 0.001) + if total_weight > 0 and abs(total_weight - 1.0) > 0.01: + output /= 
total_weight + + return np.clip(output, 0, 255).astype(np.uint8) + + def _resolve_config(self, config: dict) -> dict: + """Resolve bindings in effect config to actual values.""" + resolved = {} + + for key, value in config.items(): + if key in ('effect', 'effect_path', 'effect_cid', 'effects_registry', + 'analysis_refs', 'inputs', 'cid'): + continue + + if isinstance(value, dict) and value.get('_binding'): + resolved[key] = self._resolve_binding(value) + elif isinstance(value, dict) and value.get('_expr'): + resolved[key] = self._resolve_expr(value) + else: + resolved[key] = value + + return resolved + + def _resolve_binding(self, binding: dict) -> Any: + """Resolve a binding to its current value.""" + source_id = binding.get('source') + feature = binding.get('feature', 'values') + range_map = binding.get('range') + + # Get raw value from scan or analysis + if source_id in self.scan_emit: + value = self.scan_emit[source_id] + elif source_id in self.ctx.analysis: + data = self.ctx.analysis[source_id] + value = data.get(feature, data.get('values', [0]))[0] if isinstance(data, dict) else data + else: + # Fallback to energy + value = self.ctx.energy + + # Extract feature from dict + if isinstance(value, dict) and feature in value: + value = value[feature] + + # Apply range mapping + if range_map and isinstance(value, (int, float)): + lo, hi = range_map + value = lo + value * (hi - lo) + + return value + + def _resolve_expr(self, expr: dict) -> Any: + """Resolve a compiled expression.""" + env = { + 'energy': self.ctx.energy, + 'beat_count': self.ctx.beat_count, + 't': self.ctx.t, + } + + # Add scan values + for scan_id, value in self.scan_emit.items(): + # Use short form if available + env[scan_id] = value + + # Extract the actual expression from _expr wrapper + actual_expr = expr.get('_expr', expr) + return self.interp.eval(actual_expr, env) + + def _apply_zoom(self, frame: np.ndarray, amount: float) -> np.ndarray: + """Apply zoom to frame.""" + import cv2 + h, w = 
frame.shape[:2] + + if amount > 1.01: + # Zoom in: crop center + new_w, new_h = int(w / amount), int(h / amount) + if new_w > 0 and new_h > 0: + x1, y1 = (w - new_w) // 2, (h - new_h) // 2 + cropped = frame[y1:y1+new_h, x1:x1+new_w] + return cv2.resize(cropped, (w, h)) + elif amount < 0.99: + # Zoom out: shrink and center + scaled_w, scaled_h = int(w * amount), int(h * amount) + if scaled_w > 0 and scaled_h > 0: + shrunk = cv2.resize(frame, (scaled_w, scaled_h)) + canvas = np.zeros((h, w, 3), dtype=np.uint8) + x_off, y_off = (w - scaled_w) // 2, (h - scaled_h) // 2 + canvas[y_off:y_off+scaled_h, x_off:x_off+scaled_w] = shrunk + return canvas + + return frame + + def _apply_ripple(self, frame: np.ndarray, amplitude: float, + cx: float, cy: float, frequency: float, + decay: float, speed: float) -> np.ndarray: + """Apply ripple effect.""" + import cv2 + h, w = frame.shape[:2] + + # Create coordinate grids + y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) + + # Normalize to center + center_x, center_y = w * cx, h * cy + dx = x_coords - center_x + dy = y_coords - center_y + dist = np.sqrt(dx**2 + dy**2) + + # Ripple displacement + phase = self.ctx.t * speed + ripple = amplitude * np.sin(dist / frequency - phase) * np.exp(-dist * decay / max(w, h)) + + # Displace coordinates + angle = np.arctan2(dy, dx) + map_x = (x_coords + ripple * np.cos(angle)).astype(np.float32) + map_y = (y_coords + ripple * np.sin(angle)).astype(np.float32) + + return cv2.remap(frame, map_x, map_y, cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT) + + def _find_output_node(self) -> Optional[str]: + """Find the final output node (MUX or last EFFECT).""" + # Look for MUX node + for node in self.recipe.nodes: + if node.get('type') == 'MUX': + return node['id'] + + # Otherwise find last EFFECT after SLICE_ON + last_effect = None + found_slice_on = False + for node in self.recipe.nodes: + if node.get('type') == 'SLICE_ON': + found_slice_on = True + elif node.get('type') == 'EFFECT' and 
found_slice_on: + last_effect = node['id'] + + return last_effect + + def render_frame(self, t: float) -> Optional[np.ndarray]: + """Render a single frame at time t.""" + # Clear frame cache + self._frame_cache.clear() + + # Update context + self.ctx.t = t + + # Update audio analysis + if self.audio_analyzer: + self.audio_analyzer.set_time(t) + energy = self.audio_analyzer.get_energy() + is_beat = self.audio_analyzer.get_beat() + + # Beat edge detection + was_beat = self.ctx.is_beat + self.ctx.energy = energy + self.ctx.is_beat = is_beat + + if is_beat and not was_beat: + self._on_beat() + + # Store in analysis dict + self.ctx.analysis['live_energy'] = {'values': [energy]} + self.ctx.analysis['live_beat'] = {'values': [1.0 if is_beat else 0.0]} + + # Find output node and render + output_node = self._find_output_node() + if output_node: + frame = self._get_frame(output_node) + # Normalize to output size + if frame is not None and self._output_size: + w, h = self._output_size + if frame.shape[1] != w or frame.shape[0] != h: + import cv2 + frame = cv2.resize(frame, (w, h)) + return frame + + return None + + def run(self, output: str = "preview", duration: float = None): + """ + Run the pipeline. 
+ + Args: + output: "preview", filename, or Output object + duration: Duration in seconds (default: audio duration or 60s) + """ + # Determine duration + if duration is None: + if self.audio_analyzer: + duration = self.audio_analyzer.duration + else: + duration = 60.0 + + # Create output + if output == "preview": + # Get frame size from first source + first_source = next(iter(self.sources.values()), None) + if first_source: + w, h = first_source._size + else: + w, h = 720, 720 + out = DisplayOutput(size=(w, h), fps=self.fps, audio_source=self.audio_source_path) + elif isinstance(output, str): + first_source = next(iter(self.sources.values()), None) + if first_source: + w, h = first_source._size + else: + w, h = 720, 720 + out = FileOutput(output, size=(w, h), fps=self.fps, audio_source=self.audio_source_path) + else: + out = output + + frame_time = 1.0 / self.fps + n_frames = int(duration * self.fps) + + print(f"Streaming: {len(self.sources)} sources -> {output}", file=sys.stderr) + print(f"Duration: {duration:.1f}s, {n_frames} frames @ {self.fps}fps", file=sys.stderr) + + start_time = time.time() + frame_count = 0 + + try: + for frame_num in range(n_frames): + t = frame_num * frame_time + + frame = self.render_frame(t) + + if frame is not None: + out.write(frame, t) + frame_count += 1 + + # Progress + if frame_num % 50 == 0: + elapsed = time.time() - start_time + fps = frame_count / elapsed if elapsed > 0 else 0 + pct = 100 * frame_num / n_frames + print(f"\r{pct:5.1f}% | {fps:5.1f} fps | frame {frame_num}/{n_frames}", + end="", file=sys.stderr) + + except KeyboardInterrupt: + print("\nInterrupted", file=sys.stderr) + finally: + out.close() + for src in self.sources.values(): + src.close() + + elapsed = time.time() - start_time + avg_fps = frame_count / elapsed if elapsed > 0 else 0 + print(f"\nCompleted: {frame_count} frames in {elapsed:.1f}s ({avg_fps:.1f} fps avg)", + file=sys.stderr) + + +def run_pipeline(recipe_path: str, output: str = "preview", + duration: 
float = None, fps: float = None): + """ + Run a recipe through the streaming pipeline. + + No adapter layer - directly executes the compiled recipe. + """ + from pathlib import Path + + # Add artdag to path + import sys + sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag")) + + from artdag.sexp.compiler import compile_string + + recipe_path = Path(recipe_path) + recipe_text = recipe_path.read_text() + compiled = compile_string(recipe_text, {}, recipe_dir=recipe_path.parent) + + pipeline = StreamingPipeline( + compiled, + recipe_dir=recipe_path.parent, + fps=fps or compiled.encoding.get('fps', 30), + ) + + pipeline.run(output=output, duration=duration) + + +def run_pipeline_piped(recipe_path: str, duration: float = None, fps: float = None): + """ + Run pipeline and pipe directly to mpv with audio. + """ + import subprocess + from pathlib import Path + import sys + sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag")) + from artdag.sexp.compiler import compile_string + + recipe_path = Path(recipe_path) + recipe_text = recipe_path.read_text() + compiled = compile_string(recipe_text, {}, recipe_dir=recipe_path.parent) + + pipeline = StreamingPipeline( + compiled, + recipe_dir=recipe_path.parent, + fps=fps or compiled.encoding.get('fps', 30), + ) + + # Get frame info + first_source = next(iter(pipeline.sources.values()), None) + if first_source: + w, h = first_source._size + else: + w, h = 720, 720 + + # Determine duration + if duration is None: + if pipeline.audio_analyzer: + duration = pipeline.audio_analyzer.duration + else: + duration = 60.0 + + actual_fps = fps or compiled.encoding.get('fps', 30) + n_frames = int(duration * actual_fps) + frame_time = 1.0 / actual_fps + + print(f"Streaming {n_frames} frames @ {actual_fps}fps to mpv", file=sys.stderr) + + # Start mpv + mpv_cmd = [ + "mpv", "--no-cache", + "--demuxer=rawvideo", + f"--demuxer-rawvideo-w={w}", + f"--demuxer-rawvideo-h={h}", + "--demuxer-rawvideo-mp-format=rgb24", + 
f"--demuxer-rawvideo-fps={actual_fps}", + "--title=Streaming Pipeline", + "-" + ] + mpv = subprocess.Popen(mpv_cmd, stdin=subprocess.PIPE, stderr=subprocess.DEVNULL) + + # Start audio if available + audio_proc = None + if pipeline.audio_source_path: + audio_cmd = ["ffplay", "-nodisp", "-autoexit", "-loglevel", "quiet", + pipeline.audio_source_path] + audio_proc = subprocess.Popen(audio_cmd, stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL) + + try: + import cv2 + for frame_num in range(n_frames): + if mpv.poll() is not None: + break # mpv closed + + t = frame_num * frame_time + frame = pipeline.render_frame(t) + if frame is not None: + # Ensure consistent frame size + if frame.shape[1] != w or frame.shape[0] != h: + frame = cv2.resize(frame, (w, h)) + if not frame.flags['C_CONTIGUOUS']: + frame = np.ascontiguousarray(frame) + try: + mpv.stdin.write(frame.tobytes()) + mpv.stdin.flush() + except BrokenPipeError: + break + except KeyboardInterrupt: + pass + finally: + if mpv.stdin: + mpv.stdin.close() + mpv.terminate() + if audio_proc: + audio_proc.terminate() + for src in pipeline.sources.values(): + src.close() + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser(description="Run sexp recipe through streaming pipeline") + parser.add_argument("recipe", help="Path to .sexp recipe file") + parser.add_argument("-o", "--output", default="pipe", + help="Output: 'pipe' (mpv), 'preview', or filename (default: pipe)") + parser.add_argument("-d", "--duration", type=float, default=None, + help="Duration in seconds (default: audio duration)") + parser.add_argument("--fps", type=float, default=None, + help="Frame rate (default: from recipe)") + args = parser.parse_args() + + if args.output == "pipe": + run_pipeline_piped(args.recipe, duration=args.duration, fps=args.fps) + else: + run_pipeline(args.recipe, output=args.output, duration=args.duration, fps=args.fps) diff --git a/streaming/recipe_adapter.py b/streaming/recipe_adapter.py index 
acf9722..2133919 100644 --- a/streaming/recipe_adapter.py +++ b/streaming/recipe_adapter.py @@ -412,3 +412,59 @@ def run_recipe( """ adapter = RecipeAdapter(recipe_path, params=params) adapter.run(output=output, duration=duration, fps=fps) + + +def run_recipe_piped( + recipe_path: str, + duration: float = None, + params: Dict = None, + fps: float = None, +): + """ + Run recipe and pipe directly to mpv. + """ + from .output import PipeOutput + + adapter = RecipeAdapter(recipe_path, params=params) + compositor = adapter.build_compositor(analysis_data={}, fps=fps) + + # Get frame size + if compositor.sources: + first_source = compositor.sources[0] + w, h = first_source._size + else: + w, h = 720, 720 + + actual_fps = fps or adapter.compiled.encoding.get('fps', 30) + + # Create pipe output + pipe_out = PipeOutput( + size=(w, h), + fps=actual_fps, + audio_source=compositor._audio_source + ) + + # Create executor + from .sexp_executor import SexpStreamingExecutor + executor = SexpStreamingExecutor(adapter.compiled, seed=42) + + # Run with pipe output + compositor.run(output=pipe_out, duration=duration, recipe_executor=executor) + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser(description="Run sexp recipe with streaming compositor") + parser.add_argument("recipe", help="Path to .sexp recipe file") + parser.add_argument("-o", "--output", default="pipe", + help="Output: 'pipe' (mpv), 'preview', or filename (default: pipe)") + parser.add_argument("-d", "--duration", type=float, default=None, + help="Duration in seconds (default: audio duration)") + parser.add_argument("--fps", type=float, default=None, + help="Frame rate (default: from recipe)") + args = parser.parse_args() + + if args.output == "pipe": + run_recipe_piped(args.recipe, duration=args.duration, fps=args.fps) + else: + run_recipe(args.recipe, output=args.output, duration=args.duration, fps=args.fps) diff --git a/streaming/stream_sexp.py b/streaming/stream_sexp.py new file mode 
100644 index 0000000..b36dabf --- /dev/null +++ b/streaming/stream_sexp.py @@ -0,0 +1,1081 @@ +""" +Generic Streaming S-expression Interpreter. + +Executes streaming sexp recipes frame-by-frame. +The sexp defines the pipeline logic - interpreter just provides primitives. + +Primitives: + (read source-name) - read frame from source + (rotate frame :angle N) - rotate frame + (zoom frame :amount N) - zoom frame + (invert frame :amount N) - invert colors + (hue-shift frame :degrees N) - shift hue + (blend frame1 frame2 :opacity N) - blend two frames + (blend-weighted [frames...] [weights...]) - weighted blend + (ripple frame :amplitude N :cx N :cy N ...) - ripple effect + + (bind scan-name :field) - get scan state field + (map value [lo hi]) - map 0-1 value to range + energy - current energy (0-1) + beat - 1 if beat, 0 otherwise + t - current time + beat-count - total beats so far + +Example sexp: + (stream "test" + :fps 30 + (source vid "video.mp4") + (audio aud "music.mp3") + + (scan spin beat + :init {:angle 0 :dir 1} + :step (dict :angle (+ angle (* dir 10)) :dir dir)) + + (frame + (-> (read vid) + (rotate :angle (bind spin :angle)) + (zoom :amount (map energy [1 1.5]))))) +""" + +import sys +import time +import json +import hashlib +import numpy as np +import subprocess +from pathlib import Path +from dataclasses import dataclass, field +from typing import Dict, List, Any, Optional, Tuple, Union + +sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag")) +from artdag.sexp.parser import parse, parse_all, Symbol, Keyword + + +@dataclass +class StreamContext: + """Runtime context for streaming.""" + t: float = 0.0 + frame_num: int = 0 + fps: float = 30.0 + energy: float = 0.0 + is_beat: bool = False + beat_count: int = 0 + output_size: Tuple[int, int] = (720, 720) + + +class StreamCache: + """Cache for streaming data.""" + + def __init__(self, cache_dir: Path, recipe_hash: str): + self.cache_dir = cache_dir / recipe_hash + 
self.cache_dir.mkdir(parents=True, exist_ok=True) + self.analysis_buffer: Dict[str, List] = {} + self.scan_states: Dict[str, List] = {} + self.keyframe_interval = 5.0 + + def record_analysis(self, name: str, t: float, value: float): + if name not in self.analysis_buffer: + self.analysis_buffer[name] = [] + t = float(t) if hasattr(t, 'item') else t + value = float(value) if hasattr(value, 'item') else value + self.analysis_buffer[name].append((t, value)) + + def record_scan_state(self, name: str, t: float, state: dict): + if name not in self.scan_states: + self.scan_states[name] = [] + states = self.scan_states[name] + if not states or t - states[-1][0] >= self.keyframe_interval: + t = float(t) if hasattr(t, 'item') else t + clean = {k: (float(v) if hasattr(v, 'item') else v) for k, v in state.items()} + self.scan_states[name].append((t, clean)) + + def flush(self): + for name, data in self.analysis_buffer.items(): + path = self.cache_dir / f"analysis_{name}.json" + existing = json.loads(path.read_text()) if path.exists() else [] + existing.extend(data) + path.write_text(json.dumps(existing)) + self.analysis_buffer.clear() + + for name, states in self.scan_states.items(): + path = self.cache_dir / f"scan_{name}.json" + existing = json.loads(path.read_text()) if path.exists() else [] + existing.extend(states) + path.write_text(json.dumps(existing)) + self.scan_states.clear() + + +class VideoSource: + """Video source - reads frames sequentially.""" + + def __init__(self, path: str, fps: float = 30): + self.path = Path(path) + if not self.path.exists(): + raise FileNotFoundError(f"Video not found: {path}") + + # Get info + cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", + "-show_streams", "-show_format", str(self.path)] + info = json.loads(subprocess.run(cmd, capture_output=True, text=True).stdout) + + for s in info.get("streams", []): + if s.get("codec_type") == "video": + self.width = s.get("width", 720) + self.height = s.get("height", 720) + break + else: 
+ self.width, self.height = 720, 720 + + self.duration = float(info.get("format", {}).get("duration", 60)) + self.size = (self.width, self.height) + + # Start decoder + cmd = ["ffmpeg", "-v", "quiet", "-i", str(self.path), + "-f", "rawvideo", "-pix_fmt", "rgb24", "-r", str(fps), "-"] + self._proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) + self._frame_size = self.width * self.height * 3 + self._current_frame = None + + def read(self) -> Optional[np.ndarray]: + """Read next frame.""" + data = self._proc.stdout.read(self._frame_size) + if len(data) < self._frame_size: + return self._current_frame # Return last frame if stream ends + self._current_frame = np.frombuffer(data, dtype=np.uint8).reshape( + self.height, self.width, 3).copy() + return self._current_frame + + def skip(self): + """Read and discard frame (keep pipe in sync).""" + self._proc.stdout.read(self._frame_size) + + def close(self): + if self._proc: + self._proc.terminate() + self._proc.wait() + + +class AudioAnalyzer: + """Real-time audio analysis.""" + + def __init__(self, path: str, sample_rate: int = 22050): + self.path = Path(path) + + # Load audio + cmd = ["ffmpeg", "-v", "quiet", "-i", str(self.path), + "-f", "f32le", "-ac", "1", "-ar", str(sample_rate), "-"] + self._audio = np.frombuffer( + subprocess.run(cmd, capture_output=True).stdout, dtype=np.float32) + self.sample_rate = sample_rate + + # Get duration + cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", + "-show_format", str(self.path)] + info = json.loads(subprocess.run(cmd, capture_output=True, text=True).stdout) + self.duration = float(info.get("format", {}).get("duration", 60)) + + self._flux_history = [] + self._last_beat_time = -1 + + def get_energy(self, t: float) -> float: + idx = int(t * self.sample_rate) + start = max(0, idx - 512) + end = min(len(self._audio), idx + 512) + if start >= end: + return 0.0 + return min(1.0, np.sqrt(np.mean(self._audio[start:end] ** 2)) * 3.0) + + def get_beat(self, t: float) -> bool: + 
idx = int(t * self.sample_rate) + size = 2048 + + start, end = max(0, idx - size//2), min(len(self._audio), idx + size//2) + if end - start < size//2: + return False + curr = self._audio[start:end] + + pstart, pend = max(0, start - 512), max(0, end - 512) + if pend <= pstart: + return False + prev = self._audio[pstart:pend] + + curr_spec = np.abs(np.fft.rfft(curr * np.hanning(len(curr)))) + prev_spec = np.abs(np.fft.rfft(prev * np.hanning(len(prev)))) + + n = min(len(curr_spec), len(prev_spec)) + flux = np.sum(np.maximum(0, curr_spec[:n] - prev_spec[:n])) / (n + 1) + + self._flux_history.append((t, flux)) + while self._flux_history and self._flux_history[0][0] < t - 1.5: + self._flux_history.pop(0) + + if len(self._flux_history) < 3: + return False + + vals = [f for _, f in self._flux_history] + threshold = np.mean(vals) + np.std(vals) * 0.3 + 0.001 + + is_beat = flux > threshold and t - self._last_beat_time > 0.1 + if is_beat: + self._last_beat_time = t + return is_beat + + +class StreamInterpreter: + """ + Generic streaming sexp interpreter. + + Evaluates the frame pipeline expression each frame. 
+ """ + + def __init__(self, sexp_path: str, cache_dir: str = None): + self.sexp_path = Path(sexp_path) + self.sexp_dir = self.sexp_path.parent + + text = self.sexp_path.read_text() + self.ast = parse(text) + + self.config = self._parse_config() + + recipe_hash = hashlib.sha256(text.encode()).hexdigest()[:16] + cache_path = Path(cache_dir) if cache_dir else self.sexp_dir / ".stream_cache" + self.cache = StreamCache(cache_path, recipe_hash) + + self.ctx = StreamContext(fps=self.config.get('fps', 30)) + self.sources: Dict[str, VideoSource] = {} + self.frames: Dict[str, np.ndarray] = {} # Current frame per source + self._sources_read: set = set() # Track which sources read this frame + self.audios: Dict[str, AudioAnalyzer] = {} # Multiple named audio sources + self.audio_paths: Dict[str, str] = {} + self.audio_state: Dict[str, dict] = {} # Per-audio: {energy, is_beat, beat_count, last_beat} + self.scans: Dict[str, dict] = {} + + # Registries for external definitions + self.primitives: Dict[str, Any] = {} # name -> Python function + self.effects: Dict[str, dict] = {} # name -> {params, body} + self.macros: Dict[str, dict] = {} # name -> {params, body} + self.primitive_lib_dir = self.sexp_dir.parent / "sexp_effects" / "primitive_libs" + + self.frame_pipeline = None # The (frame ...) 
expression + + import random + self.rng = random.Random(self.config.get('seed', 42)) + + def _parse_config(self) -> dict: + """Parse config from (stream name :key val ...).""" + config = {'fps': 30, 'seed': 42} + if not self.ast or not isinstance(self.ast[0], Symbol): + return config + if self.ast[0].name != 'stream': + return config + + i = 2 + while i < len(self.ast): + if isinstance(self.ast[i], Keyword): + config[self.ast[i].name] = self.ast[i + 1] if i + 1 < len(self.ast) else None + i += 2 + elif isinstance(self.ast[i], list): + break + else: + i += 1 + return config + + def _load_primitives(self, lib_name: str): + """Load primitives from a Python library file.""" + import importlib.util + + # Try multiple paths + lib_paths = [ + self.primitive_lib_dir / f"{lib_name}.py", + self.sexp_dir / "primitive_libs" / f"{lib_name}.py", + self.sexp_dir.parent / "sexp_effects" / "primitive_libs" / f"{lib_name}.py", + ] + + lib_path = None + for p in lib_paths: + if p.exists(): + lib_path = p + break + + if not lib_path: + print(f"Warning: primitive library '{lib_name}' not found", file=sys.stderr) + return + + spec = importlib.util.spec_from_file_location(lib_name, lib_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + + # Extract all prim_* functions + count = 0 + for name in dir(module): + if name.startswith('prim_'): + func = getattr(module, name) + prim_name = name[5:] # Remove 'prim_' prefix + self.primitives[prim_name] = func + # Also register with dashes instead of underscores + dash_name = prim_name.replace('_', '-') + self.primitives[dash_name] = func + # Also register with -img suffix (sexp convention) + self.primitives[dash_name + '-img'] = func + count += 1 + + # Also check for PRIMITIVES dict (some modules use this for additional exports) + if hasattr(module, 'PRIMITIVES'): + prims = getattr(module, 'PRIMITIVES') + if isinstance(prims, dict): + for name, func in prims.items(): + self.primitives[name] = func + # Also 
register underscore version + underscore_name = name.replace('-', '_') + self.primitives[underscore_name] = func + count += 1 + + print(f"Loaded primitives: {lib_name} ({count} functions)", file=sys.stderr) + + def _load_effect(self, effect_path: Path): + """Load and register an effect from a .sexp file.""" + if not effect_path.exists(): + print(f"Warning: effect file not found: {effect_path}", file=sys.stderr) + return + + text = effect_path.read_text() + ast = parse_all(text) + + for form in ast: + if not isinstance(form, list) or not form: + continue + if not isinstance(form[0], Symbol): + continue + + cmd = form[0].name + + if cmd == 'require-primitives': + lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"') + self._load_primitives(lib_name) + + elif cmd == 'define-effect': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + params = {} + body = None + + i = 2 + while i < len(form): + if isinstance(form[i], Keyword): + if form[i].name == 'params' and i + 1 < len(form): + # Parse params list + params_list = form[i + 1] + for p in params_list: + if isinstance(p, list) and p: + pname = p[0].name if isinstance(p[0], Symbol) else str(p[0]) + pdef = {'default': 0} + j = 1 + while j < len(p): + if isinstance(p[j], Keyword): + pdef[p[j].name] = p[j + 1] if j + 1 < len(p) else None + j += 2 + else: + j += 1 + params[pname] = pdef + i += 2 + else: + i += 2 + else: + # Body expression + body = form[i] + i += 1 + + self.effects[name] = {'params': params, 'body': body, 'path': str(effect_path)} + print(f"Effect: {name}", file=sys.stderr) + + elif cmd == 'defmacro': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + params = [] + body = None + + if len(form) > 2 and isinstance(form[2], list): + params = [p.name if isinstance(p, Symbol) else str(p) for p in form[2]] + if len(form) > 3: + body = form[3] + + self.macros[name] = {'params': params, 'body': body} + print(f"Macro: {name}", file=sys.stderr) + + def 
_init(self): + """Initialize sources, scans, and pipeline from sexp.""" + for form in self.ast: + if not isinstance(form, list) or not form: + continue + if not isinstance(form[0], Symbol): + continue + + cmd = form[0].name + + # === External loading === + + if cmd == 'require-primitives': + lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"') + self._load_primitives(lib_name) + + elif cmd == 'effect': + # (effect name :path "...") + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + i = 2 + while i < len(form): + if isinstance(form[i], Keyword) and form[i].name == 'path': + path = str(form[i + 1]).strip('"') + full = (self.sexp_dir / path).resolve() + self._load_effect(full) + i += 2 + else: + i += 1 + + elif cmd == 'include': + # (include :path "...") + i = 1 + while i < len(form): + if isinstance(form[i], Keyword) and form[i].name == 'path': + path = str(form[i + 1]).strip('"') + full = (self.sexp_dir / path).resolve() + self._load_effect(full) # Reuse effect loader for includes + i += 2 + else: + i += 1 + + # === Sources === + + elif cmd == 'source': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + path = str(form[2]).strip('"') + full = (self.sexp_dir / path).resolve() + if full.exists(): + self.sources[name] = VideoSource(str(full), self.ctx.fps) + print(f"Source: {name} -> {full}", file=sys.stderr) + else: + print(f"Warning: {full} not found", file=sys.stderr) + + elif cmd == 'audio': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + path = str(form[2]).strip('"') + full = (self.sexp_dir / path).resolve() + if full.exists(): + self.audios[name] = AudioAnalyzer(str(full)) + self.audio_paths[name] = str(full) + self.audio_state[name] = {'energy': 0.0, 'is_beat': False, 'beat_count': 0, 'last_beat': False} + print(f"Audio: {name} -> {full}", file=sys.stderr) + + elif cmd == 'scan': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + # Trigger can be: + 
# (beat audio-name) - trigger on beat from specific audio + # beat - legacy: trigger on beat from first audio + trigger_expr = form[2] + if isinstance(trigger_expr, list) and len(trigger_expr) >= 2: + # (beat audio-name) + trigger_type = trigger_expr[0].name if isinstance(trigger_expr[0], Symbol) else str(trigger_expr[0]) + trigger_audio = trigger_expr[1].name if isinstance(trigger_expr[1], Symbol) else str(trigger_expr[1]) + trigger = (trigger_type, trigger_audio) + else: + # Legacy bare symbol + trigger = trigger_expr.name if isinstance(trigger_expr, Symbol) else str(trigger_expr) + + init_val, step_expr = {}, None + i = 3 + while i < len(form): + if isinstance(form[i], Keyword): + if form[i].name == 'init' and i + 1 < len(form): + init_val = self._eval(form[i + 1], {}) + elif form[i].name == 'step' and i + 1 < len(form): + step_expr = form[i + 1] + i += 2 + else: + i += 1 + + self.scans[name] = { + 'state': dict(init_val) if isinstance(init_val, dict) else {'acc': init_val}, + 'init': init_val, + 'step': step_expr, + 'trigger': trigger, + } + trigger_str = f"{trigger[0]} {trigger[1]}" if isinstance(trigger, tuple) else trigger + print(f"Scan: {name} (on {trigger_str})", file=sys.stderr) + + elif cmd == 'frame': + # (frame expr) - the pipeline to evaluate each frame + self.frame_pipeline = form[1] if len(form) > 1 else None + + # Set output size from first source + if self.sources: + first = next(iter(self.sources.values())) + self.ctx.output_size = first.size + + def _eval(self, expr, env: dict) -> Any: + """Evaluate an expression.""" + import cv2 + + # Primitives + if isinstance(expr, (int, float)): + return expr + if isinstance(expr, str): + return expr + if isinstance(expr, Symbol): + name = expr.name + # Built-in values + if name == 't' or name == '_time': + return self.ctx.t + if name == 'pi': + import math + return math.pi + if name == 'true': + return True + if name == 'false': + return False + if name == 'nil': + return None + # Environment lookup + if 
name in env: + return env[name] + # Scan state lookup + if name in self.scans: + return self.scans[name]['state'] + return 0 + + if isinstance(expr, Keyword): + return expr.name + + if not isinstance(expr, list) or not expr: + return expr + + # Dict literal {:key val ...} + if isinstance(expr[0], Keyword): + result = {} + i = 0 + while i < len(expr): + if isinstance(expr[i], Keyword): + result[expr[i].name] = self._eval(expr[i + 1], env) if i + 1 < len(expr) else None + i += 2 + else: + i += 1 + return result + + head = expr[0] + if not isinstance(head, Symbol): + return [self._eval(e, env) for e in expr] + + op = head.name + args = expr[1:] + + # Check if op is a closure in environment + if op in env: + val = env[op] + if isinstance(val, dict) and val.get('_type') == 'closure': + # Invoke closure + closure = val + closure_env = dict(closure['env']) + for i, pname in enumerate(closure['params']): + closure_env[pname] = self._eval(args[i], env) if i < len(args) else None + return self._eval(closure['body'], closure_env) + + # Threading macro + if op == '->': + result = self._eval(args[0], env) + for form in args[1:]: + if isinstance(form, list) and form: + # Insert result as first arg + new_form = [form[0], result] + form[1:] + result = self._eval(new_form, env) + else: + result = self._eval([form, result], env) + return result + + # === Audio analysis (explicit) === + + if op == 'energy': + # (energy audio-name) - get current energy from named audio + audio_name = args[0].name if isinstance(args[0], Symbol) else str(args[0]) + if audio_name in self.audio_state: + return self.audio_state[audio_name]['energy'] + return 0.0 + + if op == 'beat': + # (beat audio-name) - 1 if beat this frame, 0 otherwise + audio_name = args[0].name if isinstance(args[0], Symbol) else str(args[0]) + if audio_name in self.audio_state: + return 1.0 if self.audio_state[audio_name]['is_beat'] else 0.0 + return 0.0 + + if op == 'beat-count': + # (beat-count audio-name) - total beats from named 
audio + audio_name = args[0].name if isinstance(args[0], Symbol) else str(args[0]) + if audio_name in self.audio_state: + return self.audio_state[audio_name]['beat_count'] + return 0 + + # === Frame operations === + + if op == 'read': + # (read source-name) - get current frame from source (lazy read) + name = args[0].name if isinstance(args[0], Symbol) else str(args[0]) + if name not in self.frames: + if name in self.sources: + self.frames[name] = self.sources[name].read() + self._sources_read.add(name) + return self.frames.get(name) + + # === Binding and mapping === + + if op == 'bind': + # (bind scan-name :field) or (bind scan-name) + scan_name = args[0].name if isinstance(args[0], Symbol) else str(args[0]) + field = None + if len(args) > 1 and isinstance(args[1], Keyword): + field = args[1].name + + if scan_name in self.scans: + state = self.scans[scan_name]['state'] + if field: + return state.get(field, 0) + return state + return 0 + + if op == 'map': + # (map value [lo hi]) + val = self._eval(args[0], env) + range_list = self._eval(args[1], env) if len(args) > 1 else [0, 1] + if isinstance(range_list, list) and len(range_list) >= 2: + lo, hi = range_list[0], range_list[1] + return lo + val * (hi - lo) + return val + + # === Arithmetic === + + if op == '+': + return sum(self._eval(a, env) for a in args) + if op == '-': + vals = [self._eval(a, env) for a in args] + return vals[0] - sum(vals[1:]) if len(vals) > 1 else -vals[0] + if op == '*': + result = 1 + for a in args: + result *= self._eval(a, env) + return result + if op == '/': + vals = [self._eval(a, env) for a in args] + return vals[0] / vals[1] if len(vals) > 1 and vals[1] != 0 else 0 + if op == 'mod': + vals = [self._eval(a, env) for a in args] + return vals[0] % vals[1] if len(vals) > 1 and vals[1] != 0 else 0 + + if op == 'map-range': + # (map-range val from-lo from-hi to-lo to-hi) + val = self._eval(args[0], env) + from_lo = self._eval(args[1], env) + from_hi = self._eval(args[2], env) + to_lo = 
self._eval(args[3], env) + to_hi = self._eval(args[4], env) + # Normalize val to 0-1 in source range, then scale to target range + if from_hi == from_lo: + return to_lo + t = (val - from_lo) / (from_hi - from_lo) + return to_lo + t * (to_hi - to_lo) + + # === Comparison === + + if op == '<': + return self._eval(args[0], env) < self._eval(args[1], env) + if op == '>': + return self._eval(args[0], env) > self._eval(args[1], env) + if op == '=': + return self._eval(args[0], env) == self._eval(args[1], env) + if op == '<=': + return self._eval(args[0], env) <= self._eval(args[1], env) + if op == '>=': + return self._eval(args[0], env) >= self._eval(args[1], env) + + if op == 'and': + for arg in args: + if not self._eval(arg, env): + return False + return True + + if op == 'or': + # Lisp-style or: returns first truthy value, or last value if none truthy + result = False + for arg in args: + result = self._eval(arg, env) + if result: + return result + return result + + if op == 'not': + return not self._eval(args[0], env) + + # === Logic === + + if op == 'if': + cond = self._eval(args[0], env) + if cond: + return self._eval(args[1], env) + return self._eval(args[2], env) if len(args) > 2 else None + + if op == 'cond': + # (cond pred1 expr1 pred2 expr2 ... true else-expr) + i = 0 + while i < len(args) - 1: + pred = self._eval(args[i], env) + if pred: + return self._eval(args[i + 1], env) + i += 2 + return None + + if op == 'lambda': + # (lambda (params...) body) - create a closure + params = args[0] + body = args[1] + param_names = [p.name if isinstance(p, Symbol) else str(p) for p in params] + # Return a closure dict that captures the current env + return {'_type': 'closure', 'params': param_names, 'body': body, 'env': dict(env)} + + if op == 'let' or op == 'let*': + # Support both formats: + # (let [name val name val ...] body) - flat vector + # (let ((name val) (name val) ...) 
body) - nested list + # Note: our let already evaluates sequentially like let* + bindings = args[0] + body = args[1] + new_env = dict(env) + + if bindings and isinstance(bindings[0], list): + # Nested format: ((name val) (name val) ...) + for binding in bindings: + if isinstance(binding, list) and len(binding) >= 2: + name = binding[0].name if isinstance(binding[0], Symbol) else str(binding[0]) + val = self._eval(binding[1], new_env) + new_env[name] = val + else: + # Flat format: [name val name val ...] + i = 0 + while i < len(bindings): + name = bindings[i].name if isinstance(bindings[i], Symbol) else str(bindings[i]) + val = self._eval(bindings[i + 1], new_env) + new_env[name] = val + i += 2 + return self._eval(body, new_env) + + # === Random === + + if op == 'rand': + return self.rng.random() + if op == 'rand-int': + lo = int(self._eval(args[0], env)) + hi = int(self._eval(args[1], env)) + return self.rng.randint(lo, hi) + if op == 'rand-range': + lo = self._eval(args[0], env) + hi = self._eval(args[1], env) + return lo + self.rng.random() * (hi - lo) + + # === Dict === + + if op == 'dict': + result = {} + i = 0 + while i < len(args): + if isinstance(args[i], Keyword): + result[args[i].name] = self._eval(args[i + 1], env) if i + 1 < len(args) else None + i += 2 + else: + i += 1 + return result + + if op == 'get': + d = self._eval(args[0], env) + key = args[1].name if isinstance(args[1], Keyword) else self._eval(args[1], env) + if isinstance(d, dict): + return d.get(key, 0) + return 0 + + # === List === + + if op == 'list': + return [self._eval(a, env) for a in args] + + if op == 'nth': + lst = self._eval(args[0], env) + idx = int(self._eval(args[1], env)) + if isinstance(lst, list) and 0 <= idx < len(lst): + return lst[idx] + return None + + if op == 'len': + lst = self._eval(args[0], env) + return len(lst) if isinstance(lst, (list, dict, str)) else 0 + + # === External effects === + if op in self.effects: + effect = self.effects[op] + effect_env = dict(env) + 
effect_env['t'] = self.ctx.t + + # Set defaults for all params + param_names = list(effect['params'].keys()) + for pname, pdef in effect['params'].items(): + effect_env[pname] = pdef.get('default', 0) + + # Parse args: first is frame, then positional params, then kwargs + positional_idx = 0 + i = 0 + while i < len(args): + if isinstance(args[i], Keyword): + # Keyword arg + pname = args[i].name + if pname in effect['params'] and i + 1 < len(args): + effect_env[pname] = self._eval(args[i + 1], env) + i += 2 + else: + # Positional arg + val = self._eval(args[i], env) + if positional_idx == 0: + effect_env['frame'] = val + elif positional_idx - 1 < len(param_names): + effect_env[param_names[positional_idx - 1]] = val + positional_idx += 1 + i += 1 + + return self._eval(effect['body'], effect_env) + + # === External primitives === + if op in self.primitives: + prim_func = self.primitives[op] + # Evaluate all args + evaluated_args = [] + kwargs = {} + i = 0 + while i < len(args): + if isinstance(args[i], Keyword): + k = args[i].name + v = self._eval(args[i + 1], env) if i + 1 < len(args) else None + kwargs[k] = v + i += 2 + else: + evaluated_args.append(self._eval(args[i], env)) + i += 1 + # Call primitive + try: + if kwargs: + return prim_func(*evaluated_args, **kwargs) + return prim_func(*evaluated_args) + except Exception as e: + print(f"Primitive {op} error: {e}", file=sys.stderr) + return None + + # === Macros === + if op in self.macros: + macro = self.macros[op] + # Bind macro params to args (unevaluated) + macro_env = dict(env) + for i, pname in enumerate(macro['params']): + macro_env[pname] = args[i] if i < len(args) else None + # Expand and evaluate + return self._eval(macro['body'], macro_env) + + # === Primitive-style call (name-with-dashes -> prim_name_with_underscores) === + prim_name = op.replace('-', '_') + if prim_name in self.primitives: + prim_func = self.primitives[prim_name] + evaluated_args = [] + kwargs = {} + i = 0 + while i < len(args): + if 
isinstance(args[i], Keyword): + k = args[i].name.replace('-', '_') + v = self._eval(args[i + 1], env) if i + 1 < len(args) else None + kwargs[k] = v + i += 2 + else: + evaluated_args.append(self._eval(args[i], env)) + i += 1 + try: + if kwargs: + return prim_func(*evaluated_args, **kwargs) + return prim_func(*evaluated_args) + except Exception as e: + print(f"Primitive {op} error: {e}", file=sys.stderr) + return None + + # Unknown - return as-is + return expr + + def _step_scans(self): + """Step scans on beat from specific audio.""" + for name, scan in self.scans.items(): + trigger = scan['trigger'] + + # Check if this scan should step + should_step = False + audio_name = None + + if isinstance(trigger, tuple) and trigger[0] == 'beat': + # Explicit: (beat audio-name) + audio_name = trigger[1] + if audio_name in self.audio_state: + should_step = self.audio_state[audio_name]['is_beat'] + elif trigger == 'beat': + # Legacy: use first audio + if self.audio_state: + audio_name = next(iter(self.audio_state)) + should_step = self.audio_state[audio_name]['is_beat'] + + if should_step and audio_name: + state = self.audio_state[audio_name] + env = dict(scan['state']) + env['beat_count'] = state['beat_count'] + env['t'] = self.ctx.t + env['energy'] = state['energy'] + + if scan['step']: + new_state = self._eval(scan['step'], env) + if isinstance(new_state, dict): + scan['state'] = new_state + elif new_state is not None: + scan['state'] = {'acc': new_state} + + self.cache.record_scan_state(name, self.ctx.t, scan['state']) + + def run(self, duration: float = None, output: str = "pipe"): + """Run the streaming pipeline.""" + from .output import PipeOutput, DisplayOutput, FileOutput + + self._init() + + if not self.sources: + print("Error: no sources", file=sys.stderr) + return + + if not self.frame_pipeline: + print("Error: no (frame ...) 
pipeline defined", file=sys.stderr) + return + + w, h = self.ctx.output_size + + # Duration from first audio or default + if duration is None: + if self.audios: + first_audio = next(iter(self.audios.values())) + duration = first_audio.duration + else: + duration = 60.0 + + n_frames = int(duration * self.ctx.fps) + frame_time = 1.0 / self.ctx.fps + + print(f"Streaming {n_frames} frames @ {self.ctx.fps}fps", file=sys.stderr) + + # Use first audio for playback sync + first_audio_path = next(iter(self.audio_paths.values())) if self.audio_paths else None + + # Output + if output == "pipe": + out = PipeOutput(size=(w, h), fps=self.ctx.fps, + audio_source=first_audio_path) + elif output == "preview": + out = DisplayOutput(size=(w, h), fps=self.ctx.fps, + audio_source=first_audio_path) + else: + out = FileOutput(output, size=(w, h), fps=self.ctx.fps, + audio_source=first_audio_path) + + try: + for frame_num in range(n_frames): + if not out.is_open: + print(f"\nOutput closed at {frame_num}", file=sys.stderr) + break + + self.ctx.t = frame_num * frame_time + self.ctx.frame_num = frame_num + + # Update all audio states + for audio_name, analyzer in self.audios.items(): + state = self.audio_state[audio_name] + energy = analyzer.get_energy(self.ctx.t) + is_beat_raw = analyzer.get_beat(self.ctx.t) + is_beat = is_beat_raw and not state['last_beat'] + state['last_beat'] = is_beat_raw + + state['energy'] = energy + state['is_beat'] = is_beat + if is_beat: + state['beat_count'] += 1 + + self.cache.record_analysis(f'{audio_name}_energy', self.ctx.t, energy) + self.cache.record_analysis(f'{audio_name}_beat', self.ctx.t, 1.0 if is_beat else 0.0) + + # Step scans + self._step_scans() + + # Clear frames - will be read lazily + self.frames.clear() + self._sources_read = set() + + # Evaluate pipeline (reads happen on-demand) + result = self._eval(self.frame_pipeline, {}) + + # Skip unread sources to keep pipes in sync + for name, src in self.sources.items(): + if name not in 
self._sources_read: + src.skip() + + # Ensure output size + if result is not None: + import cv2 + if result.shape[:2] != (h, w): + result = cv2.resize(result, (w, h)) + out.write(result, self.ctx.t) + + # Progress + if frame_num % 30 == 0: + pct = 100 * frame_num / n_frames + # Show beats from first audio + total_beats = 0 + if self.audio_state: + first_state = next(iter(self.audio_state.values())) + total_beats = first_state['beat_count'] + print(f"\r{pct:5.1f}% | beats:{total_beats}", + end="", file=sys.stderr) + sys.stderr.flush() + + if frame_num % 300 == 0: + self.cache.flush() + + except KeyboardInterrupt: + print("\nInterrupted", file=sys.stderr) + except Exception as e: + print(f"\nError: {e}", file=sys.stderr) + import traceback + traceback.print_exc() + finally: + out.close() + for src in self.sources.values(): + src.close() + self.cache.flush() + + print("\nDone", file=sys.stderr) + + +def run_stream(sexp_path: str, duration: float = None, output: str = "pipe", fps: float = None): + """Run a streaming sexp.""" + interp = StreamInterpreter(sexp_path) + if fps: + interp.ctx.fps = fps + interp.run(duration=duration, output=output) + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser(description="Run streaming sexp") + parser.add_argument("sexp", help="Path to .sexp file") + parser.add_argument("-d", "--duration", type=float, default=None) + parser.add_argument("-o", "--output", default="pipe") + parser.add_argument("--fps", type=float, default=None, help="Override fps (default: from sexp)") + args = parser.parse_args() + + run_stream(args.sexp, duration=args.duration, output=args.output, fps=args.fps) diff --git a/streaming/stream_sexp_generic.py b/streaming/stream_sexp_generic.py new file mode 100644 index 0000000..2f8d4b2 --- /dev/null +++ b/streaming/stream_sexp_generic.py @@ -0,0 +1,859 @@ +""" +Fully Generic Streaming S-expression Interpreter. + +The interpreter knows NOTHING about video, audio, or any domain. 
+All domain logic comes from primitives loaded via (require-primitives ...). + +Built-in forms: + - Control: if, cond, let, let*, lambda, -> + - Arithmetic: +, -, *, /, mod + - Comparison: <, >, =, <=, >=, and, or, not + - Data: dict, get, list, nth, len, quote, map + - Random: none built-in; use primitives (e.g. core:rand) + - Scan: bind (access scan state) + +Everything else comes from primitives or effects. + +Context (ctx) is passed explicitly to frame evaluation: + - ctx.t: current time + - ctx.frame-num: current frame number + - ctx.fps: frames per second +""" + +import sys +import time +import json +import hashlib +import math +import numpy as np +from pathlib import Path +from dataclasses import dataclass +from typing import Dict, List, Any, Optional, Tuple + +sys.path.insert(0, str(Path(__file__).parent.parent.parent / "artdag")) +from artdag.sexp.parser import parse, parse_all, Symbol, Keyword + + +@dataclass +class Context: + """Runtime context passed to frame evaluation.""" + t: float = 0.0 + frame_num: int = 0 + fps: float = 30.0 + + +class StreamInterpreter: + """ + Fully generic streaming sexp interpreter. + + No domain-specific knowledge - just evaluates expressions + and calls primitives.
+ """ + + def __init__(self, sexp_path: str): + self.sexp_path = Path(sexp_path) + self.sexp_dir = self.sexp_path.parent + + text = self.sexp_path.read_text() + self.ast = parse(text) + + self.config = self._parse_config() + + # Global environment for def bindings + self.globals: Dict[str, Any] = {} + + # Scans + self.scans: Dict[str, dict] = {} + + # Audio playback path (for syncing output) + self.audio_playback: Optional[str] = None + + # Registries for external definitions + self.primitives: Dict[str, Any] = {} + self.effects: Dict[str, dict] = {} + self.macros: Dict[str, dict] = {} + self.primitive_lib_dir = self.sexp_dir.parent / "sexp_effects" / "primitive_libs" + + self.frame_pipeline = None + + # External config files (set before run()) + self.sources_config: Optional[Path] = None + self.audio_config: Optional[Path] = None + + import random + self.rng = random.Random(self.config.get('seed', 42)) + + def _load_config_file(self, config_path): + """Load a config file and process its definitions.""" + config_path = Path(config_path) # Accept str or Path + if not config_path.exists(): + print(f"Warning: config file not found: {config_path}", file=sys.stderr) + return + + text = config_path.read_text() + ast = parse_all(text) + + for form in ast: + if not isinstance(form, list) or not form: + continue + if not isinstance(form[0], Symbol): + continue + + cmd = form[0].name + + if cmd == 'require-primitives': + lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"') + self._load_primitives(lib_name) + + elif cmd == 'def': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + value = self._eval(form[2], self.globals) + self.globals[name] = value + print(f"Config: {name}", file=sys.stderr) + + elif cmd == 'audio-playback': + # Path relative to working directory (consistent with other paths) + path = str(form[1]).strip('"') + self.audio_playback = str(Path(path).resolve()) + print(f"Audio playback: {self.audio_playback}", 
file=sys.stderr) + + def _parse_config(self) -> dict: + """Parse config from (stream name :key val ...).""" + config = {'fps': 30, 'seed': 42, 'width': 720, 'height': 720} + if not self.ast or not isinstance(self.ast[0], Symbol): + return config + if self.ast[0].name != 'stream': + return config + + i = 2 + while i < len(self.ast): + if isinstance(self.ast[i], Keyword): + config[self.ast[i].name] = self.ast[i + 1] if i + 1 < len(self.ast) else None + i += 2 + elif isinstance(self.ast[i], list): + break + else: + i += 1 + return config + + def _load_primitives(self, lib_name: str): + """Load primitives from a Python library file.""" + import importlib.util + + lib_paths = [ + self.primitive_lib_dir / f"{lib_name}.py", + self.sexp_dir / "primitive_libs" / f"{lib_name}.py", + self.sexp_dir.parent / "sexp_effects" / "primitive_libs" / f"{lib_name}.py", + ] + + lib_path = None + for p in lib_paths: + if p.exists(): + lib_path = p + break + + if not lib_path: + print(f"Warning: primitive library '{lib_name}' not found", file=sys.stderr) + return + + spec = importlib.util.spec_from_file_location(lib_name, lib_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + + count = 0 + for name in dir(module): + if name.startswith('prim_'): + func = getattr(module, name) + prim_name = name[5:] + dash_name = prim_name.replace('_', '-') + # Register ONLY with namespace (geometry:ripple-displace) + self.primitives[f"{lib_name}:{dash_name}"] = func + count += 1 + + if hasattr(module, 'PRIMITIVES'): + prims = getattr(module, 'PRIMITIVES') + if isinstance(prims, dict): + for name, func in prims.items(): + # Register ONLY with namespace + dash_name = name.replace('_', '-') + self.primitives[f"{lib_name}:{dash_name}"] = func + count += 1 + + print(f"Loaded primitives: {lib_name} ({count} functions)", file=sys.stderr) + + def _load_effect(self, effect_path: Path): + """Load and register an effect from a .sexp file.""" + if not effect_path.exists(): + 
print(f"Warning: effect file not found: {effect_path}", file=sys.stderr) + return + + text = effect_path.read_text() + ast = parse_all(text) + + for form in ast: + if not isinstance(form, list) or not form: + continue + if not isinstance(form[0], Symbol): + continue + + cmd = form[0].name + + if cmd == 'require-primitives': + lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"') + self._load_primitives(lib_name) + + elif cmd == 'define-effect': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + params = {} + body = None + i = 2 + while i < len(form): + if isinstance(form[i], Keyword): + if form[i].name == 'params' and i + 1 < len(form): + for pdef in form[i + 1]: + if isinstance(pdef, list) and pdef: + pname = pdef[0].name if isinstance(pdef[0], Symbol) else str(pdef[0]) + pinfo = {'default': 0} + j = 1 + while j < len(pdef): + if isinstance(pdef[j], Keyword) and j + 1 < len(pdef): + pinfo[pdef[j].name] = pdef[j + 1] + j += 2 + else: + j += 1 + params[pname] = pinfo + i += 2 + else: + body = form[i] + i += 1 + + self.effects[name] = {'params': params, 'body': body} + print(f"Effect: {name}", file=sys.stderr) + + elif cmd == 'defmacro': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + params = [p.name if isinstance(p, Symbol) else str(p) for p in form[2]] + body = form[3] + self.macros[name] = {'params': params, 'body': body} + + elif cmd == 'effect': + # Handle (effect name :path "...") in included files - recursive + i = 2 + while i < len(form): + if isinstance(form[i], Keyword) and form[i].name == 'path': + path = str(form[i + 1]).strip('"') + # Resolve relative to the file being loaded + full = (effect_path.parent / path).resolve() + self._load_effect(full) + i += 2 + else: + i += 1 + + elif cmd == 'include': + # Handle (include :path "...") in included files - recursive + i = 1 + while i < len(form): + if isinstance(form[i], Keyword) and form[i].name == 'path': + path = str(form[i + 
1]).strip('"') + full = (effect_path.parent / path).resolve() + self._load_effect(full) + i += 2 + else: + i += 1 + + elif cmd == 'scan': + # Handle scans from included files + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + trigger_expr = form[2] + init_val, step_expr = {}, None + i = 3 + while i < len(form): + if isinstance(form[i], Keyword): + if form[i].name == 'init' and i + 1 < len(form): + init_val = self._eval(form[i + 1], self.globals) + elif form[i].name == 'step' and i + 1 < len(form): + step_expr = form[i + 1] + i += 2 + else: + i += 1 + + self.scans[name] = { + 'state': dict(init_val) if isinstance(init_val, dict) else {'acc': init_val}, + 'init': init_val, + 'step': step_expr, + 'trigger': trigger_expr, + } + print(f"Scan: {name}", file=sys.stderr) + + def _init(self): + """Initialize from sexp - load primitives, effects, defs, scans.""" + # Load external config files first (they can override recipe definitions) + if self.sources_config: + self._load_config_file(self.sources_config) + if self.audio_config: + self._load_config_file(self.audio_config) + + for form in self.ast: + if not isinstance(form, list) or not form: + continue + if not isinstance(form[0], Symbol): + continue + + cmd = form[0].name + + if cmd == 'require-primitives': + lib_name = form[1] if isinstance(form[1], str) else str(form[1]).strip('"') + self._load_primitives(lib_name) + + elif cmd == 'effect': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + i = 2 + while i < len(form): + if isinstance(form[i], Keyword) and form[i].name == 'path': + path = str(form[i + 1]).strip('"') + full = (self.sexp_dir / path).resolve() + self._load_effect(full) + i += 2 + else: + i += 1 + + elif cmd == 'include': + i = 1 + while i < len(form): + if isinstance(form[i], Keyword) and form[i].name == 'path': + path = str(form[i + 1]).strip('"') + full = (self.sexp_dir / path).resolve() + self._load_effect(full) + i += 2 + else: + i += 1 + + elif cmd == 
'audio-playback': + # (audio-playback "path") - set audio file for playback sync + # Skip if already set by config file + if self.audio_playback is None: + path = str(form[1]).strip('"') + self.audio_playback = str((self.sexp_dir / path).resolve()) + print(f"Audio playback: {self.audio_playback}", file=sys.stderr) + + elif cmd == 'def': + # (def name expr) - evaluate and store in globals + # Skip if already defined by config file + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + if name in self.globals: + print(f"Def: {name} (from config, skipped)", file=sys.stderr) + continue + value = self._eval(form[2], self.globals) + self.globals[name] = value + print(f"Def: {name}", file=sys.stderr) + + elif cmd == 'defmacro': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + params = [p.name if isinstance(p, Symbol) else str(p) for p in form[2]] + body = form[3] + self.macros[name] = {'params': params, 'body': body} + + elif cmd == 'scan': + name = form[1].name if isinstance(form[1], Symbol) else str(form[1]) + trigger_expr = form[2] + init_val, step_expr = {}, None + i = 3 + while i < len(form): + if isinstance(form[i], Keyword): + if form[i].name == 'init' and i + 1 < len(form): + init_val = self._eval(form[i + 1], self.globals) + elif form[i].name == 'step' and i + 1 < len(form): + step_expr = form[i + 1] + i += 2 + else: + i += 1 + + self.scans[name] = { + 'state': dict(init_val) if isinstance(init_val, dict) else {'acc': init_val}, + 'init': init_val, + 'step': step_expr, + 'trigger': trigger_expr, + } + print(f"Scan: {name}", file=sys.stderr) + + elif cmd == 'frame': + self.frame_pipeline = form[1] if len(form) > 1 else None + + def _eval(self, expr, env: dict) -> Any: + """Evaluate an expression.""" + + # Primitives + if isinstance(expr, (int, float)): + return expr + if isinstance(expr, str): + return expr + if isinstance(expr, bool): + return expr + + if isinstance(expr, Symbol): + name = expr.name + # Built-in constants 
+ if name == 'pi': + return math.pi + if name == 'true': + return True + if name == 'false': + return False + if name == 'nil': + return None + # Environment lookup + if name in env: + return env[name] + # Global lookup + if name in self.globals: + return self.globals[name] + # Scan state lookup + if name in self.scans: + return self.scans[name]['state'] + raise NameError(f"Undefined variable: {name}") + + if isinstance(expr, Keyword): + return expr.name + + if not isinstance(expr, list) or not expr: + return expr + + # Dict literal {:key val ...} + if isinstance(expr[0], Keyword): + result = {} + i = 0 + while i < len(expr): + if isinstance(expr[i], Keyword): + result[expr[i].name] = self._eval(expr[i + 1], env) if i + 1 < len(expr) else None + i += 2 + else: + i += 1 + return result + + head = expr[0] + if not isinstance(head, Symbol): + return [self._eval(e, env) for e in expr] + + op = head.name + args = expr[1:] + + # Check for closure call + if op in env: + val = env[op] + if isinstance(val, dict) and val.get('_type') == 'closure': + closure = val + closure_env = dict(closure['env']) + for i, pname in enumerate(closure['params']): + closure_env[pname] = self._eval(args[i], env) if i < len(args) else None + return self._eval(closure['body'], closure_env) + + if op in self.globals: + val = self.globals[op] + if isinstance(val, dict) and val.get('_type') == 'closure': + closure = val + closure_env = dict(closure['env']) + for i, pname in enumerate(closure['params']): + closure_env[pname] = self._eval(args[i], env) if i < len(args) else None + return self._eval(closure['body'], closure_env) + + # Threading macro + if op == '->': + result = self._eval(args[0], env) + for form in args[1:]: + if isinstance(form, list) and form: + new_form = [form[0], result] + form[1:] + result = self._eval(new_form, env) + else: + result = self._eval([form, result], env) + return result + + # === Binding === + + if op == 'bind': + scan_name = args[0].name if isinstance(args[0], 
Symbol) else str(args[0]) + if scan_name in self.scans: + state = self.scans[scan_name]['state'] + if len(args) > 1: + key = args[1].name if isinstance(args[1], Keyword) else str(args[1]) + return state.get(key, 0) + return state + return 0 + + # === Arithmetic === + + if op == '+': + return sum(self._eval(a, env) for a in args) + if op == '-': + vals = [self._eval(a, env) for a in args] + return vals[0] - sum(vals[1:]) if len(vals) > 1 else -vals[0] + if op == '*': + result = 1 + for a in args: + result *= self._eval(a, env) + return result + if op == '/': + vals = [self._eval(a, env) for a in args] + return vals[0] / vals[1] if len(vals) > 1 and vals[1] != 0 else 0 + if op == 'mod': + vals = [self._eval(a, env) for a in args] + return vals[0] % vals[1] if len(vals) > 1 and vals[1] != 0 else 0 + + # === Comparison === + + if op == '<': + return self._eval(args[0], env) < self._eval(args[1], env) + if op == '>': + return self._eval(args[0], env) > self._eval(args[1], env) + if op == '=': + return self._eval(args[0], env) == self._eval(args[1], env) + if op == '<=': + return self._eval(args[0], env) <= self._eval(args[1], env) + if op == '>=': + return self._eval(args[0], env) >= self._eval(args[1], env) + + if op == 'and': + for arg in args: + if not self._eval(arg, env): + return False + return True + + if op == 'or': + result = False + for arg in args: + result = self._eval(arg, env) + if result: + return result + return result + + if op == 'not': + return not self._eval(args[0], env) + + # === Logic === + + if op == 'if': + cond = self._eval(args[0], env) + if cond: + return self._eval(args[1], env) + return self._eval(args[2], env) if len(args) > 2 else None + + if op == 'cond': + i = 0 + while i < len(args) - 1: + pred = self._eval(args[i], env) + if pred: + return self._eval(args[i + 1], env) + i += 2 + return None + + if op == 'lambda': + params = args[0] + body = args[1] + param_names = [p.name if isinstance(p, Symbol) else str(p) for p in params] + return 
{'_type': 'closure', 'params': param_names, 'body': body, 'env': dict(env)} + + if op == 'let' or op == 'let*': + bindings = args[0] + body = args[1] + new_env = dict(env) + + if bindings and isinstance(bindings[0], list): + for binding in bindings: + if isinstance(binding, list) and len(binding) >= 2: + name = binding[0].name if isinstance(binding[0], Symbol) else str(binding[0]) + val = self._eval(binding[1], new_env) + new_env[name] = val + else: + i = 0 + while i < len(bindings): + name = bindings[i].name if isinstance(bindings[i], Symbol) else str(bindings[i]) + val = self._eval(bindings[i + 1], new_env) + new_env[name] = val + i += 2 + return self._eval(body, new_env) + + # === Dict === + + if op == 'dict': + result = {} + i = 0 + while i < len(args): + if isinstance(args[i], Keyword): + key = args[i].name + val = self._eval(args[i + 1], env) if i + 1 < len(args) else None + result[key] = val + i += 2 + else: + i += 1 + return result + + if op == 'get': + obj = self._eval(args[0], env) + key = args[1].name if isinstance(args[1], Keyword) else self._eval(args[1], env) + if isinstance(obj, dict): + return obj.get(key, 0) + return 0 + + # === List === + + if op == 'list': + return [self._eval(a, env) for a in args] + + if op == 'quote': + return args[0] if args else None + + if op == 'nth': + lst = self._eval(args[0], env) + idx = int(self._eval(args[1], env)) + if isinstance(lst, (list, tuple)) and 0 <= idx < len(lst): + return lst[idx] + return None + + if op == 'len': + val = self._eval(args[0], env) + return len(val) if hasattr(val, '__len__') else 0 + + if op == 'map': + seq = self._eval(args[0], env) + fn = self._eval(args[1], env) + if not isinstance(seq, (list, tuple)): + return [] + # Handle closure (lambda from sexp) + if isinstance(fn, dict) and fn.get('_type') == 'closure': + results = [] + for item in seq: + closure_env = dict(fn['env']) + if fn['params']: + closure_env[fn['params'][0]] = item + results.append(self._eval(fn['body'], closure_env)) + 
return results + # Handle Python callable + if callable(fn): + return [fn(item) for item in seq] + return [] + + # === Effects === + + if op in self.effects: + effect = self.effects[op] + effect_env = dict(env) + + param_names = list(effect['params'].keys()) + for pname, pdef in effect['params'].items(): + effect_env[pname] = pdef.get('default', 0) + + positional_idx = 0 + i = 0 + while i < len(args): + if isinstance(args[i], Keyword): + pname = args[i].name + if pname in effect['params'] and i + 1 < len(args): + effect_env[pname] = self._eval(args[i + 1], env) + i += 2 + else: + val = self._eval(args[i], env) + if positional_idx == 0: + effect_env['frame'] = val + elif positional_idx - 1 < len(param_names): + effect_env[param_names[positional_idx - 1]] = val + positional_idx += 1 + i += 1 + + return self._eval(effect['body'], effect_env) + + # === Primitives === + + if op in self.primitives: + prim_func = self.primitives[op] + evaluated_args = [] + kwargs = {} + i = 0 + while i < len(args): + if isinstance(args[i], Keyword): + k = args[i].name + v = self._eval(args[i + 1], env) if i + 1 < len(args) else None + kwargs[k] = v + i += 2 + else: + evaluated_args.append(self._eval(args[i], env)) + i += 1 + try: + if kwargs: + return prim_func(*evaluated_args, **kwargs) + return prim_func(*evaluated_args) + except Exception as e: + print(f"Primitive {op} error: {e}", file=sys.stderr) + return None + + # === Macros (function-like: args evaluated before binding) === + + if op in self.macros: + macro = self.macros[op] + macro_env = dict(env) + for i, pname in enumerate(macro['params']): + # Evaluate args in calling environment before binding + macro_env[pname] = self._eval(args[i], env) if i < len(args) else None + return self._eval(macro['body'], macro_env) + + # Underscore variant lookup + prim_name = op.replace('-', '_') + if prim_name in self.primitives: + prim_func = self.primitives[prim_name] + evaluated_args = [] + kwargs = {} + i = 0 + while i < len(args): + if 
isinstance(args[i], Keyword): + k = args[i].name.replace('-', '_') + v = self._eval(args[i + 1], env) if i + 1 < len(args) else None + kwargs[k] = v + i += 2 + else: + evaluated_args.append(self._eval(args[i], env)) + i += 1 + + try: + if kwargs: + return prim_func(*evaluated_args, **kwargs) + return prim_func(*evaluated_args) + except Exception as e: + print(f"Primitive {op} error: {e}", file=sys.stderr) + return None + + # Unknown - return as-is + return expr + + def _step_scans(self, ctx: Context, env: dict): + """Step scans based on trigger evaluation.""" + for name, scan in self.scans.items(): + trigger_expr = scan['trigger'] + + # Evaluate trigger in context + should_step = self._eval(trigger_expr, env) + + if should_step: + state = scan['state'] + step_env = dict(state) + step_env.update(env) + + new_state = self._eval(scan['step'], step_env) + if isinstance(new_state, dict): + scan['state'] = new_state + else: + scan['state'] = {'acc': new_state} + + def run(self, duration: float = None, output: str = "pipe"): + """Run the streaming pipeline.""" + # Import output classes - handle both package and direct execution + try: + from .output import PipeOutput, DisplayOutput, FileOutput + except ImportError: + from output import PipeOutput, DisplayOutput, FileOutput + + self._init() + + if not self.frame_pipeline: + print("Error: no (frame ...) 
pipeline defined", file=sys.stderr) + return + + w = self.config.get('width', 720) + h = self.config.get('height', 720) + fps = self.config.get('fps', 30) + + if duration is None: + # Try to get duration from audio if available + for name, val in self.globals.items(): + if hasattr(val, 'duration'): + duration = val.duration + print(f"Using audio duration: {duration:.1f}s", file=sys.stderr) + break + else: + duration = 60.0 + + n_frames = int(duration * fps) + frame_time = 1.0 / fps + + print(f"Streaming {n_frames} frames @ {fps}fps", file=sys.stderr) + + # Create context + ctx = Context(fps=fps) + + # Output (with optional audio sync) + audio = self.audio_playback + if output == "pipe": + out = PipeOutput(size=(w, h), fps=fps, audio_source=audio) + elif output == "preview": + out = DisplayOutput(size=(w, h), fps=fps, audio_source=audio) + else: + out = FileOutput(output, size=(w, h), fps=fps, audio_source=audio) + + try: + frame_times = [] + for frame_num in range(n_frames): + if not out.is_open: + break + + frame_start = time.time() + ctx.t = frame_num * frame_time + ctx.frame_num = frame_num + + # Build frame environment with context + frame_env = { + 'ctx': { + 't': ctx.t, + 'frame-num': ctx.frame_num, + 'fps': ctx.fps, + }, + 't': ctx.t, # Also expose t directly for convenience + 'frame-num': ctx.frame_num, + } + + # Step scans + self._step_scans(ctx, frame_env) + + # Evaluate pipeline + result = self._eval(self.frame_pipeline, frame_env) + + if result is not None and hasattr(result, 'shape'): + out.write(result, ctx.t) + + frame_elapsed = time.time() - frame_start + frame_times.append(frame_elapsed) + + # Progress with timing + if frame_num % 30 == 0: + pct = 100 * frame_num / n_frames + avg_ms = 1000 * sum(frame_times[-30:]) / max(1, len(frame_times[-30:])) + target_ms = 1000 * frame_time + print(f"\r{pct:5.1f}% [{avg_ms:.0f}ms/frame, target {target_ms:.0f}ms]", end="", file=sys.stderr, flush=True) + + finally: + out.close() + print("\nDone", file=sys.stderr) 
+ + +def run_stream(sexp_path: str, duration: float = None, output: str = "pipe", fps: float = None, + sources_config: str = None, audio_config: str = None): + """Run a streaming sexp.""" + interp = StreamInterpreter(sexp_path) + if fps: + interp.config['fps'] = fps + if sources_config: + interp.sources_config = Path(sources_config) + if audio_config: + interp.audio_config = Path(audio_config) + interp.run(duration=duration, output=output) + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser(description="Run streaming sexp (generic interpreter)") + parser.add_argument("sexp", help="Path to .sexp file") + parser.add_argument("-d", "--duration", type=float, default=None) + parser.add_argument("-o", "--output", default="pipe") + parser.add_argument("--fps", type=float, default=None) + parser.add_argument("--sources", dest="sources_config", help="Path to sources config .sexp file") + parser.add_argument("--audio", dest="audio_config", help="Path to audio config .sexp file") + args = parser.parse_args() + + run_stream(args.sexp, duration=args.duration, output=args.output, fps=args.fps, + sources_config=args.sources_config, audio_config=args.audio_config) diff --git a/templates/crossfade-zoom.sexp b/templates/crossfade-zoom.sexp new file mode 100644 index 0000000..fc6d9ad --- /dev/null +++ b/templates/crossfade-zoom.sexp @@ -0,0 +1,25 @@ +;; Crossfade with Zoom Transition +;; +;; Macro for transitioning between two frames with a zoom effect. +;; Active frame zooms out while next frame zooms in. +;; +;; Required context: +;; - zoom effect must be loaded +;; - blend effect must be loaded +;; +;; Parameters: +;; active-frame: current frame +;; next-frame: frame to transition to +;; fade-amt: transition progress (0 = all active, 1 = all next) +;; +;; Usage: +;; (include :path "../templates/crossfade-zoom.sexp") +;; ... 
+;; (crossfade-zoom active-frame next-frame 0.5) + +(defmacro crossfade-zoom (active-frame next-frame fade-amt) + (let [active-zoom (+ 1.0 fade-amt) + active-zoomed (zoom active-frame :amount active-zoom) + next-zoom (+ 0.1 (* fade-amt 0.9)) + next-zoomed (zoom next-frame :amount next-zoom)] + (blend active-zoomed next-zoomed :opacity fade-amt))) diff --git a/templates/scan-oscillating-spin.sexp b/templates/scan-oscillating-spin.sexp new file mode 100644 index 0000000..051f079 --- /dev/null +++ b/templates/scan-oscillating-spin.sexp @@ -0,0 +1,28 @@ +;; Oscillating Spin Scan +;; +;; Accumulates rotation angle on each beat, reversing direction +;; periodically for an oscillating effect. +;; +;; Required context: +;; - music: audio analyzer from (streaming:make-audio-analyzer ...) +;; +;; Provides scan: spin +;; Bind with: (bind spin :angle) ;; cumulative rotation angle +;; +;; Behavior: +;; - Rotates 14.4 degrees per beat (completes 360 in 25 beats) +;; - After 20-30 beats, reverses direction +;; - Creates a swinging/oscillating rotation effect +;; +;; Usage: +;; (include :path "../templates/scan-oscillating-spin.sexp") +;; +;; In frame: +;; (rotate frame :angle (bind spin :angle)) + +(scan spin (streaming:audio-beat music t) + :init {:angle 0 :dir 1 :left 25} + :step (if (> left 0) + (dict :angle (+ angle (* dir 14.4)) :dir dir :left (- left 1)) + (dict :angle angle :dir (* dir -1) + :left (+ 20 (mod (streaming:audio-beat-count music t) 11))))) diff --git a/templates/scan-ripple-drops.sexp b/templates/scan-ripple-drops.sexp new file mode 100644 index 0000000..7caf720 --- /dev/null +++ b/templates/scan-ripple-drops.sexp @@ -0,0 +1,41 @@ +;; Beat-Triggered Ripple Drops Scan +;; +;; Creates random ripple drops triggered by audio beats. +;; Each drop has a random center position and duration. +;; +;; Required context: +;; - music: audio analyzer from (streaming:make-audio-analyzer ...) 
+;;   - core primitives loaded
+;;
+;; Provides scan: ripple-state
+;; Bind with: (bind ripple-state :gate)  ;; 0 or 1
+;;           (bind ripple-state :cx)     ;; center x (0-1)
+;;           (bind ripple-state :cy)     ;; center y (0-1)
+;;
+;; Tuning constants (hard-coded in the scan body below; edit there to change):
+;;   trigger chance: 0.15 probability per beat
+;;   drop duration: 1-15 beats (1 + beat-count mod 15)
+;;   drop center: random within 0.2-0.8 on each axis
+;;
+;; Usage:
+;;   (include :path "../templates/scan-ripple-drops.sexp")
+;;   ;; Uses default: 15% chance, 1-15 beat duration
+;;
+;; In frame:
+;;   (let [rip-gate (bind ripple-state :gate)
+;;         rip-amp (* rip-gate (core:map-range e 0 1 5 50))]
+;;     (ripple frame
+;;             :amplitude rip-amp
+;;             :center_x (bind ripple-state :cx)
+;;             :center_y (bind ripple-state :cy)))
+
+(scan ripple-state (streaming:audio-beat music t)
+  :init {:gate 0 :cx 0.5 :cy 0.5 :left 0}
+  :step (if (> left 0)
+          (dict :gate 1 :cx cx :cy cy :left (- left 1))
+          (if (< (core:rand) 0.15)
+            (dict :gate 1
+                  :cx (+ 0.2 (* (core:rand) 0.6))
+                  :cy (+ 0.2 (* (core:rand) 0.6))
+                  :left (+ 1 (mod (streaming:audio-beat-count music t) 15)))
+            (dict :gate 0 :cx 0.5 :cy 0.5 :left 0))))
diff --git a/templates/standard-effects.sexp b/templates/standard-effects.sexp
new file mode 100644
index 0000000..9e97f34
--- /dev/null
+++ b/templates/standard-effects.sexp
@@ -0,0 +1,22 @@
+;; Standard Effects Bundle
+;;
+;; Loads commonly-used video effects.
+;; Include after primitives are loaded.
+;;
+;; Effects provided:
+;;   - rotate: rotation by angle
+;;   - zoom: scale in/out
+;;   - blend: alpha blend two frames
+;;   - ripple: water ripple distortion
+;;   - invert: color inversion
+;;   - hue_shift: hue rotation
+;;
+;; Usage:
+;;   (include :path "../templates/standard-effects.sexp")
+
+;; NOTE(review): effect paths are relative ("../sexp_effects/...") and assume
+;; the standard repo layout -- confirm how (include)/(effect) resolve paths.
+(effect rotate :path "../sexp_effects/effects/rotate.sexp")
+(effect zoom :path "../sexp_effects/effects/zoom.sexp")
+(effect blend :path "../sexp_effects/effects/blend.sexp")
+(effect ripple :path "../sexp_effects/effects/ripple.sexp")
+(effect invert :path "../sexp_effects/effects/invert.sexp")
+(effect hue_shift :path "../sexp_effects/effects/hue_shift.sexp")
diff --git a/templates/standard-primitives.sexp b/templates/standard-primitives.sexp
new file mode 100644
index 0000000..6e2c62d
--- /dev/null
+++ b/templates/standard-primitives.sexp
@@ -0,0 +1,14 @@
+;; Standard Primitives Bundle
+;;
+;; Loads all commonly-used primitive libraries.
+;; Include this at the top of streaming recipes.
+;;
+;; Usage:
+;;   (include :path "../templates/standard-primitives.sexp")
+
+(require-primitives "geometry")
+(require-primitives "core")
+(require-primitives "image")
+(require-primitives "blending")
+(require-primitives "color_ops")
+(require-primitives "streaming")
diff --git a/templates/stream-process-pair.sexp b/templates/stream-process-pair.sexp
new file mode 100644
index 0000000..55f408e
--- /dev/null
+++ b/templates/stream-process-pair.sexp
@@ -0,0 +1,72 @@
+;; stream-process-pair template (streaming-compatible)
+;;
+;; Macro for processing a video source pair with full effects.
+;; Reads one source frame, derives A and B variants of it (rotate, zoom,
+;; invert, hue), blends them, and applies pair-level rotation.
+;;
+;; Required context (must be defined in calling scope):
+;;   - sources: array of video sources
+;;   - pair-configs: array of {:dir :rot-a :rot-b :zoom-a :zoom-b} configs
+;;   - pair-states: array from (bind pairs :states)
+;;   - now: current time (t)
+;;   - e: audio energy (0-1)
+;;
+;; Required effects (must be loaded):
+;;   - rotate, zoom, invert, hue_shift, blend
+;;
+;; Usage:
+;;   (include :path "../templates/stream-process-pair.sexp")
+;;   ...in frame pipeline...
+;;   (let [pair-states (bind pairs :states)
+;;         now t
+;;         e (streaming:audio-energy music now)]
+;;     (process-pair 0))  ;; process source at index 0
+
+(require-primitives "core")
+
+(defmacro process-pair (src-idx)
+  (let [src (nth sources src-idx)
+        frame (streaming:source-read src now)
+        cfg (nth pair-configs src-idx)
+        state (nth pair-states src-idx)
+
+        ;; Invert gates: 1 while the state countdown is > 0, else 0
+        inv-a-active (if (> (get state :inv-a) 0) 1 0)
+        inv-b-active (if (> (get state :inv-b) 0) 1 0)
+        ;; Hue shift applies only while its countdown is > 0, else 0 degrees
+        hue-a-val (if (> (get state :hue-a) 0) (get state :hue-a-val) 0)
+        hue-b-val (if (> (get state :hue-b) 0) (get state :hue-b-val) 0)
+        mix-opacity (get state :mix)
+        pair-rot-angle (* (get state :angle) (get cfg :dir))
+
+        ;; Per-pair maxima used by the energy mapping below
+        rot-a-max (get cfg :rot-a)
+        rot-b-max (get cfg :rot-b)
+        zoom-a-max (get cfg :zoom-a)
+        zoom-b-max (get cfg :zoom-b)
+
+        ;; Map energy e in [0,1] to rotation [0,max] and zoom [1,max]
+        rot-a (core:map-range e 0 1 0 rot-a-max)
+        rot-b (core:map-range e 0 1 0 rot-b-max)
+        zoom-a (core:map-range e 0 1 1 zoom-a-max)
+        zoom-b (core:map-range e 0 1 1 zoom-b-max)
+
+        ;; Variant A -- note both clips start from the SAME source frame
+        clip-a (-> frame
+                   (rotate :angle rot-a)
+                   (zoom :amount zoom-a)
+                   (invert :amount inv-a-active)
+                   (hue_shift :degrees hue-a-val))
+
+        ;; Variant B of the same frame, with its own parameters
+        clip-b (-> frame
+                   (rotate :angle rot-b)
+                   (zoom :amount zoom-b)
+                   (invert :amount inv-b-active)
+                   (hue_shift :degrees hue-b-val))
+
+        ;; Blend the two variants; the :mix state drives the opacity
+        blended (blend clip-a clip-b
+                       :opacity mix-opacity)]
+
+    ;; Rotate the blend by the pair angle (state :angle scaled by config :dir)
+    (rotate blended :angle pair-rot-angle)))