From 406cc7c0c76e69f9809c53bed5b06d0cee46a194 Mon Sep 17 00:00:00 2001 From: gilesb Date: Mon, 19 Jan 2026 12:34:45 +0000 Subject: [PATCH] Initial commit: video effects processing system Add S-expression based video effects pipeline with modular effect definitions, constructs, and recipe files. Co-Authored-By: Claude Opus 4.5 --- .gitignore | 12 + analysis.sexp | 13 + analyze.py | 223 +++ constructs/beat-alternate.sexp | 21 + constructs/cycle-effects-preset.sexp | 153 ++ constructs/slice-every-n.sexp | 53 + constructs/slice-on.sexp | 45 + effect-presets/all-42.sexp | 65 + effect-presets/chill-subtle.sexp | 13 + effect-presets/glitch-only.sexp | 13 + effect-presets/retro-vhs.sexp | 13 + effects/ascii_art.py | 205 +++ effects/ascii_art.sexp | 34 + effects/ascii_zones.sexp | 33 + effects/beam.py | 164 +++ effects/bloom.py | 108 ++ effects/bloom.sexp | 21 + effects/blur.py | 54 + effects/blur.sexp | 20 + effects/brightness.py | 52 + effects/brightness.sexp | 20 + effects/cartoon.py | 117 ++ effects/chromatic.py | 115 ++ effects/color-adjust.sexp | 21 + effects/color_cycle.py | 104 ++ effects/color_cycle.sexp | 20 + effects/color_grade.py | 139 ++ effects/contrast.py | 52 + effects/contrast.sexp | 20 + effects/crt.py | 193 +++ effects/crt.sexp | 21 + effects/datamosh.py | 141 ++ effects/datamosh.sexp | 21 + effects/displacement.py | 170 +++ effects/echo.py | 139 ++ effects/echo.sexp | 21 + effects/edge_detect.sexp | 22 + effects/emboss.py | 88 ++ effects/emboss.sexp | 19 + effects/film_grain.py | 148 ++ effects/film_grain.sexp | 19 + effects/fisheye.py | 116 ++ effects/fisheye.sexp | 20 + effects/flip.py | 64 + effects/flip.sexp | 19 + effects/grayscale.sexp | 14 + effects/hue_shift.py | 59 + effects/hue_shift.sexp | 20 + effects/invert.py | 62 + effects/invert.sexp | 14 + effects/kaleidoscope.py | 137 ++ effects/kaleidoscope.sexp | 21 + effects/mirror.py | 86 ++ effects/mirror.sexp | 19 + effects/motion_blur.py | 122 ++ effects/neon_glow.py | 130 ++ 
effects/neon_glow.sexp | 21 + effects/noise.py | 137 ++ effects/noise.sexp | 19 + effects/outline.py | 111 ++ effects/outline.sexp | 19 + effects/pixelate.py | 72 + effects/pixelate.sexp | 20 + effects/pixelsort.py | 152 ++ effects/pixelsort.sexp | 22 + effects/posterize.py | 79 + effects/posterize.sexp | 20 + effects/random.py | 217 +++ effects/rgb_split.py | 106 ++ effects/rgb_split.sexp | 19 + effects/ripple.sexp | 21 + effects/rotate.py | 110 ++ effects/rotate.sexp | 20 + effects/saturation.py | 61 + effects/saturation.sexp | 20 + effects/scanlines.py | 116 ++ effects/scanlines.sexp | 21 + effects/scatter.py | 142 ++ effects/sepia.sexp | 14 + effects/sharpen.py | 63 + effects/sharpen.sexp | 20 + effects/shatter.py | 233 +++ effects/strobe.py | 90 ++ effects/strobe.sexp | 20 + effects/swirl.py | 121 ++ effects/swirl.sexp | 20 + effects/threshold.py | 85 ++ effects/threshold.sexp | 20 + effects/tile_grid.py | 134 ++ effects/tile_grid.sexp | 21 + effects/trails.py | 95 ++ effects/trails.sexp | 20 + effects/vhs.py | 163 +++ effects/vignette.py | 116 ++ effects/vignette.sexp | 19 + effects/wave.py | 108 ++ effects/wave.sexp | 21 + effects/zoom.py | 91 ++ effects/zoom.sexp | 20 + execute.py | 1458 ++++++++++++++++++ libs/all-effects.sexp | 63 + libs/plan | 68 + libs/standard-analyzers.sexp | 11 + libs/standard-constructs.sexp | 6 + plan.py | 415 ++++++ recipe-1.sexp | 16 + recipe-all-effects-modular.sexp | 55 + recipe-all-effects.sexp | 172 +++ recipe-bound copy.sexp | 45 + recipe-bound.sexp | 67 + recipe-construct.sexp | 24 + recipe-lambda.sexp | 28 + recipe-parametric.sexp | 33 + recipe-sexp-slice.sexp | 30 + recipe-single-effect.sexp | 94 ++ recipe.sexp | 25 + run-effect.sh | 122 ++ run-file.sh | 7 + run.py | 127 ++ run.sh | 7 + sexp_effects/__init__.py | 32 + sexp_effects/effects/ascii_art.sexp | 14 + sexp_effects/effects/ascii_zones.sexp | 28 + sexp_effects/effects/blend.sexp | 48 + sexp_effects/effects/bloom.sexp | 14 + sexp_effects/effects/blur.sexp | 6 + 
sexp_effects/effects/brightness.sexp | 7 + sexp_effects/effects/color-adjust.sexp | 8 + sexp_effects/effects/color_cycle.sexp | 11 + sexp_effects/effects/contrast.sexp | 7 + sexp_effects/effects/crt.sexp | 28 + sexp_effects/effects/datamosh.sexp | 13 + sexp_effects/effects/echo.sexp | 17 + sexp_effects/effects/edge_detect.sexp | 7 + sexp_effects/effects/emboss.sexp | 11 + sexp_effects/effects/film_grain.sexp | 17 + sexp_effects/effects/fisheye.sexp | 14 + sexp_effects/effects/flip.sexp | 14 + sexp_effects/effects/grayscale.sexp | 5 + sexp_effects/effects/hue_shift.sexp | 9 + sexp_effects/effects/invert.sexp | 5 + sexp_effects/effects/kaleidoscope.sexp | 18 + sexp_effects/effects/layer.sexp | 21 + sexp_effects/effects/mirror.sexp | 31 + sexp_effects/effects/neon_glow.sexp | 22 + sexp_effects/effects/noise.sexp | 7 + sexp_effects/effects/outline.sexp | 22 + sexp_effects/effects/pixelate.sexp | 11 + sexp_effects/effects/pixelsort.sexp | 10 + sexp_effects/effects/posterize.sexp | 11 + sexp_effects/effects/resize-frame.sexp | 7 + sexp_effects/effects/rgb_split.sexp | 12 + sexp_effects/effects/ripple.sexp | 17 + sexp_effects/effects/rotate.sexp | 8 + sexp_effects/effects/saturation.sexp | 7 + sexp_effects/effects/scanlines.sexp | 13 + sexp_effects/effects/sepia.sexp | 8 + sexp_effects/effects/sharpen.sexp | 9 + sexp_effects/effects/strobe.sexp | 14 + sexp_effects/effects/swirl.sexp | 15 + sexp_effects/effects/threshold.sexp | 13 + sexp_effects/effects/tile_grid.sexp | 27 + sexp_effects/effects/trails.sexp | 18 + sexp_effects/effects/vignette.sexp | 21 + sexp_effects/effects/wave.sexp | 20 + sexp_effects/effects/zoom.sexp | 6 + sexp_effects/interpreter.py | 537 +++++++ sexp_effects/parser.py | 168 +++ sexp_effects/primitives.py | 1867 ++++++++++++++++++++++++ sexp_effects/test_interpreter.py | 173 +++ test_effects_pipeline.py | 258 ++++ 171 files changed, 13406 insertions(+) create mode 100644 .gitignore create mode 100644 analysis.sexp create mode 100644 analyze.py 
create mode 100644 constructs/beat-alternate.sexp create mode 100644 constructs/cycle-effects-preset.sexp create mode 100644 constructs/slice-every-n.sexp create mode 100644 constructs/slice-on.sexp create mode 100644 effect-presets/all-42.sexp create mode 100644 effect-presets/chill-subtle.sexp create mode 100644 effect-presets/glitch-only.sexp create mode 100644 effect-presets/retro-vhs.sexp create mode 100644 effects/ascii_art.py create mode 100644 effects/ascii_art.sexp create mode 100644 effects/ascii_zones.sexp create mode 100644 effects/beam.py create mode 100644 effects/bloom.py create mode 100644 effects/bloom.sexp create mode 100644 effects/blur.py create mode 100644 effects/blur.sexp create mode 100644 effects/brightness.py create mode 100644 effects/brightness.sexp create mode 100644 effects/cartoon.py create mode 100644 effects/chromatic.py create mode 100644 effects/color-adjust.sexp create mode 100644 effects/color_cycle.py create mode 100644 effects/color_cycle.sexp create mode 100644 effects/color_grade.py create mode 100644 effects/contrast.py create mode 100644 effects/contrast.sexp create mode 100644 effects/crt.py create mode 100644 effects/crt.sexp create mode 100644 effects/datamosh.py create mode 100644 effects/datamosh.sexp create mode 100644 effects/displacement.py create mode 100644 effects/echo.py create mode 100644 effects/echo.sexp create mode 100644 effects/edge_detect.sexp create mode 100644 effects/emboss.py create mode 100644 effects/emboss.sexp create mode 100644 effects/film_grain.py create mode 100644 effects/film_grain.sexp create mode 100644 effects/fisheye.py create mode 100644 effects/fisheye.sexp create mode 100644 effects/flip.py create mode 100644 effects/flip.sexp create mode 100644 effects/grayscale.sexp create mode 100644 effects/hue_shift.py create mode 100644 effects/hue_shift.sexp create mode 100644 effects/invert.py create mode 100644 effects/invert.sexp create mode 100644 effects/kaleidoscope.py create mode 100644 
effects/kaleidoscope.sexp create mode 100644 effects/mirror.py create mode 100644 effects/mirror.sexp create mode 100644 effects/motion_blur.py create mode 100644 effects/neon_glow.py create mode 100644 effects/neon_glow.sexp create mode 100644 effects/noise.py create mode 100644 effects/noise.sexp create mode 100644 effects/outline.py create mode 100644 effects/outline.sexp create mode 100644 effects/pixelate.py create mode 100644 effects/pixelate.sexp create mode 100644 effects/pixelsort.py create mode 100644 effects/pixelsort.sexp create mode 100644 effects/posterize.py create mode 100644 effects/posterize.sexp create mode 100644 effects/random.py create mode 100644 effects/rgb_split.py create mode 100644 effects/rgb_split.sexp create mode 100644 effects/ripple.sexp create mode 100644 effects/rotate.py create mode 100644 effects/rotate.sexp create mode 100644 effects/saturation.py create mode 100644 effects/saturation.sexp create mode 100644 effects/scanlines.py create mode 100644 effects/scanlines.sexp create mode 100644 effects/scatter.py create mode 100644 effects/sepia.sexp create mode 100644 effects/sharpen.py create mode 100644 effects/sharpen.sexp create mode 100644 effects/shatter.py create mode 100644 effects/strobe.py create mode 100644 effects/strobe.sexp create mode 100644 effects/swirl.py create mode 100644 effects/swirl.sexp create mode 100644 effects/threshold.py create mode 100644 effects/threshold.sexp create mode 100644 effects/tile_grid.py create mode 100644 effects/tile_grid.sexp create mode 100644 effects/trails.py create mode 100644 effects/trails.sexp create mode 100644 effects/vhs.py create mode 100644 effects/vignette.py create mode 100644 effects/vignette.sexp create mode 100644 effects/wave.py create mode 100644 effects/wave.sexp create mode 100644 effects/zoom.py create mode 100644 effects/zoom.sexp create mode 100644 execute.py create mode 100644 libs/all-effects.sexp create mode 100644 libs/plan create mode 100644 
libs/standard-analyzers.sexp create mode 100644 libs/standard-constructs.sexp create mode 100644 plan.py create mode 100644 recipe-1.sexp create mode 100644 recipe-all-effects-modular.sexp create mode 100644 recipe-all-effects.sexp create mode 100644 recipe-bound copy.sexp create mode 100644 recipe-bound.sexp create mode 100644 recipe-construct.sexp create mode 100644 recipe-lambda.sexp create mode 100644 recipe-parametric.sexp create mode 100644 recipe-sexp-slice.sexp create mode 100644 recipe-single-effect.sexp create mode 100644 recipe.sexp create mode 100644 run-effect.sh create mode 100755 run-file.sh create mode 100755 run.py create mode 100755 run.sh create mode 100644 sexp_effects/__init__.py create mode 100644 sexp_effects/effects/ascii_art.sexp create mode 100644 sexp_effects/effects/ascii_zones.sexp create mode 100644 sexp_effects/effects/blend.sexp create mode 100644 sexp_effects/effects/bloom.sexp create mode 100644 sexp_effects/effects/blur.sexp create mode 100644 sexp_effects/effects/brightness.sexp create mode 100644 sexp_effects/effects/color-adjust.sexp create mode 100644 sexp_effects/effects/color_cycle.sexp create mode 100644 sexp_effects/effects/contrast.sexp create mode 100644 sexp_effects/effects/crt.sexp create mode 100644 sexp_effects/effects/datamosh.sexp create mode 100644 sexp_effects/effects/echo.sexp create mode 100644 sexp_effects/effects/edge_detect.sexp create mode 100644 sexp_effects/effects/emboss.sexp create mode 100644 sexp_effects/effects/film_grain.sexp create mode 100644 sexp_effects/effects/fisheye.sexp create mode 100644 sexp_effects/effects/flip.sexp create mode 100644 sexp_effects/effects/grayscale.sexp create mode 100644 sexp_effects/effects/hue_shift.sexp create mode 100644 sexp_effects/effects/invert.sexp create mode 100644 sexp_effects/effects/kaleidoscope.sexp create mode 100644 sexp_effects/effects/layer.sexp create mode 100644 sexp_effects/effects/mirror.sexp create mode 100644 sexp_effects/effects/neon_glow.sexp 
create mode 100644 sexp_effects/effects/noise.sexp create mode 100644 sexp_effects/effects/outline.sexp create mode 100644 sexp_effects/effects/pixelate.sexp create mode 100644 sexp_effects/effects/pixelsort.sexp create mode 100644 sexp_effects/effects/posterize.sexp create mode 100644 sexp_effects/effects/resize-frame.sexp create mode 100644 sexp_effects/effects/rgb_split.sexp create mode 100644 sexp_effects/effects/ripple.sexp create mode 100644 sexp_effects/effects/rotate.sexp create mode 100644 sexp_effects/effects/saturation.sexp create mode 100644 sexp_effects/effects/scanlines.sexp create mode 100644 sexp_effects/effects/sepia.sexp create mode 100644 sexp_effects/effects/sharpen.sexp create mode 100644 sexp_effects/effects/strobe.sexp create mode 100644 sexp_effects/effects/swirl.sexp create mode 100644 sexp_effects/effects/threshold.sexp create mode 100644 sexp_effects/effects/tile_grid.sexp create mode 100644 sexp_effects/effects/trails.sexp create mode 100644 sexp_effects/effects/vignette.sexp create mode 100644 sexp_effects/effects/wave.sexp create mode 100644 sexp_effects/effects/zoom.sexp create mode 100644 sexp_effects/interpreter.py create mode 100644 sexp_effects/parser.py create mode 100644 sexp_effects/primitives.py create mode 100644 sexp_effects/test_interpreter.py create mode 100644 test_effects_pipeline.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..76e951b --- /dev/null +++ b/.gitignore @@ -0,0 +1,12 @@ +# Python cache +__pycache__/ +*.pyc + +# Media files +*.mp4 +*.mkv +*.webm +*.mp3 + +# Output files +*.json diff --git a/analysis.sexp b/analysis.sexp new file mode 100644 index 0000000..b132de1 --- /dev/null +++ b/analysis.sexp @@ -0,0 +1,13 @@ +(analysis + (beats-data + :tempo 137.195 + :times (0.437333 0.874667 1.312 1.76 2.19733 2.63467 3.072 3.50933 3.936 4.37333 4.8 5.248 5.68533 6.12267 6.56 6.99733 7.424 7.86133 8.288 8.736 9.17333 9.61067 9.984) + :count 22 + :duration 9.984 + ) + (bass-data + :times (0 
0.0106667 0.0213333 0.032 0.0426667 0.0533333 0.064 0.0746667 0.0853333 0.096 0.106667 0.117333 0.128 0.138667 0.149333 0.16 0.170667 0.181333 0.192 0.202667 0.213333 0.224 0.234667 0.245333 0.256 0.266667 0.277333 0.288 0.298667 0.309333 0.32 0.330667 0.341333 0.352 0.362667 0.373333 0.384 0.394667 0.405333 0.416 0.426667 0.437333 0.448 0.458667 0.469333 0.48 0.490667 0.501333 0.512 0.522667 0.533333 0.544 0.554667 0.565333 0.576 0.586667 0.597333 0.608 0.618667 0.629333 0.64 0.650667 0.661333 0.672 0.682667 0.693333 0.704 0.714667 0.725333 0.736 0.746667 0.757333 0.768 0.778667 0.789333 0.8 0.810667 0.821333 0.832 0.842667 0.853333 0.864 0.874667 0.885333 0.896 0.906667 0.917333 0.928 0.938667 0.949333 0.96 0.970667 0.981333 0.992 1.00267 1.01333 1.024 1.03467 1.04533 1.056 1.06667 1.07733 1.088 1.09867 1.10933 1.12 1.13067 1.14133 1.152 1.16267 1.17333 1.184 1.19467 1.20533 1.216 1.22667 1.23733 1.248 1.25867 1.26933 1.28 1.29067 1.30133 1.312 1.32267 1.33333 1.344 1.35467 1.36533 1.376 1.38667 1.39733 1.408 1.41867 1.42933 1.44 1.45067 1.46133 1.472 1.48267 1.49333 1.504 1.51467 1.52533 1.536 1.54667 1.55733 1.568 1.57867 1.58933 1.6 1.61067 1.62133 1.632 1.64267 1.65333 1.664 1.67467 1.68533 1.696 1.70667 1.71733 1.728 1.73867 1.74933 1.76 1.77067 1.78133 1.792 1.80267 1.81333 1.824 1.83467 1.84533 1.856 1.86667 1.87733 1.888 1.89867 1.90933 1.92 1.93067 1.94133 1.952 1.96267 1.97333 1.984 1.99467 2.00533 2.016 2.02667 2.03733 2.048 2.05867 2.06933 2.08 2.09067 2.10133 2.112 2.12267 2.13333 2.144 2.15467 2.16533 2.176 2.18667 2.19733 2.208 2.21867 2.22933 2.24 2.25067 2.26133 2.272 2.28267 2.29333 2.304 2.31467 2.32533 2.336 2.34667 2.35733 2.368 2.37867 2.38933 2.4 2.41067 2.42133 2.432 2.44267 2.45333 2.464 2.47467 2.48533 2.496 2.50667 2.51733 2.528 2.53867 2.54933 2.56 2.57067 2.58133 2.592 2.60267 2.61333 2.624 2.63467 2.64533 2.656 2.66667 2.67733 2.688 2.69867 2.70933 2.72 2.73067 2.74133 2.752 2.76267 2.77333 2.784 2.79467 2.80533 2.816 2.82667 2.83733 
2.848 2.85867 2.86933 2.88 2.89067 2.90133 2.912 2.92267 2.93333 2.944 2.95467 2.96533 2.976 2.98667 2.99733 3.008 3.01867 3.02933 3.04 3.05067 3.06133 3.072 3.08267 3.09333 3.104 3.11467 3.12533 3.136 3.14667 3.15733 3.168 3.17867 3.18933 3.2 3.21067 3.22133 3.232 3.24267 3.25333 3.264 3.27467 3.28533 3.296 3.30667 3.31733 3.328 3.33867 3.34933 3.36 3.37067 3.38133 3.392 3.40267 3.41333 3.424 3.43467 3.44533 3.456 3.46667 3.47733 3.488 3.49867 3.50933 3.52 3.53067 3.54133 3.552 3.56267 3.57333 3.584 3.59467 3.60533 3.616 3.62667 3.63733 3.648 3.65867 3.66933 3.68 3.69067 3.70133 3.712 3.72267 3.73333 3.744 3.75467 3.76533 3.776 3.78667 3.79733 3.808 3.81867 3.82933 3.84 3.85067 3.86133 3.872 3.88267 3.89333 3.904 3.91467 3.92533 3.936 3.94667 3.95733 3.968 3.97867 3.98933 4 4.01067 4.02133 4.032 4.04267 4.05333 4.064 4.07467 4.08533 4.096 4.10667 4.11733 4.128 4.13867 4.14933 4.16 4.17067 4.18133 4.192 4.20267 4.21333 4.224 4.23467 4.24533 4.256 4.26667 4.27733 4.288 4.29867 4.30933 4.32 4.33067 4.34133 4.352 4.36267 4.37333 4.384 4.39467 4.40533 4.416 4.42667 4.43733 4.448 4.45867 4.46933 4.48 4.49067 4.50133 4.512 4.52267 4.53333 4.544 4.55467 4.56533 4.576 4.58667 4.59733 4.608 4.61867 4.62933 4.64 4.65067 4.66133 4.672 4.68267 4.69333 4.704 4.71467 4.72533 4.736 4.74667 4.75733 4.768 4.77867 4.78933 4.8 4.81067 4.82133 4.832 4.84267 4.85333 4.864 4.87467 4.88533 4.896 4.90667 4.91733 4.928 4.93867 4.94933 4.96 4.97067 4.98133 4.992 5.00267 5.01333 5.024 5.03467 5.04533 5.056 5.06667 5.07733 5.088 5.09867 5.10933 5.12 5.13067 5.14133 5.152 5.16267 5.17333 5.184 5.19467 5.20533 5.216 5.22667 5.23733 5.248 5.25867 5.26933 5.28 5.29067 5.30133 5.312 5.32267 5.33333 5.344 5.35467 5.36533 5.376 5.38667 5.39733 5.408 5.41867 5.42933 5.44 5.45067 5.46133 5.472 5.48267 5.49333 5.504 5.51467 5.52533 5.536 5.54667 5.55733 5.568 5.57867 5.58933 5.6 5.61067 5.62133 5.632 5.64267 5.65333 5.664 5.67467 5.68533 5.696 5.70667 5.71733 5.728 5.73867 5.74933 5.76 5.77067 5.78133 
5.792 5.80267 5.81333 5.824 5.83467 5.84533 5.856 5.86667 5.87733 5.888 5.89867 5.90933 5.92 5.93067 5.94133 5.952 5.96267 5.97333 5.984 5.99467 6.00533 6.016 6.02667 6.03733 6.048 6.05867 6.06933 6.08 6.09067 6.10133 6.112 6.12267 6.13333 6.144 6.15467 6.16533 6.176 6.18667 6.19733 6.208 6.21867 6.22933 6.24 6.25067 6.26133 6.272 6.28267 6.29333 6.304 6.31467 6.32533 6.336 6.34667 6.35733 6.368 6.37867 6.38933 6.4 6.41067 6.42133 6.432 6.44267 6.45333 6.464 6.47467 6.48533 6.496 6.50667 6.51733 6.528 6.53867 6.54933 6.56 6.57067 6.58133 6.592 6.60267 6.61333 6.624 6.63467 6.64533 6.656 6.66667 6.67733 6.688 6.69867 6.70933 6.72 6.73067 6.74133 6.752 6.76267 6.77333 6.784 6.79467 6.80533 6.816 6.82667 6.83733 6.848 6.85867 6.86933 6.88 6.89067 6.90133 6.912 6.92267 6.93333 6.944 6.95467 6.96533 6.976 6.98667 6.99733 7.008 7.01867 7.02933 7.04 7.05067 7.06133 7.072 7.08267 7.09333 7.104 7.11467 7.12533 7.136 7.14667 7.15733 7.168 7.17867 7.18933 7.2 7.21067 7.22133 7.232 7.24267 7.25333 7.264 7.27467 7.28533 7.296 7.30667 7.31733 7.328 7.33867 7.34933 7.36 7.37067 7.38133 7.392 7.40267 7.41333 7.424 7.43467 7.44533 7.456 7.46667 7.47733 7.488 7.49867 7.50933 7.52 7.53067 7.54133 7.552 7.56267 7.57333 7.584 7.59467 7.60533 7.616 7.62667 7.63733 7.648 7.65867 7.66933 7.68 7.69067 7.70133 7.712 7.72267 7.73333 7.744 7.75467 7.76533 7.776 7.78667 7.79733 7.808 7.81867 7.82933 7.84 7.85067 7.86133 7.872 7.88267 7.89333 7.904 7.91467 7.92533 7.936 7.94667 7.95733 7.968 7.97867 7.98933 8 8.01067 8.02133 8.032 8.04267 8.05333 8.064 8.07467 8.08533 8.096 8.10667 8.11733 8.128 8.13867 8.14933 8.16 8.17067 8.18133 8.192 8.20267 8.21333 8.224 8.23467 8.24533 8.256 8.26667 8.27733 8.288 8.29867 8.30933 8.32 8.33067 8.34133 8.352 8.36267 8.37333 8.384 8.39467 8.40533 8.416 8.42667 8.43733 8.448 8.45867 8.46933 8.48 8.49067 8.50133 8.512 8.52267 8.53333 8.544 8.55467 8.56533 8.576 8.58667 8.59733 8.608 8.61867 8.62933 8.64 8.65067 8.66133 8.672 8.68267 8.69333 8.704 8.71467 
8.72533 8.736 8.74667 8.75733 8.768 8.77867 8.78933 8.8 8.81067 8.82133 8.832 8.84267 8.85333 8.864 8.87467 8.88533 8.896 8.90667 8.91733 8.928 8.93867 8.94933 8.96 8.97067 8.98133 8.992 9.00267 9.01333 9.024 9.03467 9.04533 9.056 9.06667 9.07733 9.088 9.09867 9.10933 9.12 9.13067 9.14133 9.152 9.16267 9.17333 9.184 9.19467 9.20533 9.216 9.22667 9.23733 9.248 9.25867 9.26933 9.28 9.29067 9.30133 9.312 9.32267 9.33333 9.344 9.35467 9.36533 9.376 9.38667 9.39733 9.408 9.41867 9.42933 9.44 9.45067 9.46133 9.472 9.48267 9.49333 9.504 9.51467 9.52533 9.536 9.54667 9.55733 9.568 9.57867 9.58933 9.6 9.61067 9.62133 9.632 9.64267 9.65333 9.664 9.67467 9.68533 9.696 9.70667 9.71733 9.728 9.73867 9.74933 9.76 9.77067 9.78133 9.792 9.80267 9.81333 9.824 9.83467 9.84533 9.856 9.86667 9.87733 9.888 9.89867 9.90933 9.92 9.93067 9.94133 9.952 9.96267 9.97333 9.984) + :values (7.16216e-06 6.45309e-06 6.24163e-06 8.43058e-06 7.3364e-06 8.33967e-06 7.16933e-06 6.40957e-06 6.49916e-06 8.20697e-06 8.02302e-06 7.87236e-06 1.21227e-05 8.24857e-06 4.717e-06 6.57983e-06 4.37932e-06 6.2521e-06 7.43115e-06 5.63364e-06 8.52022e-06 7.35037e-06 4.66317e-06 6.7811e-06 7.62028e-06 7.45616e-06 7.43458e-06 8.39454e-06 6.26045e-06 6.91363e-06 5.75392e-06 6.30496e-06 8.32314e-06 6.70732e-06 6.24163e-06 8.43058e-06 7.3364e-06 8.34238e-06 1.7279e-05 0.0312504 0.127171 0.156779 0.213277 0.264067 0.228376 0.238384 0.217415 0.20997 0.166879 0.155116 0.132226 0.130255 0.0951963 0.103571 0.104248 0.0780041 0.0948994 0.0827808 0.0825249 0.0789151 0.090206 0.0928191 0.0945696 0.097371 0.0963535 0.0648912 0.0725221 0.0669522 0.0636747 0.0687113 0.0750296 0.0683408 0.0724092 0.0731389 0.0597439 0.0825941 0.0738546 0.0694798 0.0720687 0.0731458 0.0703331 0.0821691 0.0851682 0.123414 0.132517 0.155994 0.118462 0.109342 0.102054 0.0715775 0.096919 0.0840867 0.0758435 0.0778733 0.0882507 0.0701659 0.0698105 0.0765435 0.0679319 0.0707417 0.0730689 0.0598419 0.0866238 0.380699 0.700534 0.690496 0.554694 0.473041 
0.42336 0.33967 0.30334 0.23985 0.207732 0.191854 0.175046 0.173634 0.113132 0.123655 0.116205 0.135359 0.120024 0.117482 0.0964799 0.144343 0.204861 0.23033 0.204271 0.202869 0.225386 0.260025 0.233206 0.173568 0.165386 0.135257 0.126117 0.108307 0.116156 0.129393 0.109 0.145881 0.108262 0.0952238 0.0873363 0.131466 0.0981327 0.13161 0.120487 0.0857447 0.0790208 0.0899334 0.0815575 0.0996942 0.0906596 0.070164 0.0844994 0.0889698 0.0890347 0.096482 0.0880858 0.0825154 0.0929791 0.0753759 0.0918 0.0839455 0.106187 0.156017 0.173052 0.143864 0.129507 0.127141 0.105499 0.0909262 0.0884056 0.0889916 0.0634021 0.0795005 0.0900996 0.0570842 0.0659083 0.0712231 0.0676829 0.055935 0.0600593 0.0532016 0.206078 0.624478 0.829637 0.695123 0.662618 0.565682 0.432616 0.357212 0.311635 0.284791 0.232738 0.194049 0.200091 0.131367 0.11794 0.114917 0.119684 0.113432 0.105584 0.0914791 0.350021 0.830524 0.919146 0.967183 1 0.789667 0.642514 0.402096 0.356062 0.372625 0.262668 0.256799 0.238152 0.239011 0.259009 0.240393 0.195865 0.210342 0.170598 0.115235 0.123494 0.172891 0.396452 0.527137 0.495694 0.502169 0.314289 0.303286 0.251372 0.170556 0.162313 0.163239 0.155657 0.134445 0.143141 0.15054 0.153207 0.172172 0.127843 0.111243 0.0908216 0.103453 0.140627 0.136314 0.13147 0.172442 0.227603 0.219058 0.185081 0.193927 0.192491 0.141449 0.165612 0.171496 0.115222 0.114057 0.139578 0.120337 0.10068 0.0787593 0.0683195 0.0684097 0.066809 0.114857 0.497881 0.920547 0.9325 0.788979 0.60633 0.532281 0.441695 0.391039 0.330509 0.291408 0.213403 0.237324 0.204736 0.153047 0.136729 0.150922 0.122362 0.124808 0.11872 0.185701 0.306793 0.2445 0.231741 0.218492 0.213972 0.217633 0.245314 0.232327 0.150716 0.0949742 0.0913199 0.079221 0.090395 0.0770564 0.100292 0.1031 0.0837269 0.0811178 0.0652714 0.0824131 0.0847641 0.100266 0.120247 0.0969764 0.106081 0.0910757 0.0590815 0.0567437 0.0743846 0.0486739 0.0640541 0.0484055 0.0556839 0.0754229 0.0581457 0.0501834 0.0478827 0.0527257 0.0493874 
0.0893116 0.149908 0.155328 0.126655 0.115469 0.132254 0.0966001 0.117966 0.093855 0.0775226 0.0916065 0.0686379 0.0784045 0.0918643 0.073267 0.0711642 0.0711377 0.0501969 0.0583894 0.0556452 0.0756217 0.44287 0.906221 0.948975 0.772533 0.687945 0.596675 0.528724 0.433275 0.409462 0.358384 0.272122 0.27456 0.245432 0.262483 0.198251 0.142315 0.134197 0.11601 0.122026 0.202528 0.175024 0.214981 0.261781 0.224243 0.239647 0.226189 0.254256 0.188644 0.160832 0.137847 0.121601 0.0943244 0.138567 0.155566 0.127026 0.144832 0.118252 0.128691 0.116627 0.124978 0.0945879 0.115633 0.107412 0.111797 0.0825074 0.0830281 0.0930151 0.0881962 0.0952128 0.0804622 0.0821983 0.0895789 0.0986577 0.0641261 0.0756342 0.0935667 0.0827843 0.0883281 0.0758066 0.0694162 0.0832474 0.0685954 0.104091 0.110042 0.183639 0.148052 0.144141 0.121368 0.0693286 0.115004 0.126901 0.109386 0.103061 0.104243 0.077619 0.0804199 0.0913744 0.0826062 0.0844789 0.096114 0.0709375 0.0844898 0.348649 0.707439 0.696308 0.560395 0.483835 0.424138 0.285367 0.309361 0.268134 0.212897 0.20254 0.180925 0.180307 0.140437 0.122159 0.113377 0.1452 0.136581 0.12094 0.0924773 0.152206 0.210191 0.222118 0.210824 0.19643 0.210775 0.237974 0.210584 0.198879 0.160678 0.138147 0.133646 0.112595 0.129571 0.139936 0.119597 0.156428 0.110461 0.0983066 0.0923733 0.132595 0.100783 0.131057 0.119132 0.087296 0.0737559 0.0694214 0.0830922 0.0992999 0.0955029 0.0695149 0.0783225 0.085667 0.0815775 0.0943693 0.0907773 0.0808274 0.0966216 0.0797136 0.0931839 0.0771002 0.103668 0.148217 0.15753 0.13534 0.130242 0.120796 0.0979305 0.0859062 0.0816456 0.0900397 0.0648289 0.0774075 0.0871007 0.0578754 0.0656776 0.0649346 0.0618054 0.0530827 0.0573399 0.052835 0.204659 0.6304 0.842557 0.663718 0.630267 0.537835 0.436743 0.380594 0.334736 0.274641 0.218428 0.182904 0.194093 0.130684 0.116426 0.115307 0.120491 0.115242 0.105039 0.089898 0.339287 0.833928 0.89201 0.847351 0.885868 0.718274 0.63349 0.435283 0.355222 0.398535 0.284555 
0.283632 0.258276 0.232136 0.260572 0.245105 0.194172 0.215658 0.181625 0.115912 0.124685 0.172845 0.403153 0.518318 0.505032 0.518908 0.353772 0.314415 0.260417 0.187215 0.166755 0.155303 0.153631 0.134266 0.132239 0.145932 0.145072 0.170908 0.141179 0.117025 0.0932492 0.103141 0.139657 0.141631 0.142166 0.178162 0.236232 0.233171 0.19593 0.203656 0.201666 0.147291 0.155549 0.165642 0.115685 0.114077 0.140576 0.120949 0.10023 0.075488 0.0691524 0.0703872 0.0602554 0.105975 0.460764 0.863468 0.841344 0.743975 0.624344 0.535433 0.425757 0.389639 0.322511 0.279877 0.192502 0.213059 0.190344 0.155011 0.134013 0.148768 0.127813 0.133291 0.128182 0.176028 0.281791 0.223189 0.198992 0.193601 0.183436 0.195744 0.233913 0.221215 0.14886 0.0993742 0.0920248 0.0789306 0.0851811 0.0747424 0.100489 0.106375 0.0866896 0.0822401 0.0643018 0.0819463 0.081135 0.094469 0.116559 0.0946106 0.100833 0.0928059 0.0619034 0.056504 0.0725264 0.046511 0.0671074 0.0507017 0.0509314 0.0693126 0.0573645 0.0507336 0.0471445 0.0522879 0.0480421 0.083083 0.15539 0.165325 0.145727 0.136045 0.134697 0.0870662 0.108761 0.0908244 0.0792352 0.0829125 0.060982 0.0723782 0.0907959 0.0720178 0.0660367 0.0675224 0.050675 0.0588428 0.0543639 0.0739238 0.448568 0.925567 0.973444 0.740911 0.69001 0.631465 0.523379 0.425562 0.407257 0.373598 0.28653 0.296789 0.255094 0.249436 0.199982 0.13872 0.124745 0.110177 0.109994 0.196876 0.175401 0.208955 0.266637 0.242249 0.264852 0.225198 0.239447 0.183968 0.162852 0.151754 0.120695 0.0952901 0.133652 0.155726 0.119348 0.130654 0.120284 0.134148 0.119943 0.127491 0.0998429 0.127342 0.117227 0.138459 0.108536 0.112109 0.116469 0.105608 0.103308 0.0997458 0.0908462 0.0925413 0.103308 0.0622741 0.0717011 0.0920471 0.0798529 0.0845619 0.0768434 0.064583 0.0791313 0.0695519 0.104025 0.120987 0.171366 0.141478 0.127739 0.108496 0.0727998 0.114553 0.127771 0.109442 0.110514 0.110202 0.0835657 0.0795321 0.0935803 0.0834574 0.084336 0.095042 0.0741728 0.0841201 0.330518 
0.665087 0.693456 0.566006 0.488647 0.42792 0.337344 0.346076 0.284655 0.214681 0.212832 0.184396 0.174909 0.136491 0.12191 0.1182 0.141756 0.127448 0.118748 0.0850289 0.148361 0.222931 0.239708 0.228752 0.203162 0.235074 0.266685 0.241574 0.199221 0.144484 0.126863 0.1294 0.108858 0.127052 0.142572 0.120572 0.152882 0.111591 0.0881809 0.0888725 0.138569 0.106755 0.124997 0.113986 0.0841088 0.0744094 0.0903245 0.0870655 0.0998053 0.0834926 0.06468 0.0820826 0.0938939 0.0843508 0.0947001 0.091387 0.077998 0.0941699 0.074824 0.0904097 0.0831858 0.106969 0.160138 0.170449 0.160021 0.138937 0.137461 0.105947 0.0918812 0.0931718 0.0947702 0.0717733 0.075929 0.08621 0.0617874 0.0755435 0.0764485 0.0670138 0.054362 0.0621337 0.0601537 0.192354 0.604609 0.857971 0.728876 0.655009 0.530333 0.451029 0.391772 0.332011 0.269215 0.237235 0.198724 0.202796 0.135874 0.119565 0.112975 0.117327 0.110819 0.105813 0.0870918 0.328994 0.801941 0.90869 0.937387 0.987602 0.745804 0.658189 0.443015 0.385498 0.388736 0.275883 0.255372 0.239243 0.216375 0.241192 0.228235 0.187837 0.203992 0.170158 0.109679 0.11758 0.159904 0.390653 0.517078 0.508666 0.531087 0.341772 0.319248 0.274061 0.17915 0.169531 0.160211 0.157662 0.140002 0.132297 0.142435 0.138881 0.171226 0.144367 0.123187 0.0987208 0.105159 0.134667 0.132355 0.139594 0.182268 0.248417 0.247857 0.194815 0.19428 0.189158 0.140931 0.151526 0.168047 0.119665 0.116234 0.13657 0.11837 0.0981858 0.0728398 0.0637859 0.0676442 0.0603401 0.107312 0.45778 0.875891 0.884144 0.702587 0.570179 0.504397 0.463568 0.381661 0.309783 0.277454 0.208983 0.233763 0.207871 0.165335 0.113466) + :duration 9.984 + ) +) \ No newline at end of file diff --git a/analyze.py b/analyze.py new file mode 100644 index 0000000..1affa85 --- /dev/null +++ b/analyze.py @@ -0,0 +1,223 @@ +#!/usr/bin/env python3 +""" +Run analyzers from a recipe and output analysis data as S-expressions. 
+ +Usage: + analyze.py recipe.sexp [-o analysis.sexp] + +Output format: + (analysis + (beats-data + :tempo 120.5 + :times (0.0 0.5 1.0 1.5 ...) + :duration 10.0) + (bass-data + :times (0.0 0.1 0.2 ...) + :values (0.5 0.8 0.3 ...))) +""" + +import sys +import tempfile +import subprocess +import importlib.util +from pathlib import Path + +# Add artdag to path +sys.path.insert(0, str(Path(__file__).parent.parent / "artdag")) + +from artdag.sexp import compile_string, parse +from artdag.sexp.parser import Symbol, Keyword, serialize + + +def load_analyzer(analyzer_path: Path): + """Load an analyzer module from file path.""" + spec = importlib.util.spec_from_file_location("analyzer", analyzer_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module + + +def run_analyzer(analyzer_path: Path, input_path: Path, params: dict) -> dict: + """Run an analyzer and return results.""" + analyzer = load_analyzer(analyzer_path) + return analyzer.analyze(input_path, params) + + +def pre_execute_segment(source_path: Path, start: float, duration: float, work_dir: Path) -> Path: + """Pre-execute a segment to get audio for analysis.""" + suffix = source_path.suffix.lower() + is_audio = suffix in ('.mp3', '.wav', '.flac', '.aac', '.ogg', '.m4a') + + output_ext = ".m4a" if is_audio else ".mp4" + output_path = work_dir / f"segment{output_ext}" + + cmd = ["ffmpeg", "-y", "-i", str(source_path)] + if start: + cmd.extend(["-ss", str(start)]) + if duration: + cmd.extend(["-t", str(duration)]) + + if is_audio: + cmd.extend(["-c:a", "aac", str(output_path)]) + else: + cmd.extend(["-c:v", "libx264", "-preset", "fast", "-crf", "18", + "-c:a", "aac", str(output_path)]) + + subprocess.run(cmd, check=True, capture_output=True) + return output_path + + +def to_sexp(value, indent=0): + """Convert a Python value to S-expression string.""" + if isinstance(value, dict): + if not value: + return "()" + items = [] + for k, v in value.items(): + key = 
k.replace('_', '-') + items.append(f":{key} {to_sexp(v)}") + return "(" + " ".join(items) + ")" + elif isinstance(value, list): + if not value: + return "()" + items = [to_sexp(v) for v in value] + return "(" + " ".join(items) + ")" + elif isinstance(value, str): + return f'"{value}"' + elif isinstance(value, bool): + return "true" if value else "false" + elif value is None: + return "nil" + elif isinstance(value, float): + return f"{value:.6g}" + else: + return str(value) + + +def analyze_recipe(recipe_path: Path, output_file: Path = None): + """Run all analyzers in a recipe and output S-expression analysis data.""" + + recipe_text = recipe_path.read_text() + recipe_dir = recipe_path.parent + + print(f"Compiling: {recipe_path}", file=sys.stderr) + compiled = compile_string(recipe_text) + print(f"Recipe: {compiled.name} v{compiled.version}", file=sys.stderr) + + # Find all ANALYZE nodes and their dependencies + nodes_by_id = {n["id"]: n for n in compiled.nodes} + + # Track source paths and segment outputs + source_paths = {} + segment_outputs = {} + analysis_results = {} + + work_dir = Path(tempfile.mkdtemp(prefix="artdag_analyze_")) + + # Process nodes in dependency order + def get_input_path(node_id: str) -> Path: + """Resolve the input path for a node.""" + if node_id in segment_outputs: + return segment_outputs[node_id] + if node_id in source_paths: + return source_paths[node_id] + + node = nodes_by_id.get(node_id) + if not node: + return None + + if node["type"] == "SOURCE": + path = recipe_dir / node["config"].get("path", "") + source_paths[node_id] = path.resolve() + return source_paths[node_id] + + if node["type"] == "SEGMENT": + inputs = node.get("inputs", []) + if inputs: + input_path = get_input_path(inputs[0]) + if input_path: + config = node.get("config", {}) + start = config.get("start", 0) + duration = config.get("duration") + output = pre_execute_segment(input_path, start, duration, work_dir) + segment_outputs[node_id] = output + return output + + 
return None + + # Find and run all analyzers + for node in compiled.nodes: + if node["type"] == "ANALYZE": + config = node.get("config", {}) + analyzer_name = config.get("analyzer", "unknown") + analyzer_path = config.get("analyzer_path") + + if not analyzer_path: + print(f" Skipping {analyzer_name}: no path", file=sys.stderr) + continue + + # Get input + inputs = node.get("inputs", []) + if not inputs: + print(f" Skipping {analyzer_name}: no inputs", file=sys.stderr) + continue + + input_path = get_input_path(inputs[0]) + if not input_path or not input_path.exists(): + print(f" Skipping {analyzer_name}: input not found", file=sys.stderr) + continue + + # Run analyzer + full_path = recipe_dir / analyzer_path + params = {k: v for k, v in config.items() + if k not in ("analyzer", "analyzer_path", "cid")} + + print(f" Running analyzer: {analyzer_name}", file=sys.stderr) + results = run_analyzer(full_path, input_path, params) + + # Store by node ID for uniqueness (multiple analyzers may have same type) + node_id = node.get("id") + analysis_results[node_id] = results + + times = results.get("times", []) + print(f" {len(times)} times @ {results.get('tempo', 0):.1f} BPM", file=sys.stderr) + + # Generate S-expression output + lines = ["(analysis"] + + for name, data in analysis_results.items(): + # Quote node IDs to prevent parser treating hex like "0e42..." 
as scientific notation + lines.append(f' ("{name}"') + for key, value in data.items(): + sexp_key = key.replace('_', '-') + sexp_value = to_sexp(value) + lines.append(f" :{sexp_key} {sexp_value}") + lines.append(" )") + + lines.append(")") + + output = "\n".join(lines) + + if output_file: + output_file.write_text(output) + print(f"\nAnalysis written to: {output_file}", file=sys.stderr) + else: + print(output) + + print(f"Debug: temp files in {work_dir}", file=sys.stderr) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="Run analyzers from recipe") + parser.add_argument("recipe", type=Path, help="Recipe file (.sexp)") + parser.add_argument("-o", "--output", type=Path, help="Output file (default: stdout)") + + args = parser.parse_args() + + if not args.recipe.exists(): + print(f"Recipe not found: {args.recipe}", file=sys.stderr) + sys.exit(1) + + analyze_recipe(args.recipe, args.output) diff --git a/constructs/beat-alternate.sexp b/constructs/beat-alternate.sexp new file mode 100644 index 0000000..ecac687 --- /dev/null +++ b/constructs/beat-alternate.sexp @@ -0,0 +1,21 @@ +;; beat-alternate construct +;; Alternates between sources on each beat +;; +;; Usage in recipe: +;; (construct beat-alternate :path "constructs/beat-alternate.sexp") +;; (def segments (beat-alternate beats-data (list video-a video-b))) + +(define-construct beat-alternate + "Alternate between sources on each beat" + (analysis sources) + ;; Body: map over time pairs, return segment descriptors + (let [times (get analysis :times) + pairs (zip-pairs (cons 0 times)) + n-sources (len sources)] + (map-indexed + (fn [i pair] + (dict :source (nth sources (mod i n-sources)) + :start (first pair) + :end (nth pair 1) + :effects (list))) + pairs))) diff --git a/constructs/cycle-effects-preset.sexp b/constructs/cycle-effects-preset.sexp new file mode 100644 index 0000000..7ef680b --- /dev/null +++ b/constructs/cycle-effects-preset.sexp @@ -0,0 +1,153 @@ +;; 
cycle-effects-preset construct
;; Data-driven effect cycling using preset definitions
;;
;; NOTE(review): the construct declares no positional params, yet the body
;; references preset, videos, video_infos, beats and beats_per_segment —
;; presumably these arrive as keyword args, like :init/:reducer in slice-on;
;; verify against the construct-invocation machinery.
effect-spec :strength nil) + amplitude (get effect-spec :amplitude nil) + wavelength (get effect-spec :wavelength nil) + frequency (get effect-spec :frequency nil) + segments-p (get effect-spec :segments nil) + rotation_speed (get effect-spec :rotation_speed nil) + factor (get effect-spec :factor nil) + angle (get effect-spec :angle nil) + direction (get effect-spec :direction nil) + block_size (get effect-spec :block_size nil) + char_size (get effect-spec :char_size nil) + color_mode (get effect-spec :color_mode nil) + low (get effect-spec :low nil) + high (get effect-spec :high nil) + thickness (get effect-spec :thickness nil) + glow_radius (get effect-spec :glow_radius nil) + glow_intensity (get effect-spec :glow_intensity nil) + line_spacing (get effect-spec :line_spacing nil) + vignette_amount (get effect-spec :vignette_amount nil) + spacing (get effect-spec :spacing nil) + offset_x (get effect-spec :offset_x nil) + num_echoes (get effect-spec :num_echoes nil) + decay (get effect-spec :decay nil) + persistence (get effect-spec :persistence nil) + rows (get effect-spec :rows nil) + cols (get effect-spec :cols nil) + threshold_low (get effect-spec :threshold_low nil) + threshold_high (get effect-spec :threshold_high nil) + corruption (get effect-spec :corruption nil)] + ;; Only add non-nil params to result + ;; Use cond to build up the dict (since we can't dynamically add keys) + ;; This is ugly but necessary without dict iteration + {:effect effect-name + :amount (if (nil? amount) nil (resolve-param amount)) + :degrees (if (nil? degrees) nil (resolve-param degrees)) + :speed speed + :level level + :levels levels + :radius (if (nil? radius) nil (resolve-param radius)) + :intensity (if (nil? intensity) nil (resolve-param intensity)) + :contrast (if (nil? contrast) nil (resolve-param contrast)) + :brightness (if (nil? brightness) nil (resolve-param brightness)) + :strength (if (nil? strength) nil (resolve-param strength)) + :amplitude (if (nil? 
amplitude) nil (resolve-param amplitude)) + :wavelength wavelength + :frequency frequency + :segments segments-p + :rotation_speed rotation_speed + :factor (if (nil? factor) nil (resolve-param factor)) + :angle (if (nil? angle) nil (resolve-param angle)) + :direction direction + :block_size (if (nil? block_size) nil (resolve-param block_size)) + :char_size char_size + :color_mode color_mode + :low low + :high high + :thickness thickness + :glow_radius glow_radius + :glow_intensity glow_intensity + :line_spacing line_spacing + :vignette_amount (if (nil? vignette_amount) nil (resolve-param vignette_amount)) + :spacing spacing + :offset_x (if (nil? offset_x) nil (resolve-param offset_x)) + :num_echoes num_echoes + :decay decay + :persistence persistence + :rows rows + :cols cols + :threshold_low threshold_low + :threshold_high threshold_high + :corruption (if (nil? corruption) nil (resolve-param corruption))}))) + + find-valid-video (fn [preferred-idx seg-duration] + (cond + (>= (nth durations preferred-idx) seg-duration) preferred-idx + (>= (nth durations (mod (+ preferred-idx 1) num-videos)) seg-duration) (mod (+ preferred-idx 1) num-videos) + (>= (nth durations (mod (+ preferred-idx 2) num-videos)) seg-duration) (mod (+ preferred-idx 2) num-videos) + :else nil))] + + (nth + (reduce + (fn [state group] + (let [acc (first state) + segments (nth state 1) + audio-start (first group) + audio-end (last group) + seg-duration (- audio-end audio-start) + vid-idx (find-valid-video (mod acc num-videos) seg-duration)] + (if (nil? 
vid-idx) + (list (inc acc) segments) + (let [src (nth videos vid-idx) + src-duration (nth durations vid-idx) + wrapped-start (mod audio-start src-duration) + effect-idx (mod acc num-effects) + effect-spec (nth preset effect-idx) + fx (make-effect effect-spec) + segment (dict :source src + :start wrapped-start + :duration seg-duration + :effects (list fx))] + (list (inc acc) (append segments segment)))))) + (list 0 (list)) + grouped) + 1))) diff --git a/constructs/slice-every-n.sexp b/constructs/slice-every-n.sexp new file mode 100644 index 0000000..90cfd95 --- /dev/null +++ b/constructs/slice-every-n.sexp @@ -0,0 +1,53 @@ +;; slice-every-n construct - group every N beats into one segment +;; +;; Usage: +;; (construct slice-every-n :path "constructs/slice-every-n.sexp") +;; (def segments (slice-every-n beats-data 4 +;; :init 0 +;; :reducer (fn [acc i start end] +;; {:source video-a +;; :effects (list {:effect invert}) +;; :acc (inc acc)}))) +;; +;; Groups every N analysis times into one segment, calling reducer once per group + +(define-construct slice-every-n + "Group every N analysis beats into segments" + (analysis n) + ;; 'init' and 'reducer' come from keyword args + ;; Reducer receives: (acc, i, start, end) where start/end are audio beat times + ;; Reducer returns: {:source src :effects fx :acc new-acc} + ;; Optionally include :start/:end to override (e.g., for wrapping/randomizing) + ;; :duration is calculated from start/end (use :duration to override) + ;; Return :skip true to skip this segment + (let [times (get analysis :times) + ;; Group times into chunks of n + grouped (chunk-every times n)] + (nth + (reduce + (fn [state group] + (let [acc (first state) + segments (nth state 1) + i (len segments) + audio-start (first group) + audio-end (last group) + audio-duration (- audio-end audio-start) + ;; Call user's reducer with audio beat times + result (reducer acc i audio-start audio-end) + new-acc (get result :acc)] + ;; Skip if reducer returns :skip true + 
(if (get result :skip false) + (list new-acc segments) + (let [;; Use reducer's start/end/duration if provided, else use audio times + seg-start (get result :start audio-start) + seg-end (get result :end audio-end) + seg-duration (get result :duration (- seg-end seg-start)) + segment (dict :source (get result :source) + :start seg-start + :end seg-end + :duration seg-duration + :effects (get result :effects))] + (list new-acc (append segments segment)))))) + (list init (list)) + grouped) + 1))) diff --git a/constructs/slice-on.sexp b/constructs/slice-on.sexp new file mode 100644 index 0000000..29a40e9 --- /dev/null +++ b/constructs/slice-on.sexp @@ -0,0 +1,45 @@ +;; slice-on construct - iterate over analysis times with user function +;; +;; Usage: +;; (construct slice-on :path "constructs/slice-on.sexp") +;; (def segments (slice-on beats-data +;; :init 0 +;; :reducer (fn [acc i start end] +;; {:source (nth (list video-a video-b) (mod acc 2)) +;; :effects (list) +;; :acc (inc acc)}))) +;; +;; The construct receives: +;; - First positional arg as 'analysis' (the analysis data with :times) +;; - :init as 'init' (initial accumulator value) +;; - :reducer as 'reducer' (the reducer lambda) + +(define-construct slice-on + "Iterate over analysis times, calling reducer for each slice" + (analysis) + ;; 'init' and 'reducer' come from keyword args + ;; Get times from analysis data + (let [times (get analysis :times) + pairs (zip-pairs (cons 0 times))] + ;; Use nth to get second element of reduce result (the segments list) + (nth + (reduce + (fn [state pair] + (let [acc (first state) + segments (nth state 1) + i (len segments) + start (first pair) + end (nth pair 1) + ;; Call user's reducer function + result (reducer acc i start end) + ;; Extract new acc and build segment + new-acc (get result :acc) + segment (dict :source (get result :source) + :start start + :end end + :duration (- end start) + :effects (get result :effects))] + (list new-acc (append segments segment)))) + 
(list init (list)) + pairs) + 1))) diff --git a/effect-presets/all-42.sexp b/effect-presets/all-42.sexp new file mode 100644 index 0000000..8cc2e9f --- /dev/null +++ b/effect-presets/all-42.sexp @@ -0,0 +1,65 @@ +;; All 42 Effects Preset +;; Flat format - params are direct keys, not nested under :params +;; Binding format: {:bind "analyzer-name" :range [min max]} + +(def all-42-preset + (list + ;; Color effects 0-9 + {:effect "invert"} + {:effect "grayscale"} + {:effect "sepia"} + {:effect "brightness" :amount {:bind "bass" :range [-80 80]}} + {:effect "contrast" :amount {:bind "energy" :range [0.5 2.5]}} + {:effect "saturation" :amount {:bind "bass" :range [0.2 3.0]}} + {:effect "hue_shift" :degrees {:bind "energy" :range [0 360]}} + {:effect "color_cycle" :speed 2} + {:effect "threshold" :level 128} + {:effect "posterize" :levels 6} + + ;; Blur/sharpen 10-13 + {:effect "blur" :radius {:bind "bass" :range [1 30]}} + {:effect "sharpen" :amount {:bind "energy" :range [0.5 4]}} + {:effect "bloom" :intensity 0.6 :radius 20} + {:effect "color-adjust" :brightness 20 :contrast 1.2} + + ;; Distortion 14-21 + {:effect "swirl" :strength {:bind "bass" :range [-6 6]}} + {:effect "fisheye" :strength {:bind "bass" :range [-0.5 0.8]}} + {:effect "wave" :amplitude {:bind "bass" :range [10 60]} :wavelength 60} + {:effect "ripple" :amplitude {:bind "bass" :range [10 40]} :frequency 6} + {:effect "kaleidoscope" :segments 6 :rotation_speed 30} + {:effect "zoom" :factor {:bind "bass" :range [0.8 1.5]}} + {:effect "rotate" :angle {:bind "energy" :range [-30 30]}} + {:effect "mirror" :direction "horizontal"} + + ;; Stylization 22-28 + {:effect "pixelate" :block_size {:bind "bass" :range [4 32]}} + {:effect "ascii_art" :char_size 8 :color_mode "color"} + {:effect "ascii_zones" :char_size 10} + {:effect "edge_detect" :low 50 :high 150} + {:effect "emboss" :strength 1.5} + {:effect "outline" :thickness 2} + {:effect "neon_glow" :glow_radius 20 :glow_intensity 2} + + ;; Retro/film 29-33 + 
{:effect "crt" :line_spacing 3 :vignette_amount 0.3} + {:effect "scanlines" :spacing 3 :intensity 0.4} + {:effect "film_grain" :intensity 0.25} + {:effect "vignette" :strength 0.6} + {:effect "noise" :amount {:bind "bass" :range [10 80]}} + + ;; Chromatic 34 + {:effect "rgb_split" :offset_x {:bind "bass" :range [5 40]}} + + ;; Temporal 35-37 + {:effect "echo" :num_echoes 4 :decay 0.5} + {:effect "trails" :persistence 0.7} + {:effect "strobe" :frequency 4} + + ;; Geometric 38-39 + {:effect "flip" :direction "horizontal"} + {:effect "tile_grid" :rows 2 :cols 2} + + ;; Glitch 40-41 + {:effect "pixelsort" :threshold_low 30 :threshold_high 220} + {:effect "datamosh" :corruption {:bind "bass" :range [0.2 0.8]} :block_size 24})) diff --git a/effect-presets/chill-subtle.sexp b/effect-presets/chill-subtle.sexp new file mode 100644 index 0000000..3ccf192 --- /dev/null +++ b/effect-presets/chill-subtle.sexp @@ -0,0 +1,13 @@ +;; Chill/Subtle Preset +;; Gentle color shifts and soft effects + +(def chill-subtle-preset + (list + {:effect "hue_shift" :params {:degrees {:bind "energy" :range [0 60]}}} + {:effect "saturation" :params {:amount {:bind "bass" :range [0.8 1.3]}}} + {:effect "brightness" :params {:amount {:bind "energy" :range [-10 15]}}} + {:effect "blur" :params {:radius {:bind "bass" :range [0 5]}}} + {:effect "vignette" :params {:strength {:bind "energy" :range [0.2 0.5]}}} + {:effect "film_grain" :params {:intensity {:bind "bass" :range [0.05 0.15]}}} + {:effect "bloom" :params {:intensity {:bind "energy" :range [0.2 0.5]} :radius 15}} + {:effect "color-adjust" :params {:brightness {:bind "bass" :range [-5 10]} :contrast 1.05}})) diff --git a/effect-presets/glitch-only.sexp b/effect-presets/glitch-only.sexp new file mode 100644 index 0000000..e72f73f --- /dev/null +++ b/effect-presets/glitch-only.sexp @@ -0,0 +1,13 @@ +;; Glitch-Only Preset +;; Just the aggressive digital artifacts + +(def glitch-only-preset + (list + {:effect "datamosh" :params {:corruption {:bind 
"bass" :range [0.3 0.9]} :block_size 16}} + {:effect "pixelsort" :params {:threshold_low 20 :threshold_high 200}} + {:effect "rgb_split" :params {:offset_x {:bind "bass" :range [10 60]}}} + {:effect "noise" :params {:amount {:bind "energy" :range [20 100]}}} + {:effect "scanlines" :params {:spacing 2 :intensity {:bind "bass" :range [0.3 0.8]}}} + {:effect "pixelate" :params {:block_size {:bind "bass" :range [8 48]}}} + {:effect "threshold" :params {:level {:bind "energy" :range [80 180]}}} + {:effect "posterize" :params {:levels {:bind "bass" :range [2 6]}}})) diff --git a/effect-presets/retro-vhs.sexp b/effect-presets/retro-vhs.sexp new file mode 100644 index 0000000..628051b --- /dev/null +++ b/effect-presets/retro-vhs.sexp @@ -0,0 +1,13 @@ +;; Retro VHS Preset +;; 80s/90s video aesthetic + +(def retro-vhs-preset + (list + {:effect "crt" :params {:line_spacing 3 :vignette_amount {:bind "bass" :range [0.2 0.5]}}} + {:effect "scanlines" :params {:spacing 2 :intensity {:bind "energy" :range [0.3 0.6]}}} + {:effect "rgb_split" :params {:offset_x {:bind "bass" :range [2 12]}}} + {:effect "noise" :params {:amount {:bind "bass" :range [5 30]}}} + {:effect "saturation" :params {:amount {:bind "energy" :range [1.2 1.8]}}} + {:effect "blur" :params {:radius {:bind "bass" :range [0 3]}}} + {:effect "contrast" :params {:amount {:bind "energy" :range [1.1 1.4]}}} + {:effect "film_grain" :params {:intensity {:bind "bass" :range [0.1 0.3]}}})) diff --git a/effects/ascii_art.py b/effects/ascii_art.py new file mode 100644 index 0000000..408f013 --- /dev/null +++ b/effects/ascii_art.py @@ -0,0 +1,205 @@ +# /// script +# requires-python = ">=3.10" +# dependencies = ["numpy", "opencv-python"] +# /// +""" +@effect ascii_art +@version 1.0.0 +@author artdag + +@description +ASCII art effect - converts video to ASCII character representation. +Renders the video using ASCII characters based on brightness, +creating a retro terminal aesthetic. 
+ +@param char_size int + @range 4 32 + @default 8 + Size of each character cell in pixels. Sync to bass for reactive sizing. + +@param alphabet string + @enum standard blocks cyrillic greek japanese braille + @default standard + Character set to use for rendering. Each has different visual density. + +@param color_mode string + @enum mono color invert + @default color + Color rendering mode: + - mono: white on black + - color: preserve source colors + - invert: dark text on colored background + +@param contrast_boost float + @range 1 3 + @default 1.5 + Enhance contrast for better character separation. + +@param background list + @default [0, 0, 0] + Background color RGB. + +@example + (effect ascii_art :char_size 8 :color_mode "color") + +@example + ;; Japanese characters, reactive sizing + (effect ascii_art :alphabet "japanese" :char_size (bind bass :range [6 16])) + +@example + ;; Braille pattern for high detail + (effect ascii_art :alphabet "braille" :char_size 4) +""" + +import numpy as np +import cv2 + + +# Character sets ordered by visual density (light to dark) +ALPHABETS = { + # Classic ASCII gradient + "standard": " .`'^\",:;Il!i><~+_-?][}{1)(|/tfjrxnuvczXYUJCLQ0OZmwqpdbkhao*#MW&8%B@$", + + # Unicode block elements - naturally ordered by fill + "blocks": " ░▒▓█", + + # Cyrillic - ordered by visual complexity + "cyrillic": " ·гтпрсьоеаилнкчуцбдвжзмъыюяфщшэ", + + # Greek - ordered by visual weight + "greek": " ·ιτορεαηυικλνσςπμβγδζθξφψωΣΩΨΦ", + + # Japanese Katakana - ordered by stroke complexity + "japanese": " ·ノ一ヘイコニハヒフホメヨワヲンリルレロカキクケサシスセソタチツテト", + + # Braille patterns - high detail + "braille": " ⠁⠂⠃⠄⠅⠆⠇⠈⠉⠊⠋⠌⠍⠎⠏⠐⠑⠒⠓⠔⠕⠖⠗⠘⠙⠚⠛⠜⠝⠞⠟⠠⠡⠢⠣⠤⠥⠦⠧⠨⠩⠪⠫⠬⠭⠮⠯⠰⠱⠲⠳⠴⠵⠶⠷⠸⠹⠺⠻⠼⠽⠾⠿", +} + + +def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: + """ + Apply ASCII art effect to a video frame. 
+ + Args: + frame: Input frame as numpy array (H, W, 3) RGB uint8 + params: Effect parameters + - char_size: character cell size (default 8) + - alphabet: character set name (default "standard") + - color_mode: mono/color/invert (default "color") + - contrast_boost: enhance contrast (default 1.5) + - background: RGB tuple (default [0,0,0]) + state: Persistent state dict (used for caching) + + Returns: + Tuple of (processed_frame, new_state) + """ + char_size = max(4, min(int(params.get("char_size", 8)), 32)) + alphabet_name = params.get("alphabet", "standard") + color_mode = params.get("color_mode", "color") + contrast_boost = max(1.0, min(params.get("contrast_boost", 1.5), 3.0)) + background = params.get("background", [0, 0, 0]) + + if state is None: + state = {} + + # Get character set + char_set = ALPHABETS.get(alphabet_name, ALPHABETS["standard"]) + + # Get or create character atlas cache + cache_key = f"{char_size}_{alphabet_name}" + if "atlas_cache" not in state or state.get("cache_key") != cache_key: + state["atlas"] = _create_char_atlas(char_size, char_set) + state["cache_key"] = cache_key + + atlas = state["atlas"] + + h, w = frame.shape[:2] + cols = w // char_size + rows = h // char_size + + if cols < 1 or rows < 1: + return frame, state + + # Crop frame to fit grid + grid_h, grid_w = rows * char_size, cols * char_size + frame_cropped = frame[:grid_h, :grid_w] + + # Downsample to get average color per cell + reshaped = frame_cropped.reshape(rows, char_size, cols, char_size, 3) + cell_colors = reshaped.mean(axis=(1, 3)).astype(np.uint8) + + # Convert to grayscale for brightness mapping + cell_gray = 0.299 * cell_colors[:,:,0] + 0.587 * cell_colors[:,:,1] + 0.114 * cell_colors[:,:,2] + + # Apply contrast boost + if contrast_boost > 1: + cell_gray = (cell_gray - 128) * contrast_boost + 128 + cell_gray = np.clip(cell_gray, 0, 255) + + # Map brightness to character indices + char_indices = ((cell_gray / 255) * (len(char_set) - 1)).astype(np.int32) + 
char_indices = np.clip(char_indices, 0, len(char_set) - 1) + + # Create output frame + if isinstance(background, (list, tuple)) and len(background) >= 3: + bg = background[:3] + else: + bg = [0, 0, 0] + result = np.full((grid_h, grid_w, 3), bg, dtype=np.uint8) + + # Render characters + for row in range(rows): + for col in range(cols): + char_idx = char_indices[row, col] + char = char_set[char_idx] + char_mask = atlas.get(char) + + if char_mask is None: + continue + + y1, x1 = row * char_size, col * char_size + + if color_mode == "mono": + color = np.array([255, 255, 255], dtype=np.uint8) + elif color_mode == "invert": + # Colored background, dark text + result[y1:y1+char_size, x1:x1+char_size] = cell_colors[row, col] + color = np.array([0, 0, 0], dtype=np.uint8) + else: # color + color = cell_colors[row, col] + + # Apply character mask + if char != ' ': + mask = char_mask > 0 + result[y1:y1+char_size, x1:x1+char_size][mask] = color + + # Pad to original size if needed + if result.shape[0] < h or result.shape[1] < w: + padded = np.full((h, w, 3), bg, dtype=np.uint8) + padded[:grid_h, :grid_w] = result + result = padded + + return result, state + + +def _create_char_atlas(char_size: int, char_set: str) -> dict: + """Pre-render all characters as masks.""" + font = cv2.FONT_HERSHEY_SIMPLEX + font_scale = char_size / 20.0 + thickness = max(1, int(char_size / 10)) + + atlas = {} + for char in char_set: + char_img = np.zeros((char_size, char_size), dtype=np.uint8) + if char != ' ': + try: + (text_w, text_h), baseline = cv2.getTextSize(char, font, font_scale, thickness) + text_x = (char_size - text_w) // 2 + text_y = (char_size + text_h) // 2 + cv2.putText(char_img, char, (text_x, text_y), font, font_scale, 255, thickness, cv2.LINE_AA) + except: + pass + atlas[char] = char_img + + return atlas diff --git a/effects/ascii_art.sexp b/effects/ascii_art.sexp new file mode 100644 index 0000000..386f32b --- /dev/null +++ b/effects/ascii_art.sexp @@ -0,0 +1,34 @@ +;; ASCII art 
effect - render as text characters (music reactive) +;; Usage: python3 plan.py effects/ascii_art.sexp | python3 execute.py - -d . -o - | mpv --fs - +;; +;; Parameters: +;; color_mode: coloring mode ("color", "green", "white", default: "color") +;; char_size is bound to energy (wobbles with overall loudness) + +(recipe "ascii_art" + :version "1.0" + :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) + + (effect ascii_art :path "sexp_effects/effects/ascii_art.sexp") + (analyzer energy :path "../artdag-analyzers/energy/analyzer.py") + + ;; Default parameter + (def color_mode "color") + + (def video (source :path "monday.webm")) + (def audio (source :path "dizzy.mp3")) + + (def clip (-> video (segment :start 0 :duration 10))) + ;; Audio from 60s where it's louder + (def audio-clip (-> audio (segment :start 60 :duration 10))) + + ;; Analyze just the audio clip - times are 0-10s relative to clip + (def energy-data (-> audio-clip (analyze energy))) + + ;; char_size bound to energy - no offset needed since analysis is on clip + (def result (-> clip + (effect ascii_art + :char_size (bind energy-data values :range [2 32]) + :color_mode color_mode))) + + (mux result audio-clip)) diff --git a/effects/ascii_zones.sexp b/effects/ascii_zones.sexp new file mode 100644 index 0000000..ad7c85c --- /dev/null +++ b/effects/ascii_zones.sexp @@ -0,0 +1,33 @@ +;; ASCII zones effect - region-based ASCII rendering +;; Usage: python3 plan.py effects/ascii_zones.sexp -p char_size=10 | python3 execute.py - -d . 
-o output.mp4 +;; +;; Parameters: +;; char_size: character cell size in pixels (4 to 20, default: 10) + +(recipe "ascii_zones" + :version "1.0" + :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) + + (effect ascii_zones :path "sexp_effects/effects/ascii_zones.sexp") + (analyzer energy :path "../artdag-analyzers/energy/analyzer.py") + + + ;; Default parameter (overridden by -p char_size=N) + (def char_size 10) + + (def video (source :path "monday.webm")) + (def clip (-> video (segment :start 0 :duration 10))) + (def audio (source :path "dizzy.mp3")) + (def audio-clip (-> audio (segment :start 60 :duration 10))) + + ;; Analyze just the audio clip - times are 0-10s relative to clip + (def energy-data (-> audio-clip (analyze energy))) + + + + ;; char_size bound to energy - no offset needed since analysis is on clip + (def result (-> clip + (effect ascii_zones + :char_size (bind energy-data values :range [2 32])))) + + (mux result audio-clip)) diff --git a/effects/beam.py b/effects/beam.py new file mode 100644 index 0000000..85a543b --- /dev/null +++ b/effects/beam.py @@ -0,0 +1,164 @@ +# /// script +# requires-python = ">=3.10" +# dependencies = ["numpy"] +# /// +""" +@effect beam +@version 1.0.0 +@author artdag + +@description +Beam effect. Creates animated light beams / lasers from a starting +point to an ending point with glow effect. + +@param start_x float + @range 0 1 + @default 0 + Beam start X position (0-1). + +@param start_y float + @range 0 1 + @default 0.5 + Beam start Y position (0-1). + +@param end_x float + @range 0 1 + @default 1 + Beam end X position (0-1). + +@param end_y float + @range 0 1 + @default 0.5 + Beam end Y position (0-1). + +@param thickness float + @range 1 50 + @default 5 + Beam core thickness in pixels. + +@param glow_radius float + @range 0 100 + @default 20 + Outer glow size in pixels. + +@param color list + @default [0, 255, 255] + Beam color RGB (default cyan). 
+ +@param intensity float + @range 0 2 + @default 1.0 + Beam brightness. + +@param pulse bool + @default false + Enable pulsing animation. + +@param pulse_speed float + @range 0.1 10 + @default 2.0 + Pulse animation speed. + +@example + (effect beam :start_x 0 :start_y 0.5 :end_x 1 :end_y 0.5) + +@example + ;; Reactive laser + (effect beam :intensity (bind bass :range [0.5 2]) :color [255 0 0]) +""" + +import numpy as np + + +def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: + """ + Apply beam effect to a video frame. + + Args: + frame: Input frame as numpy array (H, W, 3) RGB uint8 + params: Effect parameters + state: Persistent state dict + + Returns: + Tuple of (processed_frame, new_state) + """ + start_x = params.get("start_x", 0) + start_y = params.get("start_y", 0.5) + end_x = params.get("end_x", 1) + end_y = params.get("end_y", 0.5) + thickness = params.get("thickness", 5) + glow_radius = params.get("glow_radius", 20) + color = params.get("color", [0, 255, 255]) + intensity = params.get("intensity", 1.0) + pulse = params.get("pulse", False) + pulse_speed = params.get("pulse_speed", 2.0) + t = params.get("_time", 0) + + if state is None: + state = {} + + h, w = frame.shape[:2] + result = frame.copy().astype(np.float32) + + # Calculate beam endpoints in pixels + x1, y1 = int(start_x * w), int(start_y * h) + x2, y2 = int(end_x * w), int(end_y * h) + + # Apply pulse modulation + if pulse: + pulse_mod = 0.5 + 0.5 * np.sin(t * pulse_speed * 2 * np.pi) + intensity = intensity * pulse_mod + + # Create coordinate grids + y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) + + # Calculate distance from each pixel to the line segment + line_vec = np.array([x2 - x1, y2 - y1], dtype=np.float32) + line_len = np.sqrt(line_vec[0]**2 + line_vec[1]**2) + + if line_len < 1: + return frame, state + + line_unit = line_vec / line_len + + # Vector from start to each pixel + px = x_coords - x1 + py = y_coords - y1 + + # Project onto line + proj_len = 
px * line_unit[0] + py * line_unit[1] + proj_len = np.clip(proj_len, 0, line_len) + + # Closest point on line + closest_x = x1 + proj_len * line_unit[0] + closest_y = y1 + proj_len * line_unit[1] + + # Distance to closest point + dist = np.sqrt((x_coords - closest_x)**2 + (y_coords - closest_y)**2) + + # Get beam color + if isinstance(color, (list, tuple)) and len(color) >= 3: + beam_color = np.array(color[:3], dtype=np.float32) + else: + beam_color = np.array([0, 255, 255], dtype=np.float32) + + # Core beam (bright center) + core_mask = dist < thickness + core_intensity = intensity * (1 - dist[core_mask] / max(1, thickness)) + for c in range(3): + result[core_mask, c] = np.clip( + result[core_mask, c] + beam_color[c] * core_intensity, + 0, 255 + ) + + # Glow (fading outer region) + glow_mask = (dist >= thickness) & (dist < thickness + glow_radius) + glow_dist = dist[glow_mask] - thickness + glow_intensity = intensity * 0.5 * (1 - glow_dist / max(1, glow_radius)) ** 2 + for c in range(3): + result[glow_mask, c] = np.clip( + result[glow_mask, c] + beam_color[c] * glow_intensity, + 0, 255 + ) + + return result.astype(np.uint8), state diff --git a/effects/bloom.py b/effects/bloom.py new file mode 100644 index 0000000..089e4e0 --- /dev/null +++ b/effects/bloom.py @@ -0,0 +1,108 @@ +# /// script +# requires-python = ">=3.10" +# dependencies = ["numpy", "opencv-python"] +# /// +""" +@effect bloom +@version 1.0.0 +@author artdag + +@description +Bloom effect. Creates a soft glow around bright areas, simulating +camera lens bloom. Great for dreamy or ethereal looks. + +@param intensity float + @range 0 2 + @default 0.5 + Bloom brightness. + +@param threshold int + @range 0 255 + @default 200 + Brightness threshold for bloom (pixels above this glow). + +@param radius int + @range 1 50 + @default 15 + Blur radius for the glow. + +@param soft_threshold bool + @default true + Use soft threshold (gradual) vs hard threshold. 
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Add a bloom glow: bright pixels are extracted, Gaussian-blurred,
    optionally tinted, and additively blended back over the frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8.
        params: Effect parameters:
            - intensity: bloom brightness (default 0.5); <= 0 is a no-op
            - threshold: brightness cutoff (default 200)
            - radius: blur radius in pixels (default 15, clamped >= 1)
            - soft_threshold: gradual vs hard mask (default True)
            - color_tint: RGB tint for the glow (default white)
        state: Persistent state dict.

    Returns:
        Tuple of (processed_frame, new_state).
    """
    intensity = params.get("intensity", 0.5)
    threshold = int(params.get("threshold", 200))
    radius = max(1, int(params.get("radius", 15)))
    soft = params.get("soft_threshold", True)
    tint_rgb = params.get("color_tint", [255, 255, 255])

    if state is None:
        state = {}
    if intensity <= 0:
        # Nothing to add — return the input untouched.
        return frame, state

    img = frame.astype(np.float32)

    # Rec.601 luma as the brightness measure.
    luma = 0.299 * img[:, :, 0] + 0.587 * img[:, :, 1] + 0.114 * img[:, :, 2]

    if soft:
        # Gradual ramp from the threshold up to full white.
        mask = np.clip((luma - threshold) / (255 - threshold + 1e-6), 0, 1)
    else:
        mask = (luma > threshold).astype(np.float32)

    # Bright areas only, blurred into a glow.
    glow = img * mask[:, :, np.newaxis]
    k = radius * 2 + 1  # Gaussian kernel size must be odd.
    glow = cv2.GaussianBlur(glow, (k, k), 0)

    if isinstance(tint_rgb, (list, tuple)) and len(tint_rgb) >= 3:
        glow *= np.array(tint_rgb[:3], dtype=np.float32) / 255.0

    img = img + glow * intensity
    return np.clip(img, 0, 255).astype(np.uint8), state
+ + Args: + frame: Input frame as numpy array (H, W, 3) RGB uint8 + params: Effect parameters + - radius: blur radius in pixels (default 5) + state: Persistent state dict (unused) + + Returns: + Tuple of (processed_frame, new_state) + """ + radius = params.get("radius", 5) + + if radius <= 0: + return frame, state + + # Kernel size must be odd + ksize = int(radius) * 2 + 1 + + result = cv2.GaussianBlur(frame, (ksize, ksize), 0) + + return result, state diff --git a/effects/blur.sexp b/effects/blur.sexp new file mode 100644 index 0000000..5992559 --- /dev/null +++ b/effects/blur.sexp @@ -0,0 +1,20 @@ +;; Blur effect - gaussian blur +;; Usage: python3 plan.py effects/blur.sexp -p radius=10 | python3 execute.py - -d . -o output.mp4 +;; +;; Parameters: +;; radius: blur radius in pixels (1 to 50, default: 10) +;; higher = more blurry + +(recipe "blur" + :version "1.0" + :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) + + (effect blur :path "sexp_effects/effects/blur.sexp") + + ;; Default parameter (overridden by -p radius=N) + (def radius 10) + + (def video (source :path "monday.webm")) + (def clip (-> video (segment :start 0 :duration 10))) + (def result (-> clip (effect blur :radius radius))) + result) diff --git a/effects/brightness.py b/effects/brightness.py new file mode 100644 index 0000000..11a5f0c --- /dev/null +++ b/effects/brightness.py @@ -0,0 +1,52 @@ +# /// script +# requires-python = ">=3.10" +# dependencies = ["numpy"] +# /// +""" +@effect brightness +@version 1.0.0 +@author artdag + +@description +Adjusts the brightness of each frame by multiplying pixel values. +Values > 1.0 brighten, < 1.0 darken. Useful for pulse effects synced to music. + +@param factor float + @range 0 3 + @default 1.0 + Brightness multiplier. 0 = black, 1 = unchanged, 2 = double brightness. + Bind to bass/energy for reactive brightness pulses. 
+ +@example + (effect brightness :factor 1.5) + +@example + ;; Pulse brighter on bass hits + (effect brightness :factor (bind bass :range [1.0 2.0] :transform sqrt)) +""" + +import numpy as np + + +def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: + """ + Adjust brightness of a video frame. + + Args: + frame: Input frame as numpy array (H, W, 3) RGB uint8 + params: Effect parameters + - factor: brightness multiplier (default 1.0) + state: Persistent state dict (unused) + + Returns: + Tuple of (processed_frame, new_state) + """ + factor = params.get("factor", 1.0) + + if factor == 1.0: + return frame, state + + # Apply brightness multiplier with clipping + result = np.clip(frame.astype(np.float32) * factor, 0, 255).astype(np.uint8) + + return result, state diff --git a/effects/brightness.sexp b/effects/brightness.sexp new file mode 100644 index 0000000..9ef1ccc --- /dev/null +++ b/effects/brightness.sexp @@ -0,0 +1,20 @@ +;; Brightness effect - adjust image brightness +;; Usage: python3 plan.py effects/brightness.sexp [-p amount=30] | python3 execute.py - -d . 
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Cartoon / cel-shade the frame: pre-smooth, posterize the colors into
    flat bands, then stamp dark outlines where Sobel edges are strong.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8.
        params: Effect parameters:
            - detail: edge sensitivity, clamped to 0.1-1.0 (default 0.5)
            - edge_thickness: outline thickness 1-5 (default 1)
            - color_levels: posterization levels 2-32 (default 6)
            - edge_color: RGB outline color (default black)
            - blur_size: pre-blur amount, >= 0 (default 2)
        state: Persistent state dict.

    Returns:
        Tuple of (processed_frame, new_state).
    """
    detail = np.clip(params.get("detail", 0.5), 0.1, 1.0)
    thickness = max(1, min(int(params.get("edge_thickness", 1)), 5))
    levels = max(2, min(int(params.get("color_levels", 6)), 32))
    edge_rgb = params.get("edge_color", [0, 0, 0])
    smooth = max(0, int(params.get("blur_size", 2)))

    if state is None:
        state = {}

    img = frame.copy().astype(np.float32)

    # 1) Pre-smooth each channel so posterized regions come out flatter.
    if smooth > 0:
        for ch in range(3):
            img[:, :, ch] = ndimage.uniform_filter(img[:, :, ch], size=smooth)

    # 2) Posterize: quantize every channel down to `levels` bands.
    band = 256 / levels
    img = (np.floor(img / band) * band).astype(np.uint8)

    # 3) Sobel edge magnitude on the unblurred source luminance.
    gray = np.mean(frame, axis=2).astype(np.float32)
    grad = np.sqrt(ndimage.sobel(gray, axis=1) ** 2 + ndimage.sobel(gray, axis=0) ** 2)
    peak = grad.max()
    if peak > 0:
        grad = grad / peak
    outline = grad > (1.0 - detail)

    # Thicken outlines by repeated binary dilation.
    if thickness > 1:
        element = ndimage.generate_binary_structure(2, 1)
        for _ in range(thickness - 1):
            outline = ndimage.binary_dilation(outline, structure=element)

    # 4) Stamp the outline color over edge pixels.
    if isinstance(edge_rgb, (list, tuple)) and len(edge_rgb) >= 3:
        stamp = np.array(edge_rgb[:3], dtype=np.uint8)
    else:
        stamp = np.array([0, 0, 0], dtype=np.uint8)
    img[outline] = stamp

    return img, state
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Chromatic aberration: offset the red and blue channels in opposite
    directions — radially from a centre point, or as a uniform
    horizontal shift — leaving green in place.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8.
        params: Effect parameters:
            - strength: aberration amount (default 10); 0 is a no-op
            - center_x / center_y: centre in 0-1 coords (default 0.5)
            - radial: scale offset with distance from centre (default True)
        state: Persistent state dict.

    Returns:
        Tuple of (processed_frame, new_state).
    """
    strength = params.get("strength", 10)
    cx_frac = params.get("center_x", 0.5)
    cy_frac = params.get("center_y", 0.5)
    radial = params.get("radial", True)

    if state is None:
        state = {}
    if strength == 0:
        return frame, state

    h, w = frame.shape[:2]
    red, green, blue = frame[:, :, 0], frame[:, :, 1], frame[:, :, 2]

    if not radial:
        # Uniform mode: red shifted right, blue shifted left.
        shift = int(strength)
        warp_r = np.float32([[1, 0, shift], [0, 1, 0]])
        warp_b = np.float32([[1, 0, -shift], [0, 1, 0]])
        red_s = cv2.warpAffine(red, warp_r, (w, h), borderMode=cv2.BORDER_REPLICATE)
        blue_s = cv2.warpAffine(blue, warp_b, (w, h), borderMode=cv2.BORDER_REPLICATE)
        return np.stack([red_s, green, blue_s], axis=-1).astype(np.uint8), state

    # Radial mode: displacement grows with distance from the centre.
    cx, cy = w * cx_frac, h * cy_frac
    yy, xx = np.ogrid[:h, :w]
    dist = np.sqrt((xx - cx) ** 2 + (yy - cy) ** 2)
    dist_n = (dist / np.sqrt(cx ** 2 + cy ** 2)).astype(np.float32)

    base_x = np.tile(np.arange(w, dtype=np.float32), (h, 1))
    base_y = np.tile(np.arange(h, dtype=np.float32).reshape(-1, 1), (1, w))
    # Unit direction away from the centre (epsilon avoids 0/0 at the centre).
    dir_x = (base_x - cx) / (dist + 1e-6)
    dir_y = (base_y - cy) / (dist + 1e-6)

    def _remap(chan, amount):
        # Shift a channel along the radial direction by `amount` pixels.
        mx = (base_x + dir_x * amount).astype(np.float32)
        my = (base_y + dir_y * amount).astype(np.float32)
        return cv2.remap(chan, mx, my, cv2.INTER_LINEAR,
                         borderMode=cv2.BORDER_REPLICATE)

    red_s = _remap(red, strength * dist_n)     # red pushed outward
    blue_s = _remap(blue, -strength * dist_n)  # blue pulled inward
    return np.stack([red_s, green, blue_s], axis=-1).astype(np.uint8), state
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Rotate image hues over time (psychedelic colour cycling), optionally
    restricted to highlights, shadows, or midtones.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8.
        params: Effect parameters:
            - speed: hue rotations per second (default 1)
            - offset: initial hue offset in degrees (default 0)
            - saturation_boost: saturation multiplier, clamped 0-2
              (default 1; applied only in "all" mode)
            - mode: "all" | "highlights" | "shadows" | "midtones"
            - _time: current time in seconds (supplied by the pipeline)
        state: Persistent state dict.

    Returns:
        Tuple of (processed_frame, new_state).
    """
    speed = params.get("speed", 1)
    offset = params.get("offset", 0)
    sat_gain = max(0, min(params.get("saturation_boost", 1), 2))
    mode = params.get("mode", "all")
    t = params.get("_time", 0)

    if state is None:
        state = {}

    # Total hue rotation in degrees; OpenCV hue spans 0-179, hence /2 below.
    shift_deg = int((offset + speed * t * 360) % 360)

    # Frame is RGB; OpenCV conversions expect BGR.
    hsv = cv2.cvtColor(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR),
                       cv2.COLOR_BGR2HSV).astype(np.float32)

    if mode == "all":
        hsv[:, :, 0] = (hsv[:, :, 0] + shift_deg / 2) % 180
        hsv[:, :, 1] = np.clip(hsv[:, :, 1] * sat_gain, 0, 255)
    else:
        value = hsv[:, :, 2] / 255.0
        if mode == "highlights":
            weight = np.clip((value - 0.67) * 3, 0, 1)
        elif mode == "shadows":
            weight = np.clip(1 - value * 3, 0, 1)
        else:  # midtones = whatever is neither shadow nor highlight
            weight = 1 - np.clip(1 - value * 3, 0, 1) - np.clip((value - 0.67) * 3, 0, 1)
        rotated = (hsv[:, :, 0] + shift_deg / 2) % 180
        # Blend original and rotated hue by the tonal weight.
        hsv[:, :, 0] = hsv[:, :, 0] * (1 - weight) + rotated * weight

    hsv = np.clip(hsv, 0, 255).astype(np.uint8)
    rgb = cv2.cvtColor(cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR), cv2.COLOR_BGR2RGB)
    return rgb, state
def _as_tint(rgb) -> np.ndarray:
    """Normalize an RGB tint list to float32 fractions (zeros when invalid)."""
    if isinstance(rgb, (list, tuple)) and len(rgb) >= 3:
        return np.array(rgb[:3], dtype=np.float32) / 255.0
    return np.zeros(3, dtype=np.float32)


def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Cinematic colour grade: lift/gamma/gain, per-zone tints for
    shadows/midtones/highlights, colour temperature and green-magenta tint.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8.
        params: Effect parameters:
            - shadows / midtones / highlights: RGB tints (default black)
            - lift: shadow level offset (default 0)
            - gamma: midtone curve, clamped 0.5-2 (default 1)
            - gain: highlight scale, clamped 0.5-2 (default 1)
            - temperature: -100 cool .. +100 warm (default 0)
            - tint: -100 green .. +100 magenta (default 0)
        state: Persistent state dict.

    Returns:
        Tuple of (processed_frame, new_state).
    """
    lift = params.get("lift", 0)
    gamma = max(0.5, min(params.get("gamma", 1), 2))
    gain = max(0.5, min(params.get("gain", 1), 2))
    temperature = params.get("temperature", 0)
    green_magenta = params.get("tint", 0)

    if state is None:
        state = {}

    img = frame.astype(np.float32) / 255.0

    # Lift raises the floor, gamma bends midtones, gain scales highlights.
    # (Clip floor of 0.001 keeps pow() well-defined for non-positive values.)
    img = img + lift
    img = np.power(np.clip(img, 0.001, 1), 1 / gamma)
    img = img * gain

    shadow_rgb = _as_tint(params.get("shadows", [0, 0, 0]))
    mid_rgb = _as_tint(params.get("midtones", [0, 0, 0]))
    high_rgb = _as_tint(params.get("highlights", [0, 0, 0]))

    # Zone masks from Rec.601 luma of the graded image.
    luma = 0.299 * img[:, :, 0] + 0.587 * img[:, :, 1] + 0.114 * img[:, :, 2]
    s_mask = np.clip(1 - luma * 3, 0, 1)[:, :, np.newaxis]
    h_mask = np.clip((luma - 0.67) * 3, 0, 1)[:, :, np.newaxis]
    m_mask = 1 - s_mask - h_mask

    # Per-channel zone tints (kept as a channel loop to preserve exact
    # float accumulation order).
    for ch in range(3):
        img[:, :, ch] += shadow_rgb[ch] * s_mask[:, :, 0]
        img[:, :, ch] += mid_rgb[ch] * m_mask[:, :, 0]
        img[:, :, ch] += high_rgb[ch] * h_mask[:, :, 0]

    # Warm/cool: push red up and blue down (or vice versa).
    if temperature != 0:
        warm = temperature / 100.0
        img[:, :, 0] += warm * 0.3
        img[:, :, 2] -= warm * 0.3

    # Positive tint shifts toward magenta by pulling green down.
    if green_magenta != 0:
        img[:, :, 1] -= (green_magenta / 100.0) * 0.2

    return (np.clip(img, 0, 1) * 255).astype(np.uint8), state
+ + Args: + frame: Input frame as numpy array (H, W, 3) RGB uint8 + params: Effect parameters + - factor: contrast multiplier (default 1.0) + state: Persistent state dict (unused) + + Returns: + Tuple of (processed_frame, new_state) + """ + factor = params.get("factor", 1.0) + + if factor == 1.0: + return frame, state + + # Adjust contrast around midpoint (128) + img_float = frame.astype(np.float32) + result = 128 + factor * (img_float - 128) + + return np.clip(result, 0, 255).astype(np.uint8), state diff --git a/effects/contrast.sexp b/effects/contrast.sexp new file mode 100644 index 0000000..2c4a2ef --- /dev/null +++ b/effects/contrast.sexp @@ -0,0 +1,20 @@ +;; Contrast effect - adjust image contrast +;; Usage: python3 plan.py effects/contrast.sexp -p amount=1.5 | python3 execute.py - -d . -o output.mp4 +;; +;; Parameters: +;; amount: contrast multiplier (0.0 to 3.0, default: 1.5) +;; <1 = less contrast, >1 = more contrast + +(recipe "contrast" + :version "1.0" + :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) + + (effect contrast :path "sexp_effects/effects/contrast.sexp") + + ;; Default parameter (overridden by -p amount=N) + (def amount 1.5) + + (def video (source :path "monday.webm")) + (def clip (-> video (segment :start 0 :duration 10))) + (def result (-> clip (effect contrast :amount amount))) + result) diff --git a/effects/crt.py b/effects/crt.py new file mode 100644 index 0000000..5690c05 --- /dev/null +++ b/effects/crt.py @@ -0,0 +1,193 @@ +# /// script +# requires-python = ">=3.10" +# dependencies = ["numpy", "opencv-python"] +# /// +""" +@effect crt +@version 1.0.0 +@author artdag + +@description +CRT / Scanlines effect. Simulates CRT monitor aesthetics with visible +scan lines, optional RGB subpixels, barrel distortion, and vignette. + +@param line_spacing int + @range 1 10 + @default 2 + Pixels between scanlines. + +@param line_opacity float + @range 0 1 + @default 0.3 + Darkness of scanlines. 
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Simulate a CRT monitor: scanlines, optional RGB subpixel mask,
    barrel curvature, vignette, bloom and brightness flicker.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8.
        params: Effect parameters:
            - line_spacing: pixels between scanlines (default 2, min 1)
            - line_opacity: scanline darkness 0-1 (default 0.3)
            - rgb_subpixels: show subpixel pattern (default False)
            - curvature: barrel distortion amount (default 0)
            - vignette: corner darkening 0-1 (default 0)
            - bloom: glow on bright areas (default 0)
            - flicker: brightness jitter amplitude (default 0)
            - seed: RNG seed for flicker (default 42)
        state: Persistent state dict; holds the "rng" used for flicker.

    Returns:
        Tuple of (processed_frame, new_state).
    """
    spacing = max(1, int(params.get("line_spacing", 2)))
    opacity = params.get("line_opacity", 0.3)
    subpixels = params.get("rgb_subpixels", False)
    curvature = params.get("curvature", 0)
    vignette = params.get("vignette", 0)
    bloom = params.get("bloom", 0)
    flicker = params.get("flicker", 0)
    seed = int(params.get("seed", 42))

    if state is None:
        state = {}
    # Lazily create the deterministic RNG used for flicker.
    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)
    rng = state["rng"]

    h, w = frame.shape[:2]
    out = frame.astype(np.float32).copy()

    if curvature > 0:
        out = _apply_curvature(out, curvature)
    if bloom > 0:
        out = _apply_bloom(out, bloom)

    # Darken every `spacing`-th row to fake scanlines.
    if opacity > 0:
        out[::spacing] = out[::spacing] * (1 - opacity)

    # RGB subpixel mask: each column boosts one channel, dims the others.
    if subpixels:
        cols = np.arange(w) % 3
        mask_gain = np.full((w, 3), 0.8, dtype=np.float32)
        mask_gain[cols == 0, 0] = 1.2
        mask_gain[cols == 1, 1] = 1.2
        mask_gain[cols == 2, 2] = 1.2
        out *= mask_gain[np.newaxis, :, :]

    # Radial vignette toward the corners.
    if vignette > 0:
        yy, xx = np.ogrid[:h, :w]
        cx, cy = w / 2, h / 2
        radius = np.sqrt((xx - cx) ** 2 + (yy - cy) ** 2)
        falloff = np.clip(1 - (radius / np.sqrt(cx ** 2 + cy ** 2)) * vignette, 0, 1)
        out = out * falloff[:, :, np.newaxis]

    # Per-frame brightness jitter.
    if flicker > 0:
        out = out * (1.0 + rng.uniform(-flicker, flicker))

    return np.clip(out, 0, 255).astype(np.uint8), state


def _apply_curvature(frame: np.ndarray, strength: float) -> np.ndarray:
    """Barrel-distort the frame as if shown on curved CRT glass."""
    h, w = frame.shape[:2]
    yy, xx = np.mgrid[0:h, 0:w].astype(np.float32)

    # Normalized coordinates in [-1, 1] around the centre.
    xn = (xx - w / 2) / (w / 2)
    yn = (yy - h / 2) / (h / 2)
    r = np.sqrt(xn ** 2 + yn ** 2)

    # Classic barrel model: r' = r * (1 + k * r^2); guard the r == 0 centre.
    scale = np.where(r > 0, (r * (1 + strength * r ** 2)) / r, 1)

    src_x = (xn * scale * (w / 2) + w / 2).astype(np.float32)
    src_y = (yn * scale * (h / 2) + h / 2).astype(np.float32)
    warped = cv2.remap(frame.astype(np.uint8), src_x, src_y,
                       cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT,
                       borderValue=(0, 0, 0))
    return warped.astype(np.float32)


def _apply_bloom(frame: np.ndarray, strength: float) -> np.ndarray:
    """Blur the bright (>200 gray) regions and add them back as glow."""
    gray = cv2.cvtColor(frame.astype(np.uint8), cv2.COLOR_RGB2GRAY)
    _, bright = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)
    glow = cv2.cvtColor(cv2.GaussianBlur(bright, (21, 21), 0), cv2.COLOR_GRAY2RGB)
    return frame + glow.astype(np.float32) * strength * 0.5
-o output.mp4 +;; +;; Parameters: +;; line_spacing: pixels between scanlines (1 to 8, default: 3) +;; vignette_amount: edge darkening strength (0 to 1, default: 0.3) + +(recipe "crt" + :version "1.0" + :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) + + (effect crt :path "sexp_effects/effects/crt.sexp") + + ;; Default parameters (overridden by -p line_spacing=N -p vignette_amount=N) + (def line_spacing 3) + (def vignette_amount 0.3) + + (def video (source :path "monday.webm")) + (def clip (-> video (segment :start 0 :duration 10))) + (def result (-> clip (effect crt :line_spacing line_spacing :vignette_amount vignette_amount))) + result) diff --git a/effects/datamosh.py b/effects/datamosh.py new file mode 100644 index 0000000..2e638cb --- /dev/null +++ b/effects/datamosh.py @@ -0,0 +1,141 @@ +# /// script +# requires-python = ">=3.10" +# dependencies = ["numpy"] +# /// +""" +@effect datamosh +@version 1.0.0 +@author artdag + +@description +Digital corruption / glitch block effect. Randomly corrupts rectangular +blocks by shifting, swapping, or duplicating from previous frames. +Simulates video compression artifacts. + +@param block_size int + @range 8 128 + @default 32 + Size of corruption blocks in pixels. + +@param corruption float + @range 0 1 + @default 0.3 + Probability of corrupting each block. Bind to energy for reactive glitch. + +@param max_offset int + @range 0 200 + @default 50 + Maximum pixel offset when shifting blocks. + +@param color_corrupt bool + @default true + Also apply color channel shifts to blocks. + +@param seed int + @default 42 + Random seed for deterministic glitch patterns. + +@state previous_frame ndarray + Stores previous frame for frame-blending corruption. + +@state rng DeterministicRNG + Random number generator for reproducible results. 
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply datamosh/glitch block effect to a video frame.

    The frame is scanned in fixed-size blocks (row-major); each block is
    corrupted with probability ``corruption`` by one of four randomly
    chosen operations: shift, duplicate (from previous frame), color
    channel roll, or swap with another block.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - block_size: corruption block size, clamped to 8-128 (default 32)
            - corruption: per-block probability 0-1 (default 0.3)
            - max_offset: max shift in pixels (default 50)
            - color_corrupt: apply color channel rolls (default True)
            - seed: RNG seed, used only on first call (default 42)
        state: Persistent state dict
            - previous_frame: last frame, used by the duplicate op
            - rng: DeterministicRNG, created lazily for reproducibility

    Returns:
        Tuple of (processed_frame, new_state)
    """
    size = max(8, min(int(params.get("block_size", 32)), 128))
    prob = max(0, min(params.get("corruption", 0.3), 1))
    offset_cap = int(params.get("max_offset", 50))
    corrupt_color = params.get("color_corrupt", True)
    seed = int(params.get("seed", 42))

    state = {} if state is None else state

    # Lazy RNG creation keeps the glitch pattern reproducible per seed.
    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)
    rng = state["rng"]

    if prob == 0:
        # Nothing to corrupt, but still remember this frame so the
        # "duplicate" op has history once corruption turns on.
        state["previous_frame"] = frame.copy()
        return frame, state

    rows, cols = frame.shape[:2]
    out = frame.copy()
    last = state.get("previous_frame")

    for top in range(0, rows, size):
        for left in range(0, cols, size):
            height = min(size, rows - top)
            width = min(size, cols - left)

            # Guard clause: most blocks pass through untouched.
            if rng.uniform() >= prob:
                continue

            kind = rng.choice(["shift", "duplicate", "color", "swap"])

            if kind == "shift" and offset_cap > 0:
                # Copy the block from a randomly offset source position.
                dx = rng.randint(-offset_cap, offset_cap)
                dy = rng.randint(-offset_cap, offset_cap)
                sx = max(0, min(left + dx, cols - width))
                sy = max(0, min(top + dy, rows - height))
                out[top:top + height, left:left + width] = \
                    frame[sy:sy + height, sx:sx + width]

            elif kind == "duplicate" and last is not None:
                # Reuse the same block from the previous frame.
                if last.shape == frame.shape:
                    out[top:top + height, left:left + width] = \
                        last[top:top + height, left:left + width]

            elif kind == "color" and corrupt_color:
                # Roll one color channel vertically within the block.
                patch = out[top:top + height, left:left + width].copy()
                amount = rng.randint(1, 3)
                chan = rng.randint(0, 2)
                patch[:, :, chan] = np.roll(patch[:, :, chan], amount, axis=0)
                out[top:top + height, left:left + width] = patch

            elif kind == "swap":
                # Exchange this block with a random other block.
                ox = rng.randint(0, max(0, cols - width - 1))
                oy = rng.randint(0, max(0, rows - height - 1))
                held = out[top:top + height, left:left + width].copy()
                out[top:top + height, left:left + width] = \
                    frame[oy:oy + height, ox:ox + width]
                out[oy:oy + height, ox:ox + width] = held

    state["previous_frame"] = frame.copy()
    return out, state
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply displacement effect to a video frame.

    Builds per-pixel lookup maps from one of four patterns (sine, noise,
    radial, turbulence) and warps the frame with cv2.remap.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - amount: displacement strength in pixels (default 20)
            - pattern: "sine" | "noise" | "radial" | "turbulence" (default "sine")
            - frequency: pattern frequency, clamped to >= 1 (default 10)
            - speed: animation speed multiplier (default 1)
            - direction: "horizontal" | "vertical" | "both" (default "both")
            - seed: RNG seed for noise patterns (default 42)
            - _time: current time; presumably seconds supplied by the
              host pipeline — TODO confirm units against execute.py
        state: Persistent state dict
            - rng: DeterministicRNG, created lazily
            - noise_x / noise_y / noise_size: cached noise fields for
              the "noise" pattern, keyed on frame size

    Returns:
        Tuple of (processed_frame, new_state)
    """
    amount = params.get("amount", 20)
    pattern = params.get("pattern", "sine")
    frequency = max(1, params.get("frequency", 10))
    speed = params.get("speed", 1)
    direction = params.get("direction", "both")
    seed = int(params.get("seed", 42))
    t = params.get("_time", 0)

    if state is None:
        state = {}

    # Zero amount is the identity transform; skip all map building.
    if amount == 0:
        return frame, state

    # Initialize RNG (lazily, so the draw sequence is reproducible)
    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)

    h, w = frame.shape[:2]

    # Create base coordinate maps (identity mapping: pixel -> itself)
    map_x = np.tile(np.arange(w, dtype=np.float32), (h, 1))
    map_y = np.tile(np.arange(h, dtype=np.float32).reshape(-1, 1), (1, w))

    # Generate displacement based on pattern
    if pattern == "sine":
        # Sine wave displacement; x offset driven by y position and
        # vice versa, both animated by the shared phase.
        phase = t * speed * 2 * np.pi
        if direction in ["horizontal", "both"]:
            map_x = map_x + amount * np.sin(2 * np.pi * map_y / h * frequency + phase)
        if direction in ["vertical", "both"]:
            map_y = map_y + amount * np.sin(2 * np.pi * map_x / w * frequency + phase)

    elif pattern == "noise":
        # Generate noise displacement. The field is drawn once (row-major,
        # x field before y field — this RNG consumption order matters for
        # reproducibility) and cached until the frame size changes, so the
        # warp is static across frames.
        rng = state["rng"]
        if "noise_x" not in state or state.get("noise_size") != (h, w):
            state["noise_x"] = np.array([[rng.uniform(-1, 1) for _ in range(w)] for _ in range(h)], dtype=np.float32)
            state["noise_y"] = np.array([[rng.uniform(-1, 1) for _ in range(w)] for _ in range(h)], dtype=np.float32)
            state["noise_size"] = (h, w)

        if direction in ["horizontal", "both"]:
            map_x = map_x + amount * state["noise_x"]
        if direction in ["vertical", "both"]:
            map_y = map_y + amount * state["noise_y"]

    elif pattern == "radial":
        # Radial/swirl displacement. NOTE: this branch overwrites the
        # base identity maps rather than adding to them.
        cx, cy = w / 2, h / 2
        y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32)
        dx = x_coords - cx
        dy = y_coords - cy
        dist = np.sqrt(dx**2 + dy**2) + 1e-6  # epsilon avoids atan2/divide at the exact center
        angle = np.arctan2(dy, dx)

        # Swirl amount varies with distance and time
        swirl = amount * 0.01 * np.sin(dist / (w / frequency) + t * speed * 2 * np.pi)

        new_angle = angle + swirl
        if direction in ["horizontal", "both"]:
            map_x = cx + dist * np.cos(new_angle)
        if direction in ["vertical", "both"]:
            map_y = cy + dist * np.sin(new_angle)

    elif pattern == "turbulence":
        # Multi-scale noise, redrawn every frame (flickering warp).
        # Coarser octaves are generated at reduced resolution, upscaled,
        # and weighted by 1/scale.
        rng = state["rng"]
        disp_x = np.zeros((h, w), dtype=np.float32)
        disp_y = np.zeros((h, w), dtype=np.float32)

        for scale in [1, 2, 4]:
            sh, sw = h // scale, w // scale
            noise_x = np.array([[rng.uniform(-1, 1) for _ in range(sw)] for _ in range(sh)], dtype=np.float32)
            noise_y = np.array([[rng.uniform(-1, 1) for _ in range(sw)] for _ in range(sh)], dtype=np.float32)
            if scale > 1:
                noise_x = cv2.resize(noise_x, (w, h))
                noise_y = cv2.resize(noise_y, (w, h))
            disp_x += noise_x / scale
            disp_y += noise_y / scale

        if direction in ["horizontal", "both"]:
            map_x = map_x + amount * disp_x
        if direction in ["vertical", "both"]:
            map_y = map_y + amount * disp_y

    # Apply remapping; reflected border hides out-of-range samples.
    result = cv2.remap(frame, map_x, map_y, cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)

    return result, state
+# dependencies = ["numpy"] +# /// +""" +@effect echo +@version 1.0.0 +@author artdag + +@description +Motion trail / echo effect. Blends current frame with previous frames +to create ghosting/trailing effects. Great for fast movement scenes. + +Uses a frame buffer in state to store recent frames for blending. + +@param num_echoes int + @range 1 20 + @default 4 + Number of trailing frames to blend. + +@param decay float + @range 0 1 + @default 0.5 + Opacity ratio between successive echoes. 0.5 = each echo half as bright. + +@param blend_mode string + @enum blend add screen maximum + @default blend + How to combine echoes: + - blend: weighted average + - add: sum (can overexpose) + - screen: like add but resists overexposure + - maximum: brightest pixel wins + +@state frame_buffer list + Circular buffer of recent frames. + +@example + (effect echo :num_echoes 6 :decay 0.6) + +@example + ;; More echoes on energy + (effect echo :num_echoes (bind energy :range [2 10])) +""" + +import numpy as np + + +def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: + """ + Apply echo/motion trail effect to a video frame. 
+ + Args: + frame: Input frame as numpy array (H, W, 3) RGB uint8 + params: Effect parameters + - num_echoes: number of trailing frames (default 4) + - decay: opacity decay ratio (default 0.5) + - blend_mode: blend/add/screen/maximum (default blend) + state: Persistent state dict + - frame_buffer: list of recent frames + + Returns: + Tuple of (processed_frame, new_state) + """ + num_echoes = max(1, min(int(params.get("num_echoes", 4)), 20)) + decay = max(0, min(params.get("decay", 0.5), 1)) + blend_mode = params.get("blend_mode", "blend") + + if state is None: + state = {} + + # Initialize frame buffer + if "frame_buffer" not in state: + state["frame_buffer"] = [] + + buffer = state["frame_buffer"] + + # Add current frame to buffer + buffer.append(frame.copy()) + + # Limit buffer size + max_buffer = num_echoes + 5 + while len(buffer) > max_buffer: + buffer.pop(0) + + # Collect frames and intensities for blending + frames = [] + intensities = [] + intensity = 1.0 + + # Current frame first, then older frames + for i in range(min(num_echoes + 1, len(buffer))): + idx = len(buffer) - 1 - i + if idx >= 0: + frames.append(buffer[idx].astype(np.float32)) + intensities.append(intensity) + intensity *= decay + + if not frames: + return frame, state + + # Blend frames according to mode + result = _blend_frames(frames, intensities, blend_mode) + + return np.clip(result, 0, 255).astype(np.uint8), state + + +def _blend_frames(frames, intensities, blend_mode): + """Blend multiple frames according to blend mode.""" + if not frames: + return frames[0] + + if blend_mode == "add": + result = np.zeros_like(frames[0]) + for frame, intensity in zip(frames, intensities): + result += frame * intensity + return result + + elif blend_mode == "screen": + result = np.zeros_like(frames[0]) + for frame, intensity in zip(frames, intensities): + weighted = (frame / 255.0) * intensity + result = 255 * (1 - (1 - result / 255.0) * (1 - weighted)) + return result + + elif blend_mode == "maximum": + 
result = frames[0] * intensities[0] + for frame, intensity in zip(frames[1:], intensities[1:]): + result = np.maximum(result, frame * intensity) + return result + + else: # blend - weighted average + total = sum(intensities) + if total == 0: + return frames[0] + result = np.zeros_like(frames[0]) + for frame, intensity in zip(frames, intensities): + result += frame * (intensity / total) + return result diff --git a/effects/echo.sexp b/effects/echo.sexp new file mode 100644 index 0000000..2cee146 --- /dev/null +++ b/effects/echo.sexp @@ -0,0 +1,21 @@ +;; Echo effect - temporal ghosting/trails +;; Usage: python3 plan.py effects/echo.sexp -p num_echoes=4 -p decay=0.5 | python3 execute.py - -d . -o output.mp4 +;; +;; Parameters: +;; num_echoes: number of ghost frames (1 to 10, default: 4) +;; decay: fade rate per echo (0.2 to 0.9, default: 0.5) + +(recipe "echo" + :version "1.0" + :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) + + (effect echo :path "sexp_effects/effects/echo.sexp") + + ;; Default parameters (overridden by -p num_echoes=N -p decay=N) + (def num_echoes 4) + (def decay 0.5) + + (def video (source :path "monday.webm")) + (def clip (-> video (segment :start 0 :duration 10))) + (def result (-> clip (effect echo :num_echoes num_echoes :decay decay))) + result) diff --git a/effects/edge_detect.sexp b/effects/edge_detect.sexp new file mode 100644 index 0000000..a1d6743 --- /dev/null +++ b/effects/edge_detect.sexp @@ -0,0 +1,22 @@ +;; Edge detect effect - Canny edge detection +;; Usage: python3 plan.py effects/edge_detect.sexp -p low=50 -p high=150 | python3 execute.py - -d . 
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply emboss (relief) effect to a video frame.

    A directional 3x3 kernel is built from the light angle and convolved
    over the grayscale image; the response is re-centered around
    mid-gray and optionally mixed back with the original frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - strength: emboss intensity (default 1.0)
            - direction: light angle in degrees (default 135)
            - blend: mix with original, 0 = full emboss, 1 = original (default 0.3)
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    strength = params.get("strength", 1.0)
    direction = params.get("direction", 135)
    blend = params.get("blend", 0.3)

    # Light direction as a unit vector.
    theta = np.deg2rad(direction)
    lx = np.cos(theta)
    ly = np.sin(theta)

    # Directional emboss kernel: negative weights on the shadow side,
    # positive on the lit side, identity passthrough in the center.
    sx = strength * lx
    sy = strength * ly
    kernel = np.array([
        [-sy - sx, -sy, -sy + sx],
        [-sx,        1,       sx],
        [sy - sx,   sy,  sy + sx],
    ], dtype=np.float32)

    # Convolve on luminance only.
    luma = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY).astype(np.float32)
    relief = cv2.filter2D(luma, -1, kernel)

    # Re-center around mid-gray and clamp to displayable range.
    relief = relief + 128
    relief = np.clip(relief, 0, 255)
    relief_rgb = cv2.cvtColor(relief.astype(np.uint8), cv2.COLOR_GRAY2RGB)

    if blend > 0:
        mixed = frame.astype(np.float32) * blend + relief_rgb.astype(np.float32) * (1 - blend)
        return np.clip(mixed, 0, 255).astype(np.uint8), state

    return relief_rgb, state
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply film grain effect to a video frame.

    Gaussian noise is drawn from the persistent RNG — optionally at a
    coarser resolution (grain_size > 1) and upscaled — then added to
    the frame. With temporal_variation < 1 the first frame's grain is
    cached and blended with fresh grain on later frames.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - intensity: noise intensity 0-1 (default 0.2)
            - grain_size: grain particle size, min 0.5 (default 1.0)
            - colored: per-channel noise instead of monochrome (default False)
            - temporal_variation: 0 = frozen grain, 1 = new grain per frame (default 1.0)
            - seed: RNG seed, used only on first call (default 42)
        state: Persistent state dict
            - rng: DeterministicRNG instance
            - frame_count: frames processed so far
            - static_noise: cached grain for temporal blending

    Returns:
        Tuple of (processed_frame, new_state)
    """
    intensity = params.get("intensity", 0.2)
    grain_size = max(0.5, params.get("grain_size", 1.0))
    colored = params.get("colored", False)
    temporal_var = params.get("temporal_variation", 1.0)
    seed = int(params.get("seed", 42))

    if state is None:
        state = {}

    # No grain requested: identity.
    if intensity <= 0:
        return frame, state

    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)
        state["frame_count"] = 0
        state["static_noise"] = None

    rng = state["rng"]
    frame_count = state["frame_count"]

    h, w = frame.shape[:2]

    # Coarser grain is generated at reduced resolution, upscaled later.
    if grain_size > 1:
        rows = max(1, int(h / grain_size))
        cols = max(1, int(w / grain_size))
    else:
        rows, cols = h, w

    def fresh_noise():
        # Draw row-major so the RNG consumption order stays deterministic.
        if colored:
            return np.array([[[rng.gaussian(0, 1) for _ in range(3)]
                              for _ in range(cols)]
                             for _ in range(rows)]) * intensity * 50
        plane = np.array([[rng.gaussian(0, 1)
                           for _ in range(cols)]
                          for _ in range(rows)]) * intensity * 50
        return np.stack([plane, plane, plane], axis=-1)

    if temporal_var >= 1.0 or state.get("static_noise") is None:
        noise = fresh_noise()
        if temporal_var < 1.0:
            # Cache for later blending. NOTE: the cached grain already
            # includes the current intensity scaling.
            state["static_noise"] = noise
    else:
        # Blend the frozen grain with newly drawn grain.
        noise = state["static_noise"] * (1 - temporal_var) + fresh_noise() * temporal_var

    # Upscale coarse grain back to frame resolution.
    if grain_size > 1:
        noise = cv2.resize(noise.astype(np.float32), (w, h), interpolation=cv2.INTER_LINEAR)

    grainy = frame.astype(np.float32) + noise

    state["frame_count"] = frame_count + 1

    return np.clip(grainy, 0, 255).astype(np.uint8), state
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply fisheye/barrel distortion to a video frame.

    Positive strength bulges outward (fisheye), negative pinches inward
    (pincushion). The distortion center is configurable.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - strength: distortion strength -1..1 (default 0.3)
            - center_x: horizontal center 0-1 (default 0.5)
            - center_y: vertical center 0-1 (default 0.5)
            - zoom_correction: auto-zoom hides edges for fisheye (default True)
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    strength = params.get("strength", 0.3)
    center_x = params.get("center_x", 0.5)
    center_y = params.get("center_y", 0.5)
    zoom_correction = params.get("zoom_correction", True)

    # Zero strength is the identity transform.
    if strength == 0:
        return frame, state

    h, w = frame.shape[:2]
    cx = w * center_x
    cy = h * center_y

    # Normalized coordinates relative to the chosen center.
    yy, xx = np.mgrid[0:h, 0:w].astype(np.float32)
    nx = (xx - cx) / (w / 2)
    ny = (yy - cy) / (h / 2)
    radius = np.sqrt(nx**2 + ny**2)

    if strength > 0:
        # Barrel (fisheye) model.
        warped = radius * (1 + strength * radius**2)
    else:
        # Pincushion model; small epsilon keeps the denominator safe.
        warped = radius / (1 - strength * radius**2 + 0.001)

    ratio = np.where(radius > 0, warped / radius, 1)

    if zoom_correction and strength > 0:
        # Zoom in slightly so the bulge does not expose the border.
        ratio = ratio / (1 + strength * 0.5)

    # Map back to source pixel coordinates and resample.
    src_x = (nx * ratio * (w / 2) + cx).astype(np.float32)
    src_y = (ny * ratio * (h / 2) + cy).astype(np.float32)

    result = cv2.remap(
        frame, src_x, src_y,
        cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_REFLECT
    )

    return result, state
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Flip a video frame horizontally and/or vertically.

    Accepts either the boolean ``horizontal``/``vertical`` parameters or
    a single ``direction`` string ("horizontal", "vertical", or "both"),
    which is the parameter name the bundled flip.sexp recipe passes.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - horizontal: flip left-right (default False)
            - vertical: flip top-bottom (default False)
            - direction: "horizontal" | "vertical" | "both"; convenience
              alias that turns on the corresponding flags (default None)
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    horizontal = params.get("horizontal", False)
    vertical = params.get("vertical", False)

    # Bug fix: flip.sexp invokes this effect with :direction, which the
    # original implementation silently ignored (making the recipe a no-op).
    direction = params.get("direction")
    if direction in ("horizontal", "both"):
        horizontal = True
    if direction in ("vertical", "both"):
        vertical = True

    result = frame

    if horizontal:
        result = np.flip(result, axis=1)

    if vertical:
        result = np.flip(result, axis=0)

    # np.flip returns a view; make it contiguous for downstream code.
    if horizontal or vertical:
        result = np.ascontiguousarray(result)

    return result, state
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Shift hue of a video frame.

    Accepts the documented ``shift`` parameter and, as an alias,
    ``degrees`` — the parameter name the bundled hue_shift.sexp recipe
    passes. ``shift`` wins when both are supplied.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - shift: hue rotation in degrees (default 0)
            - degrees: alias for shift
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    # Bug fix: hue_shift.sexp passes :degrees, which the original
    # implementation ignored — the effect was a no-op from that recipe.
    shift = params.get("shift", params.get("degrees", 0))

    if shift == 0:
        return frame, state

    # Convert RGB to HSV (OpenCV uses H: 0-179, S: 0-255, V: 0-255)
    hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)

    # Shift hue (OpenCV hue is 0-179, so degrees are halved);
    # int16 avoids uint8 wraparound before the modulo.
    hue_shift = int((shift % 360) / 2)
    hsv[:, :, 0] = (hsv[:, :, 0].astype(np.int16) + hue_shift) % 180

    # Convert back to RGB
    result = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)

    return result, state
Useful for beat-reactive flashing effects. + +@param intensity float + @range 0 100 + @default 100 + Intensity of the inversion effect (0 = original, 100 = fully inverted). + Bind to audio analysis for reactive effects. + +@example + (effect invert) + +@example + (effect invert :intensity 50) + +@example + ;; Beat-reactive inversion + (effect invert :intensity (bind bass :range [0 100] :transform sqrt)) +""" + +import numpy as np + + +def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: + """ + Invert colors of a video frame. + + Args: + frame: Input frame as numpy array (H, W, 3) RGB uint8 + params: Effect parameters + - intensity: 0-100, how much to invert (default 100) + state: Persistent state dict (unused for this effect) + + Returns: + Tuple of (processed_frame, new_state) + """ + intensity = params.get("intensity", 100) + + # Normalize intensity to 0-1 + t = intensity / 100.0 + + if t <= 0: + return frame, state + + if t >= 1: + return (255 - frame), state + + # Partial inversion: blend between original and inverted + inverted = 255 - frame + result = (frame * (1 - t) + inverted * t).astype(np.uint8) + + return result, state diff --git a/effects/invert.sexp b/effects/invert.sexp new file mode 100644 index 0000000..a355967 --- /dev/null +++ b/effects/invert.sexp @@ -0,0 +1,14 @@ +;; Invert effect - inverts all colors +;; Usage: python3 plan.py effects/invert.sexp | python3 execute.py - -d . 
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply kaleidoscope effect to a video frame.

    Folds the image into pie-slice segments around a center point: every
    pixel's polar angle is wrapped into the first segment (mirroring odd
    segments), then the frame is resampled from those source coordinates.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - segments: number of segments 3-16 (default 6)
            - rotation: base rotation degrees (default 0)
            - rotation_speed: degrees per second (default 0)
            - center_x: center X 0-1 (default 0.5)
            - center_y: center Y 0-1 (default 0.5)
            - zoom: zoom factor 0.5-3 (default 1.0)
        state: Persistent state dict

    Returns:
        Tuple of (processed_frame, new_state)
    """
    # Clamp all parameters into their documented ranges.
    segments = max(3, min(int(params.get("segments", 6)), 16))
    rotation = params.get("rotation", 0)
    rotation_speed = params.get("rotation_speed", 0)
    center_x = params.get("center_x", 0.5)
    center_y = params.get("center_y", 0.5)
    zoom = max(0.5, min(params.get("zoom", 1.0), 3.0))

    # Get time for animation.
    # NOTE(review): `_time` is presumably injected by the pipeline in seconds
    # -- confirm against the executor. Rotation is derived from it directly;
    # the `cumulative_rotation` state key advertised in the module docstring
    # is never written here.
    t = params.get("_time", 0)

    if state is None:
        state = {}

    h, w = frame.shape[:2]

    # Calculate center in pixels
    cx = int(w * center_x)
    cy = int(h * center_y)

    # Total rotation including time-based animation
    total_rotation = rotation + rotation_speed * t

    # Calculate the angle per segment (radians per pie slice)
    segment_angle = 2 * np.pi / segments

    # Create per-pixel coordinate maps for the inverse mapping
    y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32)

    # Translate to center
    x_centered = x_coords - cx
    y_centered = y_coords - cy

    # Convert to polar coordinates
    r = np.sqrt(x_centered**2 + y_centered**2)
    theta = np.arctan2(y_centered, x_centered)

    # Apply rotation (subtracting rotates the sampled source, not the output)
    theta = theta - np.deg2rad(total_rotation)

    # Fold angle into first segment and mirror
    theta_normalized = theta % (2 * np.pi)
    segment_idx = (theta_normalized / segment_angle).astype(int)
    theta_in_segment = theta_normalized - segment_idx * segment_angle

    # Mirror alternating segments so adjacent slices join seamlessly
    mirror_mask = (segment_idx % 2) == 1
    theta_in_segment = np.where(mirror_mask, segment_angle - theta_in_segment, theta_in_segment)

    # Apply zoom (dividing the radius samples a smaller source region)
    r = r / zoom

    # Convert back to Cartesian (source coordinates)
    src_x = (r * np.cos(theta_in_segment) + cx).astype(np.float32)
    src_y = (r * np.sin(theta_in_segment) + cy).astype(np.float32)

    # Remap; BORDER_REFLECT fills samples that fall outside the frame
    result = cv2.remap(frame, src_x, src_y,
                       cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)

    return result, state
+ +@param mode string + @enum left right top bottom quad + @default left + Mirror mode: + - left: Left half mirrored to right + - right: Right half mirrored to left + - top: Top half mirrored to bottom + - bottom: Bottom half mirrored to top + - quad: Four-way symmetry (kaleidoscope) + +@example + (effect mirror :mode "left") + +@example + (effect mirror :mode "quad") ; kaleidoscope effect +""" + +import numpy as np + + +def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: + """ + Create mirror/reflection effects. + + Args: + frame: Input frame as numpy array (H, W, 3) RGB uint8 + params: Effect parameters + - mode: mirror mode (default "left") + state: Persistent state dict (unused) + + Returns: + Tuple of (processed_frame, new_state) + """ + mode = params.get("mode", "left") + + h, w = frame.shape[:2] + result = frame.copy() + + if mode == "left": + # Mirror left half to right + half = w // 2 + result[:, half:] = np.flip(result[:, :half], axis=1)[:, :w - half] + + elif mode == "right": + # Mirror right half to left + half = w // 2 + result[:, :half] = np.flip(result[:, half:], axis=1)[:, :half] + + elif mode == "top": + # Mirror top half to bottom + half = h // 2 + result[half:, :] = np.flip(result[:half, :], axis=0)[:h - half, :] + + elif mode == "bottom": + # Mirror bottom half to top + half = h // 2 + result[:half, :] = np.flip(result[half:, :], axis=0)[:half, :] + + elif mode == "quad": + # Four-way symmetry: use top-left quadrant + half_h = h // 2 + half_w = w // 2 + + # Get top-left quadrant + tl = result[:half_h, :half_w] + + # Mirror to top-right + result[:half_h, half_w:] = np.flip(tl, axis=1)[:, :w - half_w] + + # Mirror top half to bottom + result[half_h:, :] = np.flip(result[:half_h, :], axis=0)[:h - half_h, :] + + return np.ascontiguousarray(result), state diff --git a/effects/mirror.sexp b/effects/mirror.sexp new file mode 100644 index 0000000..b30008f --- /dev/null +++ b/effects/mirror.sexp @@ -0,0 +1,19 @@ +;; Mirror effect - 
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply motion blur effect to a video frame.

    Two modes: a radial zoom blur (accumulating progressively scaled copies
    around a center point) or a directional blur (convolving with a line
    kernel oriented along `angle`).

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - amount: blur length (default 20)
            - angle: blur direction (default 0)
            - zoom: radial zoom blur (default False)
            - zoom_center_x/y: zoom center (default 0.5)
        state: Persistent state dict

    Returns:
        Tuple of (processed_frame, new_state)
    """
    amount = max(1, int(params.get("amount", 20)))
    angle = params.get("angle", 0)
    zoom = params.get("zoom", False)
    zoom_center_x = params.get("zoom_center_x", 0.5)
    zoom_center_y = params.get("zoom_center_y", 0.5)

    if state is None:
        state = {}

    # amount == 1 is a no-op; skip all work.
    if amount <= 1:
        return frame, state

    h, w = frame.shape[:2]

    if zoom:
        # Radial zoom blur
        cx, cy = int(w * zoom_center_x), int(h * zoom_center_y)

        # Accumulate multiple scaled versions; each sample is pre-divided by
        # num_samples so the accumulator ends as a plain average.
        result = np.zeros_like(frame, dtype=np.float32)
        # Cap the number of warps at 20 to bound per-frame cost; larger
        # `amount` instead widens the scale range below.
        num_samples = min(amount, 20)

        for i in range(num_samples):
            # Scales run from 1.0 up toward 1 + amount/100.
            scale = 1.0 + (i / num_samples) * (amount / 100.0)
            M = cv2.getRotationMatrix2D((cx, cy), 0, scale)
            scaled = cv2.warpAffine(frame, M, (w, h), borderMode=cv2.BORDER_REFLECT)
            result += scaled.astype(np.float32) / num_samples

        return result.astype(np.uint8), state
    else:
        # Directional motion blur using a line kernel
        angle_rad = np.deg2rad(angle)
        dx = np.cos(angle_rad)
        dy = np.sin(angle_rad)

        # Create motion blur kernel (amount x amount)
        kernel_size = amount
        kernel = np.zeros((kernel_size, kernel_size), dtype=np.float32)

        # Draw a line through the kernel center along (dx, dy).
        # NOTE(review): the int() truncation can leave gaps in the rasterized
        # line for steep angles, slightly weakening the blur -- presumably an
        # accepted approximation; confirm if exact line coverage matters.
        cx, cy = kernel_size // 2, kernel_size // 2
        for i in range(kernel_size):
            t = (i - cx) / max(1, cx)
            x = int(cx + t * cx * dx)
            y = int(cy + t * cy * dy)
            if 0 <= x < kernel_size and 0 <= y < kernel_size:
                kernel[y, x] = 1

        # Normalize kernel so overall brightness is preserved
        kernel_sum = kernel.sum()
        if kernel_sum > 0:
            kernel /= kernel_sum

        # Apply kernel (same depth as input, border replicated by default)
        result = cv2.filter2D(frame, -1, kernel)

        return result, state
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply neon glow effect to a video frame.

    Canny edges are tinted with the glow color, blurred into a halo,
    brightened, overlaid with the crisp edge lines, and screen-blended with
    a dimmed copy of the original frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - glow_radius: blur radius (default 15)
            - glow_intensity: brightness (default 2.0)
            - edge_low: canny low threshold (default 50)
            - edge_high: canny high threshold (default 150)
            - color_r/g/b: glow color (default cyan 0,255,255)
            - background: original visibility (default 0.3)
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    glow_radius = int(params.get("glow_radius", 15))
    glow_intensity = params.get("glow_intensity", 2.0)
    edge_low = int(params.get("edge_low", 50))
    edge_high = int(params.get("edge_high", 150))
    color_r = int(params.get("color_r", 0))
    color_g = int(params.get("color_g", 255))
    color_b = int(params.get("color_b", 255))
    background = params.get("background", 0.3)

    rows, cols = frame.shape[:2]
    tint = np.array([color_r, color_g, color_b], dtype=np.float32)

    # Detect edges on a lightly smoothed grayscale copy.
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    smoothed = cv2.GaussianBlur(gray, (5, 5), 0)
    edge_f = cv2.Canny(smoothed, edge_low, edge_high).astype(np.float32)

    # Tint the edge map with the glow color.
    glow = np.zeros((rows, cols, 3), dtype=np.float32)
    for ch in range(3):
        glow[:, :, ch] = edge_f * (tint[ch] / 255.0)

    # Soften the tinted edges into a halo.
    if glow_radius > 0:
        k = glow_radius * 2 + 1
        glow = cv2.GaussianBlur(glow, (k, k), 0)

    # Brighten the halo.
    glow = glow * glow_intensity

    # Re-draw the crisp edge lines at full strength on top of the halo.
    crisp = np.zeros((rows, cols, 3), dtype=np.float32)
    for ch in range(3):
        crisp[:, :, ch] = edge_f * (tint[ch] / 255.0) * 255
    glow = np.maximum(glow, crisp)

    # Blend with original
    if background > 0:
        a = frame.astype(np.float32) / 255.0 * background
        b = glow / 255.0
        out = (1 - (1 - a) * (1 - b)) * 255  # Screen blend
    else:
        out = glow

    return np.clip(out, 0, 255).astype(np.uint8), state
# Import DeterministicRNG from same directory.
# NOTE(review): prepending the effects dir to sys.path makes this package's
# local random.py shadow the stdlib `random` module for any import executed
# after this point in the process -- confirm nothing else needs stdlib random.
_effects_dir = Path(__file__).parent
if str(_effects_dir) not in sys.path:
    sys.path.insert(0, str(_effects_dir))
from random import DeterministicRNG


def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply noise effect to a video frame.

    Draws every sample from a seeded DeterministicRNG kept in `state`, so
    output is reproducible across runs for a fixed seed.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - intensity: noise strength 0-1 (default 0.2)
            - mode: "gaussian" | "uniform" | "salt_pepper" | "scanline"
            - colored: per-channel noise instead of monochrome (default False)
            - animate: new noise each frame via the persistent RNG
            - seed: RNG seed (default 42)
        state: Persistent state dict (holds "rng")

    Returns:
        Tuple of (processed_frame, new_state)
    """
    intensity = params.get("intensity", 0.2)
    mode = params.get("mode", "gaussian")
    colored = params.get("colored", False)
    animate = params.get("animate", True)
    seed = int(params.get("seed", 42))

    if state is None:
        state = {}

    # Zero intensity is a no-op.
    if intensity <= 0:
        return frame, state

    # Initialize RNG once and keep it in state so the sequence continues
    # across frames (that continuation is what animates the noise).
    # NOTE(review): the `animate` parameter is read above but never consulted
    # below -- presumably noise should be frozen when animate is False;
    # confirm intended behavior.
    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)
    rng = state["rng"]

    h, w = frame.shape[:2]
    result = frame.astype(np.float32)

    # The per-pixel Python loops below are slow but keep a strict,
    # reproducible RNG call order (row-major, channel-last); do not vectorize
    # without preserving that order.
    if mode == "gaussian":
        # Gaussian noise, sigma scaled by intensity
        if colored:
            noise = np.array([[[rng.gaussian(0, intensity * 50) for _ in range(3)]
                               for _ in range(w)]
                              for _ in range(h)])
        else:
            # Monochrome: one sample per pixel, replicated across channels
            noise_2d = np.array([[rng.gaussian(0, intensity * 50)
                                  for _ in range(w)]
                                 for _ in range(h)])
            noise = np.stack([noise_2d, noise_2d, noise_2d], axis=-1)
        result = result + noise

    elif mode == "uniform":
        # Uniform random noise in [-100*intensity, 100*intensity]
        if colored:
            noise = np.array([[[rng.uniform(-intensity * 100, intensity * 100) for _ in range(3)]
                               for _ in range(w)]
                              for _ in range(h)])
        else:
            noise_2d = np.array([[rng.uniform(-intensity * 100, intensity * 100)
                                  for _ in range(w)]
                                 for _ in range(h)])
            noise = np.stack([noise_2d, noise_2d, noise_2d], axis=-1)
        result = result + noise

    elif mode == "salt_pepper":
        # Salt and pepper noise: each pixel flips to pure black or white
        # with probability intensity * 0.1
        for y in range(h):
            for x in range(w):
                if rng.uniform() < intensity * 0.1:
                    if rng.uniform() < 0.5:
                        result[y, x] = [0, 0, 0]
                    else:
                        result[y, x] = [255, 255, 255]

    elif mode == "scanline":
        # Horizontal scanline noise: whole rows get a shared offset
        for y in range(h):
            if rng.uniform() < intensity * 0.2:
                noise_val = rng.uniform(-intensity * 100, intensity * 100)
                result[y] = result[y] + noise_val

    return np.clip(result, 0, 255).astype(np.uint8), state
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply outline effect to a video frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - edge_thickness: outline width (default 2)
            - threshold: edge sensitivity (default 100)
            - outline_color: RGB tuple (default [0,0,0])
            - fill_mode: original/solid/transparent (default original)
            - fill_color: RGB tuple for solid fill (default [255,255,255])
        state: Persistent state dict

    Returns:
        Tuple of (processed_frame, new_state)
    """
    thickness = max(1, min(int(params.get("edge_thickness", 2)), 10))
    threshold = params.get("threshold", 100)
    outline_color = params.get("outline_color", [0, 0, 0])
    fill_mode = params.get("fill_mode", "original")
    fill_color = params.get("fill_color", [255, 255, 255])

    state = {} if state is None else state

    rows, cols = frame.shape[:2]

    # Canny edges; the single `threshold` knob drives both hysteresis bounds.
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    edges = cv2.Canny(gray, int(threshold * 0.5), int(threshold))

    # Thicken the one-pixel Canny lines by dilation.
    if thickness > 1:
        kernel = np.ones((thickness, thickness), np.uint8)
        edges = cv2.dilate(edges, kernel, iterations=1)

    # Build the background layer according to the fill mode.
    if fill_mode == "original":
        canvas = frame.copy()
    elif fill_mode == "solid":
        if isinstance(fill_color, (list, tuple)) and len(fill_color) >= 3:
            canvas = np.full((rows, cols, 3), fill_color[:3], dtype=np.uint8)
        else:
            # Malformed fill_color: fall back to white.
            canvas = np.full((rows, cols, 3), 255, dtype=np.uint8)
    else:  # transparent/none
        canvas = np.zeros((rows, cols, 3), dtype=np.uint8)

    # Stamp the outline color wherever an edge pixel survives.
    if isinstance(outline_color, (list, tuple)) and len(outline_color) >= 3:
        stroke = np.array(outline_color[:3], dtype=np.uint8)
    else:
        stroke = np.array([0, 0, 0], dtype=np.uint8)

    canvas[edges > 0] = stroke

    return canvas, state
+ +@example + (effect pixelate :block_size 16) + +@example + ;; Beat-reactive pixelation + (effect pixelate :block_size (bind bass :range [4 32])) +""" + +import numpy as np +import cv2 + + +def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: + """ + Apply pixelate effect to a video frame. + + Args: + frame: Input frame as numpy array (H, W, 3) RGB uint8 + params: Effect parameters + - block_size: pixel block size (default 8) + - maintain_edges: preserve edges (default False) + state: Persistent state dict + + Returns: + Tuple of (processed_frame, new_state) + """ + block_size = max(2, min(int(params.get("block_size", 8)), 64)) + maintain_edges = params.get("maintain_edges", False) + + if state is None: + state = {} + + h, w = frame.shape[:2] + + # Scale down then up to create pixelation + small_h = max(1, h // block_size) + small_w = max(1, w // block_size) + + small = cv2.resize(frame, (small_w, small_h), interpolation=cv2.INTER_AREA) + result = cv2.resize(small, (w, h), interpolation=cv2.INTER_NEAREST) + + if maintain_edges: + # Detect edges in original and overlay + gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) + edges = cv2.Canny(gray, 50, 150) + edges_dilated = cv2.dilate(edges, np.ones((2, 2), np.uint8)) + edge_mask = edges_dilated > 0 + result[edge_mask] = frame[edge_mask] + + return result, state diff --git a/effects/pixelate.sexp b/effects/pixelate.sexp new file mode 100644 index 0000000..7b627d2 --- /dev/null +++ b/effects/pixelate.sexp @@ -0,0 +1,20 @@ +;; Pixelate effect - reduce resolution with blocky pixels +;; Usage: python3 plan.py effects/pixelate.sexp -p block_size=16 | python3 execute.py - -d . 
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply pixel sorting to a video frame.

    Rows are scanned for contiguous runs of pixels whose sort value falls
    inside [threshold_low, threshold_high]; each run is reordered by that
    value, producing the characteristic streaked glitch look.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - sort_by: property to sort by (default "lightness")
            - threshold_low: min brightness to sort (default 50)
            - threshold_high: max brightness to sort (default 200)
            - angle: 0 = horizontal, 90 = vertical (default 0)
            - reverse: reverse sort order (default False)
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    sort_by = params.get("sort_by", "lightness")
    threshold_low = params.get("threshold_low", 50)
    threshold_high = params.get("threshold_high", 200)
    angle = params.get("angle", 0)
    reverse = params.get("reverse", False)

    h, w = frame.shape[:2]

    # Rotate for non-horizontal sorting: angles near 90 are handled by
    # transposing so the row loop below effectively sorts columns.
    if 45 <= (angle % 180) <= 135:
        frame = np.transpose(frame, (1, 0, 2))
        h, w = frame.shape[:2]
        rotated = True
    else:
        rotated = False

    result = frame.copy()

    # Get per-pixel sort values (H, W float32)
    sort_values = _get_sort_values(frame, sort_by)

    # Create mask of pixels to sort (inclusive threshold band)
    mask = (sort_values >= threshold_low) & (sort_values <= threshold_high)

    # Sort each row independently
    for y in range(h):
        row = result[y].copy()
        row_mask = mask[y]
        row_values = sort_values[y]

        # Find contiguous segments to sort
        segments = _find_segments(row_mask)

        for start, end in segments:
            if end - start > 1:
                segment_values = row_values[start:end]
                # NOTE(review): np.argsort defaults to an unstable quicksort,
                # so equal-valued pixels may land in arbitrary order --
                # presumably acceptable for a glitch effect.
                sort_indices = np.argsort(segment_values)
                if reverse:
                    sort_indices = sort_indices[::-1]
                row[start:end] = row[start:end][sort_indices]

        result[y] = row

    # Rotate back if needed (transpose is its own inverse)
    if rotated:
        result = np.transpose(result, (1, 0, 2))

    return np.ascontiguousarray(result), state


def _get_sort_values(frame: np.ndarray, sort_by: str) -> np.ndarray:
    """Get values to sort pixels by.

    Returns an (H, W) float32 map; unknown `sort_by` names fall back to
    grayscale lightness.
    """
    if sort_by == "lightness":
        return cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY).astype(np.float32)
    elif sort_by == "hue":
        # OpenCV hue channel is 0-179 (half-degrees)
        hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
        return hsv[:, :, 0].astype(np.float32)
    elif sort_by == "saturation":
        hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
        return hsv[:, :, 1].astype(np.float32)
    elif sort_by == "red":
        return frame[:, :, 0].astype(np.float32)
    elif sort_by == "green":
        return frame[:, :, 1].astype(np.float32)
    elif sort_by == "blue":
        return frame[:, :, 2].astype(np.float32)
    # Fallback: treat unrecognized keys as "lightness"
    return cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY).astype(np.float32)


def _find_segments(mask) -> list:
    """Find contiguous True segments in mask.

    Returns a list of (start, end) index pairs with `end` exclusive, in
    left-to-right order; a run extending to the end of the mask is closed
    at len(mask).
    """
    segments = []
    start = None
    for i, val in enumerate(mask):
        if val and start is None:
            start = i
        elif not val and start is not None:
            segments.append((start, i))
            start = None
    if start is not None:
        segments.append((start, len(mask)))
    return segments
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply posterize (color quantization) to a video frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - levels: color levels per channel, clamped to [2, 256] (default 8)
            - dither: apply Floyd-Steinberg dithering (default False)
        state: Persistent state dict (initialized to {} if None; otherwise unused)

    Returns:
        Tuple of (processed_frame, new_state)

    Note:
        The previous dither loop skipped column 0, the last column, and the
        last row entirely, leaving un-posterized borders. The loop now covers
        every pixel and guards each error-diffusion neighbor at the edges.
    """
    levels = max(2, min(int(params.get("levels", 8)), 256))
    dither = params.get("dither", False)

    if state is None:
        state = {}

    step = 256 // levels

    if not dither:
        # Simple quantization: snap each channel down to its bucket floor.
        return ((frame // step) * step).astype(np.uint8), state

    # Floyd-Steinberg dithering: quantize each pixel to the nearest level
    # and push the rounding error onto the right/lower neighbors.
    result = frame.astype(np.float32).copy()
    h, w = result.shape[:2]

    for y in range(h):
        for x in range(w):
            for c in range(3):
                old_val = result[y, x, c]
                new_val = np.round(old_val / step) * step
                result[y, x, c] = new_val
                error = old_val - new_val

                # Distribute the error, skipping neighbors outside the frame.
                if x + 1 < w:
                    result[y, x + 1, c] += error * 7 / 16
                if y + 1 < h:
                    if x > 0:
                        result[y + 1, x - 1, c] += error * 3 / 16
                    result[y + 1, x, c] += error * 5 / 16
                    if x + 1 < w:
                        result[y + 1, x + 1, c] += error * 1 / 16

    return np.clip(result, 0, 255).astype(np.uint8), state
-o output.mp4 +;; +;; Parameters: +;; levels: number of color levels per channel (2 to 32, default: 6) +;; lower = more posterized, higher = more gradual + +(recipe "posterize" + :version "1.0" + :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) + + (effect posterize :path "sexp_effects/effects/posterize.sexp") + + ;; Default parameter (overridden by -p levels=N) + (def levels 6) + + (def video (source :path "monday.webm")) + (def clip (-> video (segment :start 0 :duration 10))) + (def result (-> clip (effect posterize :levels levels))) + result) diff --git a/effects/random.py b/effects/random.py new file mode 100644 index 0000000..b6d866d --- /dev/null +++ b/effects/random.py @@ -0,0 +1,217 @@ +# /// script +# requires-python = ">=3.10" +# dependencies = ["numpy"] +# /// +""" +@effect random +@version 1.0.0 +@author artdag + +@description +Deterministic random number generator for use in recipes and effects. +Given the same seed, produces the same sequence of values every time. + +This effect doesn't modify the frame - it provides random values that +can be bound to other effect parameters. The random state persists +across frames for consistent sequences. + +@param seed int + @default 42 + Random seed for reproducibility. Same seed = same sequence. + +@param min float + @default 0 + Minimum output value. + +@param max float + @default 1 + Maximum output value. + +@param mode string + @enum uniform gaussian integer choice + @default uniform + Distribution type: + - uniform: even distribution between min and max + - gaussian: normal distribution (min=mean, max=stddev) + - integer: random integers between min and max (inclusive) + - choice: randomly pick from a list (use choices param) + +@param choices list + @default [] + List of values to choose from (for mode=choice). + +@param step_every int + @default 1 + Only generate new value every N frames (1 = every frame). 
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Generate deterministic random values; the frame passes through unchanged.

    The drawn value is published in ``state["value"]`` (and cached in
    ``state["current_value"]``) so the recipe/executor can bind it to other
    effect parameters. The same seed always yields the same sequence.

    Args:
        frame: Input frame (returned untouched)
        params: Effect parameters
            - seed: random seed (default 42)
            - min: minimum value (default 0); mean for gaussian mode
            - max: maximum value (default 1); stddev for gaussian mode
            - mode: "uniform" / "gaussian" / "integer" / "choice" (default uniform)
            - choices: candidate list for choice mode
            - step_every: frames between fresh draws (default 1)
        state: Persistent state dict (rng, frame_count, current_value)

    Returns:
        Tuple of (frame, state_with_random_value)
    """
    seed = int(params.get("seed", 42))
    lo = params.get("min", 0)
    hi = params.get("max", 1)
    mode = params.get("mode", "uniform")
    options = params.get("choices", [])
    step_every = max(1, int(params.get("step_every", 1)))

    state = {} if state is None else state

    # Lazily seed the generator on the first frame.
    if "rng" not in state:
        state["rng"] = np.random.RandomState(seed)
        state["frame_count"] = 0
        state["current_value"] = None

    rng = state["rng"]
    count = state["frame_count"]

    # Draw a fresh value every step_every frames (and always on the first).
    if count % step_every == 0 or state["current_value"] is None:
        if mode == "gaussian":
            # min acts as the mean, max as the standard deviation.
            drawn = rng.normal(lo, hi)
        elif mode == "integer":
            drawn = rng.randint(int(lo), int(hi) + 1)
        elif mode == "choice" and options:
            drawn = options[rng.randint(0, len(options))]
        else:
            # "uniform", unknown modes, and "choice" with no options.
            drawn = rng.uniform(lo, hi)
        state["current_value"] = drawn

    state["frame_count"] = count + 1
    state["value"] = state["current_value"]

    return frame, state


# Standalone RNG class for use in other effects
class DeterministicRNG:
    """
    Seeded wrapper around ``np.random.RandomState`` for reproducible draws.

    Usage in effects:
        from effects.random import DeterministicRNG

        def process_frame(frame, params, state):
            if "rng" not in state:
                state["rng"] = DeterministicRNG(params.get("seed", 42))
            rng = state["rng"]

            value = rng.uniform(0, 1)
            integer = rng.randint(0, 10)
            choice = rng.choice(["a", "b", "c"])
    """

    def __init__(self, seed: int = 42):
        """Create a generator seeded with ``seed``."""
        self._seed = seed
        self._rng = np.random.RandomState(seed)

    def seed(self, seed: int):
        """Re-seed, restarting the sequence from scratch."""
        self._seed = seed
        self._rng = np.random.RandomState(seed)

    def uniform(self, low: float = 0, high: float = 1) -> float:
        """Uniform float drawn from [low, high)."""
        return self._rng.uniform(low, high)

    def randint(self, low: int, high: int) -> int:
        """Uniform integer drawn from [low, high], inclusive of both ends."""
        return self._rng.randint(low, high + 1)

    def gaussian(self, mean: float = 0, stddev: float = 1) -> float:
        """Normally distributed float with the given mean and stddev."""
        return self._rng.normal(mean, stddev)

    def choice(self, items: list):
        """One element picked uniformly from ``items``; None when empty."""
        if items:
            return items[self._rng.randint(0, len(items))]
        return None

    def shuffle(self, items: list) -> list:
        """A shuffled copy of ``items``; the input list is left untouched."""
        copied = list(items)
        self._rng.shuffle(copied)
        return copied

    def sample(self, items: list, n: int) -> list:
        """``n`` distinct items drawn without replacement."""
        if n >= len(items):
            return self.shuffle(items)
        picked = self._rng.choice(len(items), n, replace=False)
        return [items[i] for i in picked]

    def weighted_choice(self, items: list, weights: list):
        """One element picked with probability proportional to ``weights``."""
        if not items or not weights:
            return None
        probs = np.array(weights, dtype=float)
        probs /= probs.sum()
        return items[self._rng.choice(len(items), p=probs)]

    @property
    def state(self) -> dict:
        """Serializable snapshot of the seed and generator state."""
        return {"seed": self._seed, "state": self._rng.get_state()}

    @classmethod
    def from_state(cls, state: dict) -> 'DeterministicRNG':
        """Rebuild a generator from a ``state`` snapshot."""
        restored = cls(state["seed"])
        restored._rng.set_state(state["state"])
        return restored
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Offset the red and blue channels in opposite directions (chromatic
    aberration); the green channel stays put.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - amount: offset in pixels (default 10)
            - angle: split direction in degrees (default 0)
            - red_offset: explicit red-channel X offset (overrides amount/angle)
            - blue_offset: explicit blue-channel X offset (overrides amount/angle)
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    amount = params.get("amount", 10)
    angle = params.get("angle", 0)
    red_override = params.get("red_offset")
    blue_override = params.get("blue_offset")

    if red_override is None and blue_override is None:
        # Derive opposite offsets from polar (amount, angle).
        theta = math.radians(angle)
        dx = int(amount * math.cos(theta))
        dy = int(amount * math.sin(theta))
        red_shift, blue_shift = (dx, dy), (-dx, -dy)
    else:
        # Explicit per-channel X offsets; Y stays 0 in override mode.
        red_shift = (int(red_override or 0), 0)
        blue_shift = (int(blue_override or 0), 0)

    if red_shift == (0, 0) and blue_shift == (0, 0):
        # Nothing would move -- pass the frame through untouched.
        return frame, state

    h, w = frame.shape[:2]

    def _translate(channel, shift):
        # Shift one channel, replicating edge pixels into the exposed gap.
        matrix = np.float32([[1, 0, shift[0]], [0, 1, shift[1]]])
        return cv2.warpAffine(channel, matrix, (w, h),
                              borderMode=cv2.BORDER_REPLICATE)

    shifted_r = _translate(frame[:, :, 0], red_shift)
    shifted_b = _translate(frame[:, :, 2], blue_shift)

    result = np.stack([shifted_r, frame[:, :, 1], shifted_b], axis=-1)
    return result, state
b/effects/rgb_split.sexp new file mode 100644 index 0000000..3708bac --- /dev/null +++ b/effects/rgb_split.sexp @@ -0,0 +1,19 @@ +;; RGB split effect - chromatic aberration +;; Usage: python3 plan.py effects/rgb_split.sexp -p offset_x=20 | python3 execute.py - -d . -o output.mp4 +;; +;; Parameters: +;; offset_x: horizontal color channel offset in pixels (2 to 50, default: 20) + +(recipe "rgb_split" + :version "1.0" + :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) + + (effect rgb_split :path "sexp_effects/effects/rgb_split.sexp") + + ;; Default parameter (overridden by -p offset_x=N) + (def offset_x 20) + + (def video (source :path "monday.webm")) + (def clip (-> video (segment :start 0 :duration 10))) + (def result (-> clip (effect rgb_split :offset_x offset_x))) + result) diff --git a/effects/ripple.sexp b/effects/ripple.sexp new file mode 100644 index 0000000..865776d --- /dev/null +++ b/effects/ripple.sexp @@ -0,0 +1,21 @@ +;; Ripple effect - concentric wave distortion from center +;; Usage: python3 plan.py effects/ripple.sexp -p amplitude=20 -p frequency=6 | python3 execute.py - -d . 
-o output.mp4 +;; +;; Parameters: +;; amplitude: ripple strength in pixels (5 to 50, default: 20) +;; frequency: number of ripples (1 to 20, default: 6) + +(recipe "ripple" + :version "1.0" + :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) + + (effect ripple :path "sexp_effects/effects/ripple.sexp") + + ;; Default parameters (overridden by -p amplitude=N -p frequency=N) + (def amplitude 20) + (def frequency 6) + + (def video (source :path "monday.webm")) + (def clip (-> video (segment :start 0 :duration 10))) + (def result (-> clip (effect ripple :amplitude amplitude :frequency frequency))) + result) diff --git a/effects/rotate.py b/effects/rotate.py new file mode 100644 index 0000000..c0b1c28 --- /dev/null +++ b/effects/rotate.py @@ -0,0 +1,110 @@ +# /// script +# requires-python = ">=3.10" +# dependencies = ["numpy", "opencv-python"] +# /// +""" +@effect rotate +@version 1.0.0 +@author artdag + +@description +Rotates the frame by a specified angle. Supports two modes: +- Static: rotate by fixed angle +- Continuous: rotate by speed (degrees per frame), accumulating over time + +The effect uses state to track cumulative rotation for smooth continuous motion. + +@param angle float + @range -360 360 + @default 0 + Rotation angle in degrees. Positive = counter-clockwise. + +@param speed float + @range -30 30 + @default 0 + Rotation speed in degrees per frame (for continuous rotation). + When non-zero, angle is ignored and rotation accumulates. + +@param auto_zoom bool + @default false + If true, automatically zooms to crop out black corners. + +@state cumulative_angle float + Tracks total rotation for continuous mode. Persists across frames. 
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Rotate a frame about its center.

    Static mode uses ``angle`` directly; when ``speed`` is non-zero the
    rotation accumulates in state each frame (continuous spin) and ``angle``
    is ignored.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - angle: rotation angle in degrees (default 0)
            - speed: degrees added per frame for continuous mode (default 0)
            - auto_zoom: scale up to hide black corners (default False)
        state: Persistent state dict
            - cumulative_angle: running total for continuous mode

    Returns:
        Tuple of (processed_frame, new_state)
    """
    angle = params.get("angle", 0)
    speed = params.get("speed", 0)
    auto_zoom = params.get("auto_zoom", False)

    state = {} if state is None else state

    if speed != 0:
        # Continuous mode: accumulate rotation across frames.
        total = state.get("cumulative_angle", 0) + speed
        state["cumulative_angle"] = total
        angle = total

    if angle == 0:
        return frame, state

    h, w = frame.shape[:2]

    zoom = 1.0
    if auto_zoom:
        # Scale up just enough that the rotated rectangle still covers the
        # viewport, hiding the black corners exposed by the rotation.
        theta = math.radians(abs(angle) % 90)
        aspect = (w / h) if w >= h else (h / w)
        zoom = max(1.0, math.cos(theta) + aspect * math.sin(theta))

    matrix = cv2.getRotationMatrix2D((w / 2, h / 2), angle, zoom)
    rotated = cv2.warpAffine(
        frame,
        matrix,
        (w, h),
        flags=cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_CONSTANT,
        borderValue=(0, 0, 0),
    )
    return rotated, state
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Scale color saturation via the HSV color space.

    factor 0 -> grayscale, 1 -> unchanged, >1 -> more vivid.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - factor: saturation multiplier (default 1.0)
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    factor = params.get("factor", 1.0)

    # Identity factor: skip the two color-space conversions entirely.
    if factor == 1.0:
        return frame, state

    # Work in float so the multiply does not wrap uint8 values.
    hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV).astype(np.float32)
    scaled = np.clip(hsv[:, :, 1] * factor, 0, 255)
    hsv[:, :, 1] = scaled

    result = cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2RGB)
    return result, state
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Horizontally displace scan lines for a VHS/CRT glitch look.

    Each affected row is rolled sideways by a blend of a sine pattern and a
    seeded random shift, weighted by ``randomness``.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - amplitude: max shift in pixels (default 10)
            - frequency: lines per cycle (default 10; 0 coerced to 1)
            - randomness: 0 = pure sine, 1 = pure random (default 0.5)
            - line_gap: shift every Nth line (default 1)
            - seed: random seed (default 42)
        state: Persistent state dict
            - rng: DeterministicRNG instance

    Returns:
        Tuple of (processed_frame, new_state)
    """
    amplitude = params.get("amplitude", 10)
    frequency = params.get("frequency", 10) or 1  # guard against division by 0
    randomness = params.get("randomness", 0.5)
    line_gap = max(1, int(params.get("line_gap", 1)))
    seed = int(params.get("seed", 42))

    state = {} if state is None else state

    # Seed the generator once so the pattern is reproducible across frames.
    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)
    rng = state["rng"]

    if amplitude == 0:
        return frame, state

    h = frame.shape[0]
    result = frame.copy()
    two_pi = 2 * np.pi

    for row in range(0, h, line_gap):
        if randomness >= 1.0:
            offset = int(rng.uniform(-amplitude, amplitude))
        elif randomness <= 0:
            offset = int(amplitude * np.sin(two_pi * row / frequency))
        else:
            # Blend the deterministic wave with seeded jitter.
            wave = amplitude * np.sin(two_pi * row / frequency)
            jitter = rng.uniform(-amplitude, amplitude)
            offset = int(wave * (1 - randomness) + jitter * randomness)

        if offset != 0:
            result[row] = np.roll(result[row], offset, axis=0)

    return result, state
-o output.mp4 +;; +;; Parameters: +;; spacing: pixels between lines (1 to 8, default: 3) +;; intensity: line darkness (0 to 1, default: 0.4) + +(recipe "scanlines" + :version "1.0" + :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) + + (effect scanlines :path "sexp_effects/effects/scanlines.sexp") + + ;; Default parameters (overridden by -p spacing=N -p intensity=N) + (def spacing 3) + (def intensity 0.4) + + (def video (source :path "monday.webm")) + (def clip (-> video (segment :start 0 :duration 10))) + (def result (-> clip (effect scanlines :spacing spacing :intensity intensity))) + result) diff --git a/effects/scatter.py b/effects/scatter.py new file mode 100644 index 0000000..9886b22 --- /dev/null +++ b/effects/scatter.py @@ -0,0 +1,142 @@ +# /// script +# requires-python = ">=3.10" +# dependencies = ["numpy", "scipy"] +# /// +""" +@effect scatter +@version 1.0.0 +@author artdag + +@description +Scatter effect. Randomly redistributes pixels in local area creating +a dissolving, dispersed look. + +@param amount float + @range 0 100 + @default 10 + Scatter radius in pixels. Bind to energy for reactive dissolve. + +@param randomize_per_frame bool + @default false + Different scatter pattern each frame (vs. static pattern). + +@param grain float + @range 0 1 + @default 0 + Add film grain to scattered result. + +@param seed int + @default 42 + Random seed for reproducible patterns. + +@state rng DeterministicRNG + Random number generator for displacement. + +@state displacement_map ndarray + Cached displacement map for static mode. 
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply scatter (pixel dissolve) to a video frame.

    Each output pixel samples the input from a randomly displaced position,
    producing a dispersed, dissolving look.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - amount: scatter radius in pixels, clamped to [0, 100] (default 10)
            - randomize_per_frame: new pattern each frame (default False)
            - grain: add uniform noise scaled by grain*20 (default 0)
            - seed: random seed (default 42)
        state: Persistent state dict
            - rng: DeterministicRNG (its underlying RandomState drives draws)
            - displacement_map: cached (H, W, 2) map for static mode
            - last_size: (h, w) the cached map was built for

    Returns:
        Tuple of (processed_frame, new_state)

    Note:
        The displacement map and grain noise are now drawn as whole arrays
        from the wrapper's underlying ``RandomState``; the previous per-pixel
        Python loops made O(H*W) scalar RNG calls and were the dominant cost.
        Patterns remain deterministic for a given seed (values differ from
        the old scalar sequence).
    """
    amount = int(np.clip(params.get("amount", 10), 0, 100))
    randomize = params.get("randomize_per_frame", False)
    grain = params.get("grain", 0)
    seed = int(params.get("seed", 42))

    if state is None:
        state = {}

    if amount < 1:
        return frame, state

    # Keep the DeterministicRNG wrapper in state for API consistency with
    # other effects; vectorized draws go through its RandomState directly.
    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)
    raw = state["rng"]._rng

    h, w = frame.shape[:2]

    # Regenerate the map when animating, on first use, or on a size change.
    if randomize or "displacement_map" not in state or state.get("last_size") != (h, w):
        state["displacement_map"] = raw.uniform(
            -amount, amount, size=(h, w, 2)
        ).astype(np.float32)
        state["last_size"] = (h, w)
    disp = state["displacement_map"]

    # Source coordinates = identity grid + displacement, clamped in-bounds.
    y_grid, x_grid = np.mgrid[0:h, 0:w].astype(np.float32)
    src_y = np.clip(y_grid + disp[:, :, 0], 0, h - 1)
    src_x = np.clip(x_grid + disp[:, :, 1], 0, w - 1)

    # Bilinear resample from the displaced positions, channel by channel.
    result = np.zeros_like(frame)
    if frame.ndim > 2:
        for c in range(frame.shape[2]):
            result[:, :, c] = ndimage.map_coordinates(
                frame[:, :, c], [src_y, src_x], order=1, mode='reflect'
            )
    else:
        result = ndimage.map_coordinates(
            frame, [src_y, src_x], order=1, mode='reflect'
        )

    # Optional film grain (assumes a 3-channel frame, as the original did).
    if grain > 0:
        noise = raw.uniform(-grain * 20, grain * 20, size=(h, w, 3))
        result = np.clip(result.astype(np.float32) + noise, 0, 255).astype(np.uint8)

    return result, state
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Unsharp-mask sharpening: add back the difference between the frame and
    a Gaussian-blurred copy, scaled by ``amount``.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - amount: sharpening intensity (default 1.0)
            - radius: edge radius controlling the blur kernel (default 1)
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    amount = params.get("amount", 1.0)
    radius = params.get("radius", 1)

    # Zero or negative strength: nothing to do.
    if amount <= 0:
        return frame, state

    # GaussianBlur requires an odd kernel size; derive it from the radius.
    kernel = 2 * max(1, int(radius)) + 1
    softened = cv2.GaussianBlur(frame, (kernel, kernel), 0).astype(np.float32)

    base = frame.astype(np.float32)
    sharpened = base + amount * (base - softened)

    return np.clip(sharpened, 0, 255).astype(np.uint8), state
-o output.mp4 +;; +;; Parameters: +;; amount: sharpening strength (0.5 to 5, default: 2) +;; higher = sharper edges + +(recipe "sharpen" + :version "1.0" + :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) + + (effect sharpen :path "sexp_effects/effects/sharpen.sexp") + + ;; Default parameter (overridden by -p amount=N) + (def amount 2) + + (def video (source :path "monday.webm")) + (def clip (-> video (segment :start 0 :duration 10))) + (def result (-> clip (effect sharpen :amount amount))) + result) diff --git a/effects/shatter.py b/effects/shatter.py new file mode 100644 index 0000000..8fb690d --- /dev/null +++ b/effects/shatter.py @@ -0,0 +1,233 @@ +# /// script +# requires-python = ">=3.10" +# dependencies = ["numpy", "scipy"] +# /// +""" +@effect shatter +@version 1.0.0 +@author artdag + +@description +Shatter effect. Explodes the image into flying pieces that move +outward from a center point. Great for beat drops and transitions. + +@param intensity float + @range 0 1 + @default 0.5 + Explosion force (0 = no effect, 1 = full explosion). + +@param num_pieces int + @range 10 200 + @default 50 + Number of shatter pieces. + +@param center_x float + @range 0 1 + @default 0.5 + Explosion center X position. + +@param center_y float + @range 0 1 + @default 0.5 + Explosion center Y position. + +@param rotation_speed float + @range 0 5 + @default 1.0 + How fast pieces rotate as they fly. + +@param gravity float + @range 0 2 + @default 0.3 + Downward pull on pieces. + +@param fade_out bool + @default true + Fade pieces as they fly away. + +@param seed int + @default 42 + Random seed for piece positions. + +@state pieces list + List of piece positions and velocities. 
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply shatter effect to a video frame.

    The frame is carved into a grid of pieces which fly outward from an
    explosion center with per-piece velocity, rotation and gravity; the
    flight is a function of time elapsed since the pieces were created.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters (intensity, num_pieces, center_x,
            center_y, rotation_speed, gravity, fade_out, seed, _time)
        state: Persistent state dict (pieces, num_pieces, start_time)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    intensity = np.clip(params.get("intensity", 0.5), 0, 1)
    num_pieces = max(10, min(int(params.get("num_pieces", 50)), 200))
    center_x = params.get("center_x", 0.5)
    center_y = params.get("center_y", 0.5)
    rotation_speed = params.get("rotation_speed", 1.0)
    gravity = params.get("gravity", 0.3)
    fade_out = params.get("fade_out", True)
    seed = int(params.get("seed", 42))
    t = params.get("_time", 0)

    if state is None:
        state = {}

    # Effectively disabled: pass the frame through untouched.
    if intensity < 0.01:
        return frame, state

    h, w = frame.shape[:2]
    cx, cy = int(center_x * w), int(center_y * h)

    # BUG FIX: _init_pieces lays pieces out on a rows x cols grid whose
    # count only *approximates* num_pieces, so the old check
    # `len(state["pieces"]) != num_pieces` re-initialized the pieces (and
    # reset start_time) on nearly every frame, freezing the animation at
    # t = 0.  Track the requested count in state instead.
    if "pieces" not in state or state.get("num_pieces") != num_pieces:
        state["pieces"] = _init_pieces(w, h, num_pieces, cx, cy, seed)
        state["num_pieces"] = num_pieces
        state["start_time"] = t

    pieces = state["pieces"]
    start_time = state.get("start_time", t)
    effect_t = t - start_time

    # Pieces are composited over black, then mixed with the original.
    result = np.zeros_like(frame)

    time_factor = effect_t * intensity * 3.0

    for piece in pieces:
        px, py, pw, ph, vx, vy, rot = piece

        if pw <= 0 or ph <= 0:
            continue

        # Ballistic position: linear velocity plus quadratic gravity.
        curr_x = px + vx * time_factor * w * 0.5
        curr_y = py + vy * time_factor * h * 0.5 + gravity * time_factor ** 2 * h * 0.2
        curr_rot = rot * rotation_speed * time_factor * 180

        # Fade with distance travelled (if enabled).
        distance = np.sqrt((curr_x - px)**2 + (curr_y - py)**2)
        alpha = 1.0 - (distance / max(w, h)) if fade_out else 1.0
        alpha = max(0, min(1, alpha))

        if alpha < 0.05:
            continue

        # Source rectangle of this piece in the original frame.
        px1, py1 = max(0, int(px)), max(0, int(py))
        px2, py2 = min(w, int(px + pw)), min(h, int(py + ph))

        if px2 <= px1 or py2 <= py1:
            continue

        piece_img = frame[py1:py2, px1:px2].copy()

        # Spin the piece as it flies (skip near-zero angles).
        if abs(curr_rot) > 1:
            piece_img = ndimage.rotate(piece_img, curr_rot, reshape=False, mode='constant', cval=0)

        dest_x = int(curr_x)
        dest_y = int(curr_y)
        piece_h, piece_w = piece_img.shape[:2]

        # Clip both source and destination rectangles to the frame.
        src_x1, src_y1 = 0, 0
        src_x2, src_y2 = piece_w, piece_h
        dst_x1, dst_y1 = dest_x, dest_y
        dst_x2, dst_y2 = dest_x + piece_w, dest_y + piece_h

        if dst_x1 < 0:
            src_x1 = -dst_x1
            dst_x1 = 0
        if dst_y1 < 0:
            src_y1 = -dst_y1
            dst_y1 = 0
        if dst_x2 > w:
            src_x2 -= (dst_x2 - w)
            dst_x2 = w
        if dst_y2 > h:
            src_y2 -= (dst_y2 - h)
            dst_y2 = h

        if dst_x2 <= dst_x1 or dst_y2 <= dst_y1:
            continue
        if src_x2 <= src_x1 or src_y2 <= src_y1:
            continue

        piece_region = piece_img[src_y1:src_y2, src_x1:src_x2]
        if piece_region.size == 0:
            continue

        result_region = result[dst_y1:dst_y2, dst_x1:dst_x2]
        if result_region.shape != piece_region.shape:
            continue

        # Alpha-blend the piece over whatever is already in the buffer.
        result[dst_y1:dst_y2, dst_x1:dst_x2] = (
            result_region * (1 - alpha) + piece_region * alpha
        ).astype(np.uint8)

    # Cross-fade between the untouched frame and the exploded version.
    final = ((1 - intensity) * frame + intensity * result).astype(np.uint8)

    return final, state


def _init_pieces(w: int, h: int, num_pieces: int, cx: int, cy: int, seed: int) -> list:
    """
    Initialize shatter pieces with deterministic pseudo-random velocities.

    Returns a list of (px, py, pw, ph, vx, vy, rot) tuples covering the
    frame in a grid.  Note: the actual piece count is rows * cols, which
    only approximates num_pieces.
    """
    rng = DeterministicRNG(seed)

    pieces = []

    # Grid dimensions chosen so tiles are roughly square on screen.
    cols = max(1, int(np.sqrt(num_pieces * w / h)))
    rows = max(1, int(num_pieces / cols))
    piece_w = w // cols
    piece_h = h // rows

    for row in range(rows):
        for col in range(cols):
            px = col * piece_w
            py = row * piece_h
            # Last row/column absorbs the integer-division remainder.
            pw = piece_w + (w % cols if col == cols - 1 else 0)
            ph = piece_h + (h % rows if row == rows - 1 else 0)

            # Velocity points away from the explosion center, plus jitter.
            piece_cx = px + pw // 2
            piece_cy = py + ph // 2
            dx = piece_cx - cx
            dy = piece_cy - cy
            dist = max(1, np.sqrt(dx*dx + dy*dy))

            vx = dx / dist + rng.uniform(-0.3, 0.3)
            vy = dy / dist + rng.uniform(-0.3, 0.3)
            rot = rng.uniform(-2, 2)

            pieces.append((px, py, pw, ph, vx, vy, rot))

    return pieces
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply strobe / posterize-time: hold each captured frame for
    1/frame_rate seconds so output plays at a reduced effective rate.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - frame_rate: target fps 1-60 (default 12)
            - sync_to_beat: use beat timing (default False)
            - beat_divisor: beat fraction (default 1)
        state: Persistent state dict
            - held_frame: currently held frame
            - held_until: hold expiry time

    Returns:
        Tuple of (processed_frame, new_state)

    NOTE(review): sync_to_beat / beat_divisor (and the last_beat state
    slot) are parsed but never acted on -- frame_rate timing is always
    used.  Also, the companion strobe.sexp recipe passes :frequency,
    which this effect ignores (it reads "frame_rate"); confirm the
    intended parameter name.
    """
    fps = min(max(params.get("frame_rate", 12), 1), 60)
    use_beats = params.get("sync_to_beat", False)          # currently unused
    divisor = max(1, int(params.get("beat_divisor", 1)))   # currently unused

    # Current time, supplied by the executor.
    now = params.get("_time", 0)

    state = {} if state is None else state

    if "held_frame" not in state:
        state.update(held_frame=None, held_until=0.0, last_beat=-1)

    hold_span = 1.0 / fps

    # Capture a fresh frame once the previous hold expires.
    if now >= state["held_until"]:
        state["held_frame"] = frame.copy()
        state["held_until"] = now + hold_span

    held = state["held_frame"]
    return (held if held is not None else frame), state
-o output.mp4 +;; +;; Parameters: +;; frequency: flashes per second (1 to 15, default: 4) +;; WARNING: high values may cause discomfort + +(recipe "strobe" + :version "1.0" + :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) + + (effect strobe :path "sexp_effects/effects/strobe.sexp") + + ;; Default parameter (overridden by -p frequency=N) + (def frequency 4) + + (def video (source :path "monday.webm")) + (def clip (-> video (segment :start 0 :duration 10))) + (def result (-> clip (effect strobe :frequency frequency))) + result) diff --git a/effects/swirl.py b/effects/swirl.py new file mode 100644 index 0000000..39323b8 --- /dev/null +++ b/effects/swirl.py @@ -0,0 +1,121 @@ +# /// script +# requires-python = ">=3.10" +# dependencies = ["numpy", "opencv-python"] +# /// +""" +@effect swirl +@version 1.0.0 +@author artdag + +@description +Spiral/vortex distortion that twists the image around a center point. +Creates whirlpool-like effects. Great for psychedelic/hypnotic visuals. + +@param strength float + @range -10 10 + @default 1.0 + Swirl strength in radians. Positive = counter-clockwise, negative = clockwise. + +@param radius float + @range 0.1 2 + @default 0.5 + Effect radius as fraction of image size. Larger = wider swirl. + +@param center_x float + @range 0 1 + @default 0.5 + Horizontal center of swirl (0 = left, 1 = right). + +@param center_y float + @range 0 1 + @default 0.5 + Vertical center of swirl (0 = top, 1 = bottom). 
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply a swirl (vortex) distortion around a configurable center.

    Each output pixel samples the input along a rotation whose angle
    shrinks with distance from the center per the chosen falloff curve.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - strength: swirl amount in radians (default 1.0)
            - radius: effect radius as a fraction of the larger frame
              dimension (default 0.5)
            - center_x / center_y: swirl center in 0-1 coords (default 0.5)
            - falloff: "linear", "quadratic" or "gaussian"
              (default quadratic)
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    twist = params.get("strength", 1.0)
    if twist == 0:
        return frame, state

    frac = params.get("radius", 0.5)
    u = params.get("center_x", 0.5)
    v = params.get("center_y", 0.5)
    curve = params.get("falloff", "quadratic")

    rows, cols = frame.shape[:2]
    ox, oy = cols * u, rows * v
    reach = max(cols, rows) * frac

    # Polar coordinates of every pixel relative to the swirl center.
    yy, xx = np.mgrid[0:rows, 0:cols].astype(np.float64)
    rel_x = xx - ox
    rel_y = yy - oy
    r = np.sqrt(rel_x**2 + rel_y**2)
    theta = np.arctan2(rel_y, rel_x)

    d = r / reach  # 0 at the center, 1 at the configured radius

    # Per-pixel swirl weight, decreasing away from the center.
    if curve == "linear":
        weight = np.maximum(0, 1 - d)
    elif curve == "gaussian":
        weight = np.exp(-d**2 * 2)
    else:  # quadratic (default)
        weight = np.maximum(0, 1 - d**2)

    rotated = theta + twist * weight

    # Convert the rotated polar coords back to a cartesian sampling map.
    src_x = (ox + r * np.cos(rotated)).astype(np.float32)
    src_y = (oy + r * np.sin(rotated)).astype(np.float32)

    warped = cv2.remap(
        frame, src_x, src_y,
        cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_REFLECT
    )
    return warped, state
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply threshold effect to a video frame.

    Pixels whose grayscale value exceeds ``level`` become white (or keep
    their original color in "color" mode); everything else becomes black.

    Args:
        frame: Input frame as numpy array, (H, W, 3) RGB uint8 or a
            single-channel (H, W) grayscale array
        params: Effect parameters
            - level: threshold 0-255 (default 128)
            - invert: swap black/white (default False)
            - color_mode: "bw" or "color" (default "bw")
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state); output shape matches input.
    """
    level = int(np.clip(params.get("level", 128), 0, 255))
    invert = params.get("invert", False)
    color_mode = params.get("color_mode", "bw")

    if state is None:
        state = {}

    # Grayscale brightness used for the comparison.  BUG FIX: the old
    # code always added a trailing axis to the mask, which broke
    # broadcasting for 2-D (single-channel) input; keep the mask shaped
    # compatibly with the frame instead.
    if frame.ndim == 3:
        gray = np.mean(frame, axis=2)
        mask = (gray > level)[:, :, np.newaxis]
    else:
        gray = frame
        mask = gray > level

    if invert:
        mask = ~mask

    if color_mode == "bw":
        # Pure black and white, broadcast up to the full channel count.
        result = np.where(np.broadcast_to(mask, frame.shape), 255, 0).astype(np.uint8)
    else:
        # Keep original colors where above threshold, black elsewhere.
        result = np.where(mask, frame, 0).astype(np.uint8)

    return result, state
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Tile the frame into a rows x cols mosaic of scaled-down copies.

    Optionally separates tiles with colored gaps, rotates each successive
    tile by a fixed increment, and mirrors alternating tiles.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - rows: number of rows (default 2)
            - cols: number of columns (default 2)
            - gap: gap between tiles in pixels (default 0)
            - gap_color: RGB list (default [0, 0, 0])
            - rotation_per_tile: rotation increment in degrees (default 0)
            - alternate_flip: flip alternating tiles (default False)
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    n_rows = min(max(int(params.get("rows", 2)), 1), 10)
    n_cols = min(max(int(params.get("cols", 2)), 1), 10)
    spacing = max(0, int(params.get("gap", 0)))
    spacing_rgb = params.get("gap_color", [0, 0, 0])
    step_deg = params.get("rotation_per_tile", 0)
    mirror_alt = params.get("alternate_flip", False)

    state = {} if state is None else state

    frame_h, frame_w = frame.shape[:2]

    # Tile geometry after reserving room for the gaps.
    cell_w = (frame_w - spacing * (n_cols - 1)) // n_cols
    cell_h = (frame_h - spacing * (n_rows - 1)) // n_rows

    # Degenerate geometry (too many tiles / too much gap): pass through.
    if cell_w <= 0 or cell_h <= 0:
        return frame, state

    # One scaled-down copy of the source, reused for every cell.
    base = cv2.resize(frame, (cell_w, cell_h), interpolation=cv2.INTER_LINEAR)

    # Background canvas filled with the gap color.
    if isinstance(spacing_rgb, (list, tuple)) and len(spacing_rgb) >= 3:
        canvas = np.full((frame_h, frame_w, 3), spacing_rgb[:3], dtype=np.uint8)
    else:
        canvas = np.zeros((frame_h, frame_w, 3), dtype=np.uint8)

    index = 0
    for r in range(n_rows):
        for c in range(n_cols):
            top = r * (cell_h + spacing)
            left = c * (cell_w + spacing)

            cell = base.copy()

            # Rotate each tile a little further than the previous one.
            if step_deg != 0:
                rot = cv2.getRotationMatrix2D(
                    (cell_w // 2, cell_h // 2), step_deg * index, 1.0)
                cell = cv2.warpAffine(cell, rot, (cell_w, cell_h),
                                      borderMode=cv2.BORDER_REFLECT)

            # Checkerboard mirroring of alternating tiles.
            if mirror_alt and (r + c) % 2 == 1:
                cell = cv2.flip(cell, 1)

            # Paste, clipping at the right/bottom frame edges.
            bottom = min(top + cell_h, frame_h)
            right = min(left + cell_w, frame_w)
            canvas[top:bottom, left:right] = cell[:bottom - top, :right - left]

            index += 1

    return canvas, state
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Accumulate motion trails by mixing the frame into a decaying buffer.

    The persistent buffer decays toward fade_color at a rate set by
    ``persistence``; the current frame is folded in with the selected
    blend mode and the clipped result becomes the next buffer.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - persistence: trail length 0-0.99 (default 0.8)
            - blend_mode: blend/add/screen/lighten/darken (default blend)
            - fade_color: RGB list to fade toward (default black)
        state: Persistent state dict
            - trail_buffer: float32 accumulation buffer

    Returns:
        Tuple of (processed_frame, new_state)
    """
    decay = min(max(params.get("persistence", 0.8), 0), 0.99)
    mode = params.get("blend_mode", "blend")
    fade_rgb = params.get("fade_color", [0, 0, 0])

    state = {} if state is None else state

    # (Re)seed the buffer on first use or when the frame size changes.
    if "trail_buffer" not in state or state["trail_buffer"].shape != frame.shape:
        state["trail_buffer"] = frame.astype(np.float32)

    history = state["trail_buffer"]
    live = frame.astype(np.float32)

    if isinstance(fade_rgb, (list, tuple)) and len(fade_rgb) >= 3:
        target = np.array(fade_rgb[:3], dtype=np.float32)
    else:
        target = np.array([0, 0, 0], dtype=np.float32)

    # Pull the history toward the fade color.
    decayed = history * decay + target * (1 - decay)

    # Fold the live frame in.
    if mode == "add":
        mixed = decayed + live
    elif mode == "screen":
        mixed = 255 - ((255 - decayed) * (255 - live) / 255)
    elif mode == "lighten":
        mixed = np.maximum(decayed, live)
    elif mode == "darken":
        mixed = np.minimum(decayed, live)
    else:  # "blend" (default)
        mixed = decayed + live * (1 - decay)

    state["trail_buffer"] = np.clip(mixed, 0, 255)

    return np.clip(mixed, 0, 255).astype(np.uint8), state
0000000..eaf1456 --- /dev/null +++ b/effects/trails.sexp @@ -0,0 +1,20 @@ +;; Trails effect - motion blur/persistence +;; Usage: python3 plan.py effects/trails.sexp -p persistence=0.7 | python3 execute.py - -d . -o output.mp4 +;; +;; Parameters: +;; persistence: trail strength (0.3 to 0.95, default: 0.7) +;; higher = longer trails + +(recipe "trails" + :version "1.0" + :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) + + (effect trails :path "sexp_effects/effects/trails.sexp") + + ;; Default parameter (overridden by -p persistence=N) + (def persistence 0.7) + + (def video (source :path "monday.webm")) + (def clip (-> video (segment :start 0 :duration 10))) + (def result (-> clip (effect trails :persistence persistence))) + result) diff --git a/effects/vhs.py b/effects/vhs.py new file mode 100644 index 0000000..a91ed21 --- /dev/null +++ b/effects/vhs.py @@ -0,0 +1,163 @@ +# /// script +# requires-python = ">=3.10" +# dependencies = ["numpy", "opencv-python"] +# /// +""" +@effect vhs +@version 1.0.0 +@author artdag + +@description +VHS / Analog Video effect. Complete VHS tape simulation combining +tracking errors, color bleeding, noise, and scan line distortion. + +@param tracking_error float + @range 0 50 + @default 5 + Horizontal displacement amount. Sync to onset for glitches. + +@param color_bleed int + @range 0 20 + @default 3 + Horizontal color smearing (typical VHS artifact). + +@param noise_intensity float + @range 0 1 + @default 0.2 + Static noise amount. + +@param chroma_shift int + @range 0 15 + @default 2 + Color channel offset (VHS color alignment issues). + +@param head_switching bool + @default true + Bottom-of-frame distortion. + +@param tape_crease_prob float + @range 0 0.5 + @default 0.05 + Probability of random tape crease distortion per frame. + +@param blur_amount float + @range 0 5 + @default 1 + VHS softness blur. + +@param seed int + @default 42 + Random seed for deterministic artifacts. 
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply VHS effect to a video frame.

    Stages are applied in a fixed order (softness blur, color bleed,
    chroma shift, tracking error, tape crease, head switching, static
    noise).  The order matters: later stages consume draws from the
    persistent DeterministicRNG, so reordering would change the output
    for a given seed.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
        state: Persistent state dict

    Returns:
        Tuple of (processed_frame, new_state)
    """
    tracking_error = params.get("tracking_error", 5)
    color_bleed = int(params.get("color_bleed", 3))
    noise_intensity = params.get("noise_intensity", 0.2)
    chroma_shift = int(params.get("chroma_shift", 2))
    head_switching = params.get("head_switching", True)
    tape_crease_prob = params.get("tape_crease_prob", 0.05)
    blur_amount = params.get("blur_amount", 1)
    seed = int(params.get("seed", 42))

    if state is None:
        state = {}

    # Initialize RNG (kept in state so the draw sequence continues
    # deterministically across frames).
    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)
    rng = state["rng"]

    h, w = frame.shape[:2]
    result = frame.astype(np.float32).copy()

    # Apply slight blur (VHS softness) -- horizontal-only kernel (ksize, 1).
    if blur_amount > 0:
        ksize = int(blur_amount * 2) * 2 + 1
        result = cv2.GaussianBlur(result, (ksize, 1), 0)

    # Apply color bleed (horizontal color smearing) on channels 0 and 2
    # only; the middle channel is left sharp.
    if color_bleed > 0:
        ksize = color_bleed * 2 + 1
        result[:, :, 0] = cv2.blur(result[:, :, 0], (ksize, 1))
        result[:, :, 2] = cv2.blur(result[:, :, 2], (ksize, 1))

    # Apply chroma shift (color channel misalignment): channel 0 shifts
    # right, channel 2 shifts left by the same amount.
    if chroma_shift > 0:
        shifted = np.zeros_like(result)
        M_r = np.float32([[1, 0, chroma_shift], [0, 1, 0]])
        M_b = np.float32([[1, 0, -chroma_shift], [0, 1, 0]])
        shifted[:, :, 0] = cv2.warpAffine(result[:, :, 0], M_r, (w, h), borderMode=cv2.BORDER_REPLICATE)
        shifted[:, :, 1] = result[:, :, 1]
        shifted[:, :, 2] = cv2.warpAffine(result[:, :, 2], M_b, (w, h), borderMode=cv2.BORDER_REPLICATE)
        result = shifted

    # Apply tracking error (horizontal line displacement): sine wave over
    # rows plus one random jitter draw per row.
    if tracking_error > 0:
        for y in range(h):
            sine_shift = np.sin(y * 0.05)
            rand_shift = rng.uniform(-0.3, 0.3)
            displacement = int(tracking_error * (sine_shift + rand_shift))
            if displacement != 0:
                result[y] = np.roll(result[y], displacement, axis=0)

    # Apply tape crease (random distortion bands): at most one band per
    # frame, gated by a single probability draw.
    if tape_crease_prob > 0 and rng.uniform() < tape_crease_prob:
        band_start = rng.randint(0, max(1, h - 20))
        band_height = rng.randint(5, 20)
        for y in range(band_start, min(band_start + band_height, h)):
            displacement = rng.randint(-50, 50)
            result[y] = np.roll(result[y], displacement, axis=0)
            result[y] = result[y] * rng.uniform(0.5, 1.5)

    # Apply head switching noise (bottom of frame distortion): shear and
    # noise both ramp up toward the bottom edge.
    if head_switching:
        switch_height = rng.randint(5, 15)
        for y in range(h - switch_height, h):
            factor = (y - (h - switch_height)) / switch_height
            displacement = int(factor * 30 * rng.uniform(0.5, 1.5))
            result[y] = np.roll(result[y], displacement, axis=0)
            noise = np.array([[rng.gaussian(0, 20 * factor) for _ in range(3)] for _ in range(w)])
            result[y] = result[y] + noise

    # Apply static noise.
    # NOTE(review): this draws one gaussian per pixel per channel in pure
    # Python (h*w*3 calls), which is very slow for real frame sizes --
    # presumably acceptable for offline rendering, but vectorizing would
    # change the RNG draw sequence and thus the output for a given seed.
    if noise_intensity > 0:
        noise = np.array([[[rng.gaussian(0, noise_intensity * 30) for _ in range(3)]
                           for _ in range(w)]
                          for _ in range(h)])
        result = result + noise

    return np.clip(result, 0, 255).astype(np.uint8), state
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply vignette effect to a video frame.

    Builds a radial brightness mask around a configurable center and
    blends the frame toward ``color`` where the mask falls off.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - strength: darkness 0-1 (default 0.5)
            - radius: center size 0.5-2 (default 1.0)
            - softness: falloff gradient (default 0.5)
            - center_x: center X 0-1 (default 0.5)
            - center_y: center Y 0-1 (default 0.5)
            - color: RGB tuple (default [0,0,0])
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    strength = np.clip(params.get("strength", 0.5), 0, 1)
    radius = max(0.5, min(params.get("radius", 1.0), 2))
    softness = max(0.1, min(params.get("softness", 0.5), 1))
    center_x = params.get("center_x", 0.5)
    center_y = params.get("center_y", 0.5)
    color = params.get("color", [0, 0, 0])

    if state is None:
        state = {}

    if strength <= 0:
        return frame, state

    h, w = frame.shape[:2]

    # Vignette center in pixel coordinates.
    cx = w * center_x
    cy = h * center_y

    # Distance of every pixel from the center (ogrid keeps this cheap).
    y_coords, x_coords = np.ogrid[:h, :w]
    dist = np.sqrt((x_coords - cx)**2 + (y_coords - cy)**2)

    # Distance at which the vignette reaches full strength.
    max_dist = np.sqrt(cx**2 + cy**2) * radius
    normalized_dist = dist / max_dist

    # 1.0 in the bright center, (1 - strength) at/beyond the radius, with
    # a linear ramp whose width is controlled by softness.
    vignette_mask = 1 - np.clip((normalized_dist - (1 - softness)) / softness, 0, 1) * strength

    if isinstance(color, (list, tuple)) and len(color) >= 3:
        vignette_color = np.array(color[:3], dtype=np.float32)
    else:
        vignette_color = np.array([0, 0, 0], dtype=np.float32)

    result = frame.astype(np.float32)

    # Blend toward the vignette color in one vectorized step instead of a
    # per-channel Python loop: the arithmetic per element is identical
    # (float64 intermediate, downcast to float32 on assignment).
    mask3 = vignette_mask[:, :, np.newaxis]
    result[:] = result * mask3 + vignette_color * (1 - mask3)

    return np.clip(result, 0, 255).astype(np.uint8), state
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """Apply an animated sine-wave displacement to a frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: amplitude (px), wavelength (px), speed, and direction
            ("horizontal" | "vertical" | "both")
        state: Persistent state dict; the animation phase lives in
            state["phase"] so the wave advances smoothly between frames.

    Returns:
        Tuple of (processed_frame, new_state)
    """
    amp = params.get("amplitude", 10)
    wl = params.get("wavelength", 50)
    spd = params.get("speed", 1)
    axis = params.get("direction", "horizontal")

    if state is None:
        state = {}

    # No displacement requested: pass the frame through untouched.
    if amp == 0:
        return frame, state

    height, width = frame.shape[:2]

    # Advance and persist the animation phase.
    phase = state.get("phase", 0) + spd * 0.1
    state["phase"] = phase

    # Identity sampling grids (float32, as required by cv2.remap).
    xs = np.arange(width, dtype=np.float32)
    ys = np.arange(height, dtype=np.float32)
    grid_x, grid_y = np.meshgrid(xs, ys)

    if axis in ("horizontal", "both"):
        # Horizontal waves: X displacement driven by row position.
        grid_x = grid_x + np.sin(2 * np.pi * grid_y / wl + phase) * amp

    if axis in ("vertical", "both"):
        # Vertical waves: Y displacement driven by X (for "both" this is
        # deliberately the already-displaced X map, matching the original
        # compounding behavior).
        grid_y = grid_y + np.sin(2 * np.pi * grid_x / wl + phase) * amp

    warped = cv2.remap(
        frame, grid_x, grid_y,
        cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_REFLECT
    )

    return warped, state
+ +@param center_x float + @range 0 1 + @default 0.5 + Horizontal center of zoom (0 = left, 1 = right). + +@param center_y float + @range 0 1 + @default 0.5 + Vertical center of zoom (0 = top, 1 = bottom). + +@example + (effect zoom :factor 1.5) + +@example + ;; Pulse zoom on bass + (effect zoom :factor (bind bass :range [1.0 1.5] :transform sqrt)) + +@example + ;; Zoom to corner + (effect zoom :factor 2 :center_x 0 :center_y 0) +""" + +import numpy as np +import cv2 + + +def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: + """ + Zoom a video frame. + + Args: + frame: Input frame as numpy array (H, W, 3) RGB uint8 + params: Effect parameters + - factor: zoom multiplier (default 1.0) + - center_x: horizontal center 0-1 (default 0.5) + - center_y: vertical center 0-1 (default 0.5) + state: Persistent state dict (unused) + + Returns: + Tuple of (processed_frame, new_state) + """ + factor = params.get("factor", 1.0) + center_x = params.get("center_x", 0.5) + center_y = params.get("center_y", 0.5) + + if factor is None or factor <= 0.01: + factor = 1.0 + + if factor == 1.0: + return frame, state + + h, w = frame.shape[:2] + + # Calculate crop region for zoom in + new_w = int(w / factor) + new_h = int(h / factor) + + if new_w <= 0 or new_h <= 0: + return frame, state + + # Calculate crop offset based on center + x_start = int((w - new_w) * center_x) + y_start = int((h - new_h) * center_y) + + # Clamp to valid range + x_start = max(0, min(x_start, w - new_w)) + y_start = max(0, min(y_start, h - new_h)) + + # Crop and resize back to original dimensions + cropped = frame[y_start:y_start + new_h, x_start:x_start + new_w] + result = cv2.resize(cropped, (w, h), interpolation=cv2.INTER_LINEAR) + + return result, state diff --git a/effects/zoom.sexp b/effects/zoom.sexp new file mode 100644 index 0000000..8bfdffd --- /dev/null +++ b/effects/zoom.sexp @@ -0,0 +1,20 @@ +;; Zoom effect - scale from center +;; Usage: python3 plan.py effects/zoom.sexp -p 
factor=1.2 | python3 execute.py - -d . -o output.mp4 +;; +;; Parameters: +;; factor: zoom multiplier (0.5 to 3.0, default: 1.2) +;; <1 = zoom out, >1 = zoom in + +(recipe "zoom" + :version "1.0" + :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) + + (effect zoom :path "sexp_effects/effects/zoom.sexp") + + ;; Default parameter (overridden by -p factor=N) + (def factor 1.2) + + (def video (source :path "monday.webm")) + (def clip (-> video (segment :start 0 :duration 10))) + (def result (-> clip (effect zoom :factor factor))) + result) diff --git a/execute.py b/execute.py new file mode 100644 index 0000000..212d770 --- /dev/null +++ b/execute.py @@ -0,0 +1,1458 @@ +#!/usr/bin/env python3 +""" +Execute a pre-computed plan. + +Takes a plan file (S-expression) and executes primitive operations, +storing artifacts by their content hash. + +Usage: + analyze.py recipe.sexp > analysis.sexp + plan.py recipe.sexp --analysis analysis.sexp --sexp > plan.sexp + execute.py plan.sexp --analysis analysis.sexp +""" + +import json +import shutil +import subprocess +import sys +import tempfile +import importlib.util +from pathlib import Path +from typing import List + +# Add artdag to path +sys.path.insert(0, str(Path(__file__).parent.parent / "artdag")) + +from artdag.sexp import parse +from artdag.sexp.parser import Symbol, Keyword + + +def clean_nil_symbols(obj): + """Recursively convert Symbol('nil') to None and filter out None values from dicts.""" + if isinstance(obj, Symbol): + if obj.name == 'nil': + return None + return obj + elif isinstance(obj, dict): + result = {} + for k, v in obj.items(): + cleaned = clean_nil_symbols(v) + # Skip None values (they were nil) + if cleaned is not None: + result[k] = cleaned + return result + elif isinstance(obj, list): + return [clean_nil_symbols(v) for v in obj] + return obj + + +def parse_analysis_sexp(content: str) -> dict: + """Parse analysis S-expression into dict.""" + sexp = parse(content) + if 
isinstance(sexp, list) and len(sexp) == 1: + sexp = sexp[0] + + if not isinstance(sexp, list) or not sexp: + raise ValueError("Invalid analysis S-expression") + + # Should be (analysis (name ...) (name ...) ...) + if not isinstance(sexp[0], Symbol) or sexp[0].name != "analysis": + raise ValueError("Expected (analysis ...) S-expression") + + result = {} + for item in sexp[1:]: + if isinstance(item, list) and item and isinstance(item[0], Symbol): + name = item[0].name + data = {} + + i = 1 + while i < len(item): + if isinstance(item[i], Keyword): + key = item[i].name.replace("-", "_") + i += 1 + if i < len(item): + data[key] = item[i] + i += 1 + else: + i += 1 + + result[name] = data + + return result + + +def sexp_to_plan(sexp) -> dict: + """Convert a parsed S-expression plan to a dict.""" + if not isinstance(sexp, list) or not sexp: + raise ValueError("Invalid plan S-expression") + + # Skip 'plan' symbol and name + plan = { + "steps": [], + "analysis": {}, + } + + i = 0 + if isinstance(sexp[0], Symbol) and sexp[0].name == "plan": + i = 1 + if i < len(sexp) and isinstance(sexp[i], str): + plan["recipe_id"] = sexp[i] + i += 1 + + # Parse keywords and steps + while i < len(sexp): + item = sexp[i] + + if isinstance(item, Keyword): + key = item.name.replace("-", "_") + i += 1 + if i < len(sexp): + value = sexp[i] + if key == "encoding" and isinstance(value, list): + # Parse encoding dict from sexp + plan["encoding"] = sexp_to_dict(value) + elif key == "output": + # Map :output to output_step_id + plan["output_step_id"] = value + elif key == "id": + # Map :id to plan_id + plan["plan_id"] = value + elif key == "recipe": + # Map :recipe to recipe_id + plan["recipe_id"] = value + else: + plan[key] = value + i += 1 + elif isinstance(item, list) and item and isinstance(item[0], Symbol): + if item[0].name == "step": + # Parse step + step = parse_step_sexp(item) + plan["steps"].append(step) + elif item[0].name == "analysis": + # Parse analysis data + plan["analysis"] = 
parse_analysis_sexp(item) + i += 1 + else: + i += 1 + + return plan + + +def parse_analysis_sexp(sexp) -> dict: + """Parse analysis S-expression: (analysis (bass :times [...] :values [...]) ...)""" + analysis = {} + for item in sexp[1:]: # Skip 'analysis' symbol + if isinstance(item, list) and item and isinstance(item[0], Symbol): + name = item[0].name + data = {} + j = 1 + while j < len(item): + if isinstance(item[j], Keyword): + key = item[j].name + j += 1 + if j < len(item): + data[key] = item[j] + j += 1 + else: + j += 1 + analysis[name] = data + return analysis + + +def parse_bind_sexp(sexp) -> dict: + """Parse a bind S-expression: (bind analysis-ref :range [min max] :offset 60 :transform sqrt)""" + if not isinstance(sexp, list) or len(sexp) < 2: + return None + if not isinstance(sexp[0], Symbol) or sexp[0].name != "bind": + return None + + bind = { + "_bind": sexp[1] if isinstance(sexp[1], str) else sexp[1].name if isinstance(sexp[1], Symbol) else str(sexp[1]), + "range_min": 0.0, + "range_max": 1.0, + "transform": None, + "offset": 0.0, + } + + i = 2 + while i < len(sexp): + if isinstance(sexp[i], Keyword): + kw = sexp[i].name + if kw == "range": + i += 1 + if i < len(sexp) and isinstance(sexp[i], list) and len(sexp[i]) >= 2: + bind["range_min"] = float(sexp[i][0]) + bind["range_max"] = float(sexp[i][1]) + elif kw == "offset": + i += 1 + if i < len(sexp): + bind["offset"] = float(sexp[i]) + elif kw == "transform": + i += 1 + if i < len(sexp): + t = sexp[i] + if isinstance(t, Symbol): + bind["transform"] = t.name + elif isinstance(t, str): + bind["transform"] = t + i += 1 + + return bind + + +def sexp_to_dict(sexp) -> dict: + """Convert S-expression key-value pairs to dict.""" + result = {} + i = 0 + while i < len(sexp): + if isinstance(sexp[i], Keyword): + key = sexp[i].name.replace("-", "_") + i += 1 + if i < len(sexp): + value = sexp[i] + # Check for bind expression and convert to dict format + if isinstance(value, list) and value and isinstance(value[0], 
Symbol) and value[0].name == "bind": + value = parse_bind_sexp(value) + result[key] = value + i += 1 + else: + i += 1 + return result + + +def parse_step_sexp(sexp) -> dict: + """Parse a step S-expression. + + Supports two formats: + 1. (step "id" :cache-id "..." :type "SOURCE" :path "..." :inputs [...]) + 2. (step "id" :cache-id "..." :level 1 (source :path "..." :inputs [...])) + """ + step = { + "inputs": [], + "config": {}, + } + + i = 1 # Skip 'step' symbol + if i < len(sexp) and isinstance(sexp[i], str): + step["step_id"] = sexp[i] + i += 1 + + while i < len(sexp): + item = sexp[i] + + if isinstance(item, Keyword): + key = item.name.replace("-", "_") + i += 1 + if i < len(sexp): + value = sexp[i] + if key == "type": + step["node_type"] = value if isinstance(value, str) else value.name + elif key == "inputs": + step["inputs"] = value if isinstance(value, list) else [value] + elif key in ("level", "cache", "cache_id"): + if key == "cache": + key = "cache_id" + step[key] = value + else: + # Check for bind expression + if isinstance(value, list) and value and isinstance(value[0], Symbol) and value[0].name == "bind": + value = parse_bind_sexp(value) + # Config value + step["config"][key] = value + i += 1 + elif isinstance(item, list) and item and isinstance(item[0], Symbol): + # Nested node expression: (source :path "..." 
:inputs [...]) + node_type = item[0].name.upper() + step["node_type"] = node_type + + # Parse node config + j = 1 + while j < len(item): + if isinstance(item[j], Keyword): + key = item[j].name.replace("-", "_") + j += 1 + if j < len(item): + value = item[j] + if key == "inputs": + step["inputs"] = value if isinstance(value, list) else [value] + else: + # Check for bind expression + if isinstance(value, list) and value and isinstance(value[0], Symbol) and value[0].name == "bind": + value = parse_bind_sexp(value) + step["config"][key] = value + j += 1 + else: + j += 1 + i += 1 + else: + i += 1 + + return step + + +def parse_plan_input(content: str) -> dict: + """Parse plan from JSON or S-expression string.""" + content = content.strip() + if content.startswith("{"): + return json.loads(content) + elif content.startswith("("): + sexp = parse(content) + return sexp_to_plan(sexp[0] if isinstance(sexp, list) and len(sexp) == 1 else sexp) + else: + raise ValueError("Plan must be JSON (starting with '{') or S-expression (starting with '(')") + + +# Default encoding settings +DEFAULT_ENCODING = { + "codec": "libx264", + "preset": "fast", + "crf": 18, + "audio_codec": "aac", + "fps": 30, +} + + +def get_encoding(recipe_encoding: dict, step_config: dict) -> dict: + """Merge encoding settings: defaults < recipe < step overrides.""" + encoding = {**DEFAULT_ENCODING} + encoding.update(recipe_encoding) + if "encoding" in step_config: + encoding.update(step_config["encoding"]) + return encoding + + +class SexpEffectModule: + """Wrapper for S-expression effects to provide process_frame interface.""" + + def __init__(self, effect_path: Path): + from sexp_effects import get_interpreter + self.interp = get_interpreter() + self.interp.load_effect(str(effect_path)) + self.effect_name = effect_path.stem + + def process_frame(self, frame, params, state): + return self.interp.run_effect(self.effect_name, frame, params, state or {}) + + +def load_effect(effect_path: Path): + """Load an 
effect module from a local path (.py or .sexp).""" + if effect_path.suffix == ".sexp": + return SexpEffectModule(effect_path) + + spec = importlib.util.spec_from_file_location("effect", effect_path) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module + + +def interpolate_analysis(times: list, values: list, t: float) -> float: + """Interpolate analysis value at time t.""" + if not times or not values: + return 0.0 + if t <= times[0]: + return values[0] + if t >= times[-1]: + return values[-1] + + # Binary search for surrounding times + lo, hi = 0, len(times) - 1 + while lo < hi - 1: + mid = (lo + hi) // 2 + if times[mid] <= t: + lo = mid + else: + hi = mid + + # Linear interpolation + t0, t1 = times[lo], times[hi] + v0, v1 = values[lo], values[hi] + if t1 == t0: + return v0 + alpha = (t - t0) / (t1 - t0) + return v0 + alpha * (v1 - v0) + + +def apply_transform(value: float, transform: str) -> float: + """Apply a transform function to a value (0-1 range).""" + if transform is None: + return value + if transform == "sqrt": + return value ** 0.5 + elif transform == "pow2": + return value ** 2 + elif transform == "pow3": + return value ** 3 + elif transform == "log": + # Logarithmic scale: log(1 + 9*x) / log(10) maps 0-1 to 0-1 with log curve + import math + return math.log(1 + 9 * value) / math.log(10) if value > 0 else 0 + elif transform == "exp": + # Exponential scale: (10^x - 1) / 9 maps 0-1 to 0-1 with exp curve + return (10 ** value - 1) / 9 + elif transform == "inv": + return 1 - value + else: + return value + + +def resolve_params(params: dict, frame_time: float, analysis_data: dict) -> dict: + """Resolve any binding params using analysis data at frame_time.""" + resolved = {} + for key, value in params.items(): + if isinstance(value, dict) and ("_bind" in value or "_binding" in value): + # This is a binding - resolve it + # Support both old format (_bind) and new format (_binding) + if "_bind" in value: + # Old 
format: {"_bind": "ref", "range_min": 0, "range_max": 1} + ref = value["_bind"] + range_min = value.get("range_min", 0.0) + range_max = value.get("range_max", 1.0) + else: + # New format from compiler: {"_binding": True, "source": "node_id", "feature": "values", "range": [min, max]} + ref = value.get("source", "") + range_val = value.get("range", [0.0, 1.0]) + range_min = range_val[0] if isinstance(range_val, list) else 0.0 + range_max = range_val[1] if isinstance(range_val, list) and len(range_val) > 1 else 1.0 + + transform = value.get("transform") + bind_offset = value.get("offset", 0.0) + + # Look up analysis track + track = analysis_data.get(ref, {}) + times = track.get("times", []) + values = track.get("values", []) + + # Interpolate raw value (0-1) - add binding offset to frame_time + lookup_time = frame_time + bind_offset + raw = interpolate_analysis(times, values, lookup_time) + + # Apply transform to raw value (before range scaling) + transformed = apply_transform(raw, transform) + + # Map to output range + resolved[key] = range_min + transformed * (range_max - range_min) + else: + resolved[key] = value + return resolved + + +def run_effect(effect_module, input_path: Path, output_path: Path, params: dict, encoding: dict, analysis_data: dict = None, time_offset: float = 0.0, max_duration: float = None): + """Run an effect on a video file. 
def run_effect(effect_module, input_path: Path, output_path: Path, params: dict, encoding: dict, analysis_data: dict = None, time_offset: float = 0.0, max_duration: float = None):
    """Run an effect on a video file.

    Streams raw RGB frames from ffmpeg, applies
    ``effect_module.process_frame`` to each, and pipes the result into a
    second ffmpeg process that encodes the output (muxing audio from the
    input). Output dimensions are detected from the first processed frame.

    Args:
        time_offset: Time offset in seconds for resolving bindings (e.g., segment start time in audio)
        max_duration: Maximum duration in seconds to process (stops after this many seconds of frames)
    """
    import numpy as np

    # Clean nil Symbols from params
    params = clean_nil_symbols(params)

    # Get video info
    probe_cmd = [
        "ffprobe", "-v", "quiet", "-print_format", "json",
        "-show_streams", str(input_path)
    ]
    probe_result = subprocess.run(probe_cmd, capture_output=True, text=True)
    probe_data = json.loads(probe_result.stdout)

    # Find video stream
    video_stream = None
    for stream in probe_data.get("streams", []):
        if stream.get("codec_type") == "video":
            video_stream = stream
            break

    if not video_stream:
        raise ValueError("No video stream found")

    in_width = int(video_stream["width"])
    in_height = int(video_stream["height"])

    # Get framerate (r_frame_rate is usually a "num/den" fraction)
    fps_str = video_stream.get("r_frame_rate", "30/1")
    if "/" in fps_str:
        num, den = fps_str.split("/")
        fps = float(num) / float(den)
    else:
        fps = float(fps_str)

    # Read frames with ffmpeg as raw rgb24 on stdout
    read_cmd = [
        "ffmpeg", "-i", str(input_path),
        "-f", "rawvideo", "-pix_fmt", "rgb24", "-"
    ]
    read_proc = subprocess.Popen(read_cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)

    # Check if we have any bindings that need per-frame resolution
    has_bindings = any(isinstance(v, dict) and ("_bind" in v or "_binding" in v) for v in params.values())
    analysis_data = analysis_data or {}

    # Debug: print bindings and analysis info once
    if has_bindings:
        print(f" BINDINGS DEBUG: time_offset={time_offset:.2f}", file=sys.stderr)
        for k, v in params.items():
            if isinstance(v, dict) and ("_bind" in v or "_binding" in v):
                ref = v.get("_bind") or v.get("source")
                bind_offset = float(v.get("offset", 0.0))
                track = analysis_data.get(ref, {})
                times = track.get("times", [])
                values = track.get("values", [])
                if times and values:
                    # Find first non-zero value
                    first_nonzero_idx = next((i for i, v in enumerate(values) if v > 0.01), -1)
                    first_nonzero_time = times[first_nonzero_idx] if first_nonzero_idx >= 0 else -1
                    print(f" param {k}: ref='{ref}' bind_offset={bind_offset} time_range=[{min(times):.2f}, {max(times):.2f}]", file=sys.stderr)
                    print(f" first_nonzero at t={first_nonzero_time:.2f} max_value={max(values):.4f}", file=sys.stderr)
                else:
                    # A binding with no backing track is a hard error.
                    raise ValueError(f"Binding for param '{k}' references '{ref}' but no analysis data found. Available: {list(analysis_data.keys())}")

    # Process first frame to detect output dimensions
    in_frame_size = in_width * in_height * 3
    frame_data = read_proc.stdout.read(in_frame_size)
    if len(frame_data) < in_frame_size:
        read_proc.stdout.close()
        read_proc.wait()
        raise ValueError("No frames in input video")

    frame = np.frombuffer(frame_data, dtype=np.uint8).reshape((in_height, in_width, 3))

    # Resolve params for first frame
    if has_bindings:
        frame_params = resolve_params(params, time_offset, analysis_data)
    else:
        frame_params = params

    state = None
    processed, state = effect_module.process_frame(frame, frame_params, state)

    # Get output dimensions from processed frame
    out_height, out_width = processed.shape[:2]
    if out_width != in_width or out_height != in_height:
        print(f" Effect resizes: {in_width}x{in_height} -> {out_width}x{out_height}", file=sys.stderr)

    # Now start write process with correct output dimensions
    write_cmd = [
        "ffmpeg", "-y",
        "-f", "rawvideo", "-pix_fmt", "rgb24",
        "-s", f"{out_width}x{out_height}", "-r", str(encoding.get("fps", 30)),
        "-i", "-",
        "-i", str(input_path),  # For audio
        "-map", "0:v", "-map", "1:a?",
        "-c:v", encoding["codec"], "-preset", encoding["preset"], "-crf", str(encoding["crf"]),
        "-pix_fmt", "yuv420p",
        "-c:a", encoding["audio_codec"],
        str(output_path)
    ]
    write_proc = subprocess.Popen(write_cmd, stdin=subprocess.PIPE, stderr=subprocess.DEVNULL)

    # Write first processed frame
    write_proc.stdin.write(processed.tobytes())
    frame_count = 1

    # Calculate max frames if duration limit specified
    # NOTE(review): max_duration == 0 is falsy and means "no limit" here.
    max_frames = None
    if max_duration:
        max_frames = int(max_duration * fps)

    # Process remaining frames
    while True:
        # Stop if we've reached the frame limit
        if max_frames and frame_count >= max_frames:
            break

        frame_data = read_proc.stdout.read(in_frame_size)
        if len(frame_data) < in_frame_size:
            break

        frame = np.frombuffer(frame_data, dtype=np.uint8).reshape((in_height, in_width, 3))

        # Resolve params for this frame
        if has_bindings:
            frame_time = time_offset + frame_count / fps
            frame_params = resolve_params(params, frame_time, analysis_data)
        else:
            frame_params = params

        processed, state = effect_module.process_frame(frame, frame_params, state)
        write_proc.stdin.write(processed.tobytes())
        frame_count += 1

        if frame_count % 30 == 0:
            print(f" Processed {frame_count} frames...", end="\r", file=sys.stderr)

    # Close the pipes and let both ffmpeg processes finish.
    read_proc.stdout.close()
    write_proc.stdin.close()
    read_proc.wait()
    write_proc.wait()

    print(f" Processed {frame_count} frames total", file=sys.stderr)
def run_multi_effect(effect_module, input_paths: List[Path], output_path: Path, params: dict, encoding: dict, analysis_data: dict = None, time_offset: float = 0.0, max_duration: float = None):
    """Run a multi-input effect on multiple video files.

    Like run_effect, but reads frames from several inputs in lockstep and
    passes a list of frames to ``effect_module.process_frame``. Inputs keep
    their original dimensions; fps and audio come from the first input.
    Processing stops when any input runs out of frames.

    Args:
        time_offset: Time offset in seconds for resolving bindings (e.g., segment start time in audio)
        max_duration: Maximum duration in seconds to process (stops after this many seconds of frames)
    """
    import numpy as np

    # Clean nil Symbols from params
    params = clean_nil_symbols(params)

    if len(input_paths) < 2:
        raise ValueError("Multi-input effect requires at least 2 inputs")

    # Get video info for each input (preserve original dimensions)
    input_infos = []
    for input_path in input_paths:
        probe_cmd = [
            "ffprobe", "-v", "quiet", "-print_format", "json",
            "-show_streams", str(input_path)
        ]
        probe_result = subprocess.run(probe_cmd, capture_output=True, text=True)
        probe_data = json.loads(probe_result.stdout)

        video_stream = None
        for stream in probe_data.get("streams", []):
            if stream.get("codec_type") == "video":
                video_stream = stream
                break

        if not video_stream:
            raise ValueError(f"No video stream found in {input_path}")

        w = int(video_stream["width"])
        h = int(video_stream["height"])
        input_infos.append({"width": w, "height": h, "path": input_path})
        print(f" Input: {input_path.name} ({w}x{h})", file=sys.stderr)

    # Get framerate from first input
    probe_cmd = [
        "ffprobe", "-v", "quiet", "-print_format", "json",
        "-show_streams", str(input_paths[0])
    ]
    probe_result = subprocess.run(probe_cmd, capture_output=True, text=True)
    probe_data = json.loads(probe_result.stdout)
    video_stream = next(s for s in probe_data.get("streams", []) if s.get("codec_type") == "video")
    fps_str = video_stream.get("r_frame_rate", "30/1")
    if "/" in fps_str:
        num, den = fps_str.split("/")
        fps = float(num) / float(den)
    else:
        fps = float(fps_str)

    # Open read processes for all inputs - preserve original dimensions
    read_procs = []
    for info in input_infos:
        read_cmd = [
            "ffmpeg", "-i", str(info["path"]),
            "-f", "rawvideo", "-pix_fmt", "rgb24",
            "-"  # Don't scale - keep original dimensions
        ]
        proc = subprocess.Popen(read_cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
        read_procs.append(proc)

    analysis_data = analysis_data or {}
    state = None

    # Process first frame to detect output dimensions
    frames = []
    for i, (proc, info) in enumerate(zip(read_procs, input_infos)):
        frame_size = info["width"] * info["height"] * 3
        frame_data = proc.stdout.read(frame_size)
        if len(frame_data) < frame_size:
            # Cleanup all readers before bailing out.
            for p in read_procs:
                p.stdout.close()
                p.wait()
            raise ValueError(f"No frames in input {i}")
        frame = np.frombuffer(frame_data, dtype=np.uint8).reshape((info["height"], info["width"], 3))
        frames.append(frame)

    # Check if we have any bindings that need per-frame resolution
    has_bindings = any(isinstance(v, dict) and ("_bind" in v or "_binding" in v) for v in params.values())

    # Resolve params for first frame
    if has_bindings:
        frame_params = resolve_params(params, time_offset, analysis_data)
    else:
        frame_params = params

    processed, state = effect_module.process_frame(frames, frame_params, state)
    out_height, out_width = processed.shape[:2]
    print(f" Output dimensions: {out_width}x{out_height}", file=sys.stderr)

    # Now start write process with correct output dimensions
    write_cmd = [
        "ffmpeg", "-y",
        "-f", "rawvideo", "-pix_fmt", "rgb24",
        "-s", f"{out_width}x{out_height}", "-r", str(encoding.get("fps", 30)),
        "-i", "-",
        "-i", str(input_paths[0]),  # For audio from first input
        "-map", "0:v", "-map", "1:a?",
        "-c:v", encoding["codec"], "-preset", encoding["preset"], "-crf", str(encoding["crf"]),
        "-pix_fmt", "yuv420p",
        "-c:a", encoding["audio_codec"],
        str(output_path)
    ]
    write_proc = subprocess.Popen(write_cmd, stdin=subprocess.PIPE, stderr=subprocess.DEVNULL)

    # Write first processed frame
    write_proc.stdin.write(processed.tobytes())
    frame_count = 1

    # Calculate max frames if duration limit specified
    max_frames = None
    if max_duration:
        max_frames = int(max_duration * fps)

    # Process remaining frames
    while True:
        # Stop if we've reached the frame limit
        if max_frames and frame_count >= max_frames:
            break

        # Read frame from each input (each may have different dimensions)
        frames = []
        all_valid = True
        for i, (proc, info) in enumerate(zip(read_procs, input_infos)):
            frame_size = info["width"] * info["height"] * 3
            frame_data = proc.stdout.read(frame_size)
            if len(frame_data) < frame_size:
                all_valid = False
                break
            frame = np.frombuffer(frame_data, dtype=np.uint8).reshape((info["height"], info["width"], 3))
            frames.append(frame)

        # Any exhausted input ends the whole run.
        if not all_valid:
            break

        # Resolve params for this frame
        if has_bindings:
            frame_time = time_offset + frame_count / fps
            frame_params = resolve_params(params, frame_time, analysis_data)
        else:
            frame_params = params

        # Pass list of frames to effect
        processed, state = effect_module.process_frame(frames, frame_params, state)
        write_proc.stdin.write(processed.tobytes())
        frame_count += 1

        if frame_count % 30 == 0:
            print(f" Processed {frame_count} frames...", end="\r", file=sys.stderr)

    # Cleanup
    for proc in read_procs:
        proc.stdout.close()
        proc.wait()
    write_proc.stdin.close()
    write_proc.wait()

    print(f" Processed {frame_count} frames total", file=sys.stderr)
Process remaining frames + while True: + # Stop if we've reached the frame limit + if max_frames and frame_count >= max_frames: + break + + # Read frame from each input (each may have different dimensions) + frames = [] + all_valid = True + for i, (proc, info) in enumerate(zip(read_procs, input_infos)): + frame_size = info["width"] * info["height"] * 3 + frame_data = proc.stdout.read(frame_size) + if len(frame_data) < frame_size: + all_valid = False + break + frame = np.frombuffer(frame_data, dtype=np.uint8).reshape((info["height"], info["width"], 3)) + frames.append(frame) + + if not all_valid: + break + + # Resolve params for this frame + if has_bindings: + frame_time = time_offset + frame_count / fps + frame_params = resolve_params(params, frame_time, analysis_data) + else: + frame_params = params + + # Pass list of frames to effect + processed, state = effect_module.process_frame(frames, frame_params, state) + write_proc.stdin.write(processed.tobytes()) + frame_count += 1 + + if frame_count % 30 == 0: + print(f" Processed {frame_count} frames...", end="\r", file=sys.stderr) + + # Cleanup + for proc in read_procs: + proc.stdout.close() + proc.wait() + write_proc.stdin.close() + write_proc.wait() + + print(f" Processed {frame_count} frames total", file=sys.stderr) + + +def get_video_dimensions(file_path: Path) -> tuple: + """Get video dimensions using ffprobe.""" + cmd = [ + "ffprobe", "-v", "quiet", "-print_format", "json", + "-show_streams", str(file_path) + ] + result = subprocess.run(cmd, capture_output=True, text=True) + data = json.loads(result.stdout) + + for stream in data.get("streams", []): + if stream.get("codec_type") == "video": + return int(stream["width"]), int(stream["height"]) + + return None, None + + +def normalize_video( + input_path: Path, + output_path: Path, + target_width: int, + target_height: int, + resize_mode: str, + priority: str = None, + pad_color: str = "black", + crop_gravity: str = "center", + encoding: dict = None, +) -> Path: + 
""" + Normalize video to target dimensions. + + resize_mode: + - stretch: force to exact size (distorts) + - crop: scale to fill, crop overflow + - fit: scale to fit, pad remainder + - cover: scale to cover, crop minimally + + priority: width | height (which dimension to match exactly for fit/crop) + """ + enc = encoding or {} + src_width, src_height = get_video_dimensions(input_path) + + if src_width is None: + # Can't determine dimensions, just copy + shutil.copy(input_path, output_path) + return output_path + + # Already correct size? + if src_width == target_width and src_height == target_height: + shutil.copy(input_path, output_path) + return output_path + + src_aspect = src_width / src_height + target_aspect = target_width / target_height + + if resize_mode == "stretch": + # Force exact size + vf = f"scale={target_width}:{target_height}" + + elif resize_mode == "fit": + # Scale to fit within bounds, pad remainder + if priority == "width": + # Match width exactly, pad height + vf = f"scale={target_width}:-1,pad={target_width}:{target_height}:(ow-iw)/2:(oh-ih)/2:{pad_color}" + elif priority == "height": + # Match height exactly, pad width + vf = f"scale=-1:{target_height},pad={target_width}:{target_height}:(ow-iw)/2:(oh-ih)/2:{pad_color}" + else: + # Auto: fit within bounds (may pad both) + if src_aspect > target_aspect: + # Source is wider, fit to width + vf = f"scale={target_width}:-1,pad={target_width}:{target_height}:(ow-iw)/2:(oh-ih)/2:{pad_color}" + else: + # Source is taller, fit to height + vf = f"scale=-1:{target_height},pad={target_width}:{target_height}:(ow-iw)/2:(oh-ih)/2:{pad_color}" + + elif resize_mode == "crop": + # Scale to fill, crop overflow + if priority == "width": + # Match width, crop height + vf = f"scale={target_width}:-1,crop={target_width}:{target_height}" + elif priority == "height": + # Match height, crop width + vf = f"scale=-1:{target_height},crop={target_width}:{target_height}" + else: + # Auto: fill bounds, crop minimally + if 
def tree_concat(files: list, work_dir: Path, prefix: str = "concat") -> Path:
    """Concatenate video files pairwise (binary tree) with ffmpeg's concat demuxer.

    Each level pairs up adjacent files and concatenates them with stream copy
    (no re-encode); an odd file out is promoted unchanged to the next level.

    Args:
        files: Ordered list of input paths; must be non-empty.
        work_dir: Directory for intermediate concat lists and outputs.
        prefix: Filename prefix for intermediate artifacts.

    Returns:
        Path of the fully concatenated file (the input itself when only one).

    Raises:
        ValueError: If *files* is empty.
        RuntimeError: If an ffmpeg concat step fails.
    """
    if not files:
        # Previously this fell through to an IndexError on current_files[0].
        raise ValueError("tree_concat requires at least one input file")
    if len(files) == 1:
        return files[0]

    current_files = list(files)
    print(f"  Tree concat: {len(current_files)} files", file=sys.stderr)
    for idx, path in enumerate(current_files):
        print(f"    [{idx}] {path}", file=sys.stderr)

    level = 0
    while len(current_files) > 1:
        next_files = []
        pairs = (len(current_files) + 1) // 2
        print(f"  Level {level}: {len(current_files)} -> {pairs} pairs", file=sys.stderr)

        for i in range(0, len(current_files), 2):
            if i + 1 < len(current_files):
                concat_file = work_dir / f"{prefix}_L{level}_{i}.txt"
                output_file = work_dir / f"{prefix}_L{level}_{i}.mp4"

                # concat demuxer list file; -safe 0 allows absolute paths.
                with open(concat_file, "w") as listing:
                    listing.write(f"file '{current_files[i]}'\n")
                    listing.write(f"file '{current_files[i+1]}'\n")

                cmd = ["ffmpeg", "-y",
                       "-f", "concat", "-safe", "0",
                       "-i", str(concat_file), "-c", "copy", str(output_file)]
                result = subprocess.run(cmd, capture_output=True, text=True)
                # The original ignored the return code, silently propagating a
                # missing or empty intermediate file; fail fast instead.
                if result.returncode != 0:
                    raise RuntimeError(f"ffmpeg concat failed: {result.stderr}")
                next_files.append(output_file)
            else:
                # Odd file out: carry to the next level untouched.
                next_files.append(current_files[i])

        current_files = next_files
        level += 1

    return current_files[0]
if node_type == "SOURCE": + type_priority = 0 + elif node_type == "SEGMENT": + type_priority = 1 + elif node_type == "ANALYZE": + type_priority = 2 + else: + type_priority = 3 + return (type_priority, level) + ordered_steps = sorted(steps, key=step_sort_key) + + try: + for step in ordered_steps: + step_id = step["step_id"] + node_type = step["node_type"] + config = step["config"] + inputs = step.get("inputs", []) + + print(f"\n[{step.get('level', 0)}] {node_type}: {step_id[:16]}...", file=sys.stderr) + + if node_type == "SOURCE": + if "path" in config: + src_path = (recipe_dir / config["path"]).resolve() + if not src_path.exists(): + raise FileNotFoundError(f"Source not found: {src_path}") + results[step_id] = src_path + print(f" -> {src_path}", file=sys.stderr) + + elif node_type == "SEGMENT": + input_path = results[inputs[0]] + start = config.get("start", 0) + duration = config.get("duration") + end = config.get("end") + + is_audio = str(input_path).lower().endswith( + ('.mp3', '.wav', '.flac', '.aac', '.ogg', '.m4a')) + enc = get_encoding(recipe_encoding, config) + + if is_audio: + output_file = work_dir / f"segment_{step_id}.m4a" + cmd = ["ffmpeg", "-y", "-i", str(input_path)] + if start: + cmd.extend(["-ss", str(start)]) + if duration: + cmd.extend(["-t", str(duration)]) + cmd.extend(["-c:a", enc["audio_codec"], str(output_file)]) + else: + output_file = work_dir / f"segment_{step_id}.mp4" + cmd = ["ffmpeg", "-y", "-i", str(input_path)] + if start: + cmd.extend(["-ss", str(start)]) + if duration: + cmd.extend(["-t", str(duration)]) + elif end: + cmd.extend(["-t", str(end - start)]) + cmd.extend(["-r", str(enc["fps"]), # Normalize frame rate + "-c:v", enc["codec"], "-preset", enc["preset"], + "-crf", str(enc["crf"]), "-c:a", enc["audio_codec"], + str(output_file)]) + + result = subprocess.run(cmd, capture_output=True, text=True) + + # Check if segment has video content, if not try with looping + needs_loop = False + if not is_audio and result.returncode == 0: + 
probe_cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", + "-show_streams", str(output_file)] + probe_result = subprocess.run(probe_cmd, capture_output=True, text=True) + probe_data = json.loads(probe_result.stdout) + has_video = any(s.get("codec_type") == "video" for s in probe_data.get("streams", [])) + if not has_video: + needs_loop = True + + if needs_loop or result.returncode != 0: + # Get source duration and loop the input + probe_cmd = ["ffprobe", "-v", "quiet", "-print_format", "json", + "-show_format", str(input_path)] + probe_result = subprocess.run(probe_cmd, capture_output=True, text=True) + probe_data = json.loads(probe_result.stdout) + src_duration = float(probe_data.get("format", {}).get("duration", 0)) + + if src_duration > 0: + # Wrap start time to source duration + wrapped_start = start % src_duration if start else 0 + seg_duration = duration if duration else (end - start if end else None) + + print(f" Wrapping segment: {start:.2f}s -> {wrapped_start:.2f}s (source={src_duration:.2f}s)", file=sys.stderr) + + # Use stream_loop for seamless looping if segment spans wrap point + if wrapped_start + (seg_duration or 0) > src_duration: + # Need to loop - use concat filter + cmd = ["ffmpeg", "-y", "-stream_loop", "-1", "-i", str(input_path)] + cmd.extend(["-ss", str(wrapped_start)]) + if seg_duration: + cmd.extend(["-t", str(seg_duration)]) + cmd.extend(["-r", str(enc["fps"]), + "-c:v", enc["codec"], "-preset", enc["preset"], + "-crf", str(enc["crf"]), "-c:a", enc["audio_codec"], + str(output_file)]) + else: + cmd = ["ffmpeg", "-y", "-i", str(input_path)] + cmd.extend(["-ss", str(wrapped_start)]) + if seg_duration: + cmd.extend(["-t", str(seg_duration)]) + cmd.extend(["-r", str(enc["fps"]), + "-c:v", enc["codec"], "-preset", enc["preset"], + "-crf", str(enc["crf"]), "-c:a", enc["audio_codec"], + str(output_file)]) + + subprocess.run(cmd, check=True, capture_output=True) + else: + raise ValueError(f"Cannot determine source duration for looping") + + 
results[step_id] = output_file + print(f" -> {output_file}", file=sys.stderr) + + elif node_type == "EFFECT": + effect_name = config.get("effect", "unknown") + effect_path = config.get("effect_path") + is_multi_input = config.get("multi_input", False) + + output_file = work_dir / f"effect_{step_id}.mp4" + enc = get_encoding(recipe_encoding, config) + + if effect_path: + full_path = recipe_dir / effect_path + effect_module = load_effect(full_path) + params = {k: v for k, v in config.items() + if k not in ("effect", "effect_path", "cid", "encoding", "multi_input")} + print(f" Effect: {effect_name}", file=sys.stderr) + + # Get timing offset and duration for bindings + effect_time_offset = config.get("start", config.get("segment_start", 0)) + effect_duration = config.get("duration") + + if is_multi_input and len(inputs) > 1: + # Multi-input effect (blend, layer, etc.) + input_paths = [results[inp] for inp in inputs] + run_multi_effect(effect_module, input_paths, output_file, params, enc, analysis_data, time_offset=effect_time_offset, max_duration=effect_duration) + else: + # Single-input effect + input_path = results[inputs[0]] + run_effect(effect_module, input_path, output_file, params, enc, analysis_data, time_offset=effect_time_offset, max_duration=effect_duration) + else: + input_path = results[inputs[0]] + shutil.copy(input_path, output_file) + + results[step_id] = output_file + print(f" -> {output_file}", file=sys.stderr) + + elif node_type == "SEQUENCE": + if len(inputs) < 2: + results[step_id] = results[inputs[0]] + continue + + input_files = [results[inp] for inp in inputs] + enc = get_encoding(recipe_encoding, config) + + # Check for normalization config + resize_mode = config.get("resize_mode") + if resize_mode: + # Determine target dimensions + target_width = config.get("target_width") or enc.get("width") + target_height = config.get("target_height") or enc.get("height") + + # If no explicit target, use first input's dimensions + if not target_width or not 
target_height: + first_w, first_h = get_video_dimensions(input_files[0]) + target_width = target_width or first_w + target_height = target_height or first_h + + if target_width and target_height: + print(f" Normalizing {len(input_files)} inputs to {target_width}x{target_height} ({resize_mode})", file=sys.stderr) + normalized_files = [] + for i, inp_file in enumerate(input_files): + norm_file = work_dir / f"norm_{step_id[:8]}_{i:04d}.mp4" + normalize_video( + inp_file, norm_file, + target_width, target_height, + resize_mode, + priority=config.get("priority"), + pad_color=config.get("pad_color", "black"), + crop_gravity=config.get("crop_gravity", "center"), + encoding=enc, + ) + normalized_files.append(norm_file) + input_files = normalized_files + + # Use tree concat for efficiency + output_file = tree_concat(input_files, work_dir, f"seq_{step_id[:8]}") + results[step_id] = output_file + print(f" -> {output_file}", file=sys.stderr) + + elif node_type == "MUX": + video_path = results[inputs[0]] + audio_path = results[inputs[1]] + enc = get_encoding(recipe_encoding, config) + + output_file = work_dir / f"mux_{step_id}.mp4" + cmd = ["ffmpeg", "-y", + "-i", str(video_path), "-i", str(audio_path), + "-map", "0:v", "-map", "1:a", + "-c:v", enc["codec"], "-preset", enc["preset"], + "-crf", str(enc["crf"]), "-c:a", enc["audio_codec"], + "-shortest", str(output_file)] + + subprocess.run(cmd, check=True, capture_output=True) + results[step_id] = output_file + print(f" -> {output_file}", file=sys.stderr) + + elif node_type == "ANALYZE": + output_file = work_dir / f"analysis_{step_id}.json" + + if "analysis_results" in config: + # Analysis was done during planning + with open(output_file, "w") as f: + json.dump(config["analysis_results"], f) + analysis_data[step_id] = config["analysis_results"] + print(f" -> {output_file} (from plan)", file=sys.stderr) + else: + # Run analyzer now + analyzer_path = config.get("analyzer_path") + if analyzer_path: + analyzer_path = (recipe_dir / 
analyzer_path).resolve() + input_path = results[inputs[0]] + + # Load and run analyzer + import importlib.util + spec = importlib.util.spec_from_file_location("analyzer", analyzer_path) + analyzer_module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(analyzer_module) + + # Run analysis + analyzer_params = {k: v for k, v in config.items() + if k not in ("analyzer", "analyzer_path", "cid")} + analysis_result = analyzer_module.analyze(input_path, analyzer_params) + + # Save and store results + with open(output_file, "w") as f: + json.dump(analysis_result, f) + analysis_data[step_id] = analysis_result + print(f" -> {output_file} (ran analyzer: {len(analysis_result.get('times', []))} pts)", file=sys.stderr) + else: + print(f" -> no analyzer path!", file=sys.stderr) + + results[step_id] = output_file + + elif node_type == "COMPOUND": + # Collapsed effect chains - compile to single FFmpeg command with sendcmd + filter_chain_raw = config.get("filter_chain", []) + if not filter_chain_raw: + raise ValueError("COMPOUND step has empty filter_chain") + + # Convert filter_chain items from S-expression lists to dicts + # and clean nil Symbols from configs + filter_chain = [] + for item in filter_chain_raw: + if isinstance(item, dict): + # Clean nil Symbols from the config + cleaned_item = clean_nil_symbols(item) + filter_chain.append(cleaned_item) + elif isinstance(item, list) and item: + item_dict = sexp_to_dict(item) + ftype = item_dict.get("type", "UNKNOWN") + if isinstance(ftype, Symbol): + ftype = ftype.name + fconfig_raw = item_dict.get("config", {}) + if isinstance(fconfig_raw, list): + fconfig = sexp_to_dict(fconfig_raw) + elif isinstance(fconfig_raw, dict): + fconfig = fconfig_raw + else: + fconfig = {} + # Clean nil Symbols from config + fconfig = clean_nil_symbols(fconfig) + filter_chain.append({"type": ftype, "config": fconfig}) + else: + filter_chain.append({"type": "UNKNOWN", "config": {}}) + + input_path = results[inputs[0]] + # Debug: verify 
input exists and has content + if not input_path.exists(): + raise ValueError(f"COMPOUND input does not exist: {input_path}") + if input_path.stat().st_size == 0: + raise ValueError(f"COMPOUND input is empty: {input_path}") + print(f" COMPOUND input: {input_path} ({input_path.stat().st_size} bytes)", file=sys.stderr) + enc = get_encoding(recipe_encoding, config) + output_file = work_dir / f"compound_{step_id}.mp4" + + # Extract segment timing and effects + segment_start = 0 + segment_duration = None + effects = [] + + for filter_item in filter_chain: + filter_type = filter_item.get("type", "") + filter_config = filter_item.get("config", {}) + + if filter_type == "SEGMENT": + segment_start = filter_config.get("start", 0) + segment_duration = filter_config.get("duration") + if not segment_duration and filter_config.get("end"): + segment_duration = filter_config["end"] - segment_start + elif filter_type == "EFFECT": + effects.append(filter_config) + + # Try to compile effects to FFmpeg filters + from artdag.sexp.ffmpeg_compiler import FFmpegCompiler, generate_sendcmd_filter + compiler = FFmpegCompiler() + + # Check if any effect has bindings - these need Python path for per-frame resolution + def has_bindings(effect_config): + for k, v in effect_config.items(): + if isinstance(v, dict) and ("_bind" in v or "_binding" in v): + return True + return False + + any_has_bindings = any(has_bindings(e) for e in effects) + + # Check if all effects have FFmpeg mappings + all_have_mappings = all( + compiler.get_mapping(e.get("effect", "")) is not None + for e in effects + ) + + # Use FFmpeg only for static effects (no bindings) + # Effects with bindings use Python path for proper per-frame binding resolution + if all_have_mappings and effects and not any_has_bindings: + # Compile to FFmpeg with sendcmd for dynamic params + ffmpeg_filters, sendcmd_path = generate_sendcmd_filter( + effects, + analysis_data, + segment_start, + segment_duration or 1.0, + ) + + # Build FFmpeg command 
+ cmd = ["ffmpeg", "-y", "-i", str(input_path)] + if segment_start: + cmd.extend(["-ss", str(segment_start)]) + if segment_duration: + cmd.extend(["-t", str(segment_duration)]) + + if ffmpeg_filters: + cmd.extend(["-vf", ffmpeg_filters]) + + cmd.extend(["-r", str(enc.get("fps", 30)), + "-c:v", enc["codec"], "-preset", enc["preset"], + "-crf", str(enc["crf"]), "-pix_fmt", "yuv420p", + "-c:a", enc["audio_codec"], + str(output_file)]) + + effect_names = [e.get("effect", "?") for e in effects] + print(f" COMPOUND (FFmpeg): {', '.join(effect_names)}", file=sys.stderr) + print(f" filters: {ffmpeg_filters[:80]}{'...' if len(ffmpeg_filters) > 80 else ''}", file=sys.stderr) + + result = subprocess.run(cmd, capture_output=True, text=True) + if result.returncode != 0: + print(f" FFmpeg error: {result.stderr[:200]}", file=sys.stderr) + raise RuntimeError(f"FFmpeg failed: {result.stderr}") + + # Clean up sendcmd file + if sendcmd_path and sendcmd_path.exists(): + sendcmd_path.unlink() + else: + # Fall back to sequential processing for effects without FFmpeg mappings + current_input = input_path + + # First handle segment + for filter_item in filter_chain: + if filter_item.get("type") == "SEGMENT": + filter_config = filter_item.get("config", {}) + start = filter_config.get("start", 0) + duration = filter_config.get("duration") + + if start or duration: + seg_output = work_dir / f"compound_{step_id}_seg.mp4" + cmd = ["ffmpeg", "-y", "-i", str(current_input)] + if start: + cmd.extend(["-ss", str(start)]) + if duration: + cmd.extend(["-t", str(duration)]) + cmd.extend(["-r", str(enc.get("fps", 30)), + "-c:v", enc["codec"], "-preset", enc["preset"], + "-crf", str(enc["crf"]), "-pix_fmt", "yuv420p", + "-c:a", enc["audio_codec"], + str(seg_output)]) + print(f" Extracting segment: start={start}, duration={duration}", file=sys.stderr) + result = subprocess.run(cmd, capture_output=True, text=True) + if result.returncode != 0: + print(f" FFmpeg segment error: {result.stderr}", 
file=sys.stderr) + raise ValueError(f"FFmpeg segment extraction failed: {result.stderr}") + if not seg_output.exists() or seg_output.stat().st_size == 0: + raise ValueError(f"Segment output invalid: {seg_output}") + print(f" Segment output: {seg_output.stat().st_size} bytes", file=sys.stderr) + current_input = seg_output + break + + # Then handle effects sequentially + for i, effect_config in enumerate(effects): + effect_name = effect_config.get("effect", "unknown") + effect_path = effect_config.get("effect_path") + + # Try to resolve effect path + if not effect_path: + for effects_dir in ["effects", "sexp_effects/effects"]: + for ext in [".py", ".sexp"]: + candidate = recipe_dir / effects_dir / f"{effect_name}{ext}" + if candidate.exists(): + effect_path = str(candidate.relative_to(recipe_dir)) + break + if effect_path: + break + + is_last = (i == len(effects) - 1) + effect_output = output_file if is_last else work_dir / f"compound_{step_id}_fx_{i:02d}.mp4" + + if effect_path: + full_path = recipe_dir / effect_path + effect_module = load_effect(full_path) + params = {k: v for k, v in effect_config.items() + if k not in ("effect", "effect_path", "cid", "encoding", "type")} + print(f" COMPOUND [{i+1}/{len(effects)}]: {effect_name} (Python)", file=sys.stderr) + # Debug: check input file + if not current_input.exists(): + raise ValueError(f"Input file does not exist: {current_input}") + input_size = current_input.stat().st_size + print(f" Input: {current_input.name} ({input_size} bytes)", file=sys.stderr) + if input_size == 0: + raise ValueError(f"Input file is empty: {current_input}") + run_effect(effect_module, current_input, effect_output, params, enc, analysis_data, time_offset=segment_start, max_duration=segment_duration) + else: + raise ValueError(f"COMPOUND EFFECT '{effect_name}' has no effect_path or FFmpeg mapping") + + current_input = effect_output + + results[step_id] = output_file + print(f" -> {output_file}", file=sys.stderr) + + else: + raise 
ValueError(f"Unknown node type: {node_type}") + + # Get final output + final_output = results[plan["output_step_id"]] + print(f"\n--- Output ---", file=sys.stderr) + print(f"Final: {final_output}", file=sys.stderr) + + if output_path: + # Handle stdout specially - remux to streamable format + if str(output_path) in ("/dev/stdout", "-"): + # MP4 isn't streamable, use matroska which is + cmd = [ + "ffmpeg", "-y", "-i", str(final_output), + "-c", "copy", "-f", "matroska", "pipe:1" + ] + subprocess.run(cmd, stdout=sys.stdout.buffer, stderr=subprocess.DEVNULL) + return output_path + else: + shutil.copy(final_output, output_path) + print(f"Copied to: {output_path}", file=sys.stderr) + # Print path to stdout for piping + print(output_path) + return output_path + else: + out = recipe_dir / f"{plan['recipe_id']}-output.mp4" + shutil.copy(final_output, out) + print(f"Copied to: {out}", file=sys.stderr) + # Print path to stdout for piping + print(out) + return out + + finally: + print(f"Debug: temp files in {work_dir}", file=sys.stderr) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="Execute a plan") + parser.add_argument("plan", nargs="?", default="-", help="Plan file (- for stdin)") + parser.add_argument("-o", "--output", type=Path, help="Output file") + parser.add_argument("-d", "--dir", type=Path, default=Path("."), help="Recipe directory for resolving paths") + parser.add_argument("-a", "--analysis", type=Path, help="Analysis file (.sexp)") + + args = parser.parse_args() + + plan_path = None if args.plan == "-" else Path(args.plan) + if plan_path and not plan_path.exists(): + print(f"Plan not found: {plan_path}") + sys.exit(1) + + # Load external analysis if provided + external_analysis = None + if args.analysis: + if not args.analysis.exists(): + print(f"Analysis file not found: {args.analysis}") + sys.exit(1) + external_analysis = parse_analysis_sexp(args.analysis.read_text()) + + execute_plan(plan_path, args.output, 
args.dir, external_analysis=external_analysis) diff --git a/libs/all-effects.sexp b/libs/all-effects.sexp new file mode 100644 index 0000000..54364f2 --- /dev/null +++ b/libs/all-effects.sexp @@ -0,0 +1,63 @@ +;; All 42 Sexp Effects +;; Include with: (include :path "libs/all-effects.sexp") +;; Or from cache: (include :cid "bafy...") + +;; Color effects +(effect invert :path "sexp_effects/effects/invert.sexp") +(effect grayscale :path "sexp_effects/effects/grayscale.sexp") +(effect sepia :path "sexp_effects/effects/sepia.sexp") +(effect brightness :path "sexp_effects/effects/brightness.sexp") +(effect contrast :path "sexp_effects/effects/contrast.sexp") +(effect saturation :path "sexp_effects/effects/saturation.sexp") +(effect hue_shift :path "sexp_effects/effects/hue_shift.sexp") +(effect color_cycle :path "sexp_effects/effects/color_cycle.sexp") +(effect threshold :path "sexp_effects/effects/threshold.sexp") +(effect posterize :path "sexp_effects/effects/posterize.sexp") + +;; Blur/sharpen +(effect blur :path "sexp_effects/effects/blur.sexp") +(effect sharpen :path "sexp_effects/effects/sharpen.sexp") +(effect bloom :path "sexp_effects/effects/bloom.sexp") +(effect color-adjust :path "sexp_effects/effects/color-adjust.sexp") + +;; Distortion +(effect swirl :path "sexp_effects/effects/swirl.sexp") +(effect fisheye :path "sexp_effects/effects/fisheye.sexp") +(effect wave :path "sexp_effects/effects/wave.sexp") +(effect ripple :path "sexp_effects/effects/ripple.sexp") +(effect kaleidoscope :path "sexp_effects/effects/kaleidoscope.sexp") +(effect zoom :path "sexp_effects/effects/zoom.sexp") +(effect rotate :path "sexp_effects/effects/rotate.sexp") +(effect mirror :path "sexp_effects/effects/mirror.sexp") + +;; Stylization +(effect pixelate :path "sexp_effects/effects/pixelate.sexp") +(effect ascii_art :path "sexp_effects/effects/ascii_art.sexp") +(effect ascii_zones :path "sexp_effects/effects/ascii_zones.sexp") +(effect edge_detect :path 
"sexp_effects/effects/edge_detect.sexp") +(effect emboss :path "sexp_effects/effects/emboss.sexp") +(effect outline :path "sexp_effects/effects/outline.sexp") +(effect neon_glow :path "sexp_effects/effects/neon_glow.sexp") + +;; Retro/film +(effect crt :path "sexp_effects/effects/crt.sexp") +(effect scanlines :path "sexp_effects/effects/scanlines.sexp") +(effect film_grain :path "sexp_effects/effects/film_grain.sexp") +(effect vignette :path "sexp_effects/effects/vignette.sexp") +(effect noise :path "sexp_effects/effects/noise.sexp") + +;; Chromatic +(effect rgb_split :path "sexp_effects/effects/rgb_split.sexp") + +;; Temporal +(effect echo :path "sexp_effects/effects/echo.sexp") +(effect trails :path "sexp_effects/effects/trails.sexp") +(effect strobe :path "sexp_effects/effects/strobe.sexp") + +;; Geometric +(effect flip :path "sexp_effects/effects/flip.sexp") +(effect tile_grid :path "sexp_effects/effects/tile_grid.sexp") + +;; Glitch +(effect pixelsort :path "sexp_effects/effects/pixelsort.sexp") +(effect datamosh :path "sexp_effects/effects/datamosh.sexp") diff --git a/libs/plan b/libs/plan new file mode 100644 index 0000000..07535ef --- /dev/null +++ b/libs/plan @@ -0,0 +1,68 @@ +Exactly. You're describing a DAG of pipelines that can branch and merge: + + audio-a ─→ analyze ─→ plan-a ─┐ + ├─→ combine ─→ final + audio-b ─→ analyze ─→ plan-b ─┘ + + videos ─→ analyze ─────────────┴─→ (shared by both plans) + + Each node is independently cacheable. Parallel branches run in tandem. + + A clean syntax might be: + + (recipe "multi-track-video" + :encoding (...) 
+ + ;; Sources (stage 0 - always available) + (def audio-a (source "track1.mp3")) + (def audio-b (source "track2.mp3")) + (def videos (source-glob "videos/*.mp4")) + + ;; Analysis stages (run in parallel, cached by input hash) + (stage :analyze-a + (def beats-a (-> audio-a (analyze beats)))) + + (stage :analyze-b + (def beats-b (-> audio-b (analyze beats)))) + + (stage :analyze-videos + (def video-infos (-> videos (analyze-each video-info)))) + + ;; Planning stages (depend on analysis, explicit deps) + (stage :plan-a :requires [:analyze-a :analyze-videos] + (def segments-a (make-segments :beats beats-a :video-infos video-infos))) + + (stage :plan-b :requires [:analyze-b :analyze-videos] + (def segments-b (make-segments :beats beats-b :video-infos video-infos))) + + ;; Render stages (can parallelize) + (stage :render-a :requires [:plan-a] + (def rendered-a (-> segments-a (sequence)))) + + (stage :render-b :requires [:plan-b] + (def rendered-b (-> segments-b (sequence)))) + + ;; Final combine + (stage :output :requires [:render-a :render-b] + (-> (list rendered-a rendered-b) + (concat) + (crossfade :duration 2) + (mux audio-a audio-b)))) + + What this gives you: + + 1. Explicit data availability - :requires declares what's available + 2. Parallel execution - :analyze-a and :analyze-b run simultaneously + 3. Granular caching - each stage output cached by its inputs' hashes + 4. Flexible composition - add more tracks, branches, merge points as needed + 5. Clear errors - referencing beats-a before :analyze-a is a compile error + + Changes needed to sexp system: + + 1. stage form with :requires dependency declaration + 2. Stage scheduler that builds execution DAG + 3. Cache layer keyed by stage + input hashes + 4. Dict iteration (keys, for-each) for generic constructs + + Want to prototype this direction? 
+ \ No newline at end of file diff --git a/libs/standard-analyzers.sexp b/libs/standard-analyzers.sexp new file mode 100644 index 0000000..1544a86 --- /dev/null +++ b/libs/standard-analyzers.sexp @@ -0,0 +1,11 @@ +;; Standard Analyzers (Audio + Video) +;; Include with: (include :path "libs/standard-analyzers.sexp") +;; Or from cache: (include :cid "bafy...") + +;; Audio analyzers +(analyzer beats :path "../artdag-analyzers/beats/analyzer.py") +(analyzer bass :path "../artdag-analyzers/bass/analyzer.py") +(analyzer energy :path "../artdag-analyzers/energy/analyzer.py") + +;; Video analyzers +(analyzer video-info :path "../artdag-analyzers/video-info/analyzer.py") diff --git a/libs/standard-constructs.sexp b/libs/standard-constructs.sexp new file mode 100644 index 0000000..ed492c7 --- /dev/null +++ b/libs/standard-constructs.sexp @@ -0,0 +1,6 @@ +;; Standard Constructs +;; Include with: (include :path "libs/standard-constructs.sexp") +;; Or from cache: (include :cid "bafy...") + +(construct slice-every-n :path "constructs/slice-every-n.sexp") +(construct cycle-effects-preset :path "constructs/cycle-effects-preset.sexp") diff --git a/plan.py b/plan.py new file mode 100644 index 0000000..6bdead3 --- /dev/null +++ b/plan.py @@ -0,0 +1,415 @@ +#!/usr/bin/env python3 +""" +Plan generator for S-expression recipes. + +Expands dynamic nodes (SLICE_ON) into primitives using analysis data. +Outputs a plan that can be executed by execute.py. 
def parse_analysis_sexp(content: str) -> dict:
    """Parse an ``(analysis ...)`` S-expression into ``{track-name: data-dict}``.

    Each child form ``(name :key value ...)`` becomes an entry whose keyword
    keys are normalized from kebab-case to snake_case. Malformed entries and
    trailing keywords without a value are silently skipped.

    Raises:
        ValueError: If the input is not a non-empty ``(analysis ...)`` form.
    """
    tree = parse(content)
    # A single top-level form may arrive wrapped in a one-element list.
    if isinstance(tree, list) and len(tree) == 1:
        tree = tree[0]

    if not isinstance(tree, list) or not tree:
        raise ValueError("Invalid analysis S-expression")

    head = tree[0]
    if not isinstance(head, Symbol) or head.name != "analysis":
        raise ValueError("Expected (analysis ...) S-expression")

    tracks = {}
    for entry in tree[1:]:
        if not (isinstance(entry, list) and entry):
            continue

        # Track names may be bare symbols or quoted strings (node IDs).
        label = entry[0]
        if isinstance(label, Symbol):
            track_name = label.name
        elif isinstance(label, str):
            track_name = label
        else:
            continue  # malformed entry

        fields = {}
        pos = 1
        while pos < len(entry):
            token = entry[pos]
            pos += 1
            # A keyword consumes the following value, if one exists.
            if isinstance(token, Keyword) and pos < len(entry):
                fields[token.name.replace("-", "_")] = entry[pos]
                pos += 1
        tracks[track_name] = fields

    return tracks
:transform {value.transform}' + return s + ')' + + # Handle binding dicts from compiler (convert to bind sexp format) + if isinstance(value, dict) and value.get("_binding"): + source = value.get("source", "") + range_val = value.get("range", [0.0, 1.0]) + range_min = range_val[0] if isinstance(range_val, list) else 0.0 + range_max = range_val[1] if isinstance(range_val, list) and len(range_val) > 1 else 1.0 + transform = value.get("transform") + offset = value.get("offset") + s = f'(bind "{source}" :range [{range_min} {range_max}]' + if offset: + s += f' :offset {offset}' + if transform: + s += f' :transform {transform}' + return s + ')' + + # Handle Symbol - serialize as bare identifier + if isinstance(value, Symbol): + return value.name + + # Handle Keyword - serialize with colon prefix + if isinstance(value, Keyword): + return f':{value.name}' + + # Handle Lambda + if isinstance(value, Lambda): + params = " ".join(value.params) + body = to_sexp(value.body, 0) + return f'(fn [{params}] {body})' + + prefix = " " * indent + if isinstance(value, dict): + if not value: + return "()" + items = [] + for k, v in value.items(): + if isinstance(k, str): + # Keys starting with _ are internal markers - keep underscore to avoid :-foo + if k.startswith('_'): + key_str = k # Keep as-is: _binding -> :_binding + else: + key_str = k.replace('_', '-') + else: + key_str = str(k) + items.append(f":{key_str} {to_sexp(v, 0)}") + return "(" + " ".join(items) + ")" + elif isinstance(value, list): + if not value: + return "()" + items = [to_sexp(v, 0) for v in value] + return "(" + " ".join(items) + ")" + elif isinstance(value, str): + # Escape special characters in strings + escaped = value.replace('\\', '\\\\').replace('"', '\\"').replace('\n', '\\n') + return f'"{escaped}"' + elif isinstance(value, bool): + return "true" if value else "false" + elif isinstance(value, (int, float)): + return str(value) + elif value is None: + return "nil" + else: + # For any unknown type, convert to 
string and quote it + return f'"{str(value)}"' + + +def plan_recipe(recipe_path: Path, output_format: str = "text", output_file: Path = None, analysis_path: Path = None, params: dict = None): + """Compile recipe, expand dynamic nodes using analysis, output plan. + + Args: + recipe_path: Path to recipe file + output_format: Output format (text, json, sexp) + output_file: Optional output file path + analysis_path: Optional pre-computed analysis file + params: Optional dict of name -> value bindings to inject into compilation + """ + + recipe_text = recipe_path.read_text() + recipe_dir = recipe_path.parent + + print(f"Compiling: {recipe_path}", file=sys.stderr) + if params: + print(f"Parameters: {params}", file=sys.stderr) + compiled = compile_string(recipe_text, params) + print(f"Recipe: {compiled.name} v{compiled.version}", file=sys.stderr) + print(f"Nodes: {len(compiled.nodes)}", file=sys.stderr) + + # Load pre-computed analysis if provided (file or stdin with -) + pre_analysis = None + if analysis_path: + if str(analysis_path) == "-": + print(f"Loading analysis: stdin", file=sys.stderr) + analysis_text = sys.stdin.read() + else: + print(f"Loading analysis: {analysis_path}", file=sys.stderr) + analysis_text = analysis_path.read_text() + pre_analysis = parse_analysis_sexp(analysis_text) + print(f" Tracks: {list(pre_analysis.keys())}", file=sys.stderr) + + # Track analysis results for embedding in plan + analysis_data = {} + + def on_analysis(node_id, results): + analysis_data[node_id] = results + times = results.get("times", []) + print(f" Analysis complete: {len(times)} beat times", file=sys.stderr) + + # Create plan (uses pre_analysis or runs analyzers, expands SLICE_ON) + print("\n--- Planning ---", file=sys.stderr) + plan = create_plan( + compiled, + inputs={}, + recipe_dir=recipe_dir, + on_analysis=on_analysis, + pre_analysis=pre_analysis, + ) + + print(f"\nPlan ID: {plan.plan_id[:16]}...", file=sys.stderr) + print(f"Steps: {len(plan.steps)}", file=sys.stderr) 
+ + # Generate output + if output_format == "sexp": + output = generate_sexp_output(compiled, plan, analysis_data) + elif output_format == "json": + output = generate_json_output(compiled, plan, analysis_data) + else: + output = generate_text_output(compiled, plan, analysis_data) + + # Write output + if output_file: + output_file.write_text(output) + print(f"\nPlan written to: {output_file}", file=sys.stderr) + else: + print(output) + + +class PlanJSONEncoder(json.JSONEncoder): + """Custom encoder for plan objects.""" + def default(self, obj): + if isinstance(obj, Binding): + return { + "_type": "binding", + "analysis_ref": obj.analysis_ref, + "track": obj.track, + "range_min": obj.range_min, + "range_max": obj.range_max, + "transform": obj.transform, + } + if isinstance(obj, Symbol): + return {"_type": "symbol", "name": obj.name} + if isinstance(obj, Keyword): + return {"_type": "keyword", "name": obj.name} + return super().default(obj) + + +def generate_json_output(compiled, plan, analysis_data): + """Generate JSON plan output.""" + output = { + "plan_id": plan.plan_id, + "recipe_id": compiled.name, + "recipe_hash": plan.recipe_hash, + "encoding": compiled.encoding, + "output_step_id": plan.output_step_id, + "steps": [], + } + + for step in plan.steps: + step_dict = { + "step_id": step.step_id, + "node_type": step.node_type, + "config": step.config, + "inputs": step.inputs, + "level": step.level, + "cache_id": step.cache_id, + } + # Embed analysis results for ANALYZE steps + if step.node_type == "ANALYZE" and step.step_id in analysis_data: + step_dict["config"]["analysis_results"] = analysis_data[step.step_id] + output["steps"].append(step_dict) + + return json.dumps(output, indent=2, cls=PlanJSONEncoder) + + +def generate_sexp_output(compiled, plan, analysis_data): + """Generate S-expression plan output.""" + lines = [ + f'(plan "{compiled.name}"', + f' :version "{compiled.version}"', + f' :plan-id "{plan.plan_id}"', + ] + + if compiled.encoding: + 
lines.append(f' :encoding {to_sexp(compiled.encoding)}') + + # Include analysis data for effect parameter bindings + if plan.analysis: + lines.append('') + lines.append(' (analysis') + for name, data in plan.analysis.items(): + times = data.get("times", []) + values = data.get("values", []) + # Truncate for display but include all data + times_str = " ".join(str(t) for t in times) + values_str = " ".join(str(v) for v in values) + lines.append(f' ({name}') + lines.append(f' :times ({times_str})') + lines.append(f' :values ({values_str}))') + lines.append(' )') + + lines.append('') + + for step in plan.steps: + lines.append(f' (step "{step.step_id}"') + lines.append(f' :type {step.node_type}') + lines.append(f' :level {step.level}') + lines.append(f' :cache "{step.cache_id}"') + if step.inputs: + inputs_str = " ".join(f'"{i}"' for i in step.inputs) + lines.append(f' :inputs ({inputs_str})') + for key, value in step.config.items(): + lines.append(f' :{key.replace("_", "-")} {to_sexp(value)}') + lines.append(' )') + + lines.append('') + lines.append(f' :output "{plan.output_step_id}")') + + return '\n'.join(lines) + + +def generate_text_output(compiled, plan, analysis_data): + """Generate human-readable text output.""" + lines = [ + f"Recipe: {compiled.name} v{compiled.version}", + ] + + if compiled.encoding: + lines.append(f"Encoding: {compiled.encoding}") + + lines.extend([ + f"\nPlan ID: {plan.plan_id}", + f"Output: {plan.output_step_id[:16]}...", + f"\nSteps ({len(plan.steps)}):", + "-" * 60, + ]) + + for step in plan.steps: + lines.append(f"\n[{step.level}] {step.node_type}") + lines.append(f" id: {step.step_id[:16]}...") + lines.append(f" cache: {step.cache_id[:16]}...") + if step.inputs: + lines.append(f" inputs: {[i[:16] + '...' 
for i in step.inputs]}") + for key, value in step.config.items(): + if key == "analysis_results": + lines.append(f" {key}: <{len(value.get('times', []))} times>") + else: + lines.append(f" {key}: {value}") + + return '\n'.join(lines) + + +def parse_param(param_str: str) -> tuple: + """Parse a key=value parameter string. + + Args: + param_str: String in format "key=value" + + Returns: + Tuple of (key, parsed_value) where value is converted to int/float if possible + """ + if "=" not in param_str: + raise ValueError(f"Invalid parameter format: {param_str} (expected key=value)") + + key, value = param_str.split("=", 1) + key = key.strip() + value = value.strip() + + # Try to parse as int + try: + return (key, int(value)) + except ValueError: + pass + + # Try to parse as float + try: + return (key, float(value)) + except ValueError: + pass + + # Return as string + return (key, value) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="Generate execution plan from recipe") + parser.add_argument("recipe", type=Path, help="Recipe file (.sexp)") + parser.add_argument("-o", "--output", type=Path, help="Output file (default: stdout)") + parser.add_argument("-a", "--analysis", type=Path, help="Pre-computed analysis file (.sexp)") + parser.add_argument("-p", "--param", action="append", dest="params", metavar="KEY=VALUE", + help="Set recipe parameter (can be used multiple times)") + parser.add_argument("--json", action="store_true", help="Output JSON format") + parser.add_argument("--text", action="store_true", help="Output human-readable text format") + + args = parser.parse_args() + + if not args.recipe.exists(): + print(f"Recipe not found: {args.recipe}", file=sys.stderr) + sys.exit(1) + + if args.analysis and str(args.analysis) != "-" and not args.analysis.exists(): + print(f"Analysis file not found: {args.analysis}", file=sys.stderr) + sys.exit(1) + + # Parse parameters + params = {} + if args.params: + for param_str in 
args.params: + try: + key, value = parse_param(param_str) + params[key] = value + except ValueError as e: + print(f"Error: {e}", file=sys.stderr) + sys.exit(1) + + if args.json: + fmt = "json" + elif args.text: + fmt = "text" + else: + fmt = "sexp" + + plan_recipe(args.recipe, fmt, args.output, args.analysis, params or None) diff --git a/recipe-1.sexp b/recipe-1.sexp new file mode 100644 index 0000000..b122815 --- /dev/null +++ b/recipe-1.sexp @@ -0,0 +1,16 @@ +; test recipe for language development +(recipe "test" + :version "1.0" + :encoding (:codec "libx264" :crf 22 :preset "medium" :audio-codec "aac") + + (effect invert + :path "../artdag-effects/invert/effect.py") + + ; User-supplied input video + (def input-video + (-> (source :path "new.webm" :description "Input video") + (segment :start 0 :duration 3))) + + (def branch-01 (-> input-video (fx invert :intensity 1.0))) + + (sequence input-video branch-01)) diff --git a/recipe-all-effects-modular.sexp b/recipe-all-effects-modular.sexp new file mode 100644 index 0000000..1fe9574 --- /dev/null +++ b/recipe-all-effects-modular.sexp @@ -0,0 +1,55 @@ +;; All Effects Showcase (Modular Version) +;; Uses data-driven effect presets for easy customization +(recipe "all-effects-showcase-modular" + :version "1.0" + :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) + + ;; Standard libraries - analyzers, effects, constructs + (include :path "libs/standard-analyzers.sexp") + (include :path "libs/all-effects.sexp") + (include :path "libs/standard-constructs.sexp") + + ;; Effect preset - SWAP THIS to change the entire effect palette + (include all-42-preset :path "effect-presets/all-42.sexp") + + ;; Sources with auto-detected info + (def video-a (source :path "monday.webm")) + (def video-b (source :path "new.webm")) + (def video-c (source :path "ecstacy.mp4")) + + ;; Video info analysis (duration, resolution, fps, etc.) 
+ (def video-a-info (-> video-a (analyze video-info))) + (def video-b-info (-> video-b (analyze video-info))) + (def video-c-info (-> video-c (analyze video-info))) + + (def videos (list video-a video-b video-c)) + (def video-infos (list video-a-info video-b-info video-c-info)) + + ;; Audio + (def audio (source :path "dizzy.mp3")) + + ;; Analysis + (def beats-data (-> audio (analyze beats))) + (def bass-data (-> audio (analyze bass))) + (def energy-data (-> audio (analyze energy))) + + ;; Analyzers dict for binding resolution (use dict fn, not literal, for symbol resolution) + (def analyzers (dict :bass bass-data :energy energy-data)) + + ;; Segments - now just one clean call! + ;; Note: kwargs use underscores (planner converts - to _) + (def segments (cycle-effects-preset + :beats beats-data + :videos videos + :video_infos video-infos + :preset all-42-preset + :analyzers analyzers + :beats_per_segment 21)) + + (assert (> (len segments) 0) "No segments created - all videos too short") + + ;; Sequence and output + (def showcase (-> segments + (sequence :resize-mode :fit :priority :width))) + + (mux showcase audio)) diff --git a/recipe-all-effects.sexp b/recipe-all-effects.sexp new file mode 100644 index 0000000..fc2151d --- /dev/null +++ b/recipe-all-effects.sexp @@ -0,0 +1,172 @@ +;; All Effects Showcase +;; Cycles through every sexp effect on beat, using entire audio +(recipe "all-effects-showcase" + :version "1.0" + :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) + + ;; Analyzers + (analyzer beats :path "../artdag-analyzers/beats/analyzer.py") + (analyzer bass :path "../artdag-analyzers/bass/analyzer.py") + (analyzer energy :path "../artdag-analyzers/energy/analyzer.py") + + ;; All sexp effects + (effect ascii_art :path "sexp_effects/effects/ascii_art.sexp") + (effect ascii_zones :path "sexp_effects/effects/ascii_zones.sexp") + (effect datamosh :path "sexp_effects/effects/datamosh.sexp") + (effect pixelsort :path 
"sexp_effects/effects/pixelsort.sexp") + (effect bloom :path "sexp_effects/effects/bloom.sexp") + (effect blur :path "sexp_effects/effects/blur.sexp") + (effect brightness :path "sexp_effects/effects/brightness.sexp") + (effect color-adjust :path "sexp_effects/effects/color-adjust.sexp") + (effect color_cycle :path "sexp_effects/effects/color_cycle.sexp") + (effect contrast :path "sexp_effects/effects/contrast.sexp") + (effect crt :path "sexp_effects/effects/crt.sexp") + (effect echo :path "sexp_effects/effects/echo.sexp") + (effect edge_detect :path "sexp_effects/effects/edge_detect.sexp") + (effect emboss :path "sexp_effects/effects/emboss.sexp") + (effect film_grain :path "sexp_effects/effects/film_grain.sexp") + (effect fisheye :path "sexp_effects/effects/fisheye.sexp") + (effect flip :path "sexp_effects/effects/flip.sexp") + (effect grayscale :path "sexp_effects/effects/grayscale.sexp") + (effect hue_shift :path "sexp_effects/effects/hue_shift.sexp") + (effect invert :path "sexp_effects/effects/invert.sexp") + (effect kaleidoscope :path "sexp_effects/effects/kaleidoscope.sexp") + (effect mirror :path "sexp_effects/effects/mirror.sexp") + (effect neon_glow :path "sexp_effects/effects/neon_glow.sexp") + (effect noise :path "sexp_effects/effects/noise.sexp") + (effect outline :path "sexp_effects/effects/outline.sexp") + (effect pixelate :path "sexp_effects/effects/pixelate.sexp") + (effect posterize :path "sexp_effects/effects/posterize.sexp") + (effect rgb_split :path "sexp_effects/effects/rgb_split.sexp") + (effect ripple :path "sexp_effects/effects/ripple.sexp") + (effect rotate :path "sexp_effects/effects/rotate.sexp") + (effect saturation :path "sexp_effects/effects/saturation.sexp") + (effect scanlines :path "sexp_effects/effects/scanlines.sexp") + (effect sepia :path "sexp_effects/effects/sepia.sexp") + (effect sharpen :path "sexp_effects/effects/sharpen.sexp") + (effect strobe :path "sexp_effects/effects/strobe.sexp") + (effect swirl :path 
"sexp_effects/effects/swirl.sexp") + (effect threshold :path "sexp_effects/effects/threshold.sexp") + (effect tile_grid :path "sexp_effects/effects/tile_grid.sexp") + (effect trails :path "sexp_effects/effects/trails.sexp") + (effect vignette :path "sexp_effects/effects/vignette.sexp") + (effect wave :path "sexp_effects/effects/wave.sexp") + (effect zoom :path "sexp_effects/effects/zoom.sexp") + + ;; Constructs + (construct slice-every-n :path "constructs/slice-every-n.sexp") + + ;; Sources with durations (seconds) + (def video-a (source :path "monday.webm")) + (def video-a-duration 30) ;; adjust to actual duration + (def video-b (source :path "new.webm")) + (def video-b-duration 60) ;; adjust to actual duration + (def video-c (source :path "ecstacy.mp4")) + (def video-c-duration 45) ;; adjust to actual duration + + ;; Video list with durations for easy lookup + (def videos (list video-a video-b video-c)) + (def video-durations (list video-a-duration video-b-duration video-c-duration)) + + ;; Audio - entire file + (def audio (source :path "dizzy.mp3")) + + ;; Analysis + (def beats-data (-> audio (analyze beats))) + (def bass-data (-> audio (analyze bass))) + (def energy-data (-> audio (analyze energy))) + + ;; Group every 21 beats into one segment (~42 segments for this track) + (def beats-per-seg 21) + + ;; Slice into segments, one effect each + ;; Wraps video start time; skips if segment longer than all videos + (def segments (slice-every-n beats-data beats-per-seg + :init 0 + :reducer (fn [acc i start end] + (let [seg-duration (- end start) + ;; Try preferred video first, then others + vid-idx (mod acc 3) + ;; Find a video long enough for this segment + valid-vid-idx (cond + (>= (nth video-durations vid-idx) seg-duration) vid-idx + (>= (nth video-durations (mod (+ vid-idx 1) 3)) seg-duration) (mod (+ vid-idx 1) 3) + (>= (nth video-durations (mod (+ vid-idx 2) 3)) seg-duration) (mod (+ vid-idx 2) 3) + :else nil)] + ;; Skip if no video is long enough + (if (= 
valid-vid-idx nil) + {:skip true :acc (inc acc)} + (let [src (nth videos valid-vid-idx) + src-duration (nth video-durations valid-vid-idx) + ;; Wrap start time within video duration + wrapped-start (mod start src-duration) + effect-idx (mod acc 42) + fx (cond + ;; Color effects 0-9 - DRAMATIC ranges for visible music reactivity + (= effect-idx 0) {:effect invert} + (= effect-idx 1) {:effect grayscale} + (= effect-idx 2) {:effect sepia} + (= effect-idx 3) {:effect brightness :amount (bind bass values :range [-80 80])} + (= effect-idx 4) {:effect contrast :amount (bind energy values :range [0.5 2.5])} + (= effect-idx 5) {:effect saturation :amount (bind bass values :range [0.2 3.0])} + (= effect-idx 6) {:effect hue_shift :degrees (bind energy values :range [0 360])} + (= effect-idx 7) {:effect color_cycle :speed 2} + (= effect-idx 8) {:effect threshold :level 128} + (= effect-idx 9) {:effect posterize :levels 6} + ;; Blur/sharpen 10-13 - wider ranges + (= effect-idx 10) {:effect blur :radius (bind bass values :range [1 30])} + (= effect-idx 11) {:effect sharpen :amount (bind energy values :range [0.5 4])} + (= effect-idx 12) {:effect bloom :intensity 0.6 :radius 20} + (= effect-idx 13) {:effect color-adjust :brightness 20 :contrast 1.2} + ;; Distortion 14-21 - much more dramatic + (= effect-idx 14) {:effect swirl :strength (bind bass values :range [-6 6])} + (= effect-idx 15) {:effect fisheye :strength (bind bass values :range [-0.5 0.8])} + (= effect-idx 16) {:effect wave :amplitude (bind bass values :range [10 60]) :wavelength 60} + (= effect-idx 17) {:effect ripple :amplitude (bind bass values :range [10 40]) :frequency 6} + (= effect-idx 18) {:effect kaleidoscope :segments 6 :rotation_speed 30} + (= effect-idx 19) {:effect zoom :factor (bind bass values :range [0.8 1.5])} + (= effect-idx 20) {:effect rotate :angle (bind energy values :range [-30 30])} + (= effect-idx 21) {:effect mirror :direction "horizontal"} + ;; Stylization 22-28 - more variation + (= 
effect-idx 22) {:effect pixelate :block_size (bind bass values :range [4 32])} + (= effect-idx 23) {:effect ascii_art :char_size 8 :color_mode "color"} + (= effect-idx 24) {:effect ascii_zones :char_size 10} + (= effect-idx 25) {:effect edge_detect :low 50 :high 150} + (= effect-idx 26) {:effect emboss :strength 1.5} + (= effect-idx 27) {:effect outline :thickness 2} + (= effect-idx 28) {:effect neon_glow :glow_radius 20 :glow_intensity 2} + ;; Retro/film 29-33 + (= effect-idx 29) {:effect crt :line_spacing 3 :vignette_amount 0.3} + (= effect-idx 30) {:effect scanlines :spacing 3 :intensity 0.4} + (= effect-idx 31) {:effect film_grain :intensity 0.25} + (= effect-idx 32) {:effect vignette :strength 0.6} + (= effect-idx 33) {:effect noise :amount (bind bass values :range [10 80])} + ;; Chromatic 34 - bigger split + (= effect-idx 34) {:effect rgb_split :offset_x (bind bass values :range [5 40])} + ;; Temporal 35-37 + (= effect-idx 35) {:effect echo :num_echoes 4 :decay 0.5} + (= effect-idx 36) {:effect trails :persistence 0.7} + (= effect-idx 37) {:effect strobe :frequency 4} + ;; Geometric 38-39 + (= effect-idx 38) {:effect flip :direction "horizontal"} + (= effect-idx 39) {:effect tile_grid :rows 2 :cols 2} + ;; Glitch 40-41 - more glitchy + (= effect-idx 40) {:effect pixelsort :threshold_low 30 :threshold_high 220} + (= effect-idx 41) {:effect datamosh :corruption (bind bass values :range [0.2 0.8]) :block_size 24} + ;; Default fallback + :else {:effect invert})] + {:source src + :start wrapped-start + :duration seg-duration + :effects (list fx) + :acc (inc acc)})))))) + + ;; Error if no segments were created (all videos too short) + (assert (> (len segments) 0) "No segments created - all videos too short for segment durations") + + ;; Sequence all segments + (def showcase (-> segments + (sequence :resize-mode :fit :priority :width))) + + ;; Output with original audio + (mux showcase audio)) diff --git a/recipe-bound copy.sexp b/recipe-bound copy.sexp new file 
mode 100644 index 0000000..c1756e8 --- /dev/null +++ b/recipe-bound copy.sexp @@ -0,0 +1,45 @@ +; Beat-synced video with full lambda control +(recipe "bound-effects" + :version "1.0" + :encoding (:codec "libx264" :crf 22 :preset "fast" :audio-codec "aac" :fps 30) + + (analyzer beats :path "../artdag-analyzers/beats/analyzer.py") + (analyzer bass :path "../artdag-analyzers/bass/analyzer.py") + (effect invert :path "../artdag-effects/invert/effect.py") + (effect sepia :path "sexp_effects/effects/sepia.sexp") + + (construct slice-on :path "constructs/slice-on.sexp") + + + (def video-a (source :path "monday.webm" :description "Video A")) + (def video-b (source :path "new.webm" :description "Video B")) + + (def video-c (source :path "ecstacy.mp4" :description "Video c")) + + (def audio (-> (source :path "dizzy.mp3" :description "Audio input") + (segment :start 0 :duration 10))) + + (def beats-data (-> audio (analyze beats))) + (def bass-data (-> audio (analyze bass))) + + ; Alternate sources and bind invert intensity to bass + ; slice-on produces a list of segments, then we explicitly concat with normalization + ; video-b always gets sepia effect applied + (def segments (slice-on beats-data + :init 0 + :reducer (fn [acc i start end] + (let [idx (mod acc 3) + src (nth (list video-a video-b video-c) idx) + base-effects (list {:effect invert :intensity (bind bass-data :range [0 100])}) + effects (if (= idx 1) + (concat base-effects (list {:effect sepia})) + base-effects)] + {:source src + :effects effects + :acc (inc acc)})))) + + ; Concatenate with resize normalization (fit to width, pad height) + (def synced-video (-> segments + (sequence :resize-mode :fit :priority :width))) + + (mux synced-video audio)) diff --git a/recipe-bound.sexp b/recipe-bound.sexp new file mode 100644 index 0000000..17c73e5 --- /dev/null +++ b/recipe-bound.sexp @@ -0,0 +1,67 @@ +; Beat-synced video demonstrating all node types +; Uses: source, segment, effect, resize, blend, layer, sequence, mux, 
analyze +(recipe "bound-effects" + :version "1.0" + :encoding (:codec "libx264" :crf 22 :preset "fast" :audio-codec "aac" :fps 30) + + ;; Analyzers + (analyzer beats :path "../artdag-analyzers/beats/analyzer.py") + (analyzer bass :path "../artdag-analyzers/bass/analyzer.py") + + ;; Effects (all sexp-based now) + (effect invert :path "../artdag-effects/invert/effect.py") + (effect sepia :path "sexp_effects/effects/sepia.sexp") + (effect color-adjust :path "sexp_effects/effects/color-adjust.sexp") + (effect resize-frame :path "sexp_effects/effects/resize-frame.sexp") + (effect blend :path "sexp_effects/effects/blend.sexp") + (effect layer :path "sexp_effects/effects/layer.sexp") + + ;; Construct + (construct slice-on :path "constructs/slice-on.sexp") + + ;; Sources + (def video-a (source :path "monday.webm" :description "Video A")) + (def video-b (source :path "new.webm" :description "Video B")) + (def video-c (source :path "ecstacy.mp4" :description "Video C")) + + ;; Audio with segment + (def audio (-> (source :path "dizzy.mp3" :description "Audio input") + (segment :start 0 :duration 10))) + + ;; Analysis + (def beats-data (-> audio (analyze beats))) + (def bass-data (-> audio (analyze bass))) + + ;; Create an overlay track: video-c resized small, with color adjustment + (def overlay-track (-> video-c + (segment :start 0 :duration 10) + (resize 320 180 :mode "linear") + (effect color-adjust :brightness 20 :contrast 1.2 :saturation 1.5))) + + ;; Main sliced video with effects + (def segments (slice-on beats-data + :init 0 + :reducer (fn [acc i start end] + (let [idx (mod acc 3) + src (nth (list video-a video-b video-c) idx) + base-effects (list {:effect invert :intensity (bind bass-data :range [0 100])}) + effects (if (= idx 1) + (concat base-effects (list {:effect sepia})) + base-effects)] + {:source src + :effects effects + :acc (inc acc)})))) + + ;; Sequence the beat-sliced segments + (def synced-video (-> segments + (sequence :resize-mode :fit :priority 
:width))) + + ;; Blend overlay with main video (picture-in-picture style) + (def with-overlay (layer synced-video overlay-track :x 20 :y 20 :opacity 0.8)) + + ;; Final blend with original video-a for ghosting effect + (def final-video (-> video-a + (segment :start 0 :duration 10) + (blend with-overlay :mode "screen" :opacity 0.15))) + + (mux final-video audio)) diff --git a/recipe-construct.sexp b/recipe-construct.sexp new file mode 100644 index 0000000..82aac23 --- /dev/null +++ b/recipe-construct.sexp @@ -0,0 +1,24 @@ +; Test recipe using user-defined construct +(recipe "construct-test" + :version "1.0" + :encoding (:codec "libx264" :crf 22 :preset "fast" :audio-codec "aac" :fps 30) + + (analyzer beats :path "../artdag-analyzers/beats/analyzer.py") + (construct beat-alternate :path "constructs/beat-alternate.sexp") + + (def video-a (source :path "monday.webm" :description "Video A")) + (def video-b (source :path "new.webm" :description "Video B")) + + (def audio (-> (source :path "dizzy.mp3" :description "Audio input") + (segment :start 0 :duration 10))) + + (def beats-data (-> audio (analyze beats))) + + ;; Use the beat-alternate construct instead of slice-on + (def segments (beat-alternate beats-data (list video-a video-b))) + + ;; Concatenate with resize normalization + (def synced-video (-> segments + (sequence :resize-mode :fit :priority :width))) + + (mux synced-video audio)) diff --git a/recipe-lambda.sexp b/recipe-lambda.sexp new file mode 100644 index 0000000..dd6db52 --- /dev/null +++ b/recipe-lambda.sexp @@ -0,0 +1,28 @@ +; Beat-synced video recipe with lambda reducer +(recipe "beat-sync-lambda" + :version "1.0" + :encoding (:codec "libx264" :crf 22 :preset "fast" :audio-codec "aac") + + ; Declare analyzers and effects + (analyzer beats :path "../artdag-analyzers/beats/analyzer.py") + (effect invert :path "../artdag-effects/invert/effect.py") + + ; Sources + (def video (source :path "new.webm" :description "Video input")) + (def audio (-> (source :path 
"dizzy.mp3" :description "Audio input") + (segment :start 0 :duration 10))) + + ; Analyze audio for beats + (def analysis (-> audio (analyze beats))) + + ; Slice video at beat times using lambda reducer + (def synced-video (slice-on video analysis + :times times + :init 0 + :fn (fn [acc i start end] + {:source video + :effects (if (odd? i) (list invert) (list)) + :acc (inc acc)}))) + + ; Mux video with audio + (mux synced-video audio)) diff --git a/recipe-parametric.sexp b/recipe-parametric.sexp new file mode 100644 index 0000000..14f12f9 --- /dev/null +++ b/recipe-parametric.sexp @@ -0,0 +1,33 @@ +;; Parametric Effect Test +;; Demonstrates using command-line parameters for effect values +;; +;; Usage: +;; python3 plan.py recipe-parametric.sexp -p strength=3 -p amount=50 | python3 execute.py - -d . -o output.mp4 +;; +;; Parameters (with defaults): +;; strength - swirl strength (default: 2) +;; amount - brightness amount (default: 0) + +(recipe "parametric-test" + :version "1.0" + :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) + + ;; Effects + (effect swirl :path "sexp_effects/effects/swirl.sexp") + (effect brightness :path "sexp_effects/effects/brightness.sexp") + + ;; Source + (def video (source :path "monday.webm")) + (def audio (source :path "dizzy.mp3")) + + ;; 10 second segments + (def clip (-> video (segment :start 0 :duration 10))) + (def audio-clip (-> audio (segment :start 0 :duration 10))) + + ;; Apply effects with parameters (passed via -p flag) + ;; Parameters are injected as bindings before compilation + (def result (-> clip + (effect swirl :strength strength) + (effect brightness :amount amount))) + + (mux result audio-clip)) diff --git a/recipe-sexp-slice.sexp b/recipe-sexp-slice.sexp new file mode 100644 index 0000000..62243c0 --- /dev/null +++ b/recipe-sexp-slice.sexp @@ -0,0 +1,30 @@ +; Test recipe using S-expression slice-on construct +(recipe "sexp-slice-test" + :version "1.0" + :encoding (:codec "libx264" :crf 
22 :preset "fast" :audio-codec "aac" :fps 30) + + (analyzer beats :path "../artdag-analyzers/beats/analyzer.py") + + ;; Import slice-on as a user-defined construct (not the built-in!) + (construct slice-on :path "constructs/slice-on.sexp") + + (def video-a (source :path "monday.webm" :description "Video A")) + (def video-b (source :path "new.webm" :description "Video B")) + + (def audio (-> (source :path "dizzy.mp3" :description "Audio input") + (segment :start 0 :duration 10))) + + (def beats-data (-> audio (analyze beats))) + + ;; Use the S-expression slice-on with a reducer function + (def segments (slice-on beats-data + :init 0 + :fn (fn [acc i start end] + {:source (nth (list video-a video-b) (mod acc 2)) + :effects (list) + :acc (inc acc)}))) + + (def synced-video (-> segments + (sequence :resize-mode :fit :priority :width))) + + (mux synced-video audio)) diff --git a/recipe-single-effect.sexp b/recipe-single-effect.sexp new file mode 100644 index 0000000..1892c95 --- /dev/null +++ b/recipe-single-effect.sexp @@ -0,0 +1,94 @@ +;; Single Effect Test - Static Effects +;; Change the effect line to test different effects +(recipe "single-effect-test" + :version "1.0" + :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) + + ;; Effects + (effect ascii_art :path "sexp_effects/effects/ascii_art.sexp") + (effect ascii_zones :path "sexp_effects/effects/ascii_zones.sexp") + (effect bloom :path "sexp_effects/effects/bloom.sexp") + (effect blur :path "sexp_effects/effects/blur.sexp") + (effect brightness :path "sexp_effects/effects/brightness.sexp") + (effect color-adjust :path "sexp_effects/effects/color-adjust.sexp") + (effect color_cycle :path "sexp_effects/effects/color_cycle.sexp") + (effect contrast :path "sexp_effects/effects/contrast.sexp") + (effect crt :path "sexp_effects/effects/crt.sexp") + (effect datamosh :path "sexp_effects/effects/datamosh.sexp") + (effect echo :path "sexp_effects/effects/echo.sexp") + (effect edge_detect 
:path "sexp_effects/effects/edge_detect.sexp") + (effect emboss :path "sexp_effects/effects/emboss.sexp") + (effect film_grain :path "sexp_effects/effects/film_grain.sexp") + (effect fisheye :path "sexp_effects/effects/fisheye.sexp") + (effect flip :path "sexp_effects/effects/flip.sexp") + (effect grayscale :path "sexp_effects/effects/grayscale.sexp") + (effect hue_shift :path "sexp_effects/effects/hue_shift.sexp") + (effect invert :path "sexp_effects/effects/invert.sexp") + (effect kaleidoscope :path "sexp_effects/effects/kaleidoscope.sexp") + (effect mirror :path "sexp_effects/effects/mirror.sexp") + (effect neon_glow :path "sexp_effects/effects/neon_glow.sexp") + (effect noise :path "sexp_effects/effects/noise.sexp") + (effect outline :path "sexp_effects/effects/outline.sexp") + (effect pixelate :path "sexp_effects/effects/pixelate.sexp") + (effect pixelsort :path "sexp_effects/effects/pixelsort.sexp") + (effect posterize :path "sexp_effects/effects/posterize.sexp") + (effect rgb_split :path "sexp_effects/effects/rgb_split.sexp") + (effect ripple :path "sexp_effects/effects/ripple.sexp") + (effect rotate :path "sexp_effects/effects/rotate.sexp") + (effect saturation :path "sexp_effects/effects/saturation.sexp") + (effect scanlines :path "sexp_effects/effects/scanlines.sexp") + (effect sepia :path "sexp_effects/effects/sepia.sexp") + (effect sharpen :path "sexp_effects/effects/sharpen.sexp") + (effect strobe :path "sexp_effects/effects/strobe.sexp") + (effect swirl :path "sexp_effects/effects/swirl.sexp") + (effect threshold :path "sexp_effects/effects/threshold.sexp") + (effect tile_grid :path "sexp_effects/effects/tile_grid.sexp") + (effect trails :path "sexp_effects/effects/trails.sexp") + (effect vignette :path "sexp_effects/effects/vignette.sexp") + (effect wave :path "sexp_effects/effects/wave.sexp") + (effect zoom :path "sexp_effects/effects/zoom.sexp") + + ;; Source + (def video (source :path "monday.webm")) + (def audio (source :path "dizzy.mp3")) + + ;; 
10 second segments + (def clip (-> video (segment :start 0 :duration 10))) + (def audio-clip (-> audio (segment :start 0 :duration 10))) + + ;; ============================================================ + ;; CHANGE THE EFFECT LINE BELOW: + ;; (effect invert) (effect grayscale) (effect sepia) + ;; (effect brightness :amount 30) (effect contrast :amount 1.5) + ;; (effect saturation :amount 2.0) (effect hue_shift :degrees 90) + ;; (effect color_cycle :speed 2) (effect threshold :level 128) + ;; (effect posterize :levels 6) (effect blur :radius 10) + ;; (effect sharpen :amount 2) (effect bloom :intensity 0.6 :radius 20) + ;; (effect color-adjust :brightness 20 :contrast 1.2) + ;; (effect swirl :strength 3) (effect fisheye :strength 0.5) + ;; (effect wave :amplitude 30 :wavelength 60) + ;; (effect ripple :amplitude 20 :frequency 6) + ;; (effect kaleidoscope :segments 6 :rotation_speed 30) + ;; (effect zoom :factor 1.2) (effect rotate :angle 15) + ;; (effect mirror :direction "horizontal") + ;; (effect pixelate :block_size 16) + ;; (effect ascii_art :char_size 8 :color_mode "color") + ;; (effect ascii_zones :char_size 10) + ;; (effect edge_detect :low 50 :high 150) + ;; (effect emboss :strength 1.5) (effect outline :thickness 2) + ;; (effect neon_glow :glow_radius 20 :glow_intensity 2) + ;; (effect crt :line_spacing 3 :vignette_amount 0.3) + ;; (effect scanlines :spacing 3 :intensity 0.4) + ;; (effect film_grain :intensity 0.25) (effect vignette :strength 0.6) + ;; (effect noise :amount 40) (effect rgb_split :offset_x 20) + ;; (effect echo :num_echoes 4 :decay 0.5) + ;; (effect trails :persistence 0.7) (effect strobe :frequency 4) + ;; (effect flip :direction "horizontal") + ;; (effect tile_grid :rows 2 :cols 2) + ;; (effect pixelsort :threshold_low 30 :threshold_high 220) + ;; (effect datamosh :corruption 0.5 :block_size 24) + ;; ============================================================ + (def result (-> clip + (effect swirl :strength 3))) + + (mux result 
audio-clip)) diff --git a/recipe.sexp b/recipe.sexp new file mode 100644 index 0000000..0878bdc --- /dev/null +++ b/recipe.sexp @@ -0,0 +1,25 @@ +; Beat-synced video recipe +(recipe "beat-sync" + :version "1.0" + :encoding (:codec "libx264" :crf 22 :preset "fast" :audio-codec "aac") + + ; Declare analyzers and effects + (analyzer beats :path "../artdag-analyzers/beats/analyzer.py") + (effect invert :path "../artdag-effects/invert/effect.py") + + ; Sources + (def video (source :path "new.webm" :description "Video input")) + (def audio (-> (source :path "dizzy.mp3" :description "Audio input") + (segment :start 0 :duration 10))) + + ; Analyze audio for beats + (def analysis (-> audio (analyze beats))) + + ; Slice video at beat times, invert on odd beats + (def synced-video (slice-on video analysis + :times times + :effect invert + :pattern alternate)) + + ; Mux video with audio + (mux synced-video audio)) diff --git a/run-effect.sh b/run-effect.sh new file mode 100644 index 0000000..1c7114a --- /dev/null +++ b/run-effect.sh @@ -0,0 +1,122 @@ +#!/bin/bash +# Run a single effect by number (0-42) +# Usage: ./run-effect.sh +# +# Note: For effects with simple numeric params, you can also use native params: +# python3 plan.py recipe-parametric.sexp -p strength=5 -p amount=30 | python3 execute.py - -d . 
-o output.mp4 + +EFFECT_NUM=${1:-0} + +# Effect definitions array +EFFECTS=( + "(effect invert)" + "(effect grayscale)" + "(effect sepia)" + "(effect brightness :amount 30)" + "(effect contrast :amount 1.5)" + "(effect saturation :amount 2.0)" + "(effect hue_shift :degrees 90)" + "(effect color_cycle :speed 2)" + "(effect threshold :level 128)" + "(effect posterize :levels 6)" + "(effect blur :radius 10)" + "(effect sharpen :amount 2)" + "(effect bloom :intensity 0.6 :radius 20)" + "(effect color-adjust :brightness 20 :contrast 1.2)" + "(effect swirl :strength 3)" + "(effect fisheye :strength 0.5)" + "(effect wave :amplitude 30 :wavelength 60)" + "(effect ripple :amplitude 20 :frequency 6)" + "(effect kaleidoscope :segments 6 :rotation_speed 30)" + "(effect zoom :factor 1.2)" + "(effect rotate :angle 15)" + "(effect mirror :direction \"horizontal\")" + "(effect pixelate :block_size 16)" + "(effect ascii_art :char_size 8 :color_mode \"color\")" + "(effect ascii_zones :char_size 10)" + "(effect edge_detect :low 50 :high 150)" + "(effect emboss :strength 1.5)" + "(effect outline :thickness 2)" + "(effect neon_glow :glow_radius 20 :glow_intensity 2)" + "(effect crt :line_spacing 3 :vignette_amount 0.3)" + "(effect scanlines :spacing 3 :intensity 0.4)" + "(effect film_grain :intensity 0.25)" + "(effect vignette :strength 0.6)" + "(effect noise :amount 40)" + "(effect rgb_split :offset_x 20)" + "(effect echo :num_echoes 4 :decay 0.5)" + "(effect trails :persistence 0.7)" + "(effect strobe :frequency 4)" + "(effect flip :direction \"horizontal\")" + "(effect tile_grid :rows 2 :cols 2)" + "(effect pixelsort :threshold_low 30 :threshold_high 220)" + "(effect datamosh :corruption 0.5 :block_size 24)" +) + +if [ "$EFFECT_NUM" -lt 0 ] || [ "$EFFECT_NUM" -ge ${#EFFECTS[@]} ]; then + echo "Effect number must be 0-$((${#EFFECTS[@]}-1))" + exit 1 +fi + +EFFECT="${EFFECTS[$EFFECT_NUM]}" +echo "Running effect $EFFECT_NUM: $EFFECT" + +# Create temp recipe with selected effect +cat > 
/tmp/recipe-temp.sexp << EOF +(recipe "effect-test" + :version "1.0" + :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30) + + (effect ascii_art :path "sexp_effects/effects/ascii_art.sexp") + (effect ascii_zones :path "sexp_effects/effects/ascii_zones.sexp") + (effect bloom :path "sexp_effects/effects/bloom.sexp") + (effect blur :path "sexp_effects/effects/blur.sexp") + (effect brightness :path "sexp_effects/effects/brightness.sexp") + (effect color-adjust :path "sexp_effects/effects/color-adjust.sexp") + (effect color_cycle :path "sexp_effects/effects/color_cycle.sexp") + (effect contrast :path "sexp_effects/effects/contrast.sexp") + (effect crt :path "sexp_effects/effects/crt.sexp") + (effect datamosh :path "sexp_effects/effects/datamosh.sexp") + (effect echo :path "sexp_effects/effects/echo.sexp") + (effect edge_detect :path "sexp_effects/effects/edge_detect.sexp") + (effect emboss :path "sexp_effects/effects/emboss.sexp") + (effect film_grain :path "sexp_effects/effects/film_grain.sexp") + (effect fisheye :path "sexp_effects/effects/fisheye.sexp") + (effect flip :path "sexp_effects/effects/flip.sexp") + (effect grayscale :path "sexp_effects/effects/grayscale.sexp") + (effect hue_shift :path "sexp_effects/effects/hue_shift.sexp") + (effect invert :path "sexp_effects/effects/invert.sexp") + (effect kaleidoscope :path "sexp_effects/effects/kaleidoscope.sexp") + (effect mirror :path "sexp_effects/effects/mirror.sexp") + (effect neon_glow :path "sexp_effects/effects/neon_glow.sexp") + (effect noise :path "sexp_effects/effects/noise.sexp") + (effect outline :path "sexp_effects/effects/outline.sexp") + (effect pixelate :path "sexp_effects/effects/pixelate.sexp") + (effect pixelsort :path "sexp_effects/effects/pixelsort.sexp") + (effect posterize :path "sexp_effects/effects/posterize.sexp") + (effect rgb_split :path "sexp_effects/effects/rgb_split.sexp") + (effect ripple :path "sexp_effects/effects/ripple.sexp") + (effect rotate :path 
"sexp_effects/effects/rotate.sexp") + (effect saturation :path "sexp_effects/effects/saturation.sexp") + (effect scanlines :path "sexp_effects/effects/scanlines.sexp") + (effect sepia :path "sexp_effects/effects/sepia.sexp") + (effect sharpen :path "sexp_effects/effects/sharpen.sexp") + (effect strobe :path "sexp_effects/effects/strobe.sexp") + (effect swirl :path "sexp_effects/effects/swirl.sexp") + (effect threshold :path "sexp_effects/effects/threshold.sexp") + (effect tile_grid :path "sexp_effects/effects/tile_grid.sexp") + (effect trails :path "sexp_effects/effects/trails.sexp") + (effect vignette :path "sexp_effects/effects/vignette.sexp") + (effect wave :path "sexp_effects/effects/wave.sexp") + (effect zoom :path "sexp_effects/effects/zoom.sexp") + + (def video (source :path "monday.webm")) + (def audio (source :path "dizzy.mp3")) + (def clip (-> video (segment :start 0 :duration 10))) + (def audio-clip (-> audio (segment :start 0 :duration 10))) + (def result (-> clip $EFFECT)) + (mux result audio-clip)) +EOF + +python3 plan.py /tmp/recipe-temp.sexp | python3 execute.py - -d . -o "effect-${EFFECT_NUM}.mp4" +echo "Output: effect-${EFFECT_NUM}.mp4" diff --git a/run-file.sh b/run-file.sh new file mode 100755 index 0000000..adacb4b --- /dev/null +++ b/run-file.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# Run the pipeline to a file: analyze -> plan -> execute (writes output.mp4, no playback) +# Usage: ./run-file.sh recipe.sexp + +RECIPE="${1:-recipe-bound.sexp}" + +python analyze.py "$RECIPE" | python plan.py "$RECIPE" -a - | python execute.py - -d "$(dirname "$RECIPE")" -o output.mp4 diff --git a/run.py b/run.py new file mode 100755 index 0000000..23703c7 --- /dev/null +++ b/run.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python3 +""" +Run a recipe: plan then execute. + +This is a convenience wrapper that: +1. Generates a plan (runs analyzers, expands SLICE_ON) +2. 
Executes the plan (produces video output) +""" + +import json +import sys +import tempfile +from pathlib import Path + +# Add artdag to path +sys.path.insert(0, str(Path(__file__).parent.parent / "artdag")) + +from artdag.sexp import compile_string +from artdag.sexp.planner import create_plan +from artdag.sexp.parser import Binding + +# Import execute functionality +from execute import execute_plan + + +class PlanEncoder(json.JSONEncoder): + """JSON encoder that handles Binding objects.""" + def default(self, obj): + if isinstance(obj, Binding): + return { + "_bind": obj.analysis_ref, + "range_min": obj.range_min, + "range_max": obj.range_max, + } + return super().default(obj) + + +def run_recipe(recipe_path: Path, output_path: Path = None): + """Run a recipe file: plan then execute.""" + + recipe_text = recipe_path.read_text() + recipe_dir = recipe_path.parent + + print(f"=== COMPILE ===") + print(f"Recipe: {recipe_path}") + compiled = compile_string(recipe_text) + print(f"Name: {compiled.name} v{compiled.version}") + print(f"Nodes: {len(compiled.nodes)}") + + # Track analysis results + analysis_data = {} + + def on_analysis(node_id, results): + analysis_data[node_id] = results + times = results.get("times", []) + print(f" Analysis: {len(times)} beat times @ {results.get('tempo', 0):.1f} BPM") + + # Generate plan + print(f"\n=== PLAN ===") + plan = create_plan( + compiled, + inputs={}, + recipe_dir=recipe_dir, + on_analysis=on_analysis, + ) + + print(f"Plan ID: {plan.plan_id[:16]}...") + print(f"Steps: {len(plan.steps)}") + + # Write plan to temp file for execute + plan_dict = { + "plan_id": plan.plan_id, + "recipe_id": compiled.name, + "recipe_hash": plan.recipe_hash, + "encoding": compiled.encoding, + "output_step_id": plan.output_step_id, + "steps": [], + } + + for step in plan.steps: + step_dict = { + "step_id": step.step_id, + "node_type": step.node_type, + "config": step.config, + "inputs": step.inputs, + "level": step.level, + "cache_id": step.cache_id, + } 
+ if step.node_type == "ANALYZE" and step.step_id in analysis_data: + step_dict["config"]["analysis_results"] = analysis_data[step.step_id] + plan_dict["steps"].append(step_dict) + + # Save plan + work_dir = Path(tempfile.mkdtemp(prefix="artdag_run_")) + plan_file = work_dir / "plan.json" + with open(plan_file, "w") as f: + json.dump(plan_dict, f, indent=2, cls=PlanEncoder) + + print(f"Plan saved: {plan_file}") + + # Execute plan + print(f"\n=== EXECUTE ===") + result = execute_plan(plan_file, output_path, recipe_dir) + + print(f"\n=== DONE ===") + print(f"Output: {result}") + return result + + +if __name__ == "__main__": + if len(sys.argv) < 2: + print("Usage: run.py [output.mp4]") + print() + print("Commands:") + print(" run.py - Plan and execute recipe") + print(" plan.py - Generate plan only") + print(" execute.py - Execute pre-generated plan") + sys.exit(1) + + recipe_path = Path(sys.argv[1]) + output_path = Path(sys.argv[2]) if len(sys.argv) > 2 else None + + if not recipe_path.exists(): + print(f"Recipe not found: {recipe_path}") + sys.exit(1) + + run_recipe(recipe_path, output_path) diff --git a/run.sh b/run.sh new file mode 100755 index 0000000..b65e5f4 --- /dev/null +++ b/run.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# Run the full pipeline: analyze -> plan -> execute -> play +# Usage: ./run.sh recipe.sexp + +RECIPE="${1:-recipe-bound.sexp}" + +python3 analyze.py "$RECIPE" | python plan.py "$RECIPE" -a - | python execute.py - -d "$(dirname "$RECIPE")" | xargs mpv --fs diff --git a/sexp_effects/__init__.py b/sexp_effects/__init__.py new file mode 100644 index 0000000..b001c71 --- /dev/null +++ b/sexp_effects/__init__.py @@ -0,0 +1,32 @@ +""" +S-Expression Effects System + +Safe, shareable effects defined in S-expressions. 
+""" + +from .parser import parse, parse_file, Symbol, Keyword +from .interpreter import ( + Interpreter, + get_interpreter, + load_effect, + load_effects_dir, + run_effect, + list_effects, + make_process_frame, +) +from .primitives import PRIMITIVES + +__all__ = [ + 'parse', + 'parse_file', + 'Symbol', + 'Keyword', + 'Interpreter', + 'get_interpreter', + 'load_effect', + 'load_effects_dir', + 'run_effect', + 'list_effects', + 'make_process_frame', + 'PRIMITIVES', +] diff --git a/sexp_effects/effects/ascii_art.sexp b/sexp_effects/effects/ascii_art.sexp new file mode 100644 index 0000000..91f811e --- /dev/null +++ b/sexp_effects/effects/ascii_art.sexp @@ -0,0 +1,14 @@ +;; ASCII Art effect - converts image to ASCII characters +;; @param char_size int [4, 32] default 8 +;; @param alphabet string default "standard" +;; @param color_mode string default "color" +;; @param contrast float [1, 3] default 1.5 +;; @param background list default (0 0 0) + +(define-effect ascii_art + ((char_size 8) (alphabet "standard") (color_mode "color") (contrast 1.5) (background (list 0 0 0))) + (let* ((sample (cell-sample frame char_size)) + (colors (nth sample 0)) + (luminances (nth sample 1)) + (chars (luminance-to-chars luminances alphabet contrast))) + (render-char-grid frame chars colors char_size color_mode background))) diff --git a/sexp_effects/effects/ascii_zones.sexp b/sexp_effects/effects/ascii_zones.sexp new file mode 100644 index 0000000..12173d9 --- /dev/null +++ b/sexp_effects/effects/ascii_zones.sexp @@ -0,0 +1,28 @@ +;; ASCII Zones effect - different character sets for different brightness zones +;; Dark areas use simple chars, mid uses standard, bright uses blocks +;; @param char_size int [4, 32] default 8 +;; @param dark_threshold int [0, 128] default 80 +;; @param bright_threshold int [128, 255] default 180 +;; @param color_mode string default "color" + +(define-effect ascii_zones + ((char_size 8) (dark_threshold 80) (bright_threshold 180) (color_mode "color")) + (let* 
((sample (cell-sample frame char_size)) + (colors (nth sample 0)) + (luminances (nth sample 1)) + ;; Start with simple chars as base + (base-chars (luminance-to-chars luminances "simple" 1.2)) + ;; Map each cell to appropriate alphabet based on brightness zone + (zoned-chars (map-char-grid base-chars luminances + (lambda (r c ch lum) + (cond + ;; Bright zones: use block characters + ((> lum bright_threshold) + (alphabet-char "blocks" (floor (/ (- lum bright_threshold) 15)))) + ;; Dark zones: use simple sparse chars + ((< lum dark_threshold) + (alphabet-char " .-" (floor (/ lum 30)))) + ;; Mid zones: use standard ASCII + (else + (alphabet-char "standard" (floor (/ lum 4))))))))) + (render-char-grid frame zoned-chars colors char_size color_mode (list 0 0 0)))) diff --git a/sexp_effects/effects/blend.sexp b/sexp_effects/effects/blend.sexp new file mode 100644 index 0000000..bd0cb58 --- /dev/null +++ b/sexp_effects/effects/blend.sexp @@ -0,0 +1,48 @@ +;; Blend effect - combines two video streams +;; Multi-input effect: uses frame-a and frame-b +;; Params: +;; mode - blend mode (add, multiply, screen, overlay, difference, lighten, darken, alpha) +;; opacity - blend amount (0-1) +;; resize-mode - how to resize frame-b to match frame-a (fit, crop, stretch) +;; priority - which dimension takes priority (width, height) +;; pad-color - color for padding in fit mode [r g b] + +(define-effect blend + ((mode "overlay") (opacity 0.5) (resize-mode "fit") (priority "width") (pad-color (list 0 0 0))) + (let [a frame-a + a-w (width a) + a-h (height a) + b-raw frame-b + b-w (width b-raw) + b-h (height b-raw) + ;; Calculate scale based on resize mode and priority + scale-w (/ a-w b-w) + scale-h (/ a-h b-h) + scale (if (= resize-mode "stretch") + 1 ;; Will use explicit dimensions + (if (= resize-mode "crop") + (max scale-w scale-h) ;; Scale to cover, then crop + (if (= priority "width") + scale-w + scale-h))) + ;; For stretch, use target dimensions directly + new-w (if (= resize-mode 
"stretch") a-w (round (* b-w scale))) + new-h (if (= resize-mode "stretch") a-h (round (* b-h scale))) + ;; Resize b + b-resized (resize b-raw new-w new-h "linear") + ;; Handle fit (pad) or crop to exact size + b (if (= resize-mode "crop") + ;; Crop to center + (let [cx (/ (- new-w a-w) 2) + cy (/ (- new-h a-h) 2)] + (crop b-resized cx cy a-w a-h)) + (if (and (= resize-mode "fit") (or (!= new-w a-w) (!= new-h a-h))) + ;; Pad to center + (let [pad-x (/ (- a-w new-w) 2) + pad-y (/ (- a-h new-h) 2) + canvas (make-image a-w a-h pad-color)] + (paste canvas b-resized pad-x pad-y)) + b-resized))] + (if (= mode "alpha") + (blend-images a b opacity) + (blend-images a (blend-mode a b mode) opacity)))) diff --git a/sexp_effects/effects/bloom.sexp b/sexp_effects/effects/bloom.sexp new file mode 100644 index 0000000..f6ed31b --- /dev/null +++ b/sexp_effects/effects/bloom.sexp @@ -0,0 +1,14 @@ +;; Bloom effect - glow on bright areas +;; @param intensity float [0, 2] default 0.5 +;; @param threshold int [0, 255] default 200 +;; @param radius int [1, 50] default 15 + +(define-effect bloom + ((intensity 0.5) (threshold 200) (radius 15)) + (let* ((bright (map-pixels frame + (lambda (x y c) + (if (> (luminance c) threshold) + c + (rgb 0 0 0))))) + (blurred (blur bright radius))) + (blend-mode frame blurred "add"))) diff --git a/sexp_effects/effects/blur.sexp b/sexp_effects/effects/blur.sexp new file mode 100644 index 0000000..ad2ad69 --- /dev/null +++ b/sexp_effects/effects/blur.sexp @@ -0,0 +1,6 @@ +;; Blur effect - gaussian blur +;; @param radius int [1, 50] default 5 + +(define-effect blur + ((radius 5)) + (blur frame (max 1 radius))) diff --git a/sexp_effects/effects/brightness.sexp b/sexp_effects/effects/brightness.sexp new file mode 100644 index 0000000..63a203c --- /dev/null +++ b/sexp_effects/effects/brightness.sexp @@ -0,0 +1,7 @@ +;; Brightness effect - adjusts overall brightness +;; @param amount float [-255, 255] default 0 +;; Uses vectorized adjust primitive for fast 
processing + +(define-effect brightness + ((amount 0)) + (adjust frame amount 1)) diff --git a/sexp_effects/effects/color-adjust.sexp b/sexp_effects/effects/color-adjust.sexp new file mode 100644 index 0000000..672a9b8 --- /dev/null +++ b/sexp_effects/effects/color-adjust.sexp @@ -0,0 +1,8 @@ +;; Color adjustment effect - replaces TRANSFORM node +;; Params: brightness (-255 to 255), contrast (0 to 3+), saturation (0 to 2+) + +(define-effect color-adjust + ((brightness 0) (contrast 1) (saturation 1)) + (-> frame + (adjust :brightness brightness :contrast contrast) + (shift-hsv :s saturation))) diff --git a/sexp_effects/effects/color_cycle.sexp b/sexp_effects/effects/color_cycle.sexp new file mode 100644 index 0000000..5ee9bbe --- /dev/null +++ b/sexp_effects/effects/color_cycle.sexp @@ -0,0 +1,11 @@ +;; Color Cycle effect - animated hue rotation +;; @param speed float [0, 10] default 1 + +(define-effect color_cycle + ((speed 1)) + (let ((shift (* t speed 360))) + (map-pixels frame + (lambda (x y c) + (let* ((hsv (rgb->hsv c)) + (new-h (mod (+ (first hsv) shift) 360))) + (hsv->rgb (list new-h (nth hsv 1) (nth hsv 2)))))))) diff --git a/sexp_effects/effects/contrast.sexp b/sexp_effects/effects/contrast.sexp new file mode 100644 index 0000000..dd3e809 --- /dev/null +++ b/sexp_effects/effects/contrast.sexp @@ -0,0 +1,7 @@ +;; Contrast effect - adjusts image contrast +;; @param amount float [0.5, 3] default 1 +;; Uses vectorized adjust primitive for fast processing + +(define-effect contrast + ((amount 1)) + (adjust frame 0 amount)) diff --git a/sexp_effects/effects/crt.sexp b/sexp_effects/effects/crt.sexp new file mode 100644 index 0000000..17c6229 --- /dev/null +++ b/sexp_effects/effects/crt.sexp @@ -0,0 +1,28 @@ +;; CRT effect - old monitor simulation +;; @param line_spacing int [1, 10] default 2 +;; @param line_opacity float [0, 1] default 0.3 +;; @param vignette float [0, 1] default 0.2 + +(define-effect crt + ((line_spacing 2) (line_opacity 0.3) (vignette_amount 
0.2)) + (let* ((w (width frame)) + (h (height frame)) + (cx (/ w 2)) + (cy (/ h 2)) + (max-dist (sqrt (+ (* cx cx) (* cy cy))))) + (map-pixels frame + (lambda (x y c) + (let* (;; Scanline darkening + (scanline-factor (if (= 0 (mod y line_spacing)) + (- 1 line_opacity) + 1)) + ;; Vignette + (dx (- x cx)) + (dy (- y cy)) + (dist (sqrt (+ (* dx dx) (* dy dy)))) + (vignette-factor (- 1 (* (/ dist max-dist) vignette_amount))) + ;; Combined + (factor (* scanline-factor vignette-factor))) + (rgb (* (red c) factor) + (* (green c) factor) + (* (blue c) factor))))))) diff --git a/sexp_effects/effects/datamosh.sexp b/sexp_effects/effects/datamosh.sexp new file mode 100644 index 0000000..f185061 --- /dev/null +++ b/sexp_effects/effects/datamosh.sexp @@ -0,0 +1,13 @@ +;; Datamosh effect - glitch block corruption +;; @param block_size int [8, 128] default 32 +;; @param corruption float [0, 1] default 0.3 +;; @param max_offset int [0, 200] default 50 +;; @param color_corrupt bool default true + +(define-effect datamosh + ((block_size 32) (corruption 0.3) (max_offset 50) (color_corrupt true)) + ;; Get previous frame from state, or use current frame if none + (let ((prev (state-get "prev_frame" frame))) + (begin + (state-set "prev_frame" (copy frame)) + (datamosh frame prev block_size corruption max_offset color_corrupt)))) diff --git a/sexp_effects/effects/echo.sexp b/sexp_effects/effects/echo.sexp new file mode 100644 index 0000000..d7e74da --- /dev/null +++ b/sexp_effects/effects/echo.sexp @@ -0,0 +1,17 @@ +;; Echo effect - motion trails using frame buffer +;; @param num_echoes int [1, 20] default 4 +;; @param decay float [0, 1] default 0.5 + +(define-effect echo + ((num_echoes 4) (decay 0.5)) + (let* ((buffer (state-get 'buffer (list))) + (new-buffer (take (cons frame buffer) (+ num_echoes 1)))) + (begin + (state-set 'buffer new-buffer) + ;; Blend frames with decay + (if (< (length new-buffer) 2) + frame + (let ((result (copy frame))) + ;; Simple blend of first two frames for 
now + ;; Full version would fold over all frames + (blend-images frame (nth new-buffer 1) (* decay 0.5))))))) diff --git a/sexp_effects/effects/edge_detect.sexp b/sexp_effects/effects/edge_detect.sexp new file mode 100644 index 0000000..9312afa --- /dev/null +++ b/sexp_effects/effects/edge_detect.sexp @@ -0,0 +1,7 @@ +;; Edge detection effect - highlights edges +;; @param low int [10, 100] default 50 +;; @param high int [50, 300] default 150 + +(define-effect edge_detect + ((low 50) (high 150)) + (edges frame low high)) diff --git a/sexp_effects/effects/emboss.sexp b/sexp_effects/effects/emboss.sexp new file mode 100644 index 0000000..b0db5fd --- /dev/null +++ b/sexp_effects/effects/emboss.sexp @@ -0,0 +1,11 @@ +;; Emboss effect - creates raised/3D appearance +;; @param strength float [0.5, 3] default 1 +;; @param blend float [0, 1] default 0.3 + +(define-effect emboss + ((strength 1) (blend 0.3)) + (let* ((kernel (list (list (- strength) (- strength) 0) + (list (- strength) 1 strength) + (list 0 strength strength))) + (embossed (convolve frame kernel))) + (blend-images embossed frame blend))) diff --git a/sexp_effects/effects/film_grain.sexp b/sexp_effects/effects/film_grain.sexp new file mode 100644 index 0000000..affcd9c --- /dev/null +++ b/sexp_effects/effects/film_grain.sexp @@ -0,0 +1,17 @@ +;; Film Grain effect - adds film grain texture +;; @param intensity float [0, 1] default 0.2 +;; @param colored bool default false + +(define-effect film_grain + ((intensity 0.2) (colored false)) + (let ((grain-amount (* intensity 50))) + (map-pixels frame + (lambda (x y c) + (if colored + (rgb (clamp (+ (red c) (gaussian 0 grain-amount)) 0 255) + (clamp (+ (green c) (gaussian 0 grain-amount)) 0 255) + (clamp (+ (blue c) (gaussian 0 grain-amount)) 0 255)) + (let ((n (gaussian 0 grain-amount))) + (rgb (clamp (+ (red c) n) 0 255) + (clamp (+ (green c) n) 0 255) + (clamp (+ (blue c) n) 0 255)))))))) diff --git a/sexp_effects/effects/fisheye.sexp 
b/sexp_effects/effects/fisheye.sexp new file mode 100644 index 0000000..698eb48 --- /dev/null +++ b/sexp_effects/effects/fisheye.sexp @@ -0,0 +1,14 @@ +;; Fisheye effect - barrel/pincushion lens distortion +;; @param strength float [-1, 1] default 0.3 +;; @param center_x float [0, 1] default 0.5 +;; @param center_y float [0, 1] default 0.5 +;; @param zoom_correct bool default true + +(define-effect fisheye + ((strength 0.3) (center_x 0.5) (center_y 0.5) (zoom_correct true)) + (let* ((w (width frame)) + (h (height frame)) + (cx (* w center_x)) + (cy (* h center_y)) + (coords (fisheye-displace w h strength cx cy zoom_correct))) + (remap frame (coords-x coords) (coords-y coords)))) diff --git a/sexp_effects/effects/flip.sexp b/sexp_effects/effects/flip.sexp new file mode 100644 index 0000000..72afc9b --- /dev/null +++ b/sexp_effects/effects/flip.sexp @@ -0,0 +1,14 @@ +;; Flip effect - flips image horizontally or vertically +;; @param horizontal bool default true +;; @param vertical bool default false + +(define-effect flip + ((horizontal true) (vertical false)) + (let ((result frame)) + (if horizontal + (set! result (flip-h result)) + nil) + (if vertical + (set! 
result (flip-v result)) + nil) + result)) diff --git a/sexp_effects/effects/grayscale.sexp b/sexp_effects/effects/grayscale.sexp new file mode 100644 index 0000000..b25dcc3 --- /dev/null +++ b/sexp_effects/effects/grayscale.sexp @@ -0,0 +1,5 @@ +;; Grayscale effect - converts to grayscale +;; Uses vectorized mix-gray primitive for fast processing + +(define-effect grayscale () + (mix-gray frame 1)) diff --git a/sexp_effects/effects/hue_shift.sexp b/sexp_effects/effects/hue_shift.sexp new file mode 100644 index 0000000..8d3e19c --- /dev/null +++ b/sexp_effects/effects/hue_shift.sexp @@ -0,0 +1,9 @@ +;; Hue shift effect - rotates hue values +;; @param degrees float [0, 360] default 0 +;; @param speed float default 0 - rotation per second +;; Uses vectorized shift-hsv primitive for fast processing + +(define-effect hue_shift + ((degrees 0) (speed 0)) + (let ((shift (+ degrees (* speed t)))) + (shift-hsv frame shift 1 1))) diff --git a/sexp_effects/effects/invert.sexp b/sexp_effects/effects/invert.sexp new file mode 100644 index 0000000..e5917fd --- /dev/null +++ b/sexp_effects/effects/invert.sexp @@ -0,0 +1,5 @@ +;; Invert effect - inverts all colors +;; Uses vectorized invert-img primitive for fast processing + +(define-effect invert () + (invert-img frame)) diff --git a/sexp_effects/effects/kaleidoscope.sexp b/sexp_effects/effects/kaleidoscope.sexp new file mode 100644 index 0000000..e487d00 --- /dev/null +++ b/sexp_effects/effects/kaleidoscope.sexp @@ -0,0 +1,18 @@ +;; Kaleidoscope effect - mandala-like symmetry patterns +;; @param segments int [3, 16] default 6 +;; @param rotation float [0, 360] default 0 +;; @param rotation_speed float [-180, 180] default 0 +;; @param center_x float [0, 1] default 0.5 +;; @param center_y float [0, 1] default 0.5 +;; @param zoom float [0.5, 3] default 1 + +(define-effect kaleidoscope + ((segments 6) (rotation 0) (rotation_speed 0) (center_x 0.5) (center_y 0.5) (zoom 1)) + (let* ((w (width frame)) + (h (height frame)) + (cx (* w 
center_x)) + (cy (* h center_y)) + ;; Total rotation including time-based animation + (total_rot (+ rotation (* rotation_speed (or _time 0)))) + (coords (kaleidoscope-displace w h segments total_rot cx cy zoom))) + (remap frame (coords-x coords) (coords-y coords)))) diff --git a/sexp_effects/effects/layer.sexp b/sexp_effects/effects/layer.sexp new file mode 100644 index 0000000..90154fb --- /dev/null +++ b/sexp_effects/effects/layer.sexp @@ -0,0 +1,21 @@ +;; Layer effect - composite frame-b over frame-a at position +;; Multi-input effect: uses frame-a (background) and frame-b (overlay) +;; Params: x, y (position), opacity (0-1), mode (blend mode) + +(define-effect layer + ((x 0) (y 0) (opacity 1.0) (mode "alpha")) + (let [bg (copy frame-a) + fg frame-b + ;; Resize fg if needed to fit + fg-w (width fg) + fg-h (height fg)] + (if (= opacity 1.0) + ;; Simple paste + (paste bg fg x y) + ;; Blend with opacity + (let [blended (if (= mode "alpha") + (blend-images (crop bg x y fg-w fg-h) fg opacity) + (blend-images (crop bg x y fg-w fg-h) + (blend-mode (crop bg x y fg-w fg-h) fg mode) + opacity))] + (paste bg blended x y))))) diff --git a/sexp_effects/effects/mirror.sexp b/sexp_effects/effects/mirror.sexp new file mode 100644 index 0000000..0bcfce3 --- /dev/null +++ b/sexp_effects/effects/mirror.sexp @@ -0,0 +1,31 @@ +;; Mirror effect - mirrors half of image +;; @param mode string default "left_right" + +(define-effect mirror + ((mode "left_right")) + (let* ((w (width frame)) + (h (height frame)) + (hw (floor (/ w 2))) + (hh (floor (/ h 2)))) + (cond + ((= mode "left_right") + (let ((left (crop frame 0 0 hw h)) + (result (copy frame))) + (paste result (flip-h left) hw 0))) + + ((= mode "right_left") + (let ((right (crop frame hw 0 hw h)) + (result (copy frame))) + (paste result (flip-h right) 0 0))) + + ((= mode "top_bottom") + (let ((top (crop frame 0 0 w hh)) + (result (copy frame))) + (paste result (flip-v top) 0 hh))) + + ((= mode "bottom_top") + (let ((bottom (crop 
frame 0 hh w hh)) + (result (copy frame))) + (paste result (flip-v bottom) 0 0))) + + (else frame)))) diff --git a/sexp_effects/effects/neon_glow.sexp b/sexp_effects/effects/neon_glow.sexp new file mode 100644 index 0000000..77de9fc --- /dev/null +++ b/sexp_effects/effects/neon_glow.sexp @@ -0,0 +1,22 @@ +;; Neon Glow effect - glowing edge effect +;; @param edge_low int [10, 200] default 50 +;; @param edge_high int [50, 300] default 150 +;; @param glow_radius int [1, 50] default 15 +;; @param glow_intensity float [0.5, 5] default 2 +;; @param background float [0, 1] default 0.3 + +(define-effect neon_glow + ((edge_low 50) (edge_high 150) (glow_radius 15) + (glow_intensity 2) (background 0.3)) + (let* ((edge-img (edges frame edge_low edge_high)) + (glow (blur edge-img glow_radius)) + ;; Intensify the glow + (bright-glow (map-pixels glow + (lambda (x y c) + (rgb (clamp (* (red c) glow_intensity) 0 255) + (clamp (* (green c) glow_intensity) 0 255) + (clamp (* (blue c) glow_intensity) 0 255)))))) + (blend-mode (blend-images frame (make-image (width frame) (height frame) (list 0 0 0)) + (- 1 background)) + bright-glow + "screen"))) diff --git a/sexp_effects/effects/noise.sexp b/sexp_effects/effects/noise.sexp new file mode 100644 index 0000000..34b363d --- /dev/null +++ b/sexp_effects/effects/noise.sexp @@ -0,0 +1,7 @@ +;; Noise effect - adds random noise +;; @param amount float [0, 100] default 20 +;; Uses vectorized add-noise primitive for fast processing + +(define-effect noise + ((amount 20)) + (add-noise frame amount)) diff --git a/sexp_effects/effects/outline.sexp b/sexp_effects/effects/outline.sexp new file mode 100644 index 0000000..e34a85c --- /dev/null +++ b/sexp_effects/effects/outline.sexp @@ -0,0 +1,22 @@ +;; Outline effect - shows only edges +;; @param thickness int [1, 10] default 2 +;; @param threshold int [20, 300] default 100 +;; @param color list default (0 0 0) +;; @param fill_mode string default "original" + +(define-effect outline + ((thickness 2) 
(threshold 100) (color (list 0 0 0)) (fill_mode "original")) + (let* ((edge-img (edges frame (/ threshold 2) threshold)) + (dilated (if (> thickness 1) + (dilate edge-img thickness) + edge-img)) + (base (cond + ((= fill_mode "original") (copy frame)) + ((= fill_mode "white") (make-image (width frame) (height frame) (list 255 255 255))) + (else (make-image (width frame) (height frame) (list 0 0 0)))))) + (map-pixels base + (lambda (x y c) + (let ((edge-val (luminance (pixel dilated x y)))) + (if (> edge-val 128) + color + c)))))) diff --git a/sexp_effects/effects/pixelate.sexp b/sexp_effects/effects/pixelate.sexp new file mode 100644 index 0000000..0f215ad --- /dev/null +++ b/sexp_effects/effects/pixelate.sexp @@ -0,0 +1,11 @@ +;; Pixelate effect - creates blocky pixels +;; @param block_size int [2, 64] default 8 + +(define-effect pixelate + ((block_size 8)) + (let* ((w (width frame)) + (h (height frame)) + (small-w (max 1 (floor (/ w block_size)))) + (small-h (max 1 (floor (/ h block_size)))) + (small (resize frame small-w small-h "area"))) + (resize small w h "nearest"))) diff --git a/sexp_effects/effects/pixelsort.sexp b/sexp_effects/effects/pixelsort.sexp new file mode 100644 index 0000000..b13c539 --- /dev/null +++ b/sexp_effects/effects/pixelsort.sexp @@ -0,0 +1,10 @@ +;; Pixelsort effect - glitch art pixel sorting +;; @param sort_by string default "lightness" +;; @param threshold_low float [0, 255] default 50 +;; @param threshold_high float [0, 255] default 200 +;; @param angle float [0, 180] default 0 +;; @param reverse bool default false + +(define-effect pixelsort + ((sort_by "lightness") (threshold_low 50) (threshold_high 200) (angle 0) (reverse false)) + (pixelsort frame sort_by threshold_low threshold_high angle reverse)) diff --git a/sexp_effects/effects/posterize.sexp b/sexp_effects/effects/posterize.sexp new file mode 100644 index 0000000..b82f084 --- /dev/null +++ b/sexp_effects/effects/posterize.sexp @@ -0,0 +1,11 @@ +;; Posterize effect - reduces 
color levels +;; @param levels int [2, 32] default 8 + +(define-effect posterize + ((levels 8)) + (let ((step (floor (/ 256 levels)))) + (map-pixels frame + (lambda (x y c) + (rgb (* (floor (/ (red c) step)) step) + (* (floor (/ (green c) step)) step) + (* (floor (/ (blue c) step)) step)))))) diff --git a/sexp_effects/effects/resize-frame.sexp b/sexp_effects/effects/resize-frame.sexp new file mode 100644 index 0000000..2c0b868 --- /dev/null +++ b/sexp_effects/effects/resize-frame.sexp @@ -0,0 +1,7 @@ +;; Resize effect - replaces RESIZE node +;; Params: width, height, mode (linear, nearest, area) +;; Note: uses target-w/target-h to avoid conflict with width/height primitives + +(define-effect resize-frame + ((target-w 640) (target-h 480) (mode "linear")) + (resize frame target-w target-h mode)) diff --git a/sexp_effects/effects/rgb_split.sexp b/sexp_effects/effects/rgb_split.sexp new file mode 100644 index 0000000..57c789e --- /dev/null +++ b/sexp_effects/effects/rgb_split.sexp @@ -0,0 +1,12 @@ +;; RGB Split effect - chromatic aberration +;; @param offset_x float [-50, 50] default 10 +;; @param offset_y float [-50, 50] default 0 + +(define-effect rgb_split + ((offset_x 10) (offset_y 0)) + (let* ((r (channel frame 0)) + (g (channel frame 1)) + (b (channel frame 2)) + (r-shifted (translate (merge-channels r r r) offset_x offset_y)) + (b-shifted (translate (merge-channels b b b) (- offset_x) (- offset_y)))) + (merge-channels (channel r-shifted 0) g (channel b-shifted 0)))) diff --git a/sexp_effects/effects/ripple.sexp b/sexp_effects/effects/ripple.sexp new file mode 100644 index 0000000..6a9b433 --- /dev/null +++ b/sexp_effects/effects/ripple.sexp @@ -0,0 +1,17 @@ +;; Ripple effect - radial wave distortion from center +;; @param frequency float [1, 20] default 5 +;; @param amplitude float [0, 50] default 10 +;; @param center_x float [0, 1] default 0.5 +;; @param center_y float [0, 1] default 0.5 +;; @param decay float [0, 5] default 1 +;; @param speed float [0, 10] 
default 1 + +(define-effect ripple + ((frequency 5) (amplitude 10) (center_x 0.5) (center_y 0.5) (decay 1) (speed 1)) + (let* ((w (width frame)) + (h (height frame)) + (cx (* w center_x)) + (cy (* h center_y)) + (phase (* (or _time 0) speed 2 pi)) + (coords (ripple-displace w h frequency amplitude cx cy decay phase))) + (remap frame (coords-x coords) (coords-y coords)))) diff --git a/sexp_effects/effects/rotate.sexp b/sexp_effects/effects/rotate.sexp new file mode 100644 index 0000000..ad2a1c0 --- /dev/null +++ b/sexp_effects/effects/rotate.sexp @@ -0,0 +1,8 @@ +;; Rotate effect - rotates image +;; @param angle float [-360, 360] default 0 +;; @param speed float default 0 - rotation per second + +(define-effect rotate + ((angle 0) (speed 0)) + (let ((total-angle (+ angle (* speed t)))) + (rotate-img frame total-angle))) diff --git a/sexp_effects/effects/saturation.sexp b/sexp_effects/effects/saturation.sexp new file mode 100644 index 0000000..8af6121 --- /dev/null +++ b/sexp_effects/effects/saturation.sexp @@ -0,0 +1,7 @@ +;; Saturation effect - adjusts color saturation +;; @param amount float [0, 3] default 1 +;; Uses vectorized shift-hsv primitive for fast processing + +(define-effect saturation + ((amount 1)) + (shift-hsv frame 0 amount 1)) diff --git a/sexp_effects/effects/scanlines.sexp b/sexp_effects/effects/scanlines.sexp new file mode 100644 index 0000000..e10705d --- /dev/null +++ b/sexp_effects/effects/scanlines.sexp @@ -0,0 +1,13 @@ +;; Scanlines effect - VHS-style horizontal line shifting +;; @param amplitude float [0, 100] default 10 +;; @param frequency float [1, 100] default 10 +;; @param randomness float [0, 1] default 0.5 + +(define-effect scanlines + ((amplitude 10) (frequency 10) (randomness 0.5)) + (map-rows frame + (lambda (y row) + (let* ((sine-shift (* amplitude (sin (/ (* y 6.28) (max 1 frequency))))) + (rand-shift (random (- amplitude) amplitude)) + (shift (floor (lerp sine-shift rand-shift randomness)))) + (roll row shift 0))))) diff --git 
a/sexp_effects/effects/sepia.sexp b/sexp_effects/effects/sepia.sexp new file mode 100644 index 0000000..2bf0ba2 --- /dev/null +++ b/sexp_effects/effects/sepia.sexp @@ -0,0 +1,8 @@ +;; Sepia effect - applies sepia tone +;; Classic warm vintage look + +(define-effect sepia () + (color-matrix frame + (list (list 0.393 0.769 0.189) + (list 0.349 0.686 0.168) + (list 0.272 0.534 0.131)))) diff --git a/sexp_effects/effects/sharpen.sexp b/sexp_effects/effects/sharpen.sexp new file mode 100644 index 0000000..192a2f9 --- /dev/null +++ b/sexp_effects/effects/sharpen.sexp @@ -0,0 +1,9 @@ +;; Sharpen effect - sharpens edges +;; @param amount float [0, 5] default 1 + +(define-effect sharpen + ((amount 1)) + (let ((kernel (list (list 0 (- amount) 0) + (list (- amount) (+ 1 (* 4 amount)) (- amount)) + (list 0 (- amount) 0)))) + (convolve frame kernel))) diff --git a/sexp_effects/effects/strobe.sexp b/sexp_effects/effects/strobe.sexp new file mode 100644 index 0000000..b05e347 --- /dev/null +++ b/sexp_effects/effects/strobe.sexp @@ -0,0 +1,14 @@ +;; Strobe effect - holds frames for choppy look +;; @param frame_rate float [1, 60] default 12 + +(define-effect strobe + ((frame_rate 12)) + (let* ((held (state-get 'held nil)) + (held-until (state-get 'held-until 0)) + (frame-duration (/ 1 frame_rate))) + (if (or (= held nil) (>= t held-until)) + (begin + (state-set 'held (copy frame)) + (state-set 'held-until (+ t frame-duration)) + frame) + held))) diff --git a/sexp_effects/effects/swirl.sexp b/sexp_effects/effects/swirl.sexp new file mode 100644 index 0000000..c841a2a --- /dev/null +++ b/sexp_effects/effects/swirl.sexp @@ -0,0 +1,15 @@ +;; Swirl effect - spiral vortex distortion +;; @param strength float [-10, 10] default 1 +;; @param radius float [0.1, 2] default 0.5 +;; @param center_x float [0, 1] default 0.5 +;; @param center_y float [0, 1] default 0.5 +;; @param falloff string default "quadratic" + +(define-effect swirl + ((strength 1) (radius 0.5) (center_x 0.5) (center_y 0.5) 
(falloff "quadratic")) + (let* ((w (width frame)) + (h (height frame)) + (cx (* w center_x)) + (cy (* h center_y)) + (coords (swirl-displace w h strength radius cx cy falloff))) + (remap frame (coords-x coords) (coords-y coords)))) diff --git a/sexp_effects/effects/threshold.sexp b/sexp_effects/effects/threshold.sexp new file mode 100644 index 0000000..3f4c943 --- /dev/null +++ b/sexp_effects/effects/threshold.sexp @@ -0,0 +1,13 @@ +;; Threshold effect - converts to black and white +;; @param level int [0, 255] default 128 +;; @param invert bool default false + +(define-effect threshold + ((level 128) (invert false)) + (map-pixels frame + (lambda (x y c) + (let* ((lum (luminance c)) + (above (if invert (< lum level) (> lum level)))) + (if above + (rgb 255 255 255) + (rgb 0 0 0)))))) diff --git a/sexp_effects/effects/tile_grid.sexp b/sexp_effects/effects/tile_grid.sexp new file mode 100644 index 0000000..e473164 --- /dev/null +++ b/sexp_effects/effects/tile_grid.sexp @@ -0,0 +1,27 @@ +;; Tile Grid effect - tiles image in grid +;; @param rows int [1, 10] default 2 +;; @param cols int [1, 10] default 2 +;; @param gap int [0, 50] default 0 + +(define-effect tile_grid + ((rows 2) (cols 2) (gap 0)) + (let* ((w (width frame)) + (h (height frame)) + (tile-w (floor (/ (- w (* gap (- cols 1))) cols))) + (tile-h (floor (/ (- h (* gap (- rows 1))) rows))) + (tile (resize frame tile-w tile-h "area")) + (result (make-image w h (list 0 0 0)))) + (begin + ;; Manually place tiles using nested iteration + ;; This is a simplified version - full version would loop + (paste result tile 0 0) + (if (> cols 1) + (paste result tile (+ tile-w gap) 0) + nil) + (if (> rows 1) + (paste result tile 0 (+ tile-h gap)) + nil) + (if (and (> cols 1) (> rows 1)) + (paste result tile (+ tile-w gap) (+ tile-h gap)) + nil) + result))) diff --git a/sexp_effects/effects/trails.sexp b/sexp_effects/effects/trails.sexp new file mode 100644 index 0000000..6ece7ba --- /dev/null +++ 
b/sexp_effects/effects/trails.sexp @@ -0,0 +1,18 @@ +;; Trails effect - persistent motion trails +;; @param persistence float [0, 0.99] default 0.8 + +(define-effect trails + ((persistence 0.8)) + (let* ((buffer (state-get 'buffer nil)) + (current frame)) + (if (= buffer nil) + (begin + (state-set 'buffer (copy frame)) + frame) + (let* ((faded (blend-images buffer + (make-image (width frame) (height frame) (list 0 0 0)) + (- 1 persistence))) + (result (blend-mode faded current "lighten"))) + (begin + (state-set 'buffer result) + result))))) diff --git a/sexp_effects/effects/vignette.sexp b/sexp_effects/effects/vignette.sexp new file mode 100644 index 0000000..dc73c5c --- /dev/null +++ b/sexp_effects/effects/vignette.sexp @@ -0,0 +1,21 @@ +;; Vignette effect - darkens corners +;; @param strength float [0, 1] default 0.5 +;; @param radius float [0.5, 2] default 1 + +(define-effect vignette + ((strength 0.5) (radius 1)) + (let* ((w (width frame)) + (h (height frame)) + (cx (/ w 2)) + (cy (/ h 2)) + (max-dist (* (sqrt (+ (* cx cx) (* cy cy))) radius))) + (map-pixels frame + (lambda (x y c) + (let* ((dx (- x cx)) + (dy (- y cy)) + (dist (sqrt (+ (* dx dx) (* dy dy)))) + (factor (- 1 (* (/ dist max-dist) strength))) + (factor (clamp factor 0 1))) + (rgb (* (red c) factor) + (* (green c) factor) + (* (blue c) factor))))))) diff --git a/sexp_effects/effects/wave.sexp b/sexp_effects/effects/wave.sexp new file mode 100644 index 0000000..b6be14f --- /dev/null +++ b/sexp_effects/effects/wave.sexp @@ -0,0 +1,20 @@ +;; Wave effect - sine wave displacement distortion +;; @param amplitude float [0, 100] default 10 +;; @param wavelength float [10, 500] default 50 +;; @param speed float [0, 10] default 1 +;; @param direction string default "horizontal" + +(define-effect wave + ((amplitude 10) (wavelength 50) (speed 1) (direction "horizontal")) + (let* ((w (width frame)) + (h (height frame)) + ;; Use _time for animation phase + (phase (* (or _time 0) speed 2 pi)) + ;; Calculate 
frequency: waves per dimension + (freq (/ (if (= direction "vertical") w h) wavelength)) + (axis (cond + ((= direction "horizontal") "x") + ((= direction "vertical") "y") + (else "both"))) + (coords (wave-displace w h axis freq amplitude phase))) + (remap frame (coords-x coords) (coords-y coords)))) diff --git a/sexp_effects/effects/zoom.sexp b/sexp_effects/effects/zoom.sexp new file mode 100644 index 0000000..77c4974 --- /dev/null +++ b/sexp_effects/effects/zoom.sexp @@ -0,0 +1,6 @@ +;; Zoom effect - zooms in/out from center +;; @param amount float [0.1, 5] default 1 + +(define-effect zoom + ((amount 1)) + (scale-img frame amount amount)) diff --git a/sexp_effects/interpreter.py b/sexp_effects/interpreter.py new file mode 100644 index 0000000..46b4663 --- /dev/null +++ b/sexp_effects/interpreter.py @@ -0,0 +1,537 @@ +""" +S-Expression Effect Interpreter + +Interprets effect definitions written in S-expressions. +Only allows safe primitives - no arbitrary code execution. +""" + +import numpy as np +from typing import Any, Dict, List, Optional, Callable +from pathlib import Path + +from .parser import Symbol, Keyword, parse, parse_file +from .primitives import PRIMITIVES, reset_rng + + +class Environment: + """Lexical environment for variable bindings.""" + + def __init__(self, parent: 'Environment' = None): + self.bindings: Dict[str, Any] = {} + self.parent = parent + + def get(self, name: str) -> Any: + if name in self.bindings: + return self.bindings[name] + if self.parent: + return self.parent.get(name) + raise NameError(f"Undefined variable: {name}") + + def set(self, name: str, value: Any): + self.bindings[name] = value + + def has(self, name: str) -> bool: + if name in self.bindings: + return True + if self.parent: + return self.parent.has(name) + return False + + +class Lambda: + """A user-defined function (lambda).""" + + def __init__(self, params: List[str], body: Any, env: Environment): + self.params = params + self.body = body + self.env = env # Closure 
environment + + def __repr__(self): + return f"" + + +class EffectDefinition: + """A parsed effect definition.""" + + def __init__(self, name: str, params: Dict[str, Any], body: Any): + self.name = name + self.params = params # {name: (type, default)} + self.body = body + + def __repr__(self): + return f"" + + +class Interpreter: + """ + S-Expression interpreter for effects. + + Provides a safe execution environment where only + whitelisted primitives can be called. + """ + + def __init__(self): + # Base environment with primitives + self.global_env = Environment() + + # Load primitives + for name, fn in PRIMITIVES.items(): + self.global_env.set(name, fn) + + # Special values + self.global_env.set('true', True) + self.global_env.set('false', False) + self.global_env.set('nil', None) + + # Loaded effect definitions + self.effects: Dict[str, EffectDefinition] = {} + + def eval(self, expr: Any, env: Environment = None) -> Any: + """Evaluate an S-expression.""" + if env is None: + env = self.global_env + + # Atoms + if isinstance(expr, (int, float, str, bool)): + return expr + + if expr is None: + return None + + if isinstance(expr, Symbol): + return env.get(expr.name) + + if isinstance(expr, Keyword): + return expr # Keywords evaluate to themselves + + if isinstance(expr, np.ndarray): + return expr # Images pass through + + # Lists (function calls / special forms) + if isinstance(expr, list): + if not expr: + return [] + + head = expr[0] + + # Special forms + if isinstance(head, Symbol): + form = head.name + + # Quote + if form == 'quote': + return expr[1] + + # Define + if form == 'define': + name = expr[1] + if isinstance(name, Symbol): + value = self.eval(expr[2], env) + self.global_env.set(name.name, value) + return value + else: + raise SyntaxError(f"define requires symbol, got {name}") + + # Define-effect + if form == 'define-effect': + return self._define_effect(expr, env) + + # Lambda + if form == 'lambda' or form == 'λ': + params = [p.name if isinstance(p, 
Symbol) else p for p in expr[1]] + body = expr[2] + return Lambda(params, body, env) + + # Let + if form == 'let': + return self._eval_let(expr, env) + + # Let* + if form == 'let*': + return self._eval_let_star(expr, env) + + # If + if form == 'if': + cond = self.eval(expr[1], env) + if cond: + return self.eval(expr[2], env) + elif len(expr) > 3: + return self.eval(expr[3], env) + return None + + # Cond + if form == 'cond': + return self._eval_cond(expr, env) + + # And + if form == 'and': + result = True + for e in expr[1:]: + result = self.eval(e, env) + if not result: + return False + return result + + # Or + if form == 'or': + for e in expr[1:]: + result = self.eval(e, env) + if result: + return result + return False + + # Not + if form == 'not': + return not self.eval(expr[1], env) + + # Begin (sequence) + if form == 'begin': + result = None + for e in expr[1:]: + result = self.eval(e, env) + return result + + # Thread-first macro: (-> x (f a) (g b)) => (g (f x a) b) + if form == '->': + result = self.eval(expr[1], env) + for form_expr in expr[2:]: + if isinstance(form_expr, list): + # Insert result as first arg: (f a b) => (f result a b) + result = self.eval([form_expr[0], result] + form_expr[1:], env) + else: + # Just a symbol: f => (f result) + result = self.eval([form_expr, result], env) + return result + + # Set! 
(mutation) + if form == 'set!': + name = expr[1].name if isinstance(expr[1], Symbol) else expr[1] + value = self.eval(expr[2], env) + # Find and update in appropriate scope + scope = env + while scope: + if name in scope.bindings: + scope.bindings[name] = value + return value + scope = scope.parent + raise NameError(f"Cannot set undefined variable: {name}") + + # State-get / state-set (for effect state) + if form == 'state-get': + state = env.get('__state__') + key = self.eval(expr[1], env) + if isinstance(key, Symbol): + key = key.name + default = self.eval(expr[2], env) if len(expr) > 2 else None + return state.get(key, default) + + if form == 'state-set': + state = env.get('__state__') + key = self.eval(expr[1], env) + if isinstance(key, Symbol): + key = key.name + value = self.eval(expr[2], env) + state[key] = value + return value + + # Function call + fn = self.eval(head, env) + args = [self.eval(arg, env) for arg in expr[1:]] + + # Handle keyword arguments + pos_args = [] + kw_args = {} + i = 0 + while i < len(args): + if isinstance(args[i], Keyword): + kw_args[args[i].name] = args[i + 1] if i + 1 < len(args) else None + i += 2 + else: + pos_args.append(args[i]) + i += 1 + + return self._apply(fn, pos_args, kw_args, env) + + raise TypeError(f"Cannot evaluate: {expr}") + + def _wrap_lambda(self, lam: 'Lambda') -> Callable: + """Wrap a Lambda in a Python callable for use by primitives.""" + def wrapper(*args): + new_env = Environment(lam.env) + for i, param in enumerate(lam.params): + if i < len(args): + new_env.set(param, args[i]) + else: + new_env.set(param, None) + return self.eval(lam.body, new_env) + return wrapper + + def _apply(self, fn: Any, args: List[Any], kwargs: Dict[str, Any], env: Environment) -> Any: + """Apply a function to arguments.""" + if isinstance(fn, Lambda): + # User-defined function + new_env = Environment(fn.env) + for i, param in enumerate(fn.params): + if i < len(args): + new_env.set(param, args[i]) + else: + new_env.set(param, None) 
+ return self.eval(fn.body, new_env) + + elif callable(fn): + # Wrap any Lambda arguments so primitives can call them + wrapped_args = [] + for arg in args: + if isinstance(arg, Lambda): + wrapped_args.append(self._wrap_lambda(arg)) + else: + wrapped_args.append(arg) + + # Primitive function + if kwargs: + return fn(*wrapped_args, **kwargs) + return fn(*wrapped_args) + + else: + raise TypeError(f"Cannot call: {fn}") + + def _parse_bindings(self, bindings: list) -> list: + """Parse bindings in either Scheme or Clojure style. + + Scheme: ((x 1) (y 2)) -> [(x, 1), (y, 2)] + Clojure: [x 1 y 2] -> [(x, 1), (y, 2)] + """ + if not bindings: + return [] + + # Check if Clojure style (flat list with symbols and values alternating) + if isinstance(bindings[0], Symbol): + # Clojure style: [x 1 y 2] + pairs = [] + i = 0 + while i < len(bindings) - 1: + name = bindings[i].name if isinstance(bindings[i], Symbol) else bindings[i] + value = bindings[i + 1] + pairs.append((name, value)) + i += 2 + return pairs + else: + # Scheme style: ((x 1) (y 2)) + pairs = [] + for binding in bindings: + name = binding[0].name if isinstance(binding[0], Symbol) else binding[0] + value = binding[1] + pairs.append((name, value)) + return pairs + + def _eval_let(self, expr: Any, env: Environment) -> Any: + """Evaluate let expression: (let ((x 1) (y 2)) body) or (let [x 1 y 2] body) + + Note: Uses sequential binding (like Clojure let / Scheme let*) so each + binding can reference previous bindings. 
+ """ + bindings = expr[1] + body = expr[2] + + new_env = Environment(env) + for name, value_expr in self._parse_bindings(bindings): + value = self.eval(value_expr, new_env) # Sequential: can see previous bindings + new_env.set(name, value) + + return self.eval(body, new_env) + + def _eval_let_star(self, expr: Any, env: Environment) -> Any: + """Evaluate let* expression: sequential bindings.""" + bindings = expr[1] + body = expr[2] + + new_env = Environment(env) + for name, value_expr in self._parse_bindings(bindings): + value = self.eval(value_expr, new_env) # Evaluate in current env + new_env.set(name, value) + + return self.eval(body, new_env) + + def _eval_cond(self, expr: Any, env: Environment) -> Any: + """Evaluate cond expression.""" + for clause in expr[1:]: + test = clause[0] + if isinstance(test, Symbol) and test.name == 'else': + return self.eval(clause[1], env) + if self.eval(test, env): + return self.eval(clause[1], env) + return None + + def _define_effect(self, expr: Any, env: Environment) -> EffectDefinition: + """ + Parse effect definition: + (define-effect name + ((param1 default1) (param2 default2) ...) 
+ body) + """ + name = expr[1].name if isinstance(expr[1], Symbol) else expr[1] + params_list = expr[2] if len(expr) > 2 else [] + body = expr[3] if len(expr) > 3 else expr[2] + + # Parse parameters + params = {} + if isinstance(params_list, list): + for p in params_list: + if isinstance(p, list) and len(p) >= 2: + pname = p[0].name if isinstance(p[0], Symbol) else p[0] + pdefault = p[1] + params[pname] = pdefault + elif isinstance(p, Symbol): + params[p.name] = None + + effect = EffectDefinition(name, params, body) + self.effects[name] = effect + return effect + + def load_effect(self, path: str) -> EffectDefinition: + """Load an effect definition from a .sexp file.""" + expr = parse_file(path) + + # Handle multiple top-level expressions + if isinstance(expr, list) and expr and isinstance(expr[0], list): + for e in expr: + self.eval(e) + else: + self.eval(expr) + + # Return the last defined effect + if self.effects: + return list(self.effects.values())[-1] + return None + + def run_effect(self, name: str, frame, params: Dict[str, Any], + state: Dict[str, Any]) -> tuple: + """ + Run an effect on frame(s). + + Args: + name: Effect name + frame: Input frame (H, W, 3) RGB uint8, or list of frames for multi-input + params: Effect parameters (overrides defaults) + state: Persistent state dict + + Returns: + (output_frame, new_state) + """ + if name not in self.effects: + raise ValueError(f"Unknown effect: {name}") + + effect = self.effects[name] + + # Create environment for this run + env = Environment(self.global_env) + + # Bind frame(s) - support both single frame and list of frames + if isinstance(frame, list): + # Multi-input effect + frames = frame + env.set('frame', frames[0] if frames else None) # Backwards compat + env.set('inputs', frames) + # Named frame bindings + for i, f in enumerate(frames): + env.set(f'frame-{chr(ord("a") + i)}', f) # frame-a, frame-b, etc. 
+ else: + # Single-input effect + env.set('frame', frame) + + # Bind state + if state is None: + state = {} + env.set('__state__', state) + + # Bind parameters (defaults + overrides) + for pname, pdefault in effect.params.items(): + value = params.get(pname) + if value is None: + # Evaluate default if it's an expression (list) + if isinstance(pdefault, list): + value = self.eval(pdefault, env) + else: + value = pdefault + env.set(pname, value) + + # Reset RNG with seed if provided + seed = params.get('seed', 42) + reset_rng(int(seed)) + + # Bind time if provided + time_val = params.get('_time', 0) + env.set('t', time_val) + env.set('_time', time_val) + + # Evaluate body + result = self.eval(effect.body, env) + + # Ensure result is an image + if not isinstance(result, np.ndarray): + result = frame + + return result, state + + +# ============================================================================= +# Convenience Functions +# ============================================================================= + +_interpreter = None + + +def get_interpreter() -> Interpreter: + """Get or create the global interpreter.""" + global _interpreter + if _interpreter is None: + _interpreter = Interpreter() + return _interpreter + + +def load_effect(path: str) -> EffectDefinition: + """Load an effect from a .sexp file.""" + return get_interpreter().load_effect(path) + + +def load_effects_dir(directory: str): + """Load all .sexp effects from a directory.""" + interp = get_interpreter() + dir_path = Path(directory) + for path in dir_path.glob('*.sexp'): + try: + interp.load_effect(str(path)) + except Exception as e: + print(f"Warning: Failed to load {path}: {e}") + + +def run_effect(name: str, frame: np.ndarray, params: Dict[str, Any], + state: Dict[str, Any] = None) -> tuple: + """Run an effect.""" + return get_interpreter().run_effect(name, frame, params, state or {}) + + +def list_effects() -> List[str]: + """List loaded effect names.""" + return 
list(get_interpreter().effects.keys()) + + +# ============================================================================= +# Adapter for existing effect system +# ============================================================================= + +def make_process_frame(effect_path: str) -> Callable: + """ + Create a process_frame function from a .sexp effect. + + This allows S-expression effects to be used with the existing + effect system. + """ + interp = get_interpreter() + interp.load_effect(effect_path) + effect_name = Path(effect_path).stem + + def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple: + return interp.run_effect(effect_name, frame, params, state) + + return process_frame diff --git a/sexp_effects/parser.py b/sexp_effects/parser.py new file mode 100644 index 0000000..12bedfd --- /dev/null +++ b/sexp_effects/parser.py @@ -0,0 +1,168 @@ +""" +S-Expression Parser + +Parses S-expressions into Python data structures: +- Lists become Python lists +- Symbols become Symbol objects +- Numbers become int/float +- Strings become str +- Keywords (:foo) become Keyword objects +""" + +import re +from dataclasses import dataclass +from typing import Any, List, Union + + +@dataclass(frozen=True) +class Symbol: + """A symbol (identifier) in the S-expression.""" + name: str + + def __repr__(self): + return self.name + + +@dataclass(frozen=True) +class Keyword: + """A keyword like :foo in the S-expression.""" + name: str + + def __repr__(self): + return f":{self.name}" + + +# Token patterns +TOKEN_PATTERNS = [ + (r'\s+', None), # Whitespace (skip) + (r';[^\n]*', None), # Comments (skip) + (r'\(', 'LPAREN'), + (r'\)', 'RPAREN'), + (r'\[', 'LBRACKET'), + (r'\]', 'RBRACKET'), + (r"'", 'QUOTE'), + (r'"([^"\\]|\\.)*"', 'STRING'), + (r':[a-zA-Z_][a-zA-Z0-9_\-]*', 'KEYWORD'), + (r'-?[0-9]+\.[0-9]+', 'FLOAT'), + (r'-?[0-9]+', 'INT'), + (r'#t|#f|true|false', 'BOOL'), + (r'[a-zA-Z_+\-*/<>=!?][a-zA-Z0-9_+\-*/<>=!?]*', 'SYMBOL'), +] + +TOKEN_REGEX = 
'|'.join(f'(?P<{name}>{pattern})' if name else f'(?:{pattern})' + for pattern, name in TOKEN_PATTERNS) + + +def tokenize(source: str) -> List[tuple]: + """Tokenize S-expression source code.""" + tokens = [] + for match in re.finditer(TOKEN_REGEX, source): + kind = match.lastgroup + value = match.group() + if kind: + tokens.append((kind, value)) + return tokens + + +def parse(source: str) -> Any: + """Parse S-expression source into Python data structures.""" + tokens = tokenize(source) + pos = [0] # Use list for mutability in nested function + + def parse_expr(): + if pos[0] >= len(tokens): + raise SyntaxError("Unexpected end of input") + + kind, value = tokens[pos[0]] + + if kind == 'LPAREN': + pos[0] += 1 + items = [] + while pos[0] < len(tokens) and tokens[pos[0]][0] != 'RPAREN': + items.append(parse_expr()) + if pos[0] >= len(tokens): + raise SyntaxError("Missing closing parenthesis") + pos[0] += 1 # Skip RPAREN + return items + + if kind == 'LBRACKET': + pos[0] += 1 + items = [] + while pos[0] < len(tokens) and tokens[pos[0]][0] != 'RBRACKET': + items.append(parse_expr()) + if pos[0] >= len(tokens): + raise SyntaxError("Missing closing bracket") + pos[0] += 1 # Skip RBRACKET + return items + + elif kind == 'RPAREN': + raise SyntaxError("Unexpected closing parenthesis") + + elif kind == 'QUOTE': + pos[0] += 1 + return [Symbol('quote'), parse_expr()] + + elif kind == 'STRING': + pos[0] += 1 + # Remove quotes and unescape + return value[1:-1].replace('\\"', '"').replace('\\n', '\n') + + elif kind == 'INT': + pos[0] += 1 + return int(value) + + elif kind == 'FLOAT': + pos[0] += 1 + return float(value) + + elif kind == 'BOOL': + pos[0] += 1 + return value in ('#t', 'true') + + elif kind == 'KEYWORD': + pos[0] += 1 + return Keyword(value[1:]) # Remove leading : + + elif kind == 'SYMBOL': + pos[0] += 1 + return Symbol(value) + + else: + raise SyntaxError(f"Unknown token: {kind} {value}") + + result = parse_expr() + + # Check for multiple top-level expressions + if 
pos[0] < len(tokens): + # Allow multiple top-level expressions, return as list + results = [result] + while pos[0] < len(tokens): + results.append(parse_expr()) + return results + + return result + + +def parse_file(path: str) -> Any: + """Parse an S-expression file.""" + with open(path, 'r') as f: + return parse(f.read()) + + +# Convenience for pretty-printing +def to_sexp(obj: Any) -> str: + """Convert Python object back to S-expression string.""" + if isinstance(obj, list): + return '(' + ' '.join(to_sexp(x) for x in obj) + ')' + elif isinstance(obj, Symbol): + return obj.name + elif isinstance(obj, Keyword): + return f':{obj.name}' + elif isinstance(obj, str): + return f'"{obj}"' + elif isinstance(obj, bool): + return '#t' if obj else '#f' + elif isinstance(obj, (int, float)): + return str(obj) + else: + return repr(obj) diff --git a/sexp_effects/primitives.py b/sexp_effects/primitives.py new file mode 100644 index 0000000..0bafa4d --- /dev/null +++ b/sexp_effects/primitives.py @@ -0,0 +1,1867 @@ +""" +Safe Primitives for S-Expression Effects + +These are the building blocks that user-defined effects can use. +All primitives operate only on image data - no filesystem, network, etc. 
+""" + +import numpy as np +import cv2 +from typing import Any, Callable, Dict, List, Tuple, Optional +import math + + +class DeterministicRNG: + """Seeded RNG for reproducible effects.""" + + def __init__(self, seed: int = 42): + self._rng = np.random.RandomState(seed) + + def random(self, low: float = 0, high: float = 1) -> float: + return self._rng.uniform(low, high) + + def randint(self, low: int, high: int) -> int: + return self._rng.randint(low, high + 1) + + def gaussian(self, mean: float = 0, std: float = 1) -> float: + return self._rng.normal(mean, std) + + +# Global RNG instance (reset per frame with seed param) +_rng = DeterministicRNG() + + +def reset_rng(seed: int): + """Reset the global RNG with a new seed.""" + global _rng + _rng = DeterministicRNG(seed) + + +# ============================================================================= +# Image Primitives +# ============================================================================= + +def prim_width(img: np.ndarray) -> int: + """Get image width.""" + return img.shape[1] + + +def prim_height(img: np.ndarray) -> int: + """Get image height.""" + return img.shape[0] + + +def prim_make_image(w: int, h: int, color: List[int]) -> np.ndarray: + """Create a new image filled with color.""" + img = np.zeros((int(h), int(w), 3), dtype=np.uint8) + if color: + img[:, :] = color[:3] + return img + + +def prim_copy(img: np.ndarray) -> np.ndarray: + """Copy an image.""" + return img.copy() + + +def prim_pixel(img: np.ndarray, x: int, y: int) -> List[int]: + """Get pixel at (x, y) as [r, g, b].""" + h, w = img.shape[:2] + x, y = int(x), int(y) + if 0 <= x < w and 0 <= y < h: + return list(img[y, x]) + return [0, 0, 0] + + +def prim_set_pixel(img: np.ndarray, x: int, y: int, color: List[int]) -> np.ndarray: + """Set pixel at (x, y). 
Returns modified image.""" + h, w = img.shape[:2] + x, y = int(x), int(y) + if 0 <= x < w and 0 <= y < h: + img[y, x] = color[:3] + return img + + +def prim_sample(img: np.ndarray, x: float, y: float) -> List[float]: + """Bilinear sample at float coordinates.""" + h, w = img.shape[:2] + x = np.clip(x, 0, w - 1) + y = np.clip(y, 0, h - 1) + + x0, y0 = int(x), int(y) + x1, y1 = min(x0 + 1, w - 1), min(y0 + 1, h - 1) + fx, fy = x - x0, y - y0 + + c00 = img[y0, x0].astype(float) + c10 = img[y0, x1].astype(float) + c01 = img[y1, x0].astype(float) + c11 = img[y1, x1].astype(float) + + c = (c00 * (1 - fx) * (1 - fy) + + c10 * fx * (1 - fy) + + c01 * (1 - fx) * fy + + c11 * fx * fy) + + return list(c) + + +def prim_channel(img: np.ndarray, c: int) -> np.ndarray: + """Extract a single channel as 2D array.""" + return img[:, :, int(c)].copy() + + +def prim_merge_channels(r: np.ndarray, g: np.ndarray, b: np.ndarray) -> np.ndarray: + """Merge three channels into RGB image.""" + return np.stack([r, g, b], axis=-1).astype(np.uint8) + + +def prim_resize(img: np.ndarray, w: int, h: int, mode: str = "linear") -> np.ndarray: + """Resize image. 
Mode: linear, nearest, area.""" + w, h = int(w), int(h) + if w < 1 or h < 1: + return img + interp = { + "linear": cv2.INTER_LINEAR, + "nearest": cv2.INTER_NEAREST, + "area": cv2.INTER_AREA, + }.get(mode, cv2.INTER_LINEAR) + return cv2.resize(img, (w, h), interpolation=interp) + + +def prim_crop(img: np.ndarray, x: int, y: int, w: int, h: int) -> np.ndarray: + """Crop a region from image.""" + ih, iw = img.shape[:2] + x, y, w, h = int(x), int(y), int(w), int(h) + x = max(0, min(x, iw)) + y = max(0, min(y, ih)) + w = max(0, min(w, iw - x)) + h = max(0, min(h, ih - y)) + return img[y:y + h, x:x + w].copy() + + +def prim_paste(dst: np.ndarray, src: np.ndarray, x: int, y: int) -> np.ndarray: + """Paste src onto dst at position (x, y).""" + dh, dw = dst.shape[:2] + sh, sw = src.shape[:2] + x, y = int(x), int(y) + + # Calculate valid regions + sx1 = max(0, -x) + sy1 = max(0, -y) + sx2 = min(sw, dw - x) + sy2 = min(sh, dh - y) + + dx1 = max(0, x) + dy1 = max(0, y) + dx2 = dx1 + (sx2 - sx1) + dy2 = dy1 + (sy2 - sy1) + + if dx2 > dx1 and dy2 > dy1: + dst[dy1:dy2, dx1:dx2] = src[sy1:sy2, sx1:sx2] + + return dst + + +# ============================================================================= +# Color Primitives +# ============================================================================= + +def prim_rgb(r: float, g: float, b: float) -> List[int]: + """Create RGB color.""" + return [int(np.clip(r, 0, 255)), + int(np.clip(g, 0, 255)), + int(np.clip(b, 0, 255))] + + +def prim_red(c: List[int]) -> int: + return c[0] if c else 0 + + +def prim_green(c: List[int]) -> int: + return c[1] if len(c) > 1 else 0 + + +def prim_blue(c: List[int]) -> int: + return c[2] if len(c) > 2 else 0 + + +def prim_luminance(c: List[int]) -> float: + """Calculate luminance (grayscale value).""" + if not c: + return 0 + return 0.299 * c[0] + 0.587 * c[1] + 0.114 * c[2] + + +def prim_rgb_to_hsv(c: List[int]) -> List[float]: + """Convert RGB to HSV.""" + r, g, b = c[0] / 255, c[1] / 255, c[2] / 255 
+ mx, mn = max(r, g, b), min(r, g, b) + diff = mx - mn + + if diff == 0: + h = 0 + elif mx == r: + h = (60 * ((g - b) / diff) + 360) % 360 + elif mx == g: + h = (60 * ((b - r) / diff) + 120) % 360 + else: + h = (60 * ((r - g) / diff) + 240) % 360 + + s = 0 if mx == 0 else diff / mx + v = mx + + return [h, s * 100, v * 100] + + +def prim_hsv_to_rgb(hsv: List[float]) -> List[int]: + """Convert HSV to RGB.""" + h, s, v = hsv[0], hsv[1] / 100, hsv[2] / 100 + c = v * s + x = c * (1 - abs((h / 60) % 2 - 1)) + m = v - c + + if h < 60: + r, g, b = c, x, 0 + elif h < 120: + r, g, b = x, c, 0 + elif h < 180: + r, g, b = 0, c, x + elif h < 240: + r, g, b = 0, x, c + elif h < 300: + r, g, b = x, 0, c + else: + r, g, b = c, 0, x + + return [int((r + m) * 255), int((g + m) * 255), int((b + m) * 255)] + + +def prim_blend_color(c1: List[int], c2: List[int], alpha: float) -> List[int]: + """Blend two colors.""" + alpha = np.clip(alpha, 0, 1) + return [int(c1[i] * (1 - alpha) + c2[i] * alpha) for i in range(3)] + + +def prim_average_color(img: np.ndarray) -> List[int]: + """Get average color of image/region.""" + return [int(x) for x in img.mean(axis=(0, 1))] + + +# ============================================================================= +# Image Operations (Bulk) +# ============================================================================= + +def prim_map_pixels(img: np.ndarray, fn: Callable) -> np.ndarray: + """Apply function to each pixel: fn(x, y, [r,g,b]) -> [r,g,b].""" + result = img.copy() + h, w = img.shape[:2] + for y in range(h): + for x in range(w): + color = list(img[y, x]) + new_color = fn(x, y, color) + if new_color is not None: + result[y, x] = new_color[:3] + return result + + +def prim_map_rows(img: np.ndarray, fn: Callable) -> np.ndarray: + """Apply function to each row: fn(y, row) -> row.""" + result = img.copy() + h = img.shape[0] + for y in range(h): + row = img[y].copy() + new_row = fn(y, row) + if new_row is not None: + result[y] = new_row + return 
result + + +def prim_for_grid(img: np.ndarray, cell_size: int, fn: Callable) -> np.ndarray: + """Iterate over grid cells: fn(gx, gy, cell_img) for side effects.""" + cell_size = max(1, int(cell_size)) + h, w = img.shape[:2] + rows = h // cell_size + cols = w // cell_size + + for gy in range(rows): + for gx in range(cols): + y, x = gy * cell_size, gx * cell_size + cell = img[y:y + cell_size, x:x + cell_size] + fn(gx, gy, cell) + + return img + + +def prim_fold_pixels(img: np.ndarray, init: Any, fn: Callable) -> Any: + """Fold over pixels: fn(acc, x, y, color) -> acc.""" + acc = init + h, w = img.shape[:2] + for y in range(h): + for x in range(w): + color = list(img[y, x]) + acc = fn(acc, x, y, color) + return acc + + +# ============================================================================= +# Convolution / Filters +# ============================================================================= + +def prim_convolve(img: np.ndarray, kernel: List[List[float]]) -> np.ndarray: + """Apply convolution kernel.""" + k = np.array(kernel, dtype=np.float32) + return cv2.filter2D(img, -1, k) + + +def prim_blur(img: np.ndarray, radius: int) -> np.ndarray: + """Gaussian blur.""" + radius = max(1, int(radius)) + ksize = radius * 2 + 1 + return cv2.GaussianBlur(img, (ksize, ksize), 0) + + +def prim_box_blur(img: np.ndarray, radius: int) -> np.ndarray: + """Box blur (faster than Gaussian).""" + radius = max(1, int(radius)) + ksize = radius * 2 + 1 + return cv2.blur(img, (ksize, ksize)) + + +def prim_edges(img: np.ndarray, low: int = 50, high: int = 150) -> np.ndarray: + """Canny edge detection, returns grayscale edges.""" + gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) + edges = cv2.Canny(gray, int(low), int(high)) + return cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB) + + +def prim_sobel(img: np.ndarray) -> np.ndarray: + """Sobel edge detection.""" + gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY).astype(np.float32) + sx = cv2.Sobel(gray, cv2.CV_32F, 1, 0) + sy = cv2.Sobel(gray, 
cv2.CV_32F, 0, 1) + magnitude = np.sqrt(sx ** 2 + sy ** 2) + magnitude = np.clip(magnitude, 0, 255).astype(np.uint8) + return cv2.cvtColor(magnitude, cv2.COLOR_GRAY2RGB) + + +def prim_dilate(img: np.ndarray, size: int = 1) -> np.ndarray: + """Morphological dilation.""" + kernel = np.ones((size, size), np.uint8) + return cv2.dilate(img, kernel, iterations=1) + + +def prim_erode(img: np.ndarray, size: int = 1) -> np.ndarray: + """Morphological erosion.""" + kernel = np.ones((size, size), np.uint8) + return cv2.erode(img, kernel, iterations=1) + + +# ============================================================================= +# Geometric Transforms +# ============================================================================= + +def prim_translate(img: np.ndarray, dx: float, dy: float) -> np.ndarray: + """Translate image.""" + h, w = img.shape[:2] + M = np.float32([[1, 0, dx], [0, 1, dy]]) + return cv2.warpAffine(img, M, (w, h), borderMode=cv2.BORDER_REFLECT) + + +def prim_rotate(img: np.ndarray, angle: float, cx: float = None, cy: float = None) -> np.ndarray: + """Rotate image around center.""" + h, w = img.shape[:2] + if cx is None: + cx = w / 2 + if cy is None: + cy = h / 2 + M = cv2.getRotationMatrix2D((cx, cy), angle, 1.0) + return cv2.warpAffine(img, M, (w, h), borderMode=cv2.BORDER_REFLECT) + + +def prim_scale(img: np.ndarray, sx: float, sy: float, cx: float = None, cy: float = None) -> np.ndarray: + """Scale image around center.""" + h, w = img.shape[:2] + if cx is None: + cx = w / 2 + if cy is None: + cy = h / 2 + + M = np.float32([ + [sx, 0, cx * (1 - sx)], + [0, sy, cy * (1 - sy)] + ]) + return cv2.warpAffine(img, M, (w, h), borderMode=cv2.BORDER_REFLECT) + + +def prim_flip_h(img: np.ndarray) -> np.ndarray: + """Flip horizontally.""" + return cv2.flip(img, 1) + + +def prim_flip_v(img: np.ndarray) -> np.ndarray: + """Flip vertically.""" + return cv2.flip(img, 0) + + +def prim_remap(img: np.ndarray, map_x: np.ndarray, map_y: np.ndarray) -> np.ndarray: + 
"""Remap using coordinate maps.""" + return cv2.remap(img, map_x.astype(np.float32), map_y.astype(np.float32), + cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT) + + +def prim_make_coords(w: int, h: int) -> Tuple[np.ndarray, np.ndarray]: + """Create coordinate grid (map_x, map_y).""" + map_x = np.tile(np.arange(w, dtype=np.float32), (h, 1)) + map_y = np.tile(np.arange(h, dtype=np.float32).reshape(-1, 1), (1, w)) + return map_x, map_y + + +# ============================================================================= +# Blending +# ============================================================================= + +def prim_blend_images(a: np.ndarray, b: np.ndarray, alpha: float) -> np.ndarray: + """Blend two images. Auto-resizes b to match a if sizes differ.""" + alpha = np.clip(alpha, 0, 1) + # Auto-resize b to match a if different sizes + if a.shape[:2] != b.shape[:2]: + b = cv2.resize(b, (a.shape[1], a.shape[0]), interpolation=cv2.INTER_LINEAR) + return (a.astype(float) * (1 - alpha) + b.astype(float) * alpha).astype(np.uint8) + + +def prim_blend_mode(a: np.ndarray, b: np.ndarray, mode: str) -> np.ndarray: + """Blend with various modes: add, multiply, screen, overlay, difference. 
+ Auto-resizes b to match a if sizes differ.""" + # Auto-resize b to match a if different sizes + if a.shape[:2] != b.shape[:2]: + b = cv2.resize(b, (a.shape[1], a.shape[0]), interpolation=cv2.INTER_LINEAR) + af = a.astype(float) / 255 + bf = b.astype(float) / 255 + + if mode == "add": + result = af + bf + elif mode == "multiply": + result = af * bf + elif mode == "screen": + result = 1 - (1 - af) * (1 - bf) + elif mode == "overlay": + mask = af < 0.5 + result = np.where(mask, 2 * af * bf, 1 - 2 * (1 - af) * (1 - bf)) + elif mode == "difference": + result = np.abs(af - bf) + elif mode == "lighten": + result = np.maximum(af, bf) + elif mode == "darken": + result = np.minimum(af, bf) + else: + result = af + + return (np.clip(result, 0, 1) * 255).astype(np.uint8) + + +def prim_mask(img: np.ndarray, mask_img: np.ndarray) -> np.ndarray: + """Apply grayscale mask to image.""" + if len(mask_img.shape) == 3: + mask = cv2.cvtColor(mask_img, cv2.COLOR_RGB2GRAY) + else: + mask = mask_img + mask_f = mask.astype(float) / 255 + result = img.astype(float) * mask_f[:, :, np.newaxis] + return result.astype(np.uint8) + + +# ============================================================================= +# Drawing +# ============================================================================= + +# Simple font (5x7 bitmap characters) +FONT_5X7 = { + ' ': [0, 0, 0, 0, 0, 0, 0], + '.': [0, 0, 0, 0, 0, 0, 4], + ':': [0, 0, 4, 0, 4, 0, 0], + '-': [0, 0, 0, 14, 0, 0, 0], + '=': [0, 0, 14, 0, 14, 0, 0], + '+': [0, 4, 4, 31, 4, 4, 0], + '*': [0, 4, 21, 14, 21, 4, 0], + '#': [10, 31, 10, 10, 31, 10, 0], + '%': [19, 19, 4, 8, 25, 25, 0], + '@': [14, 17, 23, 21, 23, 16, 14], + '0': [14, 17, 19, 21, 25, 17, 14], + '1': [4, 12, 4, 4, 4, 4, 14], + '2': [14, 17, 1, 2, 4, 8, 31], + '3': [31, 2, 4, 2, 1, 17, 14], + '4': [2, 6, 10, 18, 31, 2, 2], + '5': [31, 16, 30, 1, 1, 17, 14], + '6': [6, 8, 16, 30, 17, 17, 14], + '7': [31, 1, 2, 4, 8, 8, 8], + '8': [14, 17, 17, 14, 17, 17, 14], + '9': [14, 17, 17, 
15, 1, 2, 12], +} + +# Add uppercase letters +for i, c in enumerate('ABCDEFGHIJKLMNOPQRSTUVWXYZ'): + FONT_5X7[c] = [0] * 7 # Placeholder + + +def prim_draw_char(img: np.ndarray, char: str, x: int, y: int, + size: int, color: List[int]) -> np.ndarray: + """Draw a character at position.""" + # Use OpenCV's built-in font for simplicity + font = cv2.FONT_HERSHEY_SIMPLEX + scale = size / 20.0 + thickness = max(1, int(size / 10)) + cv2.putText(img, char, (int(x), int(y + size)), font, scale, tuple(color[:3]), thickness) + return img + + +def prim_draw_text(img: np.ndarray, text: str, x: int, y: int, + size: int, color: List[int]) -> np.ndarray: + """Draw text at position.""" + font = cv2.FONT_HERSHEY_SIMPLEX + scale = size / 20.0 + thickness = max(1, int(size / 10)) + cv2.putText(img, text, (int(x), int(y + size)), font, scale, tuple(color[:3]), thickness) + return img + + +def prim_fill_rect(img: np.ndarray, x: int, y: int, w: int, h: int, + color: List[int]) -> np.ndarray: + """Fill rectangle.""" + x, y, w, h = int(x), int(y), int(w), int(h) + img[y:y + h, x:x + w] = color[:3] + return img + + +def prim_draw_line(img: np.ndarray, x1: int, y1: int, x2: int, y2: int, + color: List[int], thickness: int = 1) -> np.ndarray: + """Draw line.""" + cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)), tuple(color[:3]), int(thickness)) + return img + + +# ============================================================================= +# Math Primitives +# ============================================================================= + +def prim_sin(x: float) -> float: + return math.sin(x) + + +def prim_cos(x: float) -> float: + return math.cos(x) + + +def prim_tan(x: float) -> float: + return math.tan(x) + + +def prim_atan2(y: float, x: float) -> float: + return math.atan2(y, x) + + +def prim_sqrt(x: float) -> float: + return math.sqrt(max(0, x)) + + +def prim_pow(x: float, y: float) -> float: + return math.pow(x, y) + + +def prim_abs(x: float) -> float: + return abs(x) + + +def 
prim_floor(x: float) -> int: + return int(math.floor(x)) + + +def prim_ceil(x: float) -> int: + return int(math.ceil(x)) + + +def prim_round(x: float) -> int: + return int(round(x)) + + +def prim_min(*args) -> float: + return min(args) + + +def prim_max(*args) -> float: + return max(args) + + +def prim_clamp(x: float, lo: float, hi: float) -> float: + return max(lo, min(hi, x)) + + +def prim_lerp(a: float, b: float, t: float) -> float: + """Linear interpolation.""" + return a + (b - a) * t + + +def prim_mod(a: float, b: float) -> float: + return a % b + + +def prim_random(lo: float = 0, hi: float = 1) -> float: + """Random number from global RNG.""" + return _rng.random(lo, hi) + + +def prim_randint(lo: int, hi: int) -> int: + """Random integer from global RNG.""" + return _rng.randint(lo, hi) + + +def prim_gaussian(mean: float = 0, std: float = 1) -> float: + """Gaussian random from global RNG.""" + return _rng.gaussian(mean, std) + + +def prim_assert(condition, message: str = "Assertion failed"): + """Assert that condition is true, raise error with message if false.""" + if not condition: + raise RuntimeError(f"Assertion error: {message}") + return True + + +# ============================================================================= +# Array/List Primitives +# ============================================================================= + +def prim_length(seq) -> int: + return len(seq) + + +def prim_nth(seq, i: int): + i = int(i) + if 0 <= i < len(seq): + return seq[i] + return None + + +def prim_first(seq): + return seq[0] if seq else None + + +def prim_rest(seq): + return seq[1:] if seq else [] + + +def prim_take(seq, n: int): + return seq[:int(n)] + + +def prim_drop(seq, n: int): + return seq[int(n):] + + +def prim_cons(x, seq): + return [x] + list(seq) + + +def prim_append(*seqs): + result = [] + for s in seqs: + result.extend(s) + return result + + +def prim_reverse(seq): + return list(reversed(seq)) + + +def prim_range(start: int, end: int, step: int = 
1) -> List[int]: + return list(range(int(start), int(end), int(step))) + + +def prim_roll(arr: np.ndarray, shift: int, axis: int = 0) -> np.ndarray: + """Circular roll of array.""" + return np.roll(arr, int(shift), axis=int(axis)) + + +def prim_list(*args) -> list: + """Create a list.""" + return list(args) + + +# ============================================================================= +# Primitive Registry +# ============================================================================= + +def prim_add(*args): + return sum(args) + +def prim_sub(a, b=None): + if b is None: + return -a # Unary negation + return a - b + +def prim_mul(*args): + result = 1 + for x in args: + result *= x + return result + +def prim_div(a, b): + return a / b if b != 0 else 0 + +def prim_lt(a, b): + return a < b + +def prim_gt(a, b): + return a > b + +def prim_le(a, b): + return a <= b + +def prim_ge(a, b): + return a >= b + +def prim_eq(a, b): + # Handle None/nil comparisons with numpy arrays + if a is None: + return b is None + if b is None: + return a is None + if isinstance(a, np.ndarray) or isinstance(b, np.ndarray): + if isinstance(a, np.ndarray) and isinstance(b, np.ndarray): + return np.array_equal(a, b) + return False # array vs non-array + return a == b + +def prim_ne(a, b): + return not prim_eq(a, b) + + +# ============================================================================= +# Vectorized Bulk Operations (true primitives for composing effects) +# ============================================================================= + +def prim_color_matrix(img: np.ndarray, matrix: List[List[float]]) -> np.ndarray: + """Apply a 3x3 color transformation matrix to all pixels.""" + m = np.array(matrix, dtype=np.float32) + result = img.astype(np.float32) @ m.T + return np.clip(result, 0, 255).astype(np.uint8) + + +def prim_adjust(img: np.ndarray, brightness: float = 0, contrast: float = 1) -> np.ndarray: + """Adjust brightness and contrast. 
Brightness: -255 to 255, Contrast: 0 to 3+.""" + result = (img.astype(np.float32) - 128) * contrast + 128 + brightness + return np.clip(result, 0, 255).astype(np.uint8) + + +def prim_mix_gray(img: np.ndarray, amount: float) -> np.ndarray: + """Mix image with its grayscale version. 0=original, 1=grayscale.""" + gray = 0.299 * img[:, :, 0] + 0.587 * img[:, :, 1] + 0.114 * img[:, :, 2] + gray_rgb = np.stack([gray, gray, gray], axis=-1) + result = img.astype(np.float32) * (1 - amount) + gray_rgb * amount + return np.clip(result, 0, 255).astype(np.uint8) + + +def prim_invert_img(img: np.ndarray) -> np.ndarray: + """Invert all pixel values.""" + return (255 - img).astype(np.uint8) + + +def prim_add_noise(img: np.ndarray, amount: float) -> np.ndarray: + """Add gaussian noise to image.""" + noise = _rng._rng.normal(0, amount, img.shape) + result = img.astype(np.float32) + noise + return np.clip(result, 0, 255).astype(np.uint8) + + +def prim_quantize(img: np.ndarray, levels: int) -> np.ndarray: + """Reduce to N color levels per channel.""" + levels = max(2, int(levels)) + factor = 256 / levels + result = (img // factor) * factor + factor // 2 + return np.clip(result, 0, 255).astype(np.uint8) + + +def prim_shift_hsv(img: np.ndarray, h: float = 0, s: float = 1, v: float = 1) -> np.ndarray: + """Shift HSV: h=degrees offset, s/v=multipliers.""" + hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.float32) + hsv[:, :, 0] = (hsv[:, :, 0] + h / 2) % 180 + hsv[:, :, 1] = np.clip(hsv[:, :, 1] * s, 0, 255) + hsv[:, :, 2] = np.clip(hsv[:, :, 2] * v, 0, 255) + return cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2RGB) + + +# ============================================================================= +# Array Math Primitives (vectorized operations on coordinate arrays) +# ============================================================================= + +def prim_arr_add(a: np.ndarray, b) -> np.ndarray: + """Element-wise addition. 
b can be array or scalar.""" + return (np.asarray(a) + np.asarray(b)).astype(np.float32) + + +def prim_arr_sub(a: np.ndarray, b) -> np.ndarray: + """Element-wise subtraction. b can be array or scalar.""" + return (np.asarray(a) - np.asarray(b)).astype(np.float32) + + +def prim_arr_mul(a: np.ndarray, b) -> np.ndarray: + """Element-wise multiplication. b can be array or scalar.""" + return (np.asarray(a) * np.asarray(b)).astype(np.float32) + + +def prim_arr_div(a: np.ndarray, b) -> np.ndarray: + """Element-wise division. b can be array or scalar.""" + b = np.asarray(b) + # Avoid division by zero + with np.errstate(divide='ignore', invalid='ignore'): + result = np.asarray(a) / np.where(b == 0, 1e-10, b) + return result.astype(np.float32) + + +def prim_arr_mod(a: np.ndarray, b) -> np.ndarray: + """Element-wise modulo.""" + return (np.asarray(a) % np.asarray(b)).astype(np.float32) + + +def prim_arr_sin(a: np.ndarray) -> np.ndarray: + """Element-wise sine.""" + return np.sin(np.asarray(a)).astype(np.float32) + + +def prim_arr_cos(a: np.ndarray) -> np.ndarray: + """Element-wise cosine.""" + return np.cos(np.asarray(a)).astype(np.float32) + + +def prim_arr_tan(a: np.ndarray) -> np.ndarray: + """Element-wise tangent.""" + return np.tan(np.asarray(a)).astype(np.float32) + + +def prim_arr_sqrt(a: np.ndarray) -> np.ndarray: + """Element-wise square root.""" + return np.sqrt(np.maximum(0, np.asarray(a))).astype(np.float32) + + +def prim_arr_pow(a: np.ndarray, b) -> np.ndarray: + """Element-wise power.""" + return np.power(np.asarray(a), np.asarray(b)).astype(np.float32) + + +def prim_arr_abs(a: np.ndarray) -> np.ndarray: + """Element-wise absolute value.""" + return np.abs(np.asarray(a)).astype(np.float32) + + +def prim_arr_neg(a: np.ndarray) -> np.ndarray: + """Element-wise negation.""" + return (-np.asarray(a)).astype(np.float32) + + +def prim_arr_exp(a: np.ndarray) -> np.ndarray: + """Element-wise exponential.""" + return np.exp(np.asarray(a)).astype(np.float32) + + +def 
prim_arr_atan2(y: np.ndarray, x: np.ndarray) -> np.ndarray: + """Element-wise atan2(y, x).""" + return np.arctan2(np.asarray(y), np.asarray(x)).astype(np.float32) + + +def prim_arr_min(a: np.ndarray, b) -> np.ndarray: + """Element-wise minimum.""" + return np.minimum(np.asarray(a), np.asarray(b)).astype(np.float32) + + +def prim_arr_max(a: np.ndarray, b) -> np.ndarray: + """Element-wise maximum.""" + return np.maximum(np.asarray(a), np.asarray(b)).astype(np.float32) + + +def prim_arr_clip(a: np.ndarray, lo, hi) -> np.ndarray: + """Element-wise clip to range.""" + return np.clip(np.asarray(a), lo, hi).astype(np.float32) + + +def prim_arr_where(cond: np.ndarray, a, b) -> np.ndarray: + """Element-wise conditional: where cond is true, use a, else b.""" + return np.where(np.asarray(cond), np.asarray(a), np.asarray(b)).astype(np.float32) + + +def prim_arr_floor(a: np.ndarray) -> np.ndarray: + """Element-wise floor.""" + return np.floor(np.asarray(a)).astype(np.float32) + + +def prim_arr_lerp(a: np.ndarray, b: np.ndarray, t) -> np.ndarray: + """Element-wise linear interpolation.""" + a, b = np.asarray(a), np.asarray(b) + return (a + (b - a) * t).astype(np.float32) + + +# ============================================================================= +# Coordinate Transformation Primitives +# ============================================================================= + +def prim_polar_from_center(img_or_w, h_or_cx=None, cx=None, cy=None) -> Tuple[np.ndarray, np.ndarray]: + """ + Create polar coordinates (r, theta) from image center. 
+ + Usage: + (polar-from-center img) ; center of image + (polar-from-center img cx cy) ; custom center + (polar-from-center w h cx cy) ; explicit dimensions + + Returns: (r, theta) tuple of arrays + """ + if isinstance(img_or_w, np.ndarray): + h, w = img_or_w.shape[:2] + if h_or_cx is None: + cx, cy = w / 2, h / 2 + else: + cx, cy = h_or_cx, cx if cx is not None else h / 2 + else: + w = int(img_or_w) + h = int(h_or_cx) + cx = cx if cx is not None else w / 2 + cy = cy if cy is not None else h / 2 + + y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) + dx = x_coords - cx + dy = y_coords - cy + r = np.sqrt(dx**2 + dy**2) + theta = np.arctan2(dy, dx) + + return (r, theta) + + +def prim_cart_from_polar(r: np.ndarray, theta: np.ndarray, cx: float, cy: float) -> Tuple[np.ndarray, np.ndarray]: + """ + Convert polar coordinates back to Cartesian. + + Args: + r: radius array + theta: angle array + cx, cy: center point + + Returns: (x, y) tuple of coordinate arrays + """ + x = (cx + r * np.cos(theta)).astype(np.float32) + y = (cy + r * np.sin(theta)).astype(np.float32) + return (x, y) + + +def prim_normalize_coords(img_or_w, h_or_cx=None, cx=None, cy=None) -> Tuple[np.ndarray, np.ndarray]: + """ + Create normalized coordinates (-1 to 1) from center. 
+ + Returns: (x_norm, y_norm) tuple of arrays where center is (0,0) + """ + if isinstance(img_or_w, np.ndarray): + h, w = img_or_w.shape[:2] + if h_or_cx is None: + cx, cy = w / 2, h / 2 + else: + cx, cy = h_or_cx, cx if cx is not None else h / 2 + else: + w = int(img_or_w) + h = int(h_or_cx) + cx = cx if cx is not None else w / 2 + cy = cy if cy is not None else h / 2 + + y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) + x_norm = (x_coords - cx) / (w / 2) + y_norm = (y_coords - cy) / (h / 2) + + return (x_norm, y_norm) + + +def prim_coords_x(coords: Tuple[np.ndarray, np.ndarray]) -> np.ndarray: + """Get x/first component from coordinate tuple.""" + return coords[0] + + +def prim_coords_y(coords: Tuple[np.ndarray, np.ndarray]) -> np.ndarray: + """Get y/second component from coordinate tuple.""" + return coords[1] + + +def prim_make_coords_centered(w: int, h: int, cx: float = None, cy: float = None) -> Tuple[np.ndarray, np.ndarray]: + """ + Create coordinate grids centered at (cx, cy). + Like make-coords but returns coordinates relative to center. + """ + w, h = int(w), int(h) + if cx is None: + cx = w / 2 + if cy is None: + cy = h / 2 + + y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) + return (x_coords - cx, y_coords - cy) + + +# ============================================================================= +# Specialized Distortion Primitives +# ============================================================================= + +def prim_wave_displace(w: int, h: int, axis: str, freq: float, amp: float, phase: float = 0) -> Tuple[np.ndarray, np.ndarray]: + """ + Create wave displacement maps. 
+ + Args: + w, h: dimensions + axis: "x" (horizontal waves) or "y" (vertical waves) + freq: wave frequency (waves per image width/height) + amp: wave amplitude in pixels + phase: phase offset in radians + + Returns: (map_x, map_y) for use with remap + """ + w, h = int(w), int(h) + map_x = np.tile(np.arange(w, dtype=np.float32), (h, 1)) + map_y = np.tile(np.arange(h, dtype=np.float32).reshape(-1, 1), (1, w)) + + if axis == "x" or axis == "horizontal": + # Horizontal waves: displace x based on y + wave = np.sin(2 * np.pi * freq * map_y / h + phase) * amp + map_x = map_x + wave + elif axis == "y" or axis == "vertical": + # Vertical waves: displace y based on x + wave = np.sin(2 * np.pi * freq * map_x / w + phase) * amp + map_y = map_y + wave + elif axis == "both": + wave_x = np.sin(2 * np.pi * freq * map_y / h + phase) * amp + wave_y = np.sin(2 * np.pi * freq * map_x / w + phase) * amp + map_x = map_x + wave_x + map_y = map_y + wave_y + + return (map_x, map_y) + + +def prim_swirl_displace(w: int, h: int, strength: float, radius: float = 0.5, + cx: float = None, cy: float = None, falloff: str = "quadratic") -> Tuple[np.ndarray, np.ndarray]: + """ + Create swirl displacement maps. 
+ + Args: + w, h: dimensions + strength: swirl strength in radians + radius: effect radius as fraction of max dimension + cx, cy: center (defaults to image center) + falloff: "linear", "quadratic", or "gaussian" + + Returns: (map_x, map_y) for use with remap + """ + w, h = int(w), int(h) + if cx is None: + cx = w / 2 + if cy is None: + cy = h / 2 + + radius_px = max(w, h) * radius + + y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) + dx = x_coords - cx + dy = y_coords - cy + dist = np.sqrt(dx**2 + dy**2) + angle = np.arctan2(dy, dx) + + # Normalized distance for falloff + norm_dist = dist / radius_px + + # Calculate falloff factor + if falloff == "linear": + factor = np.maximum(0, 1 - norm_dist) + elif falloff == "gaussian": + factor = np.exp(-norm_dist**2 * 2) + else: # quadratic + factor = np.maximum(0, 1 - norm_dist**2) + + # Apply swirl rotation + new_angle = angle + strength * factor + + # Calculate new coordinates + map_x = (cx + dist * np.cos(new_angle)).astype(np.float32) + map_y = (cy + dist * np.sin(new_angle)).astype(np.float32) + + return (map_x, map_y) + + +def prim_fisheye_displace(w: int, h: int, strength: float, cx: float = None, cy: float = None, + zoom_correct: bool = True) -> Tuple[np.ndarray, np.ndarray]: + """ + Create fisheye/barrel distortion displacement maps. 
+ + Args: + w, h: dimensions + strength: distortion strength (-1 to 1, positive=bulge, negative=pinch) + cx, cy: center (defaults to image center) + zoom_correct: auto-zoom to hide black edges + + Returns: (map_x, map_y) for use with remap + """ + w, h = int(w), int(h) + if cx is None: + cx = w / 2 + if cy is None: + cy = h / 2 + + y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) + + # Normalize coordinates + x_norm = (x_coords - cx) / (w / 2) + y_norm = (y_coords - cy) / (h / 2) + r = np.sqrt(x_norm**2 + y_norm**2) + + # Apply barrel/pincushion distortion + if strength > 0: + r_distorted = r * (1 + strength * r**2) + else: + r_distorted = r / (1 - strength * r**2 + 0.001) + + # Calculate scale factor + with np.errstate(divide='ignore', invalid='ignore'): + scale = np.where(r > 0, r_distorted / r, 1) + + # Apply zoom correction + if zoom_correct and strength > 0: + zoom = 1 + strength * 0.5 + scale = scale / zoom + + # Calculate new coordinates + map_x = (x_norm * scale * (w / 2) + cx).astype(np.float32) + map_y = (y_norm * scale * (h / 2) + cy).astype(np.float32) + + return (map_x, map_y) + + +def prim_kaleidoscope_displace(w: int, h: int, segments: int, rotation: float = 0, + cx: float = None, cy: float = None, zoom: float = 1.0) -> Tuple[np.ndarray, np.ndarray]: + """ + Create kaleidoscope displacement maps. 
+ + Args: + w, h: dimensions + segments: number of symmetry segments (3-16) + rotation: rotation angle in degrees + cx, cy: center (defaults to image center) + zoom: zoom factor + + Returns: (map_x, map_y) for use with remap + """ + w, h = int(w), int(h) + segments = max(3, min(int(segments), 16)) + if cx is None: + cx = w / 2 + if cy is None: + cy = h / 2 + + segment_angle = 2 * np.pi / segments + + y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) + + # Translate to center + x_centered = x_coords - cx + y_centered = y_coords - cy + + # Convert to polar + r = np.sqrt(x_centered**2 + y_centered**2) + theta = np.arctan2(y_centered, x_centered) + + # Apply rotation + theta = theta - np.deg2rad(rotation) + + # Fold angle into first segment and mirror + theta_normalized = theta % (2 * np.pi) + segment_idx = (theta_normalized / segment_angle).astype(int) + theta_in_segment = theta_normalized - segment_idx * segment_angle + + # Mirror alternating segments + mirror_mask = (segment_idx % 2) == 1 + theta_in_segment = np.where(mirror_mask, segment_angle - theta_in_segment, theta_in_segment) + + # Apply zoom + r = r / zoom + + # Convert back to Cartesian + map_x = (r * np.cos(theta_in_segment) + cx).astype(np.float32) + map_y = (r * np.sin(theta_in_segment) + cy).astype(np.float32) + + return (map_x, map_y) + + +# ============================================================================= +# Character/ASCII Art Primitives +# ============================================================================= + +# Character sets ordered by visual density (light to dark) +CHAR_ALPHABETS = { + "standard": " .`'^\",:;Il!i><~+_-?][}{1)(|/tfjrxnuvczXYUJCLQ0OZmwqpdbkhao*#MW&8%B@$", + "blocks": " ░▒▓█", + "simple": " .-:=+*#%@", + "digits": " 0123456789", +} + +# Global atlas cache +_char_atlas_cache = {} + + +def _get_char_atlas(alphabet: str, cell_size: int) -> dict: + """Get or create character atlas for alphabet.""" + cache_key = f"{alphabet}_{cell_size}" + if cache_key in 
_char_atlas_cache: + return _char_atlas_cache[cache_key] + + chars = CHAR_ALPHABETS.get(alphabet, alphabet) # Use as literal if not found + font = cv2.FONT_HERSHEY_SIMPLEX + font_scale = cell_size / 20.0 + thickness = max(1, int(cell_size / 10)) + + atlas = {} + for char in chars: + char_img = np.zeros((cell_size, cell_size), dtype=np.uint8) + if char != ' ': + try: + (text_w, text_h), baseline = cv2.getTextSize(char, font, font_scale, thickness) + text_x = max(0, (cell_size - text_w) // 2) + text_y = (cell_size + text_h) // 2 + cv2.putText(char_img, char, (text_x, text_y), font, font_scale, 255, thickness, cv2.LINE_AA) + except: + pass + atlas[char] = char_img + + _char_atlas_cache[cache_key] = atlas + return atlas + + +def prim_cell_sample(img: np.ndarray, cell_size: int) -> Tuple[np.ndarray, np.ndarray]: + """ + Sample image into cell grid, returning average colors and luminances. + + Args: + img: source image + cell_size: size of each cell in pixels + + Returns: (colors, luminances) tuple + - colors: (rows, cols, 3) array of average RGB per cell + - luminances: (rows, cols) array of average brightness 0-255 + """ + cell_size = max(1, int(cell_size)) + h, w = img.shape[:2] + rows = h // cell_size + cols = w // cell_size + + if rows < 1 or cols < 1: + return (np.zeros((1, 1, 3), dtype=np.uint8), + np.zeros((1, 1), dtype=np.float32)) + + # Crop to grid + grid_h, grid_w = rows * cell_size, cols * cell_size + cropped = img[:grid_h, :grid_w] + + # Reshape and average + reshaped = cropped.reshape(rows, cell_size, cols, cell_size, 3) + colors = reshaped.mean(axis=(1, 3)).astype(np.uint8) + + # Compute luminance + luminances = (0.299 * colors[:, :, 0] + + 0.587 * colors[:, :, 1] + + 0.114 * colors[:, :, 2]).astype(np.float32) + + return (colors, luminances) + + +def prim_luminance_to_chars(luminances: np.ndarray, alphabet: str, contrast: float = 1.0) -> List[List[str]]: + """ + Map luminance values to characters from alphabet. 
def prim_render_char_grid(img: np.ndarray, chars: List[List[str]], colors: np.ndarray,
                          cell_size: int, color_mode: str = "color",
                          background: List[int] = None) -> np.ndarray:
    """
    Render a grid of characters onto an image.

    Args:
        img: source image (for dimensions)
        chars: 2D list of single characters
        colors: (rows, cols, 3) array of colors per cell
        cell_size: size of each cell in pixels
        color_mode: "color", "mono", or "invert"
        background: RGB background color (defaults to black)

    Returns: rendered image, cropped/padded to match img's dimensions
    """
    cell_size = max(1, int(cell_size))

    # Empty grid: nothing to draw.
    if not chars or not chars[0]:
        return img.copy()

    rows = len(chars)
    cols = len(chars[0])
    h, w = rows * cell_size, cols * cell_size

    # Default background
    if background is None:
        background = [0, 0, 0]
    bg = list(background)[:3]

    result = np.full((h, w, 3), bg, dtype=np.uint8)

    # Collect all unique characters so the glyph atlas is rendered once
    # per character rather than once per cell.
    unique_chars = {ch for row in chars for ch in row}

    # Build atlas for unique chars
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = cell_size / 20.0
    thickness = max(1, int(cell_size / 10))

    atlas = {}
    for char in unique_chars:
        char_img = np.zeros((cell_size, cell_size), dtype=np.uint8)
        if char and char != ' ':
            try:
                (text_w, text_h), _ = cv2.getTextSize(char, font, font_scale, thickness)
                text_x = max(0, (cell_size - text_w) // 2)
                text_y = (cell_size + text_h) // 2
                cv2.putText(char_img, char, (text_x, text_y), font, font_scale, 255, thickness, cv2.LINE_AA)
            except Exception:
                # Best-effort: an unrenderable glyph leaves a blank cell rather
                # than aborting the frame. Narrowed from a bare `except:` so
                # KeyboardInterrupt/SystemExit are no longer swallowed.
                pass
        atlas[char] = char_img

    # Render characters
    for r in range(rows):
        for c in range(cols):
            char = chars[r][c]
            if not char or char == ' ':
                continue

            y1, x1 = r * cell_size, c * cell_size
            char_mask = atlas.get(char)

            if char_mask is None:
                continue

            if color_mode == "mono":
                color = np.array([255, 255, 255], dtype=np.uint8)
            elif color_mode == "invert":
                # Invert mode: fill the cell with the sampled color and
                # stamp the glyph in black on top of it.
                result[y1:y1+cell_size, x1:x1+cell_size] = colors[r, c]
                color = np.array([0, 0, 0], dtype=np.uint8)
            else:  # color
                color = colors[r, c]

            mask = char_mask > 0
            result[y1:y1+cell_size, x1:x1+cell_size][mask] = color

    # Match the source frame's dimensions. Note this crops/pads rather than
    # scaling, so the character grid keeps its native cell size.
    orig_h, orig_w = img.shape[:2]
    if result.shape[0] != orig_h or result.shape[1] != orig_w:
        padded = np.full((orig_h, orig_w, 3), bg, dtype=np.uint8)
        copy_h = min(h, orig_h)
        copy_w = min(w, orig_w)
        padded[:copy_h, :copy_w] = result[:copy_h, :copy_w]
        result = padded

    return result


def prim_make_char_grid(rows: int, cols: int, fill_char: str = " ") -> List[List[str]]:
    """Create a (rows x cols) character grid filled with fill_char."""
    return [[fill_char for _ in range(cols)] for _ in range(rows)]


def prim_set_char(chars: List[List[str]], row: int, col: int, char: str) -> List[List[str]]:
    """Set a character at position (returns a modified copy; out-of-range is a no-op)."""
    result = [r[:] for r in chars]  # shallow copy rows so the input grid is untouched
    if 0 <= row < len(result) and 0 <= col < len(result[0]):
        result[row][col] = char
    return result


def prim_get_char(chars: List[List[str]], row: int, col: int) -> str:
    """Get character at position; out-of-range positions read as a space."""
    if 0 <= row < len(chars) and 0 <= col < len(chars[0]):
        return chars[row][col]
    return " "


def prim_char_grid_dimensions(chars: List[List[str]]) -> Tuple[int, int]:
    """Get (rows, cols) of a character grid; empty grids yield zeros."""
    if not chars:
        return (0, 0)
    return (len(chars), len(chars[0]) if chars[0] else 0)


def prim_alphabet_char(alphabet: str, index: int) -> str:
    """Get character at index from a named or literal alphabet (index wraps around)."""
    # A name not present in CHAR_ALPHABETS is treated as a literal alphabet string.
    chars = CHAR_ALPHABETS.get(alphabet, alphabet)
    if not chars:
        return " "
    return chars[int(index) % len(chars)]


def prim_alphabet_length(alphabet: str) -> int:
    """Get length of a named or literal alphabet."""
    chars = CHAR_ALPHABETS.get(alphabet, alphabet)
    return len(chars)
+ + Example: + (map-char-grid chars luminances + (lambda (r c ch lum) + (if (> lum 128) + (alphabet-char "blocks" (floor (/ lum 50))) + ch))) + """ + if not chars or not chars[0]: + return chars + + rows = len(chars) + cols = len(chars[0]) + result = [] + + for r in range(rows): + row = [] + for c in range(cols): + ch = chars[r][c] + lum = float(luminances[r, c]) if r < luminances.shape[0] and c < luminances.shape[1] else 0 + new_ch = fn(r, c, ch, lum) + row.append(str(new_ch) if new_ch else " ") + result.append(row) + + return result + + +def prim_map_colors(colors: np.ndarray, fn: Callable) -> np.ndarray: + """ + Map a function over color grid. + + fn receives (row, col, color) and returns new [r, g, b]. + Color is a list [r, g, b]. + """ + if colors.size == 0: + return colors + + rows, cols = colors.shape[:2] + result = colors.copy() + + for r in range(rows): + for c in range(cols): + color = list(colors[r, c]) + new_color = fn(r, c, color) + if new_color is not None: + result[r, c] = new_color[:3] + + return result + + +# ============================================================================= +# Glitch Art Primitives +# ============================================================================= + +def prim_pixelsort(img: np.ndarray, sort_by: str = "lightness", + threshold_low: float = 50, threshold_high: float = 200, + angle: float = 0, reverse: bool = False) -> np.ndarray: + """ + Pixel sorting glitch effect. 
def prim_pixelsort(img: np.ndarray, sort_by: str = "lightness",
                   threshold_low: float = 50, threshold_high: float = 200,
                   angle: float = 0, reverse: bool = False) -> np.ndarray:
    """
    Pixel sorting glitch effect.

    Args:
        img: source image
        sort_by: "lightness", "hue", "saturation", "red", "green", "blue"
        threshold_low: pixels below this aren't sorted
        threshold_high: pixels above this aren't sorted
        angle: 0 = horizontal, 90 = vertical
        reverse: reverse sort order
    """
    h, w = img.shape[:2]

    # Rotate for vertical sorting.
    # Angles near vertical (45..135 mod 180) are handled by transposing the
    # image (axis swap, not a true rotation) so the row loop below covers
    # both orientations.
    if 45 <= (angle % 180) <= 135:
        frame = np.transpose(img, (1, 0, 2))
        h, w = frame.shape[:2]
        rotated = True
    else:
        frame = img
        rotated = False

    result = frame.copy()

    # Get sort values: one scalar key per pixel, chosen by `sort_by`.
    if sort_by == "lightness":
        sort_values = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY).astype(np.float32)
    elif sort_by == "hue":
        hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
        sort_values = hsv[:, :, 0].astype(np.float32)
    elif sort_by == "saturation":
        hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
        sort_values = hsv[:, :, 1].astype(np.float32)
    elif sort_by == "red":
        sort_values = frame[:, :, 0].astype(np.float32)
    elif sort_by == "green":
        sort_values = frame[:, :, 1].astype(np.float32)
    elif sort_by == "blue":
        sort_values = frame[:, :, 2].astype(np.float32)
    else:
        # Unknown key falls back to lightness.
        sort_values = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY).astype(np.float32)

    # Create mask: only pixels inside [threshold_low, threshold_high] take
    # part in sorting; everything else anchors segment boundaries.
    mask = (sort_values >= threshold_low) & (sort_values <= threshold_high)

    # Sort each row independently.
    for y in range(h):
        row = result[y].copy()
        row_mask = mask[y]
        row_values = sort_values[y]

        # Find contiguous runs of masked-in pixels; each run is sorted on
        # its own so unmasked pixels never move.
        segments = []
        start = None
        for i, val in enumerate(row_mask):
            if val and start is None:
                start = i
            elif not val and start is not None:
                segments.append((start, i))
                start = None
        if start is not None:
            # Run extends to the end of the row.
            segments.append((start, len(row_mask)))

        # Sort each segment by its key values (argsort gives the pixel
        # permutation; reversing it flips light-to-dark ordering).
        for seg_start, seg_end in segments:
            if seg_end - seg_start > 1:
                segment_values = row_values[seg_start:seg_end]
                sort_indices = np.argsort(segment_values)
                if reverse:
                    sort_indices = sort_indices[::-1]
                row[seg_start:seg_end] = row[seg_start:seg_end][sort_indices]

        result[y] = row

    # Rotate back (undo the transpose for the vertical case).
    if rotated:
        result = np.transpose(result, (1, 0, 2))

    # Transpose returns a strided view; force a contiguous buffer for callers
    # that hand the array to C code (e.g. cv2 or raw-frame encoders).
    return np.ascontiguousarray(result)


def prim_datamosh(img: np.ndarray, prev_frame: np.ndarray,
                  block_size: int = 32, corruption: float = 0.3,
                  max_offset: int = 50, color_corrupt: bool = True) -> np.ndarray:
    """
    Datamosh/glitch block corruption effect.

    Args:
        img: current frame
        prev_frame: previous frame (or None)
        block_size: size of corruption blocks
        corruption: probability 0-1 of corrupting each block
        max_offset: maximum pixel shift
        color_corrupt: also apply color channel shifts
    """
    if corruption <= 0:
        return img.copy()

    # Clamp block size to a sane range.
    block_size = max(8, min(int(block_size), 128))
    h, w = img.shape[:2]
    result = img.copy()

    for by in range(0, h, block_size):
        for bx in range(0, w, block_size):
            # Edge blocks may be smaller than block_size.
            bh = min(block_size, h - by)
            bw = min(block_size, w - bx)

            if _rng.random() < corruption:
                # Pick one of four corruption styles per block.
                # NOTE(review): assumes _rng is random.Random-like (inclusive
                # randint bounds) — confirm against its definition above.
                corruption_type = _rng.randint(0, 3)

                if corruption_type == 0 and max_offset > 0:
                    # Shift: copy this block from a randomly offset source
                    # position, clamped so the source stays inside the frame.
                    ox = _rng.randint(-max_offset, max_offset)
                    oy = _rng.randint(-max_offset, max_offset)
                    src_x = max(0, min(bx + ox, w - bw))
                    src_y = max(0, min(by + oy, h - bh))
                    result[by:by+bh, bx:bx+bw] = img[src_y:src_y+bh, src_x:src_x+bw]

                elif corruption_type == 1 and prev_frame is not None:
                    # Duplicate from previous frame (only when shapes match).
                    if prev_frame.shape == img.shape:
                        result[by:by+bh, bx:bx+bw] = prev_frame[by:by+bh, bx:bx+bw]

                elif corruption_type == 2 and color_corrupt:
                    # Color channel shift: roll one channel vertically within
                    # the block for a chroma-smear look.
                    block = result[by:by+bh, bx:bx+bw].copy()
                    shift = _rng.randint(1, 3)
                    channel = _rng.randint(0, 2)
                    block[:, :, channel] = np.roll(block[:, :, channel], shift, axis=0)
                    result[by:by+bh, bx:bx+bw] = block

                else:
                    # Swap with another block picked uniformly from the frame.
                    other_bx = _rng.randint(0, max(0, w - bw))
                    other_by = _rng.randint(0, max(0, h - bh))
                    temp = result[by:by+bh, bx:bx+bw].copy()
                    result[by:by+bh, bx:bx+bw] = img[other_by:other_by+bh, other_bx:other_bx+bw]
                    result[other_by:other_by+bh, other_bx:other_bx+bw] = temp

    return result
result[other_by:other_by+bh, other_bx:other_bx+bw] = temp + + return result + + +def prim_ripple_displace(w: int, h: int, freq: float, amp: float, cx: float = None, cy: float = None, + decay: float = 0, phase: float = 0) -> Tuple[np.ndarray, np.ndarray]: + """ + Create radial ripple displacement maps. + + Args: + w, h: dimensions + freq: ripple frequency + amp: ripple amplitude in pixels + cx, cy: center + decay: how fast ripples decay with distance (0 = no decay) + phase: phase offset + + Returns: (map_x, map_y) for use with remap + """ + w, h = int(w), int(h) + if cx is None: + cx = w / 2 + if cy is None: + cy = h / 2 + + y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32) + dx = x_coords - cx + dy = y_coords - cy + dist = np.sqrt(dx**2 + dy**2) + + # Calculate ripple displacement (radial) + ripple = np.sin(2 * np.pi * freq * dist / max(w, h) + phase) * amp + + # Apply decay + if decay > 0: + ripple = ripple * np.exp(-dist * decay / max(w, h)) + + # Displace along radial direction + with np.errstate(divide='ignore', invalid='ignore'): + norm_dx = np.where(dist > 0, dx / dist, 0) + norm_dy = np.where(dist > 0, dy / dist, 0) + + map_x = (x_coords + ripple * norm_dx).astype(np.float32) + map_y = (y_coords + ripple * norm_dy).astype(np.float32) + + return (map_x, map_y) + + +PRIMITIVES = { + # Arithmetic + '+': prim_add, + '-': prim_sub, + '*': prim_mul, + '/': prim_div, + + # Comparison + '<': prim_lt, + '>': prim_gt, + '<=': prim_le, + '>=': prim_ge, + '=': prim_eq, + '!=': prim_ne, + + # Image + 'width': prim_width, + 'height': prim_height, + 'make-image': prim_make_image, + 'copy': prim_copy, + 'pixel': prim_pixel, + 'set-pixel': prim_set_pixel, + 'sample': prim_sample, + 'channel': prim_channel, + 'merge-channels': prim_merge_channels, + 'resize': prim_resize, + 'crop': prim_crop, + 'paste': prim_paste, + + # Color + 'rgb': prim_rgb, + 'red': prim_red, + 'green': prim_green, + 'blue': prim_blue, + 'luminance': prim_luminance, + 'rgb->hsv': 
# Registry mapping S-expression symbol names to their Python implementations.
# Keys are the names visible to effect scripts; values are the prim_* callables
# defined above. NOTE: 'pi' and 'tau' map to float constants, not callables.
PRIMITIVES = {
    # Arithmetic
    '+': prim_add,
    '-': prim_sub,
    '*': prim_mul,
    '/': prim_div,

    # Comparison
    '<': prim_lt,
    '>': prim_gt,
    '<=': prim_le,
    '>=': prim_ge,
    '=': prim_eq,
    '!=': prim_ne,

    # Image
    'width': prim_width,
    'height': prim_height,
    'make-image': prim_make_image,
    'copy': prim_copy,
    'pixel': prim_pixel,
    'set-pixel': prim_set_pixel,
    'sample': prim_sample,
    'channel': prim_channel,
    'merge-channels': prim_merge_channels,
    'resize': prim_resize,
    'crop': prim_crop,
    'paste': prim_paste,

    # Color
    'rgb': prim_rgb,
    'red': prim_red,
    'green': prim_green,
    'blue': prim_blue,
    'luminance': prim_luminance,
    'rgb->hsv': prim_rgb_to_hsv,
    'hsv->rgb': prim_hsv_to_rgb,
    'blend-color': prim_blend_color,
    'average-color': prim_average_color,

    # Vectorized bulk operations
    'color-matrix': prim_color_matrix,
    'adjust': prim_adjust,
    'mix-gray': prim_mix_gray,
    'invert-img': prim_invert_img,
    'add-noise': prim_add_noise,
    'quantize': prim_quantize,
    'shift-hsv': prim_shift_hsv,

    # Bulk operations (per-pixel/per-row callbacks)
    'map-pixels': prim_map_pixels,
    'map-rows': prim_map_rows,
    'for-grid': prim_for_grid,
    'fold-pixels': prim_fold_pixels,

    # Filters
    'convolve': prim_convolve,
    'blur': prim_blur,
    'box-blur': prim_box_blur,
    'edges': prim_edges,
    'sobel': prim_sobel,
    'dilate': prim_dilate,
    'erode': prim_erode,

    # Geometry
    'translate': prim_translate,
    'rotate-img': prim_rotate,
    'scale-img': prim_scale,
    'flip-h': prim_flip_h,
    'flip-v': prim_flip_v,
    'remap': prim_remap,
    'make-coords': prim_make_coords,

    # Blending
    'blend-images': prim_blend_images,
    'blend-mode': prim_blend_mode,
    'mask': prim_mask,

    # Drawing
    'draw-char': prim_draw_char,
    'draw-text': prim_draw_text,
    'fill-rect': prim_fill_rect,
    'draw-line': prim_draw_line,

    # Math (scalar)
    'sin': prim_sin,
    'cos': prim_cos,
    'tan': prim_tan,
    'atan2': prim_atan2,
    'sqrt': prim_sqrt,
    'pow': prim_pow,
    'abs': prim_abs,
    'floor': prim_floor,
    'ceil': prim_ceil,
    'round': prim_round,
    'min': prim_min,
    'max': prim_max,
    'clamp': prim_clamp,
    'lerp': prim_lerp,
    'mod': prim_mod,
    'random': prim_random,
    'randint': prim_randint,
    'gaussian': prim_gaussian,
    'assert': prim_assert,
    'pi': math.pi,
    'tau': math.tau,

    # Array / list operations
    'length': prim_length,
    'len': prim_length,  # alias
    'nth': prim_nth,
    'first': prim_first,
    'rest': prim_rest,
    'take': prim_take,
    'drop': prim_drop,
    'cons': prim_cons,
    'append': prim_append,
    'reverse': prim_reverse,
    'range': prim_range,
    'roll': prim_roll,
    'list': prim_list,

    # Array math (vectorized operations on coordinate arrays)
    'arr+': prim_arr_add,
    'arr-': prim_arr_sub,
    'arr*': prim_arr_mul,
    'arr/': prim_arr_div,
    'arr-mod': prim_arr_mod,
    'arr-sin': prim_arr_sin,
    'arr-cos': prim_arr_cos,
    'arr-tan': prim_arr_tan,
    'arr-sqrt': prim_arr_sqrt,
    'arr-pow': prim_arr_pow,
    'arr-abs': prim_arr_abs,
    'arr-neg': prim_arr_neg,
    'arr-exp': prim_arr_exp,
    'arr-atan2': prim_arr_atan2,
    'arr-min': prim_arr_min,
    'arr-max': prim_arr_max,
    'arr-clip': prim_arr_clip,
    'arr-where': prim_arr_where,
    'arr-floor': prim_arr_floor,
    'arr-lerp': prim_arr_lerp,

    # Coordinate transformations
    'polar-from-center': prim_polar_from_center,
    'cart-from-polar': prim_cart_from_polar,
    'normalize-coords': prim_normalize_coords,
    'coords-x': prim_coords_x,
    'coords-y': prim_coords_y,
    'make-coords-centered': prim_make_coords_centered,

    # Specialized distortion maps (for use with 'remap')
    'wave-displace': prim_wave_displace,
    'swirl-displace': prim_swirl_displace,
    'fisheye-displace': prim_fisheye_displace,
    'kaleidoscope-displace': prim_kaleidoscope_displace,
    'ripple-displace': prim_ripple_displace,

    # Character/ASCII art
    'cell-sample': prim_cell_sample,
    'luminance-to-chars': prim_luminance_to_chars,
    'render-char-grid': prim_render_char_grid,
    'make-char-grid': prim_make_char_grid,
    'set-char': prim_set_char,
    'get-char': prim_get_char,
    'char-grid-dimensions': prim_char_grid_dimensions,
    'alphabet-char': prim_alphabet_char,
    'alphabet-length': prim_alphabet_length,
    'map-char-grid': prim_map_char_grid,
    'map-colors': prim_map_colors,

    # Glitch art
    'pixelsort': prim_pixelsort,
    'datamosh': prim_datamosh,
}
+""" + +import numpy as np +import sys +from pathlib import Path + +# Add parent to path +sys.path.insert(0, str(Path(__file__).parent.parent)) + +from sexp_effects import ( + get_interpreter, + load_effects_dir, + run_effect, + list_effects, + parse, +) + + +def test_parser(): + """Test S-expression parser.""" + print("Testing parser...") + + # Simple expressions + assert parse("42") == 42 + assert parse("3.14") == 3.14 + assert parse('"hello"') == "hello" + assert parse("true") == True + + # Lists + assert parse("(+ 1 2)")[0].name == "+" + assert parse("(+ 1 2)")[1] == 1 + + # Nested + expr = parse("(define x (+ 1 2))") + assert expr[0].name == "define" + + print(" Parser OK") + + +def test_interpreter_basics(): + """Test basic interpreter operations.""" + print("Testing interpreter basics...") + + interp = get_interpreter() + + # Math + assert interp.eval(parse("(+ 1 2)")) == 3 + assert interp.eval(parse("(* 3 4)")) == 12 + assert interp.eval(parse("(- 10 3)")) == 7 + + # Comparison + assert interp.eval(parse("(< 1 2)")) == True + assert interp.eval(parse("(> 1 2)")) == False + + # Let binding + assert interp.eval(parse("(let ((x 5)) x)")) == 5 + assert interp.eval(parse("(let ((x 5) (y 3)) (+ x y))")) == 8 + + # Lambda + result = interp.eval(parse("((lambda (x) (* x 2)) 5)")) + assert result == 10 + + # If + assert interp.eval(parse("(if true 1 2)")) == 1 + assert interp.eval(parse("(if false 1 2)")) == 2 + + print(" Interpreter basics OK") + + +def test_primitives(): + """Test image primitives.""" + print("Testing primitives...") + + interp = get_interpreter() + + # Create test image + img = np.zeros((100, 100, 3), dtype=np.uint8) + img[50, 50] = [255, 128, 64] + + interp.global_env.set('test_img', img) + + # Width/height + assert interp.eval(parse("(width test_img)")) == 100 + assert interp.eval(parse("(height test_img)")) == 100 + + # Pixel + pixel = interp.eval(parse("(pixel test_img 50 50)")) + assert pixel == [255, 128, 64] + + # RGB + color = 
interp.eval(parse("(rgb 100 150 200)")) + assert color == [100, 150, 200] + + # Luminance + lum = interp.eval(parse("(luminance (rgb 100 100 100))")) + assert abs(lum - 100) < 1 + + print(" Primitives OK") + + +def test_effect_loading(): + """Test loading effects from .sexp files.""" + print("Testing effect loading...") + + # Load all effects + effects_dir = Path(__file__).parent / "effects" + load_effects_dir(str(effects_dir)) + + effects = list_effects() + print(f" Loaded {len(effects)} effects: {', '.join(sorted(effects))}") + + assert len(effects) > 0 + print(" Effect loading OK") + + +def test_effect_execution(): + """Test running effects on images.""" + print("Testing effect execution...") + + # Create test image + img = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8) + + # Load effects + effects_dir = Path(__file__).parent / "effects" + load_effects_dir(str(effects_dir)) + + # Test each effect + effects = list_effects() + passed = 0 + failed = [] + + for name in sorted(effects): + try: + result, state = run_effect(name, img.copy(), {'_time': 0.5}, {}) + assert isinstance(result, np.ndarray) + assert result.shape == img.shape + passed += 1 + print(f" {name}: OK") + except Exception as e: + failed.append((name, str(e))) + print(f" {name}: FAILED - {e}") + + print(f" Passed: {passed}/{len(effects)}") + if failed: + print(f" Failed: {[f[0] for f in failed]}") + + return passed, failed + + +def main(): + print("=" * 60) + print("S-Expression Effect Interpreter Tests") + print("=" * 60) + + test_parser() + test_interpreter_basics() + test_primitives() + test_effect_loading() + passed, failed = test_effect_execution() + + print("=" * 60) + if not failed: + print("All tests passed!") + else: + print(f"Tests completed with {len(failed)} failures") + print("=" * 60) + + +if __name__ == "__main__": + main() diff --git a/test_effects_pipeline.py b/test_effects_pipeline.py new file mode 100644 index 0000000..d1c8870 --- /dev/null +++ b/test_effects_pipeline.py 
@@ -0,0 +1,258 @@ +#!/usr/bin/env python3 +""" +Test the full effects pipeline: segment -> effect -> output + +This tests that effects can be applied to video segments without +producing "No video stream found" errors. +""" + +import subprocess +import tempfile +import sys +from pathlib import Path + +# Add parent to path +sys.path.insert(0, str(Path(__file__).parent)) + +import numpy as np +from sexp_effects import ( + get_interpreter, + load_effects_dir, + run_effect, + list_effects, +) + + +def create_test_video(path: Path, duration: float = 1.0, size: str = "64x64") -> bool: + """Create a short test video using ffmpeg.""" + cmd = [ + "ffmpeg", "-y", + "-f", "lavfi", "-i", f"testsrc=duration={duration}:size={size}:rate=10", + "-c:v", "libx264", "-preset", "ultrafast", + str(path) + ] + result = subprocess.run(cmd, capture_output=True) + if result.returncode != 0: + print(f"Failed to create test video: {result.stderr.decode()}") + return False + return True + + +def segment_video(input_path: Path, output_path: Path, start: float, duration: float) -> bool: + """Segment a video file.""" + cmd = [ + "ffmpeg", "-y", "-i", str(input_path), + "-ss", str(start), "-t", str(duration), + "-c:v", "libx264", "-preset", "ultrafast", + "-c:a", "aac", + str(output_path) + ] + result = subprocess.run(cmd, capture_output=True) + if result.returncode != 0: + print(f"Failed to segment video: {result.stderr.decode()}") + return False + + # Verify output has video stream + probe_cmd = [ + "ffprobe", "-v", "quiet", "-print_format", "json", + "-show_streams", str(output_path) + ] + probe_result = subprocess.run(probe_cmd, capture_output=True, text=True) + import json + probe_data = json.loads(probe_result.stdout) + + has_video = any( + s.get("codec_type") == "video" + for s in probe_data.get("streams", []) + ) + if not has_video: + print(f"Segment has no video stream!") + return False + + return True + + +def run_effect_on_video(effect_name: str, input_path: Path, output_path: Path) -> 
bool: + """Run a sexp effect on a video file using frame processing.""" + import json + + # Get video info + probe_cmd = [ + "ffprobe", "-v", "quiet", "-print_format", "json", + "-show_streams", str(input_path) + ] + probe_result = subprocess.run(probe_cmd, capture_output=True, text=True) + probe_data = json.loads(probe_result.stdout) + + video_stream = None + for stream in probe_data.get("streams", []): + if stream.get("codec_type") == "video": + video_stream = stream + break + + if not video_stream: + print(f" Input has no video stream: {input_path}") + return False + + width = int(video_stream["width"]) + height = int(video_stream["height"]) + fps_str = video_stream.get("r_frame_rate", "10/1") + if "/" in fps_str: + num, den = fps_str.split("/") + fps = float(num) / float(den) + else: + fps = float(fps_str) + + # Read frames, process, write + read_cmd = ["ffmpeg", "-i", str(input_path), "-f", "rawvideo", "-pix_fmt", "rgb24", "-"] + write_cmd = [ + "ffmpeg", "-y", + "-f", "rawvideo", "-pix_fmt", "rgb24", + "-s", f"{width}x{height}", "-r", str(fps), + "-i", "-", + "-c:v", "libx264", "-preset", "ultrafast", + str(output_path) + ] + + read_proc = subprocess.Popen(read_cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) + write_proc = subprocess.Popen(write_cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE) + + frame_size = width * height * 3 + frame_count = 0 + state = {} + + while True: + frame_data = read_proc.stdout.read(frame_size) + if len(frame_data) < frame_size: + break + + frame = np.frombuffer(frame_data, dtype=np.uint8).reshape((height, width, 3)) + processed, state = run_effect(effect_name, frame, {'_time': frame_count / fps}, state) + write_proc.stdin.write(processed.tobytes()) + frame_count += 1 + + read_proc.stdout.close() + write_proc.stdin.close() + read_proc.wait() + write_proc.wait() + + if write_proc.returncode != 0: + print(f" FFmpeg encode failed: {write_proc.stderr.read().decode()}") + return False + + return frame_count > 0 + + +def 
test_effect_pipeline(effect_name: str, tmpdir: Path) -> tuple: + """ + Test full pipeline: create video -> segment -> apply effect + + Returns (success, error_message) + """ + # Create test video + source_video = tmpdir / "source.mp4" + if not create_test_video(source_video, duration=1.0, size="64x64"): + return False, "Failed to create source video" + + # Segment it (simulate what the recipe does) + segment_video_path = tmpdir / "segment.mp4" + if not segment_video(source_video, segment_video_path, start=0.2, duration=0.5): + return False, "Failed to segment video" + + # Check segment file exists and has content + if not segment_video_path.exists(): + return False, "Segment file doesn't exist" + if segment_video_path.stat().st_size < 100: + return False, f"Segment file too small: {segment_video_path.stat().st_size} bytes" + + # Apply effect + output_video = tmpdir / "output.mp4" + try: + if not run_effect_on_video(effect_name, segment_video_path, output_video): + return False, "Effect processing failed" + except Exception as e: + return False, str(e) + + # Verify output + if not output_video.exists(): + return False, "Output file doesn't exist" + if output_video.stat().st_size < 100: + return False, f"Output file too small: {output_video.stat().st_size} bytes" + + return True, None + + +def main(): + print("=" * 60) + print("Effects Pipeline Test") + print("=" * 60) + + # Load effects + effects_dir = Path(__file__).parent / "sexp_effects" / "effects" + load_effects_dir(str(effects_dir)) + + effects = list_effects() + print(f"Testing {len(effects)} effects through segment->effect pipeline\n") + + passed = [] + failed = [] + + # Test multi-input effects separately + multi_input_effects = ("blend", "layer") + print("\nTesting multi-input effects...") + from sexp_effects.interpreter import get_interpreter + interp = get_interpreter() + frame_a = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8) + frame_b = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8) + 
+ for name in multi_input_effects: + try: + interp.global_env.set('frame-a', frame_a.copy()) + interp.global_env.set('frame-b', frame_b.copy()) + interp.global_env.set('frame', frame_a.copy()) + result, state = interp.run_effect(name, frame_a.copy(), {'_time': 0.5}, {}) + if isinstance(result, np.ndarray) and result.shape == frame_a.shape: + passed.append(name) + print(f" {name}: OK") + else: + failed.append((name, f"Bad output shape: {result.shape if hasattr(result, 'shape') else type(result)}")) + print(f" {name}: FAILED - bad shape") + except Exception as e: + failed.append((name, str(e))) + print(f" {name}: FAILED - {e}") + + print("\nTesting single-input effects through pipeline...") + + # Test each effect + for effect_name in sorted(effects): + # Skip multi-input effects (already tested above) + if effect_name in multi_input_effects: + continue + + with tempfile.TemporaryDirectory() as tmpdir: + tmpdir = Path(tmpdir) + + success, error = test_effect_pipeline(effect_name, tmpdir) + + if success: + passed.append(effect_name) + print(f" {effect_name}: OK") + else: + failed.append((effect_name, error)) + print(f" {effect_name}: FAILED - {error}") + + print() + print("=" * 60) + print(f"Pipeline test: {len(passed)} passed, {len(failed)} failed") + if failed: + print("\nFailed effects:") + for name, error in failed: + print(f" {name}: {error}") + print("=" * 60) + + return len(failed) == 0 + + +if __name__ == "__main__": + success = main() + sys.exit(0 if success else 1)