Add generic streaming interpreter with configurable sources/audio

- Add stream_sexp_generic.py: fully generic sexp interpreter
- Add streaming primitives for video sources and audio analysis
- Add config system for external sources and audio files
- Add templates for reusable scans and macros
- Fix video/audio stream mapping in file output
- Add dynamic source cycling based on sources array length
- Remove old Python effect files (migrated to sexp)
- Update sexp effects to use namespaced primitives

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
gilesb
2026-02-02 17:48:04 +00:00
parent d241e2a663
commit 95fcc67dcc
179 changed files with 3935 additions and 8226 deletions

File diff suppressed because one or more lines are too long

17
configs/audio-dizzy.sexp Normal file
View File

@@ -0,0 +1,17 @@
;; Audio Configuration - dizzy.mp3
;;
;; Defines audio analyzer and playback for a recipe.
;; Pass to recipe with: --audio configs/audio-dizzy.sexp
;;
;; Provides:
;; - music: audio analyzer for beat/energy detection
;; - audio-playback: path for synchronized playback
(require-primitives "streaming")
;; Audio analyzer (provides beat detection and energy levels)
;; Paths relative to working directory (project root)
(def music (streaming:make-audio-analyzer "dizzy.mp3"))
;; Audio playback path (for sync with video output)
;; Kept identical to the analyzer source above so analysis aligns with playback.
(audio-playback "dizzy.mp3")

View File

@@ -0,0 +1,17 @@
;; Audio Configuration - woods_half/halleluwah.webm
;; (header previously said "dizzy.mp3", which did not match the paths below)
;;
;; Defines audio analyzer and playback for a recipe.
;; Pass to recipe with: --audio <path to this config file>
;;
;; Provides:
;; - music: audio analyzer for beat/energy detection
;; - audio-playback: path for synchronized playback
(require-primitives "streaming")
;; Audio analyzer (provides beat detection and energy levels)
;; Paths relative to working directory (project root)
(def music (streaming:make-audio-analyzer "woods_half/halleluwah.webm"))
;; Audio playback path (for sync with video output)
;; Kept identical to the analyzer source above so analysis aligns with playback.
(audio-playback "woods_half/halleluwah.webm")

View File

@@ -0,0 +1,38 @@
;; Default Sources Configuration
;;
;; Defines video sources and per-pair effect configurations.
;; Pass to recipe with: --sources configs/sources-default.sexp
;;
;; Required by recipes using process-pair macro:
;; - sources: array of video sources
;; - pair-configs: array of effect configurations per source
(require-primitives "streaming")
;; Video sources array
;; Paths relative to working directory (project root)
;; NOTE(review): second argument to make-video-source is presumably the
;; source FPS - confirm against the streaming primitives.
(def sources [
(streaming:make-video-source "monday.webm" 30)
(streaming:make-video-source "escher.webm" 30)
(streaming:make-video-source "2.webm" 30)
(streaming:make-video-source "disruptors.webm" 30)
(streaming:make-video-source "4.mp4" 30)
(streaming:make-video-source "ecstacy.mp4" 30)
(streaming:make-video-source "dopple.webm" 30)
(streaming:make-video-source "5.mp4" 30)
])
;; Per-pair effect config: rotation direction, rotation ranges, zoom ranges
;; :dir = rotation direction (1 or -1)
;; :rot-a, :rot-b = max rotation angles for clip A and B
;; :zoom-a, :zoom-b = max zoom amounts for clip A and B
;; Entry i pairs with sources[i]; both arrays have 8 entries here.
(def pair-configs [
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 0: monday
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 1: escher
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 2: vid2
{:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 3: disruptors (reversed)
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 4: vid4
{:dir 1 :rot-a 30 :rot-b -30 :zoom-a 1.3 :zoom-b 0.7} ;; 5: ecstacy (smaller)
{:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 6: dopple (reversed)
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 7: vid5
])

View File

@@ -0,0 +1,19 @@
;; Half-resolution Woods Sources (960x540)
;;
;; Pass to recipe with: --sources configs/sources-woods-half.sexp
;;
;; Provides (for recipes using the process-pair macro):
;; - sources: array of video sources
;; - pair-configs: per-source effect configuration, index-aligned with sources
(require-primitives "streaming")
(def sources [
(streaming:make-video-source "woods_half/1.webm" 30)
(streaming:make-video-source "woods_half/2.webm" 30)
(streaming:make-video-source "woods_half/3.webm" 30)
(streaming:make-video-source "woods_half/4.webm" 30)
])
;; :dir = rotation direction (1 or -1); :rot-a/:rot-b = max rotation angles;
;; :zoom-a/:zoom-b = max zoom amounts for clip A and B. Entry i pairs with sources[i].
(def pair-configs [
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5}
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5}
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5}
{:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5}
])

View File

@@ -0,0 +1,39 @@
;; Woods Sources Configuration
;; (header previously said "Default Sources Configuration" and the inline
;; labels referenced monday/escher - stale copy from the default config)
;;
;; Defines video sources and per-pair effect configurations.
;; Pass to recipe with: --sources <path to this config file>
;;
;; Required by recipes using process-pair macro:
;; - sources: array of video sources
;; - pair-configs: array of effect configurations per source
(require-primitives "streaming")
;; Video sources array
;; Paths relative to working directory (project root)
(def sources [
(streaming:make-video-source "woods/1.webm" 10)
(streaming:make-video-source "woods/2.webm" 10)
(streaming:make-video-source "woods/3.webm" 10)
(streaming:make-video-source "woods/4.webm" 10)
(streaming:make-video-source "woods/5.webm" 10)
(streaming:make-video-source "woods/6.webm" 10)
(streaming:make-video-source "woods/7.webm" 10)
(streaming:make-video-source "woods/8.webm" 10)
])
;; Per-pair effect config: rotation direction, rotation ranges, zoom ranges
;; :dir = rotation direction (1 or -1)
;; :rot-a, :rot-b = max rotation angles for clip A and B
;; :zoom-a, :zoom-b = max zoom amounts for clip A and B
;; Entry i pairs with sources[i] (woods/1 .. woods/8).
(def pair-configs [
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 0: woods/1
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 1: woods/2
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 2: woods/3
{:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5} ;; 3: woods/4 (reversed)
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 4: woods/5
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 5: woods/6
{:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 6: woods/7
{:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5} ;; 7: woods/8
])

View File

@@ -1,23 +0,0 @@
;; beat-alternate construct
;; Alternates between sources on each beat
;;
;; Usage in recipe:
;; (construct beat-alternate :path "constructs/beat-alternate.sexp")
;; (def segments (beat-alternate beats-data (list video-a video-b)))
(define-construct beat-alternate
:params (
(analysis :type any :desc "Analysis data with :times")
(sources :type any :desc "List of source nodes to alternate between")
)
;; Body: map over time pairs, return segment descriptors
;; (cons 0 times) prepends t=0 so the first segment starts at the clip head;
;; zip-pairs then yields consecutive (start end) beat windows.
(let [times (get analysis :times)
pairs (zip-pairs (cons 0 times))
n-sources (len sources)]
;; One segment per beat window, cycling round-robin through sources
;; via (mod i n-sources). Segments carry no effects.
(map-indexed
(fn [i pair]
(dict :source (nth sources (mod i n-sources))
:start (first pair)
:end (nth pair 1)
:effects (list)))
pairs)))

View File

@@ -1,158 +0,0 @@
;; cycle-effects-preset construct
;; Data-driven effect cycling using preset definitions
;;
;; Preset format (flat, no nested :params):
;; {:effect "brightness" :amount {:bind "bass" :range [-80 80]}}
;; {:effect "blur" :radius 5}
;;
;; Binding specs {:bind "analyzer" :range [min max]} are resolved to actual bindings
;;
;; Returns a list of segment dicts {:source :start :duration :effects},
;; one per group of beats_per_segment beats, cycling through both the
;; videos and the preset effects.
(define-construct cycle-effects-preset
:params (
(preset :type any :desc "List of effect preset definitions")
(videos :type any :desc "List of video source nodes")
(video_infos :type any :desc "List of video info analysis results")
(beats :type any :desc "Beat analysis data with :times")
(beats_per_segment :type int :default 4 :desc "Number of beats per segment")
)
(let [num-effects (len preset)
num-videos (len videos)
;; Extract durations from video-info analysis results
;; (video_infos is expected to be index-aligned with videos)
durations (map (fn [info] (get info :duration)) video_infos)
times (get beats :times)
grouped (chunk-every times beats_per_segment)
;; Resolve a param value - if it's a binding spec dict, create actual Binding
;; Note: pass the analyzer NAME (string) to bind, not the data - it will be
;; looked up at execution time in analysis_data
resolve-param (fn [param-value]
(if (dict? param-value)
(let [bind-name (get param-value :bind)]
(if bind-name
(let [range-spec (get param-value :range (list 0 1))]
(bind bind-name :range range-spec))
param-value))
param-value))
;; Process effect spec - resolve any binding specs in params
;; Effect spec is flat: {:effect "name" :param1 val1 :param2 {:bind ...}}
make-effect (fn [effect-spec]
(let [effect-name (get effect-spec :effect)]
;; Build effect dict with resolved params
;; Since we can't iterate dict keys, we check known params
;; Only include params that exist (non-nil) in the spec
;; NOTE(review): the `result` binding below is never used; the literal
;; dict further down rebuilds :effect from scratch.
(let [result {:effect effect-name}
;; Check each known param
amount (get effect-spec :amount nil)
degrees (get effect-spec :degrees nil)
speed (get effect-spec :speed nil)
level (get effect-spec :level nil)
levels (get effect-spec :levels nil)
radius (get effect-spec :radius nil)
intensity (get effect-spec :intensity nil)
contrast (get effect-spec :contrast nil)
brightness (get effect-spec :brightness nil)
strength (get effect-spec :strength nil)
amplitude (get effect-spec :amplitude nil)
wavelength (get effect-spec :wavelength nil)
frequency (get effect-spec :frequency nil)
segments-p (get effect-spec :segments nil)
rotation_speed (get effect-spec :rotation_speed nil)
factor (get effect-spec :factor nil)
angle (get effect-spec :angle nil)
direction (get effect-spec :direction nil)
block_size (get effect-spec :block_size nil)
char_size (get effect-spec :char_size nil)
color_mode (get effect-spec :color_mode nil)
low (get effect-spec :low nil)
high (get effect-spec :high nil)
thickness (get effect-spec :thickness nil)
glow_radius (get effect-spec :glow_radius nil)
glow_intensity (get effect-spec :glow_intensity nil)
line_spacing (get effect-spec :line_spacing nil)
vignette_amount (get effect-spec :vignette_amount nil)
spacing (get effect-spec :spacing nil)
offset_x (get effect-spec :offset_x nil)
num_echoes (get effect-spec :num_echoes nil)
decay (get effect-spec :decay nil)
persistence (get effect-spec :persistence nil)
rows (get effect-spec :rows nil)
cols (get effect-spec :cols nil)
threshold_low (get effect-spec :threshold_low nil)
threshold_high (get effect-spec :threshold_high nil)
corruption (get effect-spec :corruption nil)]
;; Only add non-nil params to result
;; Use cond to build up the dict (since we can't dynamically add keys)
;; This is ugly but necessary without dict iteration
;; NOTE(review): despite the comment above, keys absent from the spec
;; are still emitted with nil values; the executor presumably ignores
;; nil-valued params - confirm before relying on this.
{:effect effect-name
:amount (if (nil? amount) nil (resolve-param amount))
:degrees (if (nil? degrees) nil (resolve-param degrees))
:speed speed
:level level
:levels levels
:radius (if (nil? radius) nil (resolve-param radius))
:intensity (if (nil? intensity) nil (resolve-param intensity))
:contrast (if (nil? contrast) nil (resolve-param contrast))
:brightness (if (nil? brightness) nil (resolve-param brightness))
:strength (if (nil? strength) nil (resolve-param strength))
:amplitude (if (nil? amplitude) nil (resolve-param amplitude))
:wavelength wavelength
:frequency frequency
:segments segments-p
:rotation_speed rotation_speed
:factor (if (nil? factor) nil (resolve-param factor))
:angle (if (nil? angle) nil (resolve-param angle))
:direction direction
:block_size (if (nil? block_size) nil (resolve-param block_size))
:char_size char_size
:color_mode color_mode
:low low
:high high
:thickness thickness
:glow_radius glow_radius
:glow_intensity glow_intensity
:line_spacing line_spacing
:vignette_amount (if (nil? vignette_amount) nil (resolve-param vignette_amount))
:spacing spacing
:offset_x (if (nil? offset_x) nil (resolve-param offset_x))
:num_echoes num_echoes
:decay decay
:persistence persistence
:rows rows
:cols cols
:threshold_low threshold_low
:threshold_high threshold_high
:corruption (if (nil? corruption) nil (resolve-param corruption))})))
;; Pick a source long enough for the segment: try preferred-idx, then the
;; next two indices (wrapping); nil when none of the three fit.
find-valid-video (fn [preferred-idx seg-duration]
(cond
(>= (nth durations preferred-idx) seg-duration) preferred-idx
(>= (nth durations (mod (+ preferred-idx 1) num-videos)) seg-duration) (mod (+ preferred-idx 1) num-videos)
(>= (nth durations (mod (+ preferred-idx 2) num-videos)) seg-duration) (mod (+ preferred-idx 2) num-videos)
:else nil))]
;; Reduce over beat groups threading (counter segments) as state;
;; the trailing (nth _ 1) extracts the segments list.
;; The counter advances even when a group is skipped, so the
;; video/effect cycles keep moving past unusable segments.
(nth
(reduce
(fn [state group]
(let [acc (first state)
segments (nth state 1)
audio-start (first group)
audio-end (last group)
seg-duration (- audio-end audio-start)
vid-idx (find-valid-video (mod acc num-videos) seg-duration)]
(if (nil? vid-idx)
(list (inc acc) segments)
(let [src (nth videos vid-idx)
src-duration (nth durations vid-idx)
;; Wrap the start time into the source duration so short
;; clips can still serve late segments.
wrapped-start (mod audio-start src-duration)
effect-idx (mod acc num-effects)
effect-spec (nth preset effect-idx)
fx (make-effect effect-spec)
segment (dict :source src
:start wrapped-start
:duration seg-duration
:effects (list fx))]
(list (inc acc) (append segments segment))))))
(list 0 (list))
grouped)
1)))

View File

@@ -1,56 +0,0 @@
;; slice-every-n construct - group every N beats into one segment
;;
;; Usage:
;; (construct slice-every-n :path "constructs/slice-every-n.sexp")
;; (def segments (slice-every-n beats-data 4
;; :init 0
;; :reducer (fn [acc i start end]
;; {:source video-a
;; :effects (list {:effect invert})
;; :acc (inc acc)})))
;;
;; Groups every N analysis times into one segment, calling reducer once per group
(define-construct slice-every-n
:params (
(analysis :type any :desc "Analysis data with :times")
(n :type int :default 4 :desc "Number of beats per segment")
(init :type any :default 0 :desc "Initial accumulator value")
(reducer :type any :desc "Reducer function (fn [acc i start end] ...)")
)
;; Reducer receives: (acc, i, start, end) where start/end are audio beat times
;; Reducer returns: {:source src :effects fx :acc new-acc}
;; Optionally include :start/:end to override (e.g., for wrapping/randomizing)
;; :duration is calculated from start/end (use :duration to override)
;; Return :skip true to skip this segment
(let [times (get analysis :times)
;; Group times into chunks of n
grouped (chunk-every times n)]
;; State threaded through reduce is (accumulator segments);
;; the trailing (nth _ 1) extracts just the segments list.
(nth
(reduce
(fn [state group]
(let [acc (first state)
segments (nth state 1)
;; i = index of the segment about to be built; note skipped
;; groups do not advance it (it is derived from segments length)
i (len segments)
audio-start (first group)
audio-end (last group)
;; NOTE(review): audio-duration is bound but never used below
audio-duration (- audio-end audio-start)
;; Call user's reducer with audio beat times
result (reducer acc i audio-start audio-end)
new-acc (get result :acc)]
;; Skip if reducer returns :skip true
(if (get result :skip false)
(list new-acc segments)
(let [;; Use reducer's start/end/duration if provided, else use audio times
seg-start (get result :start audio-start)
seg-end (get result :end audio-end)
seg-duration (get result :duration (- seg-end seg-start))
segment (dict :source (get result :source)
:start seg-start
:end seg-end
:duration seg-duration
:effects (get result :effects))]
(list new-acc (append segments segment))))))
(list init (list))
grouped)
1)))

View File

@@ -1,47 +0,0 @@
;; slice-on construct - iterate over analysis times with user function
;;
;; Usage:
;; (construct slice-on :path "constructs/slice-on.sexp")
;; (def segments (slice-on beats-data
;; :init 0
;; :reducer (fn [acc i start end]
;; {:source (nth (list video-a video-b) (mod acc 2))
;; :effects (list)
;; :acc (inc acc)})))
;;
;; The construct receives:
;; - First positional arg as 'analysis' (the analysis data with :times)
;; - :init as 'init' (initial accumulator value)
;; - :reducer as 'reducer' (the reducer lambda)
(define-construct slice-on
:params (
(analysis :type any :desc "Analysis data with :times")
(init :type any :default 0 :desc "Initial accumulator value")
(reducer :type any :desc "Reducer function (fn [acc i start end] ...)")
)
;; Get times from analysis data
;; (cons 0 times) prepends t=0 so the first window starts at the clip head;
;; zip-pairs yields consecutive (start end) windows, one per beat.
(let [times (get analysis :times)
pairs (zip-pairs (cons 0 times))]
;; Use nth to get second element of reduce result (the segments list)
(nth
(reduce
(fn [state pair]
(let [acc (first state)
segments (nth state 1)
;; i = index of the segment being built (segments length so far)
i (len segments)
start (first pair)
end (nth pair 1)
;; Call user's reducer function
result (reducer acc i start end)
;; Extract new acc and build segment
new-acc (get result :acc)
segment (dict :source (get result :source)
:start start
:end end
:duration (- end start)
:effects (get result :effects))]
(list new-acc (append segments segment))))
(list init (list))
pairs)
1)))

View File

@@ -1,65 +0,0 @@
;; All 42 Effects Preset
;; Flat format - params are direct keys, not nested under :params
;; Binding format: {:bind "analyzer-name" :range [min max]}
;; The index comments below (0-41) give each effect's position in the cycle.
(def all-42-preset
(list
;; Color effects 0-9
{:effect "invert"}
{:effect "grayscale"}
{:effect "sepia"}
{:effect "brightness" :amount {:bind "bass" :range [-80 80]}}
{:effect "contrast" :amount {:bind "energy" :range [0.5 2.5]}}
{:effect "saturation" :amount {:bind "bass" :range [0.2 3.0]}}
{:effect "hue_shift" :degrees {:bind "energy" :range [0 360]}}
{:effect "color_cycle" :speed 2}
{:effect "threshold" :level 128}
{:effect "posterize" :levels 6}
;; Blur/sharpen 10-13
{:effect "blur" :radius {:bind "bass" :range [1 30]}}
{:effect "sharpen" :amount {:bind "energy" :range [0.5 4]}}
{:effect "bloom" :intensity 0.6 :radius 20}
{:effect "color-adjust" :brightness 20 :contrast 1.2}
;; Distortion 14-21
{:effect "swirl" :strength {:bind "bass" :range [-6 6]}}
{:effect "fisheye" :strength {:bind "bass" :range [-0.5 0.8]}}
{:effect "wave" :amplitude {:bind "bass" :range [10 60]} :wavelength 60}
{:effect "ripple" :amplitude {:bind "bass" :range [10 40]} :frequency 6}
{:effect "kaleidoscope" :segments 6 :rotation_speed 30}
{:effect "zoom" :factor {:bind "bass" :range [0.8 1.5]}}
{:effect "rotate" :angle {:bind "energy" :range [-30 30]}}
{:effect "mirror" :direction "horizontal"}
;; Stylization 22-28
{:effect "pixelate" :block_size {:bind "bass" :range [4 32]}}
{:effect "ascii_art" :char_size 8 :color_mode "color"}
{:effect "ascii_zones" :char_size 10}
{:effect "edge_detect" :low 50 :high 150}
{:effect "emboss" :strength 1.5}
{:effect "outline" :thickness 2}
{:effect "neon_glow" :glow_radius 20 :glow_intensity 2}
;; Retro/film 29-33
{:effect "crt" :line_spacing 3 :vignette_amount 0.3}
{:effect "scanlines" :spacing 3 :intensity 0.4}
{:effect "film_grain" :intensity 0.25}
{:effect "vignette" :strength 0.6}
{:effect "noise" :amount {:bind "bass" :range [10 80]}}
;; Chromatic 34
{:effect "rgb_split" :offset_x {:bind "bass" :range [5 40]}}
;; Temporal 35-37
{:effect "echo" :num_echoes 4 :decay 0.5}
{:effect "trails" :persistence 0.7}
{:effect "strobe" :frequency 4}
;; Geometric 38-39
{:effect "flip" :direction "horizontal"}
{:effect "tile_grid" :rows 2 :cols 2}
;; Glitch 40-41
{:effect "pixelsort" :threshold_low 30 :threshold_high 220}
{:effect "datamosh" :corruption {:bind "bass" :range [0.2 0.8]} :block_size 24}))

View File

@@ -1,13 +0,0 @@
;; Chill/Subtle Preset
;; Gentle color shifts and soft effects
;; NOTE(review): this preset nests parameters under :params, unlike the flat
;; format ({:effect "name" :param val}) used by all-42-preset - confirm the
;; consumer supports both formats before mixing presets.
(def chill-subtle-preset
(list
{:effect "hue_shift" :params {:degrees {:bind "energy" :range [0 60]}}}
{:effect "saturation" :params {:amount {:bind "bass" :range [0.8 1.3]}}}
{:effect "brightness" :params {:amount {:bind "energy" :range [-10 15]}}}
{:effect "blur" :params {:radius {:bind "bass" :range [0 5]}}}
{:effect "vignette" :params {:strength {:bind "energy" :range [0.2 0.5]}}}
{:effect "film_grain" :params {:intensity {:bind "bass" :range [0.05 0.15]}}}
{:effect "bloom" :params {:intensity {:bind "energy" :range [0.2 0.5]} :radius 15}}
{:effect "color-adjust" :params {:brightness {:bind "bass" :range [-5 10]} :contrast 1.05}}))

View File

@@ -1,13 +0,0 @@
;; Glitch-Only Preset
;; Just the aggressive digital artifacts
;; NOTE(review): nested :params format - see note in chill-subtle preset;
;; confirm the consumer supports it alongside the flat format.
(def glitch-only-preset
(list
{:effect "datamosh" :params {:corruption {:bind "bass" :range [0.3 0.9]} :block_size 16}}
{:effect "pixelsort" :params {:threshold_low 20 :threshold_high 200}}
{:effect "rgb_split" :params {:offset_x {:bind "bass" :range [10 60]}}}
{:effect "noise" :params {:amount {:bind "energy" :range [20 100]}}}
{:effect "scanlines" :params {:spacing 2 :intensity {:bind "bass" :range [0.3 0.8]}}}
{:effect "pixelate" :params {:block_size {:bind "bass" :range [8 48]}}}
{:effect "threshold" :params {:level {:bind "energy" :range [80 180]}}}
{:effect "posterize" :params {:levels {:bind "bass" :range [2 6]}}}))

View File

@@ -1,13 +0,0 @@
;; Retro VHS Preset
;; 80s/90s video aesthetic
;; NOTE(review): nested :params format - see note in chill-subtle preset;
;; confirm the consumer supports it alongside the flat format.
(def retro-vhs-preset
(list
{:effect "crt" :params {:line_spacing 3 :vignette_amount {:bind "bass" :range [0.2 0.5]}}}
{:effect "scanlines" :params {:spacing 2 :intensity {:bind "energy" :range [0.3 0.6]}}}
{:effect "rgb_split" :params {:offset_x {:bind "bass" :range [2 12]}}}
{:effect "noise" :params {:amount {:bind "bass" :range [5 30]}}}
{:effect "saturation" :params {:amount {:bind "energy" :range [1.2 1.8]}}}
{:effect "blur" :params {:radius {:bind "bass" :range [0 3]}}}
{:effect "contrast" :params {:amount {:bind "energy" :range [1.1 1.4]}}}
{:effect "film_grain" :params {:intensity {:bind "bass" :range [0.1 0.3]}}}))

View File

@@ -1,69 +0,0 @@
;; ASCII with Alternating Effects - Checkerboard of blur and RGB split
;;
;; Demonstrates using existing sexp effects within cell_effect lambdas.
;; Even cells get blur, odd cells get RGB split - creating a checkerboard pattern.
(recipe "ascii_alternating_fx"
:version "1.0"
:description "ASCII art with alternating blur and RGB split effects per cell"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
:params (
(cols :type int :default 40 :range [20 100]
:desc "Number of character columns")
(blur_amount :type float :default 3 :range [1 10]
:desc "Blur radius for blur cells")
(rgb_offset :type int :default 3 :range [1 10]
:desc "RGB split offset for split cells")
)
;; Registry
(effect ascii_fx_zone :path "../sexp_effects/effects/ascii_fx_zone.sexp")
(analyzer energy :path "../../artdag-analyzers/energy/analyzer.py")
;; Source files
(def video (source :path "../monday.webm"))
(def audio (source :path "../dizzy.mp3"))
;; Stage 1: Analysis
;; Audio is taken from 60s in (louder section); energy times are
;; 0-10s relative to the clip, so no offset is needed when binding.
(stage :analyze
:outputs [energy-data]
(def audio-clip (-> audio (segment :start 60 :duration 10)))
(def energy-data (-> audio-clip (analyze energy))))
;; Stage 2: Process - apply effect with alternating cell effects
(stage :process
:requires [:analyze]
:inputs [energy-data]
:outputs [result audio-clip]
(def clip (-> video (segment :start 0 :duration 10)))
;; audio-clip is re-defined here with the same segment as in :analyze -
;; stages apparently do not share defs across stage boundaries.
(def audio-clip (-> audio (segment :start 60 :duration 10)))
;; Apply effect with cell_effect lambda
;; Checkerboard: (row + col) even = blur, odd = rgb_split
(def result (-> clip
(effect ascii_fx_zone
:cols cols
:char_size (bind energy-data values :range [12 24])
:color_mode "color"
:background "black"
;; Pass params to zone dict
:energy (bind energy-data values :range [0 1])
:blur_amount blur_amount
:rgb_offset rgb_offset
;; Cell effect: alternate between blur and rgb_split
;; Uses existing sexp effects - each cell is just a small frame
:cell_effect (lambda [cell zone]
(if (= (mod (+ (get zone "row") (get zone "col")) 2) 0)
;; Even cells: blur scaled by energy
(blur cell (* (get zone "blur_amount") (get zone "energy")))
;; Odd cells: rgb split scaled by energy
(rgb_split cell
(* (get zone "rgb_offset") (get zone "energy"))
0)))))))
;; Stage 3: Output
(stage :output
:requires [:process]
:inputs [result audio-clip]
(mux result audio-clip)))

View File

@@ -1,67 +0,0 @@
;; ASCII with Alternating Rotation Directions
;;
;; Checkerboard pattern: even cells rotate clockwise, odd cells rotate counter-clockwise
;; Rotation amount scaled by energy and position (more at top-right)
(recipe "ascii_alternating_rotate"
:version "1.0"
:description "ASCII art with alternating rotation directions per cell"
:minimal-primitives true
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
:params (
(cols :type int :default 50 :range [20 100]
:desc "Number of character columns")
(rotation_scale :type float :default 60 :range [0 180]
:desc "Max rotation in degrees")
(duration :type float :default 10 :range [1 300]
:desc "Clip duration in seconds")
)
;; Registry
(effect ascii_fx_zone :path "../sexp_effects/effects/ascii_fx_zone.sexp")
;; Effects used in cell_effect lambda
(effect rotate :path "../sexp_effects/effects/rotate.sexp")
(analyzer energy :path "../../artdag-analyzers/energy/analyzer.py")
;; Source files
(def video (source :path "../monday.webm"))
(def audio (source :path "../dizzy.mp3"))
;; Stage 1: Analysis
;; Audio from 60s in; energy times are relative to the clip.
(stage :analyze
:outputs [energy-data]
(def audio-clip (-> audio (segment :start 60 :duration duration)))
(def energy-data (-> audio-clip (analyze energy))))
;; Stage 2: Process
(stage :process
:requires [:analyze]
:inputs [energy-data]
:outputs [result audio-clip]
(def clip (-> video (segment :start 0 :duration duration)))
;; Re-defined here: stages apparently do not share defs across boundaries.
(def audio-clip (-> audio (segment :start 60 :duration duration)))
(def result (-> clip
(effect ascii_fx_zone
:cols cols
:char_size (bind energy-data values :range [10 20])
:color_mode "color"
:background "black"
:energy (bind energy-data values :range [0 1])
:rotation_scale rotation_scale
;; Alternating rotation: even cells clockwise, odd cells counter-clockwise
;; Scaled by energy * position (more at top-right)
;; Position term: col-norm grows rightward, (1 - row-norm) grows upward,
;; so the top-right corner gets the largest angles.
:cell_effect (lambda [cell zone]
(rotate cell
(* (if (= (mod (+ (get zone "row") (get zone "col")) 2) 0) 1 -1)
(* (get zone "energy")
(get zone "rotation_scale")
(* 1.5 (+ (get zone "col-norm")
(- 1 (get zone "row-norm"))))))))))))
;; Stage 3: Output
(stage :output
:requires [:process]
:inputs [result audio-clip]
(mux result audio-clip)))

View File

@@ -1,205 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect ascii_art
@version 1.0.0
@author artdag
@description
ASCII art effect - converts video to ASCII character representation.
Renders the video using ASCII characters based on brightness,
creating a retro terminal aesthetic.
@param char_size int
@range 4 32
@default 8
Size of each character cell in pixels. Sync to bass for reactive sizing.
@param alphabet string
@enum standard blocks cyrillic greek japanese braille
@default standard
Character set to use for rendering. Each has different visual density.
@param color_mode string
@enum mono color invert
@default color
Color rendering mode:
- mono: white on black
- color: preserve source colors
- invert: dark text on colored background
@param contrast_boost float
@range 1 3
@default 1.5
Enhance contrast for better character separation.
@param background list
@default [0, 0, 0]
Background color RGB.
@example
(effect ascii_art :char_size 8 :color_mode "color")
@example
;; Japanese characters, reactive sizing
(effect ascii_art :alphabet "japanese" :char_size (bind bass :range [6 16]))
@example
;; Braille pattern for high detail
(effect ascii_art :alphabet "braille" :char_size 4)
"""
import numpy as np
import cv2
# Character sets ordered by visual density (light to dark).
# Each string is an ordered brightness gradient: index 0 renders the darkest
# cell (least ink), the last index the brightest (most ink).
ALPHABETS = {
    # Classic ASCII gradient
    "standard": " .`'^\",:;Il!i><~+_-?][}{1)(|/tfjrxnuvczXYUJCLQ0OZmwqpdbkhao*#MW&8%B@$",
    # Unicode block elements - naturally ordered by fill
    "blocks": " ░▒▓█",
    # Cyrillic - ordered by visual complexity
    "cyrillic": " ·гтпрсьоеаилнкчуцбдвжзмъыюяфщшэ",
    # Greek - ordered by visual weight
    "greek": " ·ιτορεαηυικλνσςπμβγδζθξφψωΣΩΨΦ",
    # Japanese Katakana - ordered by stroke complexity
    "japanese": " ·ノ一ヘイコニハヒフホメヨワヲンリルレロカキクケサシスセソタチツテト",
    # Braille patterns - high detail
    "braille": " ⠁⠂⠃⠄⠅⠆⠇⠈⠉⠊⠋⠌⠍⠎⠏⠐⠑⠒⠓⠔⠕⠖⠗⠘⠙⠚⠛⠜⠝⠞⠟⠠⠡⠢⠣⠤⠥⠦⠧⠨⠩⠪⠫⠬⠭⠮⠯⠰⠱⠲⠳⠴⠵⠶⠷⠸⠹⠺⠻⠼⠽⠾⠿",
}


def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply the ASCII art effect to a single video frame.

    Args:
        frame: Input frame as a numpy array (H, W, 3), RGB uint8.
        params: Effect parameters:
            - char_size: character cell size in pixels, clamped to [4, 32] (default 8)
            - alphabet: character set name from ALPHABETS (default "standard")
            - color_mode: "mono" | "color" | "invert" (default "color")
            - contrast_boost: contrast multiplier, clamped to [1.0, 3.0] (default 1.5)
            - background: background RGB triple (default [0, 0, 0])
        state: Persistent per-stream dict used to cache the rendered glyph
            atlas between frames.

    Returns:
        Tuple of (processed_frame, new_state).
    """
    char_size = max(4, min(int(params.get("char_size", 8)), 32))
    alphabet_name = params.get("alphabet", "standard")
    color_mode = params.get("color_mode", "color")
    contrast_boost = max(1.0, min(params.get("contrast_boost", 1.5), 3.0))
    background = params.get("background", [0, 0, 0])

    if state is None:
        state = {}

    # Get character set (unknown names fall back to "standard")
    char_set = ALPHABETS.get(alphabet_name, ALPHABETS["standard"])

    # Glyph atlas cache. BUG FIX: the original guarded on the key
    # "atlas_cache", which was never written, so the atlas was re-rendered
    # on every frame. Checking the stored cache_key alone is sufficient:
    # it is absent on the first frame and differs whenever char_size or
    # alphabet change.
    cache_key = f"{char_size}_{alphabet_name}"
    if state.get("cache_key") != cache_key:
        state["atlas"] = _create_char_atlas(char_size, char_set)
        state["cache_key"] = cache_key
    atlas = state["atlas"]

    h, w = frame.shape[:2]
    cols = w // char_size
    rows = h // char_size
    if cols < 1 or rows < 1:
        # Frame smaller than a single cell: pass it through untouched.
        return frame, state

    # Crop frame to an exact multiple of the cell size
    grid_h, grid_w = rows * char_size, cols * char_size
    frame_cropped = frame[:grid_h, :grid_w]

    # Average color per cell via reshape (rows, cs, cols, cs, 3) -> mean over cell axes
    reshaped = frame_cropped.reshape(rows, char_size, cols, char_size, 3)
    cell_colors = reshaped.mean(axis=(1, 3)).astype(np.uint8)

    # Rec.601 luma for the brightness -> character mapping
    cell_gray = 0.299 * cell_colors[:, :, 0] + 0.587 * cell_colors[:, :, 1] + 0.114 * cell_colors[:, :, 2]

    # Expand contrast around mid-gray for better character separation
    if contrast_boost > 1:
        cell_gray = (cell_gray - 128) * contrast_boost + 128
        cell_gray = np.clip(cell_gray, 0, 255)

    # Map brightness to an index into the density-ordered character set
    char_indices = ((cell_gray / 255) * (len(char_set) - 1)).astype(np.int32)
    char_indices = np.clip(char_indices, 0, len(char_set) - 1)

    # Validate the background triple; fall back to black
    if isinstance(background, (list, tuple)) and len(background) >= 3:
        bg = background[:3]
    else:
        bg = [0, 0, 0]
    result = np.full((grid_h, grid_w, 3), bg, dtype=np.uint8)

    # Stamp each cell's glyph mask with the chosen color
    for row in range(rows):
        for col in range(cols):
            char_idx = char_indices[row, col]
            char = char_set[char_idx]
            char_mask = atlas.get(char)
            if char_mask is None:
                continue
            y1, x1 = row * char_size, col * char_size
            if color_mode == "mono":
                color = np.array([255, 255, 255], dtype=np.uint8)
            elif color_mode == "invert":
                # Colored background, dark text
                result[y1:y1 + char_size, x1:x1 + char_size] = cell_colors[row, col]
                color = np.array([0, 0, 0], dtype=np.uint8)
            else:  # color
                color = cell_colors[row, col]
            # Spaces contribute background only (blank cell)
            if char != ' ':
                mask = char_mask > 0
                result[y1:y1 + char_size, x1:x1 + char_size][mask] = color

    # Pad back to the original frame size if cropping shrank it
    if result.shape[0] < h or result.shape[1] < w:
        padded = np.full((h, w, 3), bg, dtype=np.uint8)
        padded[:grid_h, :grid_w] = result
        result = padded

    return result, state
def _create_char_atlas(char_size: int, char_set: str) -> dict:
    """Pre-render every character of *char_set* as a grayscale glyph mask.

    Returns a dict mapping each character to a (char_size, char_size) uint8
    array: 255 where the glyph has ink, 0 elsewhere. The space character
    deliberately keeps an all-zero mask so callers can treat it as blank.
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    # Heuristic scale/thickness so glyphs roughly fill the cell at any size
    font_scale = char_size / 20.0
    thickness = max(1, int(char_size / 10))
    atlas = {}
    for char in char_set:
        char_img = np.zeros((char_size, char_size), dtype=np.uint8)
        if char != ' ':
            try:
                (text_w, text_h), baseline = cv2.getTextSize(char, font, font_scale, thickness)
                # Center the glyph inside its cell
                text_x = (char_size - text_w) // 2
                text_y = (char_size + text_h) // 2
                cv2.putText(char_img, char, (text_x, text_y), font, font_scale, 255, thickness, cv2.LINE_AA)
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt. Characters OpenCV's Hershey
                # fonts cannot render deliberately fall back to a blank mask.
                pass
        atlas[char] = char_img
    return atlas

View File

@@ -1,34 +0,0 @@
;; ASCII art effect - render as text characters (music reactive)
;; Usage: python3 plan.py effects/ascii_art.sexp | python3 execute.py - -d . -o - | mpv --fs -
;;
;; Parameters:
;; color_mode: coloring mode ("color", "green", "white", default: "color")
;; NOTE(review): confirm the sexp effect actually accepts these modes -
;; the removed Python implementation of ascii_art took mono/color/invert.
;; char_size is bound to energy (wobbles with overall loudness)
(recipe "ascii_art"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
(effect ascii_art :path "sexp_effects/effects/ascii_art.sexp")
(analyzer energy :path "../artdag-analyzers/energy/analyzer.py")
;; Default parameter
(def color_mode "color")
(def video (source :path "monday.webm"))
(def audio (source :path "dizzy.mp3"))
(def clip (-> video (segment :start 0 :duration 10)))
;; Audio from 60s where it's louder
(def audio-clip (-> audio (segment :start 60 :duration 10)))
;; Analyze just the audio clip - times are 0-10s relative to clip
(def energy-data (-> audio-clip (analyze energy)))
;; char_size bound to energy - no offset needed since analysis is on clip
(def result (-> clip
(effect ascii_art
:char_size (bind energy-data values :range [2 32])
:color_mode color_mode)))
(mux result audio-clip))

View File

@@ -1,89 +0,0 @@
;; ASCII art FX effect with staged execution and per-character effects
;;
;; Run with --list-params to see all available parameters:
;; python3 run_staged.py effects/ascii_art_fx_staged.sexp --list-params
(recipe "ascii_art_fx_staged"
:version "1.0"
:description "ASCII art FX with per-character effects"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
;; Declared parameters are overridable from the CLI (see --list-params)
:params (
;; Colors
(color_mode :type string :default "color"
:desc "Character color: color, mono, invert, or any color name/hex")
(background_color :type string :default "black"
:desc "Background color name or hex value")
(invert_colors :type int :default 0 :range [0 1]
:desc "Swap foreground and background colors")
;; Character sizing
;; NOTE(review): char_size is declared here but the effect call below binds
;; :char_size to energy data instead — confirm the param is meant to be unused.
(char_size :type int :default 12 :range [4 32]
:desc "Base character cell size in pixels")
;; Per-character effects
(char_jitter :type float :default 0 :range [0 20]
:desc "Position jitter amount in pixels")
(char_scale :type float :default 1.0 :range [0.5 2.0]
:desc "Character scale factor")
(char_rotation :type float :default 0 :range [0 180]
:desc "Rotation amount in degrees")
(char_hue_shift :type float :default 0 :range [0 360]
:desc "Hue shift in degrees")
;; Modulation sources
(jitter_source :type string :default "none"
:choices [none luminance inv_luminance saturation position_x position_y random center_dist]
:desc "What drives jitter modulation")
(scale_source :type string :default "none"
:choices [none luminance inv_luminance saturation position_x position_y random center_dist]
:desc "What drives scale modulation")
(rotation_source :type string :default "none"
:choices [none luminance inv_luminance saturation position_x position_y random center_dist]
:desc "What drives rotation modulation")
(hue_source :type string :default "none"
:choices [none luminance inv_luminance saturation position_x position_y random center_dist]
:desc "What drives hue shift modulation")
)
;; Registry
(effect ascii_art_fx :path "../sexp_effects/effects/ascii_art_fx.sexp")
(analyzer energy :path "../../artdag-analyzers/energy/analyzer.py")
;; Source files (not parameterized for now)
(def video (source :path "../monday.webm"))
(def audio (source :path "../dizzy.mp3"))
;; Stage 1: Analysis
(stage :analyze
:outputs [energy-data]
(def audio-clip (-> audio (segment :start 60 :duration 10)))
(def energy-data (-> audio-clip (analyze energy))))
;; Stage 2: Process - apply effect
(stage :process
:requires [:analyze]
:inputs [energy-data]
:outputs [result audio-clip]
(def clip (-> video (segment :start 0 :duration 10)))
;; Same audio window as the analysis stage, redefined for muxing later
(def audio-clip (-> audio (segment :start 60 :duration 10)))
(def result (-> clip
(effect ascii_art_fx
:char_size (bind energy-data values :range [8 24])
:color_mode color_mode
:background_color background_color
:invert_colors invert_colors
:char_jitter char_jitter
:char_scale char_scale
:char_rotation char_rotation
:char_hue_shift char_hue_shift
:jitter_source jitter_source
:scale_source scale_source
:rotation_source rotation_source
:hue_source hue_source))))
;; Stage 3: Output
(stage :output
:requires [:process]
:inputs [result audio-clip]
(mux result audio-clip)))

View File

@@ -1,59 +0,0 @@
;; ASCII art effect with staged execution
;;
;; Stages:
;; :analyze - Run energy analysis on audio (cacheable)
;; :process - Segment media and apply effect
;; :output - Mux video with audio
;;
;; Usage: python3 run_staged.py effects/ascii_art_staged.sexp
;;
;; Parameters:
;; color_mode: coloring mode ("color", "green", "white", default: "color")
;; char_size is bound to energy (wobbles with overall loudness)
(recipe "ascii_art_staged"
:version "1.0"
:description "ASCII art effect with staged execution"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
;; Registry: effects and analyzers
(effect ascii_art :path "../sexp_effects/effects/ascii_art.sexp")
(analyzer energy :path "../../artdag-analyzers/energy/analyzer.py")
;; Pre-stage definitions (available to all stages)
(def color_mode "color")
(def background_color "black")
(def invert_colors 0) ;; 0=false, 1=true
(def video (source :path "../monday.webm"))
(def audio (source :path "../dizzy.mp3"))
;; Stage 1: Analysis - extract energy from audio
;; This stage is expensive but cacheable - rerun with same input skips this
(stage :analyze
:outputs [energy-data]
;; Audio from 60s where it's louder
(def audio-clip (-> audio (segment :start 60 :duration 10)))
(def energy-data (-> audio-clip (analyze energy))))
;; Stage 2: Process - apply ASCII art effect with energy binding
(stage :process
:requires [:analyze]
:inputs [energy-data]
:outputs [result audio-clip]
;; Video segment
(def clip (-> video (segment :start 0 :duration 10)))
;; Audio clip for muxing (same segment as analysis)
(def audio-clip (-> audio (segment :start 60 :duration 10)))
;; Apply effect with char_size bound to energy
(def result (-> clip
(effect ascii_art
:char_size (bind energy-data values :range [2 32])
:color_mode color_mode
:background_color background_color
:invert_colors invert_colors))))
;; Stage 3: Output - combine video and audio
(stage :output
:requires [:process]
:inputs [result audio-clip]
(mux result audio-clip)))

View File

@@ -1,64 +0,0 @@
;; ASCII Cell Effect - Demonstrates arbitrary per-cell effects via lambda
;;
;; Each character cell is a mini-frame that can have any effects applied.
;; The lambda receives the cell image and zone context (including bound analysis data).
(recipe "ascii_cell_effect_staged"
:version "1.0"
:description "ASCII art with lambda-driven per-cell effects"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
:params (
(cols :type int :default 60 :range [20 200]
:desc "Number of character columns")
(rotation_scale :type float :default 45 :range [0 90]
:desc "Max rotation in degrees at top-right corner")
)
;; Registry
(effect ascii_fx_zone :path "../sexp_effects/effects/ascii_fx_zone.sexp")
(analyzer energy :path "../../artdag-analyzers/energy/analyzer.py")
;; Source files
(def video (source :path "../monday.webm"))
(def audio (source :path "../dizzy.mp3"))
;; Stage 1: Analysis
(stage :analyze
:outputs [energy-data]
(def audio-clip (-> audio (segment :start 60 :duration 10)))
(def energy-data (-> audio-clip (analyze energy))))
;; Stage 2: Process - apply effect with cell_effect lambda
(stage :process
:requires [:analyze]
:inputs [energy-data]
:outputs [result audio-clip]
(def clip (-> video (segment :start 0 :duration 10)))
;; Same audio window as the analysis stage, redefined for the final mux
(def audio-clip (-> audio (segment :start 60 :duration 10)))
;; Apply effect with cell_effect lambda
;; The lambda receives (cell zone) where:
;; cell = the rendered character as a small image
;; zone = dict with row, col, lum, sat, hue, energy, rotation_scale, etc.
(def result (-> clip
(effect ascii_fx_zone
:cols cols
:char_size (bind energy-data values :range [10 20])
:color_mode "color"
:background "black"
;; Pass bound values so they're available in zone dict
:energy (bind energy-data values :range [0 1])
:rotation_scale rotation_scale
;; Cell effect lambda: rotate each cell based on energy * position
;; (zero rotation at bottom-left, maximum at top-right)
:cell_effect (lambda [cell zone]
(rotate-cell cell
(* (* (get zone "energy") (get zone "rotation_scale"))
(* 1.5 (+ (get zone "col-norm")
(- 1 (get zone "row-norm")))))))))))
;; Stage 3: Output
(stage :output
:requires [:process]
:inputs [result audio-clip]
(mux result audio-clip)))

View File

@@ -1,99 +0,0 @@
;; ASCII Dual Blend
;;
;; Applies ASCII alternating rotation effect to two video sources,
;; blends them together, and muxes with audio.
;; All synced to the same audio analysis.
(recipe "ascii_dual_blend"
:version "1.0"
:description "Blend two ASCII-processed videos synced to audio"
:minimal-primitives true
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
:params (
(cols :type int :default 50 :range [20 100]
:desc "Number of character columns")
(rotation_scale :type float :default 60 :range [0 180]
:desc "Max rotation in degrees")
(blend_opacity :type float :default 0.5 :range [0 1]
:desc "Blend opacity (0=video-a only, 1=video-b only)")
(blend_mode :type string :default "overlay"
:desc "Blend mode: alpha, add, multiply, screen, overlay, difference")
(duration :type float :default 10 :range [1 300]
:desc "Clip duration in seconds")
)
;; Registry - effects and analyzers
(effect ascii_fx_zone :path "../sexp_effects/effects/ascii_fx_zone.sexp")
(effect rotate :path "../sexp_effects/effects/rotate.sexp")
(effect blend :path "../sexp_effects/effects/blend.sexp")
(analyzer energy :path "../../artdag-analyzers/energy/analyzer.py")
;; Source files
(def video-a (source :path "../monday.webm"))
(def video-b (source :path "../new.webm"))
(def audio (source :path "../dizzy.mp3"))
;; Stage 1: Analysis
(stage :analyze
:outputs [energy-data]
(def audio-clip (-> audio (segment :start 60 :duration duration)))
(def energy-data (-> audio-clip (analyze energy))))
;; Stage 2: Process both videos
;; NOTE(review): the A and B pipelines below are identical expressions —
;; a shared macro/template would remove the duplication (comment only).
(stage :process
:requires [:analyze]
:inputs [energy-data]
:outputs [blended audio-clip]
;; Get audio clip for final mux
(def audio-clip (-> audio (segment :start 60 :duration duration)))
;; Process video A with ASCII effect
;; Cell lambda: checkerboard sign (+1/-1 by row+col parity) times
;; energy-scaled rotation that grows toward the top-right corner.
(def clip-a (-> video-a (segment :start 0 :duration duration)))
(def ascii-a (-> clip-a
(effect ascii_fx_zone
:cols cols
:char_size (bind energy-data values :range [10 20])
:color_mode "color"
:background "black"
:energy (bind energy-data values :range [0 1])
:rotation_scale rotation_scale
:cell_effect (lambda [cell zone]
(rotate cell
(* (if (= (mod (+ (get zone "row") (get zone "col")) 2) 0) 1 -1)
(* (get zone "energy")
(get zone "rotation_scale")
(* 1.5 (+ (get zone "col-norm")
(- 1 (get zone "row-norm")))))))))))
;; Process video B with ASCII effect
(def clip-b (-> video-b (segment :start 0 :duration duration)))
(def ascii-b (-> clip-b
(effect ascii_fx_zone
:cols cols
:char_size (bind energy-data values :range [10 20])
:color_mode "color"
:background "black"
:energy (bind energy-data values :range [0 1])
:rotation_scale rotation_scale
:cell_effect (lambda [cell zone]
(rotate cell
(* (if (= (mod (+ (get zone "row") (get zone "col")) 2) 0) 1 -1)
(* (get zone "energy")
(get zone "rotation_scale")
(* 1.5 (+ (get zone "col-norm")
(- 1 (get zone "row-norm")))))))))))
;; Blend the two ASCII videos using consistent effect syntax
(def blended (-> ascii-a
(effect blend ascii-b
:mode blend_mode
:opacity blend_opacity
:resize_mode "fit"))))
;; Stage 3: Output
(stage :output
:requires [:process]
:inputs [blended audio-clip]
(mux blended audio-clip)))

View File

@@ -1,66 +0,0 @@
;; ASCII FX Zone effect with per-zone expression-driven effects
;;
;; Uses energy analysis to drive rotation based on position:
;; - Bottom-left = 0 rotation
;; - Top-right = max rotation (scaled by energy)
(recipe "ascii_fx_zone_staged"
:version "1.0"
:description "ASCII art with per-zone expression-driven effects"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
:params (
;; NOTE(review): cols is declared here but never passed to the effect call
;; below (no :cols keyword) — confirm whether it should be forwarded.
(cols :type int :default 80 :range [20 200]
:desc "Number of character columns")
(color_mode :type string :default "color"
:desc "Character color: color, mono, invert, or any color name/hex")
(background :type string :default "black"
:desc "Background color name or hex value")
(rotation_scale :type float :default 30 :range [0 90]
:desc "Max rotation in degrees at top-right corner")
)
;; Registry
(effect ascii_fx_zone :path "../sexp_effects/effects/ascii_fx_zone.sexp")
(analyzer energy :path "../../artdag-analyzers/energy/analyzer.py")
;; Source files
(def video (source :path "../monday.webm"))
(def audio (source :path "../dizzy.mp3"))
;; Stage 1: Analysis
(stage :analyze
:outputs [energy-data]
(def audio-clip (-> audio (segment :start 60 :duration 10)))
(def energy-data (-> audio-clip (analyze energy))))
;; Stage 2: Process - apply effect with zone expressions
(stage :process
:requires [:analyze]
:inputs [energy-data]
:outputs [result audio-clip]
(def clip (-> video (segment :start 0 :duration 10)))
;; Same audio window as the analysis stage, redefined for the final mux
(def audio-clip (-> audio (segment :start 60 :duration 10)))
;; Apply effect with lambdas
;; Lambda receives zone dict: {row, col, row-norm, col-norm, lum, sat, hue, r, g, b, char}
;; Plus any extra params like energy, rotation_scale
(def result (-> clip
(effect ascii_fx_zone
:char_size (bind energy-data values :range [8 24])
:color_mode color_mode
:background background
;; Pass energy as extra param so lambda can access it via zone dict
:energy (bind energy-data values :range [0 1])
:rotation_scale rotation_scale
;; Rotation: energy * scale * position (bottom-left=0, top-right=3)
:char_rotation (lambda [z]
(* (* (get z "energy") (get z "rotation_scale"))
(* 1.5 (+ (get z "col-norm")
(- 1 (get z "row-norm"))))))))))
;; Stage 3: Output
(stage :output
:requires [:process]
:inputs [result audio-clip]
(mux result audio-clip)))

View File

@@ -1,33 +0,0 @@
;; ASCII zones effect - region-based ASCII rendering
;; Usage: python3 plan.py effects/ascii_zones.sexp -p char_size=10 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; char_size: character cell size in pixels (4 to 20, default: 10)
(recipe "ascii_zones"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
;; Registry: effect and analyzer implementations resolved by relative path
(effect ascii_zones :path "sexp_effects/effects/ascii_zones.sexp")
(analyzer energy :path "../artdag-analyzers/energy/analyzer.py")
;; Default parameter (overridden by -p char_size=N)
;; NOTE(review): this def is shadowed below — :char_size is bound to energy,
;; so the -p override has no effect. Confirm intended.
(def char_size 10)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def audio (source :path "dizzy.mp3"))
;; Audio taken from 60s into the track
(def audio-clip (-> audio (segment :start 60 :duration 10)))
;; Analyze just the audio clip - times are 0-10s relative to clip
(def energy-data (-> audio-clip (analyze energy)))
;; char_size bound to energy - no offset needed since analysis is on clip
(def result (-> clip
(effect ascii_zones
:char_size (bind energy-data values :range [2 32]))))
(mux result audio-clip))

View File

@@ -1,164 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect beam
@version 1.0.0
@author artdag
@description
Beam effect. Creates animated light beams / lasers from a starting
point to an ending point with glow effect.
@param start_x float
@range 0 1
@default 0
Beam start X position (0-1).
@param start_y float
@range 0 1
@default 0.5
Beam start Y position (0-1).
@param end_x float
@range 0 1
@default 1
Beam end X position (0-1).
@param end_y float
@range 0 1
@default 0.5
Beam end Y position (0-1).
@param thickness float
@range 1 50
@default 5
Beam core thickness in pixels.
@param glow_radius float
@range 0 100
@default 20
Outer glow size in pixels.
@param color list
@default [0, 255, 255]
Beam color RGB (default cyan).
@param intensity float
@range 0 2
@default 1.0
Beam brightness.
@param pulse bool
@default false
Enable pulsing animation.
@param pulse_speed float
@range 0.1 10
@default 2.0
Pulse animation speed.
@example
(effect beam :start_x 0 :start_y 0.5 :end_x 1 :end_y 0.5)
@example
;; Reactive laser
(effect beam :intensity (bind bass :range [0.5 2]) :color [255 0 0])
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """Draw an additive light beam with a soft glow onto the frame.

    A bright core is rendered along the segment from (start_x, start_y) to
    (end_x, end_y) in normalized coordinates, with a quadratic-falloff glow
    around it; both are added on top of the input image.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8.
        params: Effect parameters (start/end positions, thickness,
            glow_radius, color, intensity, pulse, pulse_speed, _time).
        state: Persistent state dict (created if None, otherwise unused).

    Returns:
        Tuple of (processed_frame, state).
    """
    get = params.get
    sx, sy = get("start_x", 0), get("start_y", 0.5)
    ex, ey = get("end_x", 1), get("end_y", 0.5)
    thickness = get("thickness", 5)
    glow_radius = get("glow_radius", 20)
    color = get("color", [0, 255, 255])
    intensity = get("intensity", 1.0)
    pulse = get("pulse", False)
    pulse_speed = get("pulse_speed", 2.0)
    t = get("_time", 0)

    state = {} if state is None else state

    h, w = frame.shape[:2]
    canvas = frame.copy().astype(np.float32)

    # Endpoints in pixel coordinates.
    x1, y1 = int(sx * w), int(sy * h)
    x2, y2 = int(ex * w), int(ey * h)

    # Optional sinusoidal intensity pulsing driven by the frame time.
    if pulse:
        intensity = intensity * (0.5 + 0.5 * np.sin(t * pulse_speed * 2 * np.pi))

    # Per-pixel coordinate grids.
    yy, xx = np.mgrid[0:h, 0:w].astype(np.float32)

    # Segment direction; near-zero-length beams are a no-op.
    seg = np.array([x2 - x1, y2 - y1], dtype=np.float32)
    seg_len = np.sqrt(seg[0]**2 + seg[1]**2)
    if seg_len < 1:
        return frame, state
    unit = seg / seg_len

    # Distance from every pixel to its closest point on the segment.
    proj = np.clip((xx - x1) * unit[0] + (yy - y1) * unit[1], 0, seg_len)
    near_x = x1 + proj * unit[0]
    near_y = y1 + proj * unit[1]
    dist = np.sqrt((xx - near_x)**2 + (yy - near_y)**2)

    # Beam color with a cyan fallback for malformed inputs.
    if isinstance(color, (list, tuple)) and len(color) >= 3:
        beam_rgb = np.array(color[:3], dtype=np.float32)
    else:
        beam_rgb = np.array([0, 255, 255], dtype=np.float32)

    # Bright core: linear falloff from the beam axis.
    core = dist < thickness
    core_gain = intensity * (1 - dist[core] / max(1, thickness))
    canvas[core] = np.clip(canvas[core] + beam_rgb[None, :] * core_gain[:, None], 0, 255)

    # Soft glow: quadratic falloff beyond the core out to glow_radius.
    glow = (dist >= thickness) & (dist < thickness + glow_radius)
    glow_gain = intensity * 0.5 * (1 - (dist[glow] - thickness) / max(1, glow_radius)) ** 2
    canvas[glow] = np.clip(canvas[glow] + beam_rgb[None, :] * glow_gain[:, None], 0, 255)

    return canvas.astype(np.uint8), state

View File

@@ -1,108 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect bloom
@version 1.0.0
@author artdag
@description
Bloom effect. Creates a soft glow around bright areas, simulating
camera lens bloom. Great for dreamy or ethereal looks.
@param intensity float
@range 0 2
@default 0.5
Bloom brightness.
@param threshold int
@range 0 255
@default 200
Brightness threshold for bloom (pixels above this glow).
@param radius int
@range 1 50
@default 15
Blur radius for the glow.
@param soft_threshold bool
@default true
Use soft threshold (gradual) vs hard threshold.
@param color_tint list
@default [255, 255, 255]
Tint color for the bloom.
@example
(effect bloom :intensity 0.7 :threshold 180)
@example
;; Warm bloom
(effect bloom :intensity 0.6 :color_tint [255 200 150])
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """Add a soft glow around bright regions (camera lens bloom).

    Bright pixels are isolated by a (soft or hard) luminance threshold,
    blurred into a glow, optionally tinted, and added back onto the frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8.
        params: Effect parameters — intensity (default 0.5),
            threshold (default 200), radius (default 15),
            soft_threshold (default True), color_tint (default white).
        state: Persistent state dict (created if None, otherwise unused).

    Returns:
        Tuple of (processed_frame, state).
    """
    intensity = params.get("intensity", 0.5)
    threshold = int(params.get("threshold", 200))
    radius = max(1, int(params.get("radius", 15)))
    soft = params.get("soft_threshold", True)
    tint = params.get("color_tint", [255, 255, 255])

    state = {} if state is None else state

    # Zero or negative intensity: the effect is a pass-through.
    if intensity <= 0:
        return frame, state

    base = frame.astype(np.float32)

    # Rec.601 luminance weights.
    lum = 0.299 * base[:, :, 0] + 0.587 * base[:, :, 1] + 0.114 * base[:, :, 2]

    # Bloom weight per pixel: gradual ramp above the threshold, or binary.
    if soft:
        weight = np.clip((lum - threshold) / (255 - threshold + 1e-6), 0, 1)
    else:
        weight = (lum > threshold).astype(np.float32)

    # Isolate the bright areas and blur them into a glow.
    glow = base * weight[:, :, np.newaxis]
    ksize = radius * 2 + 1  # GaussianBlur needs an odd kernel size
    glow = cv2.GaussianBlur(glow, (ksize, ksize), 0)

    # Optional color tint applied channel-wise.
    if isinstance(tint, (list, tuple)) and len(tint) >= 3:
        tint_rgb = np.array(tint[:3], dtype=np.float32) / 255.0
        glow = glow * tint_rgb[np.newaxis, np.newaxis, :]

    # Additive (screen-like) composite of the glow over the original.
    out = base + glow * intensity
    return np.clip(out, 0, 255).astype(np.uint8), state

View File

@@ -1,21 +0,0 @@
;; Bloom effect - glow around bright areas
;; Usage: python3 plan.py effects/bloom.sexp -p intensity=0.6 -p radius=20 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; intensity: bloom strength (0.0 to 2.0, default: 0.6)
;; radius: bloom spread in pixels (5 to 50, default: 20)
(recipe "bloom"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
(effect bloom :path "sexp_effects/effects/bloom.sexp")
;; Default parameters (overridden by -p intensity=N -p radius=N)
(def intensity 0.6)
(def radius 20)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect bloom :intensity intensity :radius radius)))
;; Video-only recipe: the final expression is the processed clip (no mux)
result)

View File

@@ -1,54 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect blur
@version 1.0.0
@author artdag
@description
Gaussian blur effect. Softens the image by averaging neighboring pixels.
Useful for dreamy effects or reducing detail on quiet parts.
@param radius float
@range 0 50
@default 5
Blur radius in pixels. Higher = more blur. Must be positive.
@example
(effect blur :radius 10)
@example
;; Blur on quiet, sharp on loud
(effect blur :radius (bind energy :range [20 0]))
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """Soften the frame with a Gaussian blur.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8.
        params: Effect parameters — radius: blur radius in pixels
            (default 5; non-positive values disable the blur).
        state: Persistent state dict (passed through unchanged).

    Returns:
        Tuple of (processed_frame, state).
    """
    radius = params.get("radius", 5)

    # Non-positive radius: return the frame untouched.
    if radius <= 0:
        return frame, state

    # OpenCV requires an odd kernel size.
    kernel = 2 * int(radius) + 1
    blurred = cv2.GaussianBlur(frame, (kernel, kernel), 0)
    return blurred, state

View File

@@ -1,20 +0,0 @@
;; Blur effect - gaussian blur
;; Usage: python3 plan.py effects/blur.sexp -p radius=10 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; radius: blur radius in pixels (1 to 50, default: 10)
;; higher = more blurry
(recipe "blur"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
(effect blur :path "sexp_effects/effects/blur.sexp")
;; Default parameter (overridden by -p radius=N)
(def radius 10)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect blur :radius radius)))
;; Video-only recipe: the final expression is the processed clip (no mux)
result)

View File

@@ -1,52 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect brightness
@version 1.0.0
@author artdag
@description
Adjusts the brightness of each frame by multiplying pixel values.
Values > 1.0 brighten, < 1.0 darken. Useful for pulse effects synced to music.
@param factor float
@range 0 3
@default 1.0
Brightness multiplier. 0 = black, 1 = unchanged, 2 = double brightness.
Bind to bass/energy for reactive brightness pulses.
@example
(effect brightness :factor 1.5)
@example
;; Pulse brighter on bass hits
(effect brightness :factor (bind bass :range [1.0 2.0] :transform sqrt))
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """Scale frame brightness by a multiplicative factor.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8.
        params: Effect parameters — factor: brightness multiplier
            (default 1.0; 0 = black, 2 = double brightness).
        state: Persistent state dict (passed through unchanged).

    Returns:
        Tuple of (processed_frame, state).
    """
    factor = params.get("factor", 1.0)

    # Identity factor: skip the float round-trip entirely.
    if factor == 1.0:
        return frame, state

    # Multiply in float space, then clip back into the uint8 range.
    scaled = frame.astype(np.float32) * factor
    return np.clip(scaled, 0, 255).astype(np.uint8), state

View File

@@ -1,20 +0,0 @@
;; Brightness effect - adjust image brightness
;; Usage: python3 plan.py effects/brightness.sexp [-p amount=30] | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; amount: brightness adjustment (-255 to 255, default: 30)
;; negative = darker, positive = brighter
(recipe "brightness"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
(effect brightness :path "sexp_effects/effects/brightness.sexp")
;; Default parameter (overridden by -p amount=N)
;; NOTE(review): the brightness effect implementation reads a :factor
;; multiplier (range 0-3), not :amount — confirm this keyword is honored.
(def amount 30)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect brightness :amount amount)))
;; Video-only recipe: the final expression is the processed clip (no mux)
result)

View File

@@ -1,117 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "scipy"]
# ///
"""
@effect cartoon
@version 1.0.0
@author artdag
@description
Cartoon / Cel-shaded effect. Simplifies colors into flat regions
and adds dark edge outlines for a hand-drawn cartoon appearance.
@param detail float
@range 0.1 1.0
@default 0.5
Edge detection sensitivity. Higher = more edges detected.
@param edge_thickness int
@range 1 5
@default 1
Outline thickness in pixels.
@param color_levels int
@range 2 32
@default 6
Number of color levels per channel.
@param edge_color list
@default [0, 0, 0]
RGB color for edges (default black).
@param blur_size int
@range 0 10
@default 2
Pre-blur for smoother color regions.
@example
(effect cartoon :detail 0.6 :color_levels 4)
@example
;; Thick outlines, fewer colors
(effect cartoon :edge_thickness 3 :color_levels 3 :blur_size 4)
"""
import numpy as np
from scipy import ndimage
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """Cartoon / cel-shade the frame: flatten colors and ink the edges.

    Colors are optionally pre-blurred, posterized into flat bands, and
    Sobel-detected edges of the original frame are painted on top.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8.
        params: Effect parameters — detail (0.1-1.0, default 0.5),
            edge_thickness (1-5, default 1), color_levels (2-32, default 6),
            edge_color (RGB, default black), blur_size (default 2).
        state: Persistent state dict (created if None, otherwise unused).

    Returns:
        Tuple of (processed_frame, state).
    """
    detail = np.clip(params.get("detail", 0.5), 0.1, 1.0)
    edge_thickness = max(1, min(int(params.get("edge_thickness", 1)), 5))
    color_levels = max(2, min(int(params.get("color_levels", 6)), 32))
    edge_color = params.get("edge_color", [0, 0, 0])
    blur_size = max(0, int(params.get("blur_size", 2)))

    state = {} if state is None else state

    smoothed = frame.copy().astype(np.float32)

    # 1) Optional box blur per channel for smoother, larger flat regions.
    if blur_size > 0:
        for ch in range(3):
            smoothed[:, :, ch] = ndimage.uniform_filter(smoothed[:, :, ch], size=blur_size)

    # 2) Posterize: quantize each channel into color_levels bands.
    band = 256 / color_levels
    result = (np.floor(smoothed / band) * band).astype(np.uint8)

    # 3) Edge magnitude from Sobel gradients of the ORIGINAL frame.
    gray = np.mean(frame, axis=2).astype(np.float32)
    gx = ndimage.sobel(gray, axis=1)
    gy = ndimage.sobel(gray, axis=0)
    magnitude = np.sqrt(gx**2 + gy**2)

    # Normalize (when any edge exists) and threshold by sensitivity.
    peak = magnitude.max()
    if peak > 0:
        magnitude = magnitude / peak
    outline = magnitude > (1.0 - detail)

    # Thicken the outline with repeated binary dilation.
    if edge_thickness > 1:
        struct = ndimage.generate_binary_structure(2, 1)
        for _ in range(edge_thickness - 1):
            outline = ndimage.binary_dilation(outline, structure=struct)

    # 4) Paint outline pixels with the edge color (black fallback).
    if isinstance(edge_color, (list, tuple)) and len(edge_color) >= 3:
        ink = np.array(edge_color[:3], dtype=np.uint8)
    else:
        ink = np.array([0, 0, 0], dtype=np.uint8)
    result[outline] = ink

    return result, state

View File

@@ -1,115 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect chromatic
@version 1.0.0
@author artdag
@description
Chromatic aberration effect. Creates color fringing by offsetting
RGB channels radially from the center (lens distortion simulation).
@param strength float
@range 0 50
@default 10
Aberration strength. Bind to bass for reactive effect.
@param center_x float
@range 0 1
@default 0.5
Aberration center X.
@param center_y float
@range 0 1
@default 0.5
Aberration center Y.
@param radial bool
@default true
If true, aberration increases from center to edges.
@example
(effect chromatic :strength 20)
@example
;; Beat-reactive chromatic aberration
(effect chromatic :strength (bind bass :range [0 30]))
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """Offset the red and blue channels to simulate lens color fringing.

    In radial mode the displacement grows with distance from a chosen
    center (red pushed outward, blue pulled inward); otherwise both
    channels get a uniform horizontal shift.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8.
        params: Effect parameters — strength (default 10),
            center_x / center_y (0-1, default 0.5), radial (default True).
        state: Persistent state dict (created if None, otherwise unused).

    Returns:
        Tuple of (processed_frame, state).
    """
    strength = params.get("strength", 10)
    center_x = params.get("center_x", 0.5)
    center_y = params.get("center_y", 0.5)
    radial = params.get("radial", True)

    state = {} if state is None else state

    # Zero strength: the effect is a pass-through.
    if strength == 0:
        return frame, state

    h, w = frame.shape[:2]
    red = frame[:, :, 0]
    green = frame[:, :, 1]
    blue = frame[:, :, 2]

    if not radial:
        # Uniform shift: red right, blue left by the same pixel count.
        px = int(strength)
        shift_r = np.float32([[1, 0, px], [0, 1, 0]])
        shift_b = np.float32([[1, 0, -px], [0, 1, 0]])
        red_s = cv2.warpAffine(red, shift_r, (w, h), borderMode=cv2.BORDER_REPLICATE)
        blue_s = cv2.warpAffine(blue, shift_b, (w, h), borderMode=cv2.BORDER_REPLICATE)
        return np.stack([red_s, green, blue_s], axis=-1).astype(np.uint8), state

    # Radial mode: distance of each pixel from the aberration center.
    grid_y, grid_x = np.ogrid[:h, :w]
    cx, cy = w * center_x, h * center_y
    radius = np.sqrt((grid_x - cx)**2 + (grid_y - cy)**2)
    radius_norm = (radius / np.sqrt(cx**2 + cy**2)).astype(np.float32)

    # Base sampling coordinates for cv2.remap.
    base_x = np.tile(np.arange(w, dtype=np.float32), (h, 1))
    base_y = np.tile(np.arange(h, dtype=np.float32).reshape(-1, 1), (1, w))

    # Unit direction away from the center (epsilon guards the center pixel).
    dir_x = (base_x - cx) / (radius + 1e-6)
    dir_y = (base_y - cy) / (radius + 1e-6)

    # Red sampled outward by a distance-scaled offset...
    push = strength * radius_norm
    red_s = cv2.remap(red,
                      (base_x + dir_x * push).astype(np.float32),
                      (base_y + dir_y * push).astype(np.float32),
                      cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)

    # ...and blue sampled inward by the same amount.
    pull = -strength * radius_norm
    blue_s = cv2.remap(blue,
                       (base_x + dir_x * pull).astype(np.float32),
                       (base_y + dir_y * pull).astype(np.float32),
                       cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)

    return np.stack([red_s, green, blue_s], axis=-1).astype(np.uint8), state

View File

@@ -1,21 +0,0 @@
;; Color adjust effect - combined brightness and contrast
;; Usage: python3 plan.py effects/color-adjust.sexp -p brightness=20 -p contrast=1.2 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; brightness: brightness adjustment (-255 to 255, default: 20)
;; contrast: contrast multiplier (0.0 to 3.0, default: 1.2)
(recipe "color-adjust"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
(effect color-adjust :path "sexp_effects/effects/color-adjust.sexp")
;; Default parameters (overridden by -p brightness=N -p contrast=N)
(def brightness 20)
(def contrast 1.2)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect color-adjust :brightness brightness :contrast contrast)))
;; Video-only recipe: the final expression is the processed clip (no mux)
result)

View File

@@ -1,104 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect color_cycle
@version 1.0.0
@author artdag
@description
Color cycle effect. Shifts all hues over time creating psychedelic
rainbow cycling. Great for trippy visuals.
@param speed float
@range 0 10
@default 1
Cycle speed (rotations per second).
@param offset float
@range 0 360
@default 0
Initial hue offset in degrees.
@param saturation_boost float
@range 0 2
@default 1
Saturation multiplier.
@param mode string
@enum all highlights shadows midtones
@default all
Which tones to affect.
@example
(effect color_cycle :speed 0.5)
@example
;; Beat-synced color shift
(effect color_cycle :offset (bind beat_position :range [0 360]))
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply color cycle effect to a video frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - speed: rotations per second (default 1)
            - offset: initial hue offset in degrees (default 0)
            - saturation_boost: saturation multiplier, clamped to 0-2 (default 1)
            - mode: which tones to affect: all/highlights/shadows/midtones (default all)
            - _time: current time in seconds (injected by the pipeline)
        state: Persistent state dict (created if None; otherwise passed through)

    Returns:
        Tuple of (processed_frame, new_state)

    NOTE: saturation_boost is only applied in "all" mode; the selective
    modes shift hue only.
    """
    speed = params.get("speed", 1)
    offset = params.get("offset", 0)
    saturation_boost = max(0, min(params.get("saturation_boost", 1), 2))
    mode = params.get("mode", "all")
    t = params.get("_time", 0)
    if state is None:
        state = {}
    # Hue shift in degrees; `speed` is full rotations per second.
    hue_shift = int((offset + speed * t * 360) % 360)
    # Convert to HSV (OpenCV uses BGR, our frame is RGB)
    frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    hsv = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV).astype(np.float32)
    if mode == "all":
        # Shift all hues (OpenCV's hue channel spans 0-179, hence the /2).
        hsv[:, :, 0] = (hsv[:, :, 0] + hue_shift / 2) % 180
        hsv[:, :, 1] = np.clip(hsv[:, :, 1] * saturation_boost, 0, 255)
    else:
        # Calculate luminance mask from the V channel, normalized to 0-1.
        lum = hsv[:, :, 2] / 255.0
        if mode == "highlights":
            mask = np.clip((lum - 0.67) * 3, 0, 1)
        elif mode == "shadows":
            mask = np.clip(1 - lum * 3, 0, 1)
        else:  # midtones: everything that is neither shadow nor highlight
            shadow_mask = np.clip(1 - lum * 3, 0, 1)
            highlight_mask = np.clip((lum - 0.67) * 3, 0, 1)
            mask = 1 - shadow_mask - highlight_mask
        # Apply selective hue shift, blended per-pixel by the tone mask.
        shifted_hue = (hsv[:, :, 0] + hue_shift / 2) % 180
        hsv[:, :, 0] = hsv[:, :, 0] * (1 - mask) + shifted_hue * mask
    # Convert back to RGB for the pipeline.
    hsv = np.clip(hsv, 0, 255).astype(np.uint8)
    result_bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    result = cv2.cvtColor(result_bgr, cv2.COLOR_BGR2RGB)
    return result, state

View File

@@ -1,20 +0,0 @@
;; Color cycle effect - animated hue rotation over time
;; Usage: python3 plan.py effects/color_cycle.sexp -p speed=2 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; speed: rotation speed multiplier (0.1 to 10, default: 2)
;; higher = faster color cycling
(recipe "color_cycle"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect color_cycle :path "sexp_effects/effects/color_cycle.sexp")
  ;; Default parameter (overridden by -p speed=N); higher = faster cycling.
  (def speed 2)
  ;; 10-second clip from the default source video.
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect color_cycle :speed speed)))
  result)

View File

@@ -1,139 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect color_grade
@version 1.0.0
@author artdag
@description
Color grading effect. Applies cinematic color adjustments including
shadows/midtones/highlights tinting, lift/gamma/gain, and temperature.
@param shadows list
@default [0, 0, 0]
RGB tint for dark areas.
@param midtones list
@default [0, 0, 0]
RGB tint for middle tones.
@param highlights list
@default [0, 0, 0]
RGB tint for bright areas.
@param lift float
@range -0.5 0.5
@default 0
Raise/lower shadow levels.
@param gamma float
@range 0.5 2
@default 1
Midtone brightness curve.
@param gain float
@range 0.5 2
@default 1
Highlight intensity.
@param temperature float
@range -100 100
@default 0
Color temperature (-100 = cool/blue, +100 = warm/orange).
@param tint float
@range -100 100
@default 0
Green/magenta tint (-100 = green, +100 = magenta).
@example
(effect color_grade :temperature 30 :shadows [0 0 20])
@example
;; Cinematic teal-orange look
(effect color_grade :shadows [0 10 20] :highlights [20 10 0])
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """Apply cinematic color grading: lift/gamma/gain, zone tints, temperature.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8.
        params: Grading parameters - shadows/midtones/highlights ([R, G, B]
            tints in 0-255), lift (-0.5..0.5), gamma (0.5..2), gain (0.5..2),
            temperature (-100..100, cool..warm), tint (-100..100, green..magenta).
        state: Persistent state dict (created if None; otherwise passed through).

    Returns:
        Tuple of (graded_frame, state).
    """
    if state is None:
        state = {}

    def _tint_vec(value):
        # Normalize an [R, G, B] tint (0-255) to float32 0-1; anything else -> zeros.
        if isinstance(value, (list, tuple)) and len(value) >= 3:
            return np.array(value[:3], dtype=np.float32) / 255.0
        return np.zeros(3, dtype=np.float32)

    lift = params.get("lift", 0)
    gamma = max(0.5, min(params.get("gamma", 1), 2))
    gain = max(0.5, min(params.get("gain", 1), 2))
    temperature = params.get("temperature", 0)
    tint = params.get("tint", 0)

    img = frame.astype(np.float32) / 255.0
    # Lift raises the floor, gamma bends the midtones, gain scales the top.
    img = img + lift
    img = np.power(np.clip(img, 0.001, 1), 1 / gamma)
    img = img * gain

    shadows = _tint_vec(params.get("shadows", [0, 0, 0]))
    midtones = _tint_vec(params.get("midtones", [0, 0, 0]))
    highlights = _tint_vec(params.get("highlights", [0, 0, 0]))

    # Rec.601 luma drives the shadow/midtone/highlight zone masks.
    lum = 0.299 * img[:, :, 0] + 0.587 * img[:, :, 1] + 0.114 * img[:, :, 2]
    shadow_mask = np.clip(1 - lum * 3, 0, 1)[:, :, np.newaxis]
    highlight_mask = np.clip((lum - 0.67) * 3, 0, 1)[:, :, np.newaxis]
    midtone_mask = 1 - shadow_mask - highlight_mask

    # Broadcast each RGB tint over its zone mask (all channels at once).
    img = img + shadows * shadow_mask
    img = img + midtones * midtone_mask
    img = img + highlights * highlight_mask

    # Temperature slides red against blue; tint pushes green toward magenta.
    if temperature != 0:
        temp_shift = temperature / 100.0
        img[:, :, 0] += temp_shift * 0.3  # Red
        img[:, :, 2] -= temp_shift * 0.3  # Blue
    if tint != 0:
        img[:, :, 1] -= (tint / 100.0) * 0.2  # Green

    return (np.clip(img, 0, 1) * 255).astype(np.uint8), state

View File

@@ -1,52 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect contrast
@version 1.0.0
@author artdag
@description
Adjusts contrast by scaling pixel values around the midpoint (128).
Higher values increase contrast, lower values flatten the image.
@param factor float
@range 0 3
@default 1.0
Contrast multiplier. 0.5 = low contrast, 1 = unchanged, 2 = high contrast.
@example
(effect contrast :factor 1.5)
@example
;; Dramatic contrast on energy peaks
(effect contrast :factor (bind energy :range [1.0 2.0]))
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """Scale pixel contrast around the mid-gray point (128).

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8.
        params: Effect parameters; "factor" is the contrast multiplier
            (default 1.0; <1 flattens, >1 increases contrast).
        state: Persistent state dict (unused, passed through).

    Returns:
        Tuple of (processed_frame, state).
    """
    gain = params.get("factor", 1.0)
    # Identity factor: hand the frame back untouched.
    if gain == 1.0:
        return frame, state
    # Stretch distances from mid-gray, then clamp back into uint8 range.
    adjusted = (frame.astype(np.float32) - 128) * gain + 128
    return np.clip(adjusted, 0, 255).astype(np.uint8), state

View File

@@ -1,20 +0,0 @@
;; Contrast effect - adjust image contrast
;; Usage: python3 plan.py effects/contrast.sexp -p amount=1.5 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; amount: contrast multiplier (0.0 to 3.0, default: 1.5)
;; <1 = less contrast, >1 = more contrast
(recipe "contrast"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect contrast :path "sexp_effects/effects/contrast.sexp")
  ;; Default parameter (overridden by -p amount=N)
  (def amount 1.5)
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  ;; NOTE(review): the legacy Python contrast effect read this parameter as
  ;; "factor", not "amount" - confirm sexp_effects/effects/contrast.sexp
  ;; actually accepts :amount, otherwise the value is silently ignored.
  (def result (-> clip (effect contrast :amount amount)))
  result)

View File

@@ -1,193 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect crt
@version 1.0.0
@author artdag
@description
CRT / Scanlines effect. Simulates CRT monitor aesthetics with visible
scan lines, optional RGB subpixels, barrel distortion, and vignette.
@param line_spacing int
@range 1 10
@default 2
Pixels between scanlines.
@param line_opacity float
@range 0 1
@default 0.3
Darkness of scanlines.
@param rgb_subpixels bool
@default false
Show RGB subpixel pattern.
@param curvature float
@range 0 0.5
@default 0
Barrel distortion amount for curved screen look.
@param vignette float
@range 0 1
@default 0
Dark corners effect.
@param bloom float
@range 0 1
@default 0
Glow/blur on bright areas.
@param flicker float
@range 0 0.3
@default 0
Brightness variation.
@param seed int
@default 42
Random seed for flicker.
@state rng DeterministicRNG
Random number generator for flicker.
@example
(effect crt :line_spacing 2 :line_opacity 0.4)
@example
;; Full retro CRT look
(effect crt :curvature 0.2 :vignette 0.3 :rgb_subpixels true :bloom 0.2)
"""
import numpy as np
import cv2
from pathlib import Path
import sys
# Import DeterministicRNG from same directory
_effects_dir = Path(__file__).parent
if str(_effects_dir) not in sys.path:
sys.path.insert(0, str(_effects_dir))
from random import DeterministicRNG
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply CRT effect to a video frame: scanlines, optional RGB subpixel
    triads, barrel curvature, vignette, bloom and brightness flicker.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - line_spacing: pixels between scanlines (default 2, min 1)
            - line_opacity: darkness of scanlines 0-1 (default 0.3)
            - rgb_subpixels: show RGB subpixel pattern (default False)
            - curvature: barrel distortion amount (default 0)
            - vignette: dark-corner strength 0-1 (default 0)
            - bloom: glow on bright areas 0-1 (default 0)
            - flicker: brightness variation 0-0.3 (default 0)
            - seed: RNG seed for flicker (default 42)
        state: Persistent state dict
            - rng: DeterministicRNG (created on first call)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    line_spacing = max(1, int(params.get("line_spacing", 2)))
    line_opacity = params.get("line_opacity", 0.3)
    rgb_subpixels = params.get("rgb_subpixels", False)
    curvature = params.get("curvature", 0)
    vignette = params.get("vignette", 0)
    bloom = params.get("bloom", 0)
    flicker = params.get("flicker", 0)
    seed = int(params.get("seed", 42))
    if state is None:
        state = {}
    # Initialize RNG once; persists so flicker is deterministic per seed.
    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)
    rng = state["rng"]
    h, w = frame.shape[:2]
    result = frame.astype(np.float32).copy()
    # Apply barrel distortion (curvature)
    if curvature > 0:
        result = _apply_curvature(result, curvature)
    # Apply bloom (glow on bright areas)
    if bloom > 0:
        result = _apply_bloom(result, bloom)
    # Scanlines: darken every line_spacing-th row.
    # Vectorized with a strided slice (was a Python per-row loop).
    if line_opacity > 0:
        result[::line_spacing, :] *= (1 - line_opacity)
    # RGB subpixel triads: each column boosts one channel (x%3 picks which)
    # and attenuates the other two. Vectorized with strided column slices
    # (was a Python per-column loop - O(w) interpreter iterations).
    if rgb_subpixels:
        gains = ((1.2, 0.8, 0.8), (0.8, 1.2, 0.8), (0.8, 0.8, 1.2))
        for col_type in range(3):
            for c in range(3):
                result[:, col_type::3, c] *= gains[col_type][c]
    # Apply vignette: darken proportionally to distance from center.
    if vignette > 0:
        y_coords, x_coords = np.ogrid[:h, :w]
        center_x, center_y = w / 2, h / 2
        dist = np.sqrt((x_coords - center_x)**2 + (y_coords - center_y)**2)
        max_dist = np.sqrt(center_x**2 + center_y**2)
        vignette_mask = 1 - (dist / max_dist) * vignette
        vignette_mask = np.clip(vignette_mask, 0, 1)
        result = result * vignette_mask[:, :, np.newaxis]
    # Apply flicker: uniform random brightness wobble per frame.
    if flicker > 0:
        flicker_amount = 1.0 + rng.uniform(-flicker, flicker)
        result = result * flicker_amount
    return np.clip(result, 0, 255).astype(np.uint8), state
def _apply_curvature(frame: np.ndarray, strength: float) -> np.ndarray:
    """Warp the frame with barrel distortion to mimic a curved CRT screen."""
    h, w = frame.shape[:2]
    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    half_w, half_h = w / 2, h / 2
    # Normalized coordinates in [-1, 1] around the screen center.
    u = (xs - half_w) / half_w
    v = (ys - half_h) / half_h
    radius = np.sqrt(u**2 + v**2)
    # Push points outward proportionally to r^3 (classic barrel model).
    warped = radius * (1 + strength * radius**2)
    factor = np.where(radius > 0, warped / radius, 1)
    # Back to pixel space as float32 sampling maps for cv2.remap.
    src_x = (u * factor * half_w + half_w).astype(np.float32)
    src_y = (v * factor * half_h + half_h).astype(np.float32)
    remapped = cv2.remap(frame.astype(np.uint8), src_x, src_y,
                         cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT,
                         borderValue=(0, 0, 0))
    return remapped.astype(np.float32)
def _apply_bloom(frame: np.ndarray, strength: float) -> np.ndarray:
    """Add a soft glow around pixels brighter than the 200/255 threshold."""
    luma = cv2.cvtColor(frame.astype(np.uint8), cv2.COLOR_RGB2GRAY)
    _, highlights = cv2.threshold(luma, 200, 255, cv2.THRESH_BINARY)
    # Blur the highlight mask into a halo, then add it back scaled.
    glow = cv2.GaussianBlur(highlights, (21, 21), 0)
    glow_rgb = cv2.cvtColor(glow, cv2.COLOR_GRAY2RGB)
    return frame + glow_rgb.astype(np.float32) * strength * 0.5

View File

@@ -1,21 +0,0 @@
;; CRT effect - old TV/monitor look with scanlines and vignette
;; Usage: python3 plan.py effects/crt.sexp -p line_spacing=3 -p vignette_amount=0.3 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; line_spacing: pixels between scanlines (1 to 8, default: 3)
;; vignette_amount: edge darkening strength (0 to 1, default: 0.3)
(recipe "crt"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect crt :path "sexp_effects/effects/crt.sexp")
  ;; Default parameters (overridden by -p line_spacing=N -p vignette_amount=N)
  (def line_spacing 3)
  (def vignette_amount 0.3)
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  ;; NOTE(review): the legacy Python CRT effect read this parameter as
  ;; "vignette", not "vignette_amount" - confirm sexp_effects/effects/crt.sexp
  ;; accepts :vignette_amount, otherwise the value is silently ignored.
  (def result (-> clip (effect crt :line_spacing line_spacing :vignette_amount vignette_amount)))
  result)

View File

@@ -1,141 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect datamosh
@version 1.0.0
@author artdag
@description
Digital corruption / glitch block effect. Randomly corrupts rectangular
blocks by shifting, swapping, or duplicating from previous frames.
Simulates video compression artifacts.
@param block_size int
@range 8 128
@default 32
Size of corruption blocks in pixels.
@param corruption float
@range 0 1
@default 0.3
Probability of corrupting each block. Bind to energy for reactive glitch.
@param max_offset int
@range 0 200
@default 50
Maximum pixel offset when shifting blocks.
@param color_corrupt bool
@default true
Also apply color channel shifts to blocks.
@param seed int
@default 42
Random seed for deterministic glitch patterns.
@state previous_frame ndarray
Stores previous frame for frame-blending corruption.
@state rng DeterministicRNG
Random number generator for reproducible results.
@example
(effect datamosh :corruption 0.4)
@example
;; Heavy glitch on energy peaks
(effect datamosh :corruption (bind energy :range [0 0.8]) :block_size 16)
@example
;; Reproducible glitch with seed
(effect datamosh :corruption 0.5 :seed 12345)
"""
import numpy as np
from pathlib import Path
import sys
# Import DeterministicRNG from same directory
_effects_dir = Path(__file__).parent
if str(_effects_dir) not in sys.path:
sys.path.insert(0, str(_effects_dir))
from random import DeterministicRNG
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply datamosh/glitch block effect to a video frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - block_size: corruption block size, clamped 8-128 (default 32)
            - corruption: per-block corruption probability 0-1 (default 0.3)
            - max_offset: max shift in pixels (default 50)
            - color_corrupt: apply color channel shifts (default True)
            - seed: RNG seed for reproducible glitch patterns (default 42)
        state: Persistent state dict
            - previous_frame: last frame, used by the "duplicate" corruption
            - rng: DeterministicRNG instance (created on first call)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    block_size = max(8, min(int(params.get("block_size", 32)), 128))
    corruption = max(0, min(params.get("corruption", 0.3), 1))
    max_offset = int(params.get("max_offset", 50))
    color_corrupt = params.get("color_corrupt", True)
    seed = int(params.get("seed", 42))
    if state is None:
        state = {}
    # Initialize RNG once; it persists across frames so glitch sequences
    # stay deterministic for a given seed.
    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)
    rng = state["rng"]
    if corruption == 0:
        # Nothing to corrupt, but still remember this frame for "duplicate".
        state["previous_frame"] = frame.copy()
        return frame, state
    h, w = frame.shape[:2]
    result = frame.copy()
    prev_frame = state.get("previous_frame")
    # Process blocks in raster order; the RNG call order is part of the
    # deterministic output, so do not reorder these draws.
    for by in range(0, h, block_size):
        for bx in range(0, w, block_size):
            # Edge blocks may be smaller than block_size.
            bh = min(block_size, h - by)
            bw = min(block_size, w - bx)
            if rng.uniform() < corruption:
                corruption_type = rng.choice(["shift", "duplicate", "color", "swap"])
                if corruption_type == "shift" and max_offset > 0:
                    # Copy the block from a randomly offset (clamped) source.
                    ox = rng.randint(-max_offset, max_offset)
                    oy = rng.randint(-max_offset, max_offset)
                    src_x = max(0, min(bx + ox, w - bw))
                    src_y = max(0, min(by + oy, h - bh))
                    result[by:by+bh, bx:bx+bw] = frame[src_y:src_y+bh, src_x:src_x+bw]
                elif corruption_type == "duplicate" and prev_frame is not None:
                    # Re-use the same block from the previous frame (mosh look).
                    if prev_frame.shape == frame.shape:
                        result[by:by+bh, bx:bx+bw] = prev_frame[by:by+bh, bx:bx+bw]
                elif corruption_type == "color" and color_corrupt:
                    # Roll one color channel vertically inside the block.
                    block = result[by:by+bh, bx:bx+bw].copy()
                    shift = rng.randint(1, 3)
                    channel = rng.randint(0, 2)
                    block[:, :, channel] = np.roll(block[:, :, channel], shift, axis=0)
                    result[by:by+bh, bx:bx+bw] = block
                elif corruption_type == "swap":
                    # Exchange this block with a random same-sized block.
                    other_bx = rng.randint(0, max(0, w - bw - 1))
                    other_by = rng.randint(0, max(0, h - bh - 1))
                    temp = result[by:by+bh, bx:bx+bw].copy()
                    result[by:by+bh, bx:bx+bw] = frame[other_by:other_by+bh, other_bx:other_bx+bw]
                    result[other_by:other_by+bh, other_bx:other_bx+bw] = temp
    state["previous_frame"] = frame.copy()
    return result, state

View File

@@ -1,21 +0,0 @@
;; Datamosh effect - glitch art block corruption
;; Usage: python3 plan.py effects/datamosh.sexp -p corruption=0.5 -p block_size=24 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; corruption: corruption intensity (0.1 to 1.0, default: 0.5)
;; block_size: size of corrupted blocks (8 to 64, default: 24)
(recipe "datamosh"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect datamosh :path "sexp_effects/effects/datamosh.sexp")
  ;; Default parameters (overridden by -p corruption=N -p block_size=N)
  (def corruption 0.5)
  (def block_size 24)
  ;; 10-second clip from the default source video.
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect datamosh :corruption corruption :block_size block_size)))
  result)

View File

@@ -1,170 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect displacement
@version 1.0.0
@author artdag
@description
Displacement effect. Warps the image based on a pattern (sine waves,
noise, or radial). Creates flowing, liquid-like distortions.
@param amount float
@range 0 100
@default 20
Displacement strength in pixels.
@param pattern string
@enum sine noise radial turbulence
@default sine
Displacement pattern type:
- sine: smooth sine wave
- noise: random displacement
- radial: swirl from center
- turbulence: multi-scale noise
@param frequency float
@range 1 50
@default 10
Pattern frequency (waves per frame width).
@param speed float
@range 0 10
@default 1
Animation speed.
@param direction string
@enum horizontal vertical both
@default both
Displacement direction.
@param seed int
@default 42
Random seed for noise patterns.
@state rng DeterministicRNG
Random number generator.
@example
(effect displacement :amount 30 :pattern "sine" :frequency 5)
@example
;; Reactive turbulence
(effect displacement :amount (bind energy :range [10 50]) :pattern "turbulence")
"""
import numpy as np
import cv2
from pathlib import Path
import sys
# Import DeterministicRNG from same directory
_effects_dir = Path(__file__).parent
if str(_effects_dir) not in sys.path:
sys.path.insert(0, str(_effects_dir))
from random import DeterministicRNG
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply displacement effect to a video frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - amount: displacement strength in pixels (default 20)
            - pattern: "sine" | "noise" | "radial" | "turbulence" (default "sine")
            - frequency: pattern frequency (default 10, min 1)
            - speed: animation speed (default 1)
            - direction: "horizontal" | "vertical" | "both" (default "both")
            - seed: RNG seed for noise patterns (default 42)
            - _time: current time in seconds (injected by the pipeline)
        state: Persistent state dict
            - rng: DeterministicRNG (created on first call)
            - noise_x / noise_y / noise_size: cached noise field ("noise" pattern)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    amount = params.get("amount", 20)
    pattern = params.get("pattern", "sine")
    frequency = max(1, params.get("frequency", 10))
    speed = params.get("speed", 1)
    direction = params.get("direction", "both")
    seed = int(params.get("seed", 42))
    t = params.get("_time", 0)
    if state is None:
        state = {}
    if amount == 0:
        # No displacement requested: pass the frame through untouched.
        return frame, state
    # Initialize RNG once; persists across frames for deterministic noise.
    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)
    h, w = frame.shape[:2]
    # Create base coordinate maps (identity mapping before displacement).
    map_x = np.tile(np.arange(w, dtype=np.float32), (h, 1))
    map_y = np.tile(np.arange(h, dtype=np.float32).reshape(-1, 1), (1, w))
    # Generate displacement based on pattern
    if pattern == "sine":
        # Smooth sine-wave displacement animated by time.
        phase = t * speed * 2 * np.pi
        if direction in ["horizontal", "both"]:
            map_x = map_x + amount * np.sin(2 * np.pi * map_y / h * frequency + phase)
        if direction in ["vertical", "both"]:
            # Note: uses the already-displaced map_x, so "both" couples axes.
            map_y = map_y + amount * np.sin(2 * np.pi * map_x / w * frequency + phase)
    elif pattern == "noise":
        # Random per-pixel displacement; cached in state so it is static
        # across frames (regenerated only when the frame size changes).
        rng = state["rng"]
        if "noise_x" not in state or state.get("noise_size") != (h, w):
            state["noise_x"] = np.array([[rng.uniform(-1, 1) for _ in range(w)] for _ in range(h)], dtype=np.float32)
            state["noise_y"] = np.array([[rng.uniform(-1, 1) for _ in range(w)] for _ in range(h)], dtype=np.float32)
            state["noise_size"] = (h, w)
        if direction in ["horizontal", "both"]:
            map_x = map_x + amount * state["noise_x"]
        if direction in ["vertical", "both"]:
            map_y = map_y + amount * state["noise_y"]
    elif pattern == "radial":
        # Radial/swirl displacement around the frame center.
        cx, cy = w / 2, h / 2
        y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32)
        dx = x_coords - cx
        dy = y_coords - cy
        dist = np.sqrt(dx**2 + dy**2) + 1e-6  # avoid division by zero at center
        angle = np.arctan2(dy, dx)
        # Swirl amount varies with distance and time
        swirl = amount * 0.01 * np.sin(dist / (w / frequency) + t * speed * 2 * np.pi)
        new_angle = angle + swirl
        if direction in ["horizontal", "both"]:
            map_x = cx + dist * np.cos(new_angle)
        if direction in ["vertical", "both"]:
            map_y = cy + dist * np.sin(new_angle)
    elif pattern == "turbulence":
        # Multi-scale (octave) noise summed at scales 1, 2, 4.
        # NOTE(review): unlike "noise", this field is not cached, so it
        # draws fresh RNG values every frame - confirm that is intentional.
        rng = state["rng"]
        disp_x = np.zeros((h, w), dtype=np.float32)
        disp_y = np.zeros((h, w), dtype=np.float32)
        for scale in [1, 2, 4]:
            sh, sw = h // scale, w // scale
            noise_x = np.array([[rng.uniform(-1, 1) for _ in range(sw)] for _ in range(sh)], dtype=np.float32)
            noise_y = np.array([[rng.uniform(-1, 1) for _ in range(sw)] for _ in range(sh)], dtype=np.float32)
            if scale > 1:
                noise_x = cv2.resize(noise_x, (w, h))
                noise_y = cv2.resize(noise_y, (w, h))
            disp_x += noise_x / scale
            disp_y += noise_y / scale
        if direction in ["horizontal", "both"]:
            map_x = map_x + amount * disp_x
        if direction in ["vertical", "both"]:
            map_y = map_y + amount * disp_y
    # Apply remapping (reflect at borders to avoid black fringes).
    result = cv2.remap(frame, map_x, map_y, cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)
    return result, state

View File

@@ -1,139 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect echo
@version 1.0.0
@author artdag
@description
Motion trail / echo effect. Blends current frame with previous frames
to create ghosting/trailing effects. Great for fast movement scenes.
Uses a frame buffer in state to store recent frames for blending.
@param num_echoes int
@range 1 20
@default 4
Number of trailing frames to blend.
@param decay float
@range 0 1
@default 0.5
Opacity ratio between successive echoes. 0.5 = each echo half as bright.
@param blend_mode string
@enum blend add screen maximum
@default blend
How to combine echoes:
- blend: weighted average
- add: sum (can overexpose)
- screen: like add but resists overexposure
- maximum: brightest pixel wins
@state frame_buffer list
Circular buffer of recent frames.
@example
(effect echo :num_echoes 6 :decay 0.6)
@example
;; More echoes on energy
(effect echo :num_echoes (bind energy :range [2 10]))
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """Blend the current frame with recent ones to create motion trails.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8.
        params: Effect parameters - num_echoes (clamped 1-20, default 4),
            decay (clamped 0-1, default 0.5), blend_mode
            ("blend" | "add" | "screen" | "maximum", default "blend").
        state: Persistent state dict; holds "frame_buffer", a list of
            recent frames ordered oldest to newest.

    Returns:
        Tuple of (trailed_frame, state).
    """
    echoes = max(1, min(int(params.get("num_echoes", 4)), 20))
    decay = max(0, min(params.get("decay", 0.5), 1))
    mode = params.get("blend_mode", "blend")
    if state is None:
        state = {}
    buf = state.setdefault("frame_buffer", [])
    buf.append(frame.copy())
    # Retain a few frames beyond what we blend so the echo count can grow.
    cap = echoes + 5
    while len(buf) > cap:
        buf.pop(0)
    # Walk backwards from the newest frame, fading each step by `decay`.
    layers = []
    weights = []
    weight = 1.0
    newest = len(buf) - 1
    for back in range(min(echoes + 1, len(buf))):
        layers.append(buf[newest - back].astype(np.float32))
        weights.append(weight)
        weight *= decay
    if not layers:
        return frame, state
    blended = _blend_frames(layers, weights, mode)
    return np.clip(blended, 0, 255).astype(np.uint8), state
def _blend_frames(frames, intensities, blend_mode):
"""Blend multiple frames according to blend mode."""
if not frames:
return frames[0]
if blend_mode == "add":
result = np.zeros_like(frames[0])
for frame, intensity in zip(frames, intensities):
result += frame * intensity
return result
elif blend_mode == "screen":
result = np.zeros_like(frames[0])
for frame, intensity in zip(frames, intensities):
weighted = (frame / 255.0) * intensity
result = 255 * (1 - (1 - result / 255.0) * (1 - weighted))
return result
elif blend_mode == "maximum":
result = frames[0] * intensities[0]
for frame, intensity in zip(frames[1:], intensities[1:]):
result = np.maximum(result, frame * intensity)
return result
else: # blend - weighted average
total = sum(intensities)
if total == 0:
return frames[0]
result = np.zeros_like(frames[0])
for frame, intensity in zip(frames, intensities):
result += frame * (intensity / total)
return result

View File

@@ -1,21 +0,0 @@
;; Echo effect - temporal ghosting/trails
;; Usage: python3 plan.py effects/echo.sexp -p num_echoes=4 -p decay=0.5 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; num_echoes: number of ghost frames (1 to 10, default: 4)
;; decay: fade rate per echo (0.2 to 0.9, default: 0.5)
(recipe "echo"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect echo :path "sexp_effects/effects/echo.sexp")
  ;; Default parameters (overridden by -p num_echoes=N -p decay=N)
  (def num_echoes 4)
  (def decay 0.5)
  ;; 10-second clip from the default source video.
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect echo :num_echoes num_echoes :decay decay)))
  result)

View File

@@ -1,22 +0,0 @@
;; Edge detect effect - Canny edge detection
;; Usage: python3 plan.py effects/edge_detect.sexp -p low=50 -p high=150 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; low: lower threshold (10 to 100, default: 50)
;; high: upper threshold (100 to 300, default: 150)
;; edges between low-high are kept if connected to strong edges
(recipe "edge_detect"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect edge_detect :path "sexp_effects/effects/edge_detect.sexp")
  ;; Canny hysteresis thresholds (overridden by -p low=N -p high=N);
  ;; edges between low and high survive only if connected to strong edges.
  (def low 50)
  (def high 150)
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect edge_detect :low low :high high)))
  result)

View File

@@ -1,88 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect emboss
@version 1.0.0
@author artdag
@description
Emboss / relief effect. Creates a 3D raised appearance by highlighting
edges from a simulated light direction. Great for sculptural looks.
@param strength float
@range 0.5 3
@default 1.0
Emboss intensity.
@param direction float
@range 0 360
@default 135
Light direction in degrees. Bind to beat for rotating light.
@param blend float
@range 0 1
@default 0.3
Blend with original (0 = full emboss, 1 = original).
@example
(effect emboss :strength 1.5)
@example
;; Rotating light direction
(effect emboss :direction (bind beat_position :range [0 360]))
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply emboss effect to a video frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - strength: emboss intensity (default 1.0)
            - direction: light angle in degrees (default 135)
            - blend: mix with original, 0 = full emboss, 1 = original (default 0.3)
        state: Persistent state dict (unused, passed through)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    strength = params.get("strength", 1.0)
    direction = params.get("direction", 135)
    blend = params.get("blend", 0.3)
    # Fix: initialize state like the sibling effects do, so callers always
    # get a dict back instead of the None they may have passed in.
    if state is None:
        state = {}
    # Build a directional 3x3 emboss kernel from the light angle.
    angle_rad = np.deg2rad(direction)
    dx = np.cos(angle_rad)
    dy = np.sin(angle_rad)
    kernel = np.array([
        [-strength * dy - strength * dx, -strength * dy, -strength * dy + strength * dx],
        [-strength * dx, 1, strength * dx],
        [strength * dy - strength * dx, strength * dy, strength * dy + strength * dx]
    ], dtype=np.float32)
    # Emboss operates on luminance, then is re-expanded to RGB.
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY).astype(np.float32)
    embossed = cv2.filter2D(gray, -1, kernel)
    # Re-center around mid-gray so flat areas come out neutral.
    embossed = np.clip(embossed + 128, 0, 255)
    embossed_rgb = cv2.cvtColor(embossed.astype(np.uint8), cv2.COLOR_GRAY2RGB)
    # Blend with original: blend=1 keeps the source, blend=0 is pure emboss.
    if blend > 0:
        result = frame.astype(np.float32) * blend + embossed_rgb.astype(np.float32) * (1 - blend)
        return np.clip(result, 0, 255).astype(np.uint8), state
    return embossed_rgb, state

View File

@@ -1,19 +0,0 @@
;; Emboss effect - 3D raised surface look
;; Usage: python3 plan.py effects/emboss.sexp -p strength=1.5 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; strength: emboss intensity (0.5 to 3.0, default: 1.5)
(recipe "emboss"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect emboss :path "sexp_effects/effects/emboss.sexp")
  ;; Default parameter (overridden by -p strength=N)
  (def strength 1.5)
  ;; 10-second clip from the default source video.
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect emboss :strength strength)))
  result)

View File

@@ -1,148 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect film_grain
@version 1.0.0
@author artdag
@description
Film grain / Noise effect. Adds realistic film grain texture.
Great for vintage aesthetics and subtle texture.
@param intensity float
@range 0 1
@default 0.2
Noise intensity. Bind to energy for reactive grain.
@param grain_size float
@range 0.5 5
@default 1.0
Size of grain particles. Larger = coarser grain.
@param colored bool
@default false
Use colored noise instead of monochrome.
@param temporal_variation float
@range 0 1
@default 1.0
How much grain changes frame-to-frame. 0 = static, 1 = full animation.
@param seed int
@default 42
Random seed for reproducible grain.
@state rng DeterministicRNG
Random number generator for consistent grain.
@example
(effect film_grain :intensity 0.3)
@example
;; Colored grain, reactive to energy
(effect film_grain :intensity (bind energy :range [0.1 0.5]) :colored true)
"""
import numpy as np
import cv2
from pathlib import Path
import sys
# Import DeterministicRNG from same directory
_effects_dir = Path(__file__).parent
if str(_effects_dir) not in sys.path:
sys.path.insert(0, str(_effects_dir))
from random import DeterministicRNG
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply film grain effect to a video frame.

    Grain is drawn from a DeterministicRNG so renders are reproducible for a
    given seed. The per-pixel Python-level sampling is deliberate: the grain
    pattern depends on the exact RNG draw order, so it must not be vectorized.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - intensity: noise intensity 0-1 (default 0.2)
            - grain_size: grain particle size (default 1.0)
            - colored: use colored noise (default False)
            - temporal_variation: grain animation 0-1 (default 1.0)
            - seed: random seed (default 42)
        state: Persistent state dict
            - rng: DeterministicRNG instance
            - frame_count: frame counter
            - static_noise: cached noise field when temporal_variation < 1
    Returns:
        Tuple of (processed_frame, new_state)
    """
    intensity = params.get("intensity", 0.2)
    # Clamp to avoid division blow-ups / sub-half-pixel grain below.
    grain_size = max(0.5, params.get("grain_size", 1.0))
    colored = params.get("colored", False)
    temporal_var = params.get("temporal_variation", 1.0)
    seed = int(params.get("seed", 42))
    if state is None:
        state = {}
    if intensity <= 0:
        # No-op fast path: zero intensity leaves the frame untouched.
        return frame, state
    # Initialize RNG lazily on the first frame so the seed is honored.
    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)
        state["frame_count"] = 0
        state["static_noise"] = None
    rng = state["rng"]
    frame_count = state["frame_count"]
    h, w = frame.shape[:2]
    # Coarse grain is generated at reduced resolution and upscaled later.
    if grain_size > 1:
        noise_h = max(1, int(h / grain_size))
        noise_w = max(1, int(w / grain_size))
    else:
        noise_h, noise_w = h, w
    # Generate noise based on temporal variation
    if temporal_var >= 1.0 or state.get("static_noise") is None:
        # Generate new noise
        if colored:
            noise = np.array([[[rng.gaussian(0, 1) for _ in range(3)]
                               for _ in range(noise_w)]
                              for _ in range(noise_h)]) * intensity * 50
        else:
            # Monochrome: one 2-D field replicated across all channels.
            noise_2d = np.array([[rng.gaussian(0, 1)
                                  for _ in range(noise_w)]
                                 for _ in range(noise_h)]) * intensity * 50
            noise = np.stack([noise_2d, noise_2d, noise_2d], axis=-1)
        if temporal_var < 1.0:
            # Cache so later frames can blend toward this static field.
            state["static_noise"] = noise
    else:
        # Blend static and new noise
        static = state["static_noise"]
        if colored:
            new_noise = np.array([[[rng.gaussian(0, 1) for _ in range(3)]
                                   for _ in range(noise_w)]
                                  for _ in range(noise_h)]) * intensity * 50
        else:
            noise_2d = np.array([[rng.gaussian(0, 1)
                                  for _ in range(noise_w)]
                                 for _ in range(noise_h)]) * intensity * 50
            new_noise = np.stack([noise_2d, noise_2d, noise_2d], axis=-1)
        # temporal_var = 0 keeps the cached field; values near 1 mostly renew it.
        noise = static * (1 - temporal_var) + new_noise * temporal_var
    # Scale noise up if using larger grain
    if grain_size > 1:
        noise = cv2.resize(noise.astype(np.float32), (w, h), interpolation=cv2.INTER_LINEAR)
    # Add noise to frame
    result = frame.astype(np.float32) + noise
    state["frame_count"] = frame_count + 1
    return np.clip(result, 0, 255).astype(np.uint8), state

View File

@@ -1,19 +0,0 @@
;; Film grain effect - analog film noise
;; Usage: python3 plan.py effects/film_grain.sexp -p intensity=0.25 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   intensity: grain amount (0.05 to 0.5, default: 0.25)
(recipe "film_grain"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect film_grain :path "sexp_effects/effects/film_grain.sexp")
  ;; Default parameter (overridden by -p intensity=N)
  (def intensity 0.25)
  ;; Source clip: first 10 seconds of monday.webm.
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect film_grain :intensity intensity)))
  ;; Last expression is the recipe's rendered output.
  result)

View File

@@ -1,116 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect fisheye
@version 1.0.0
@author artdag
@description
Barrel/fisheye lens distortion. Positive values bulge outward (fisheye),
negative values pinch inward (pincushion). Great for emphasis effects.
@param strength float
@range -1 1
@default 0.3
Distortion strength. Positive = fisheye bulge, negative = pincushion pinch.
@param center_x float
@range 0 1
@default 0.5
Horizontal center of distortion (0 = left, 1 = right).
@param center_y float
@range 0 1
@default 0.5
Vertical center of distortion (0 = top, 1 = bottom).
@param zoom_correction bool
@default true
Automatically zoom to hide black edges on fisheye.
@example
(effect fisheye :strength 0.5)
@example
;; Pulse fisheye on bass
(effect fisheye :strength (bind bass :range [0 0.8] :transform sqrt))
@example
;; Pincushion effect
(effect fisheye :strength -0.3)
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply fisheye/barrel distortion to a video frame.

    Positive strength bulges outward (fisheye); negative strength pinches
    inward (pincushion). The remap is computed in normalized polar
    coordinates around a configurable center.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - strength: distortion strength -1 to 1 (default 0.3)
            - center_x: horizontal center 0-1 (default 0.5)
            - center_y: vertical center 0-1 (default 0.5)
            - zoom_correction: auto-zoom for fisheye (default True)
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    strength = params.get("strength", 0.3)
    if strength == 0:
        # Identity: no distortion requested.
        return frame, state

    cx_frac = params.get("center_x", 0.5)
    cy_frac = params.get("center_y", 0.5)
    correct_zoom = params.get("zoom_correction", True)

    h, w = frame.shape[:2]
    half_w = w / 2
    half_h = h / 2
    cx = w * cx_frac
    cy = h * cy_frac

    # Pixel grid normalized so the frame half-extent maps to 1.0.
    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    nx = (xs - cx) / half_w
    ny = (ys - cy) / half_h
    r = np.sqrt(nx**2 + ny**2)

    # Radial remap: barrel pushes samples outward, pincushion pulls inward.
    if strength > 0:
        r_new = r * (1 + strength * r**2)
    else:
        # Small epsilon guards against division by zero at extreme radii.
        r_new = r / (1 - strength * r**2 + 0.001)

    ratio = np.where(r > 0, r_new / r, 1)

    # Zoom in slightly so the bulge does not expose black borders.
    if correct_zoom and strength > 0:
        ratio = ratio / (1 + strength * 0.5)

    map_x = (nx * ratio * half_w + cx).astype(np.float32)
    map_y = (ny * ratio * half_h + cy).astype(np.float32)

    # Reflect at the borders to hide any out-of-range samples.
    warped = cv2.remap(
        frame, map_x, map_y,
        cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_REFLECT
    )
    return warped, state

View File

@@ -1,20 +0,0 @@
;; Fisheye effect - barrel/pincushion distortion
;; Usage: python3 plan.py effects/fisheye.sexp -p strength=0.5 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   strength: distortion amount (-1.0 to 1.0, default: 0.5)
;;             positive = barrel (bulge), negative = pincushion
(recipe "fisheye"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect fisheye :path "sexp_effects/effects/fisheye.sexp")
  ;; Default parameter (overridden by -p strength=N)
  (def strength 0.5)
  ;; Source clip: first 10 seconds of monday.webm.
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect fisheye :strength strength)))
  ;; Last expression is the recipe's rendered output.
  result)

View File

@@ -1,64 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect flip
@version 1.0.0
@author artdag
@description
Flips the frame horizontally and/or vertically.
Can be used for mirror effects or beat-triggered flips.
@param horizontal bool
@default false
Flip horizontally (left-right mirror).
@param vertical bool
@default false
Flip vertically (top-bottom mirror).
@example
(effect flip :horizontal true)
@example
(effect flip :vertical true)
@example
(effect flip :horizontal true :vertical true) ; 180 degree rotation
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Mirror a frame across one or both axes.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - horizontal: flip left-right (default False)
            - vertical: flip top-bottom (default False)
        state: Persistent state dict (passed through unchanged)

    Returns:
        Tuple of (processed_frame, state)
    """
    flip_h = params.get("horizontal", False)
    flip_v = params.get("vertical", False)

    # Collect axes to flip: axis 1 = columns (horizontal), axis 0 = rows.
    axes = []
    if flip_h:
        axes.append(1)
    if flip_v:
        axes.append(0)

    if not axes:
        # Nothing to do: return the original frame object untouched.
        return frame, state

    # np.flip returns a view; force a contiguous copy for downstream code.
    return np.ascontiguousarray(np.flip(frame, axis=tuple(axes))), state

View File

@@ -1,19 +0,0 @@
;; Flip effect - flip image horizontally or vertically
;; Usage: python3 plan.py effects/flip.sexp -p direction=horizontal | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   direction: flip axis ("horizontal" or "vertical", default: "horizontal")
(recipe "flip"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect flip :path "sexp_effects/effects/flip.sexp")
  ;; Default parameter (overridden by -p direction=horizontal|vertical)
  (def direction "horizontal")
  ;; Source clip: first 10 seconds of monday.webm.
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect flip :direction direction)))
  ;; Last expression is the recipe's rendered output.
  result)

View File

@@ -1,14 +0,0 @@
;; Grayscale effect - converts to black and white
;; Usage: python3 plan.py effects/grayscale.sexp | python3 execute.py - -d . -o output.mp4
;; No parameters
(recipe "grayscale"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect grayscale :path "sexp_effects/effects/grayscale.sexp")
  ;; Source clip: first 10 seconds of monday.webm.
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect grayscale)))
  ;; Last expression is the recipe's rendered output.
  result)

View File

@@ -1,59 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect hue_shift
@version 1.0.0
@author artdag
@description
Rotates the hue of all colors by a specified amount (0-360 degrees).
Creates rainbow cycling effects when animated over time.
@param shift float
@range 0 360
@default 0
Hue rotation in degrees. 0/360 = unchanged, 180 = complementary colors.
@example
(effect hue_shift :shift 180) ; complementary colors
@example
;; Rainbow cycling based on time (use with frame counter in state)
(effect hue_shift :shift (bind beat_position :range [0 360]))
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Rotate the hue channel of a frame by a given number of degrees.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - shift: hue rotation in degrees (default 0)
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    degrees = params.get("shift", 0)
    if degrees == 0:
        # Identity: no rotation requested.
        return frame, state

    # OpenCV stores hue as 0-179 (degrees / 2), so halve the rotation.
    offset = int((degrees % 360) / 2)

    hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
    # Widen to int16 so the addition cannot wrap before the modulo.
    rotated = (hsv[:, :, 0].astype(np.int16) + offset) % 180
    hsv[:, :, 0] = rotated
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB), state

View File

@@ -1,20 +0,0 @@
;; Hue shift effect - rotate colors around color wheel
;; Usage: python3 plan.py effects/hue_shift.sexp -p degrees=90 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   degrees: hue rotation in degrees (0 to 360, default: 90)
;;            90 = quarter rotation, 180 = complementary colors
(recipe "hue_shift"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect hue_shift :path "sexp_effects/effects/hue_shift.sexp")
  ;; Default parameter (overridden by -p degrees=N)
  (def degrees 90)
  ;; Source clip: first 10 seconds of monday.webm.
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect hue_shift :degrees degrees)))
  ;; Last expression is the recipe's rendered output.
  result)

View File

@@ -1,62 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect invert
@version 1.0.0
@author artdag
@description
Inverts the colors of each frame. Can be partial (blended with original)
based on intensity parameter. Useful for beat-reactive flashing effects.
@param intensity float
@range 0 100
@default 100
Intensity of the inversion effect (0 = original, 100 = fully inverted).
Bind to audio analysis for reactive effects.
@example
(effect invert)
@example
(effect invert :intensity 50)
@example
;; Beat-reactive inversion
(effect invert :intensity (bind bass :range [0 100] :transform sqrt))
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Invert the colors of a frame, optionally blended with the original.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - intensity: 0-100, how much to invert (default 100)
        state: Persistent state dict (unused for this effect)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    # Normalize intensity from percent to a 0-1 blend factor.
    t = params.get("intensity", 100) / 100.0

    if t <= 0:
        # Fully original: return the input untouched.
        return frame, state
    if t >= 1:
        # Fully inverted.
        return (255 - frame), state

    # Partial inversion: linear blend between original and inverted.
    blended = frame.astype(np.float64) * (1 - t) + (255 - frame) * t
    return blended.astype(np.uint8), state

View File

@@ -1,14 +0,0 @@
;; Invert effect - inverts all colors
;; Usage: python3 plan.py effects/invert.sexp | python3 execute.py - -d . -o output.mp4
;; No parameters
(recipe "invert"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect invert :path "sexp_effects/effects/invert.sexp")
  ;; Source clip: first 10 seconds of monday.webm.
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect invert)))
  ;; Last expression is the recipe's rendered output.
  result)

View File

@@ -1,137 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect kaleidoscope
@version 1.0.0
@author artdag
@description
Kaleidoscope effect. Creates mesmerizing mandala-like patterns by
dividing the frame into pie-slice segments and reflecting them.
Great for psychedelic visuals.
@param segments int
@range 3 16
@default 6
Number of symmetry segments.
@param rotation float
@range 0 360
@default 0
Base rotation angle in degrees.
@param rotation_speed float
@range -180 180
@default 0
Continuous rotation speed in degrees/second.
@param center_x float
@range 0 1
@default 0.5
Center X position (0-1).
@param center_y float
@range 0 1
@default 0.5
Center Y position (0-1).
@param zoom float
@range 0.5 3.0
@default 1.0
Zoom factor for the source region.
@state cumulative_rotation float
Tracks rotation over time.
@example
(effect kaleidoscope :segments 8 :rotation_speed 30)
@example
;; Beat-reactive segments
(effect kaleidoscope :segments (bind bass :range [4 12]) :zoom 1.5)
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply kaleidoscope effect to a video frame.

    Works in polar coordinates around a configurable center: each pixel's
    angle is folded into a single pie slice, alternating slices are
    mirrored, and the result is sampled back from the source frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - segments: number of segments 3-16 (default 6)
            - rotation: base rotation degrees (default 0)
            - rotation_speed: degrees per second (default 0)
            - center_x: center X 0-1 (default 0.5)
            - center_y: center Y 0-1 (default 0.5)
            - zoom: zoom factor 0.5-3 (default 1.0)
            - _time: current time in seconds (default 0) — presumably
              injected by the host pipeline; verify against the executor
        state: Persistent state dict
    Returns:
        Tuple of (processed_frame, new_state)
    """
    # Clamp parameters to their documented ranges.
    segments = max(3, min(int(params.get("segments", 6)), 16))
    rotation = params.get("rotation", 0)
    rotation_speed = params.get("rotation_speed", 0)
    center_x = params.get("center_x", 0.5)
    center_y = params.get("center_y", 0.5)
    zoom = max(0.5, min(params.get("zoom", 1.0), 3.0))
    # Get time for animation
    t = params.get("_time", 0)
    if state is None:
        state = {}
    h, w = frame.shape[:2]
    # Calculate center in pixels
    cx = int(w * center_x)
    cy = int(h * center_y)
    # Total rotation including time-based animation
    total_rotation = rotation + rotation_speed * t
    # Calculate the angle per segment
    segment_angle = 2 * np.pi / segments
    # Create coordinate maps
    y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32)
    # Translate to center
    x_centered = x_coords - cx
    y_centered = y_coords - cy
    # Convert to polar coordinates
    r = np.sqrt(x_centered**2 + y_centered**2)
    theta = np.arctan2(y_centered, x_centered)
    # Apply rotation
    theta = theta - np.deg2rad(total_rotation)
    # Fold angle into first segment and mirror
    theta_normalized = theta % (2 * np.pi)
    segment_idx = (theta_normalized / segment_angle).astype(int)
    theta_in_segment = theta_normalized - segment_idx * segment_angle
    # Mirror alternating segments so slice edges line up seamlessly.
    mirror_mask = (segment_idx % 2) == 1
    theta_in_segment = np.where(mirror_mask, segment_angle - theta_in_segment, theta_in_segment)
    # Apply zoom (dividing the radius samples a smaller source region).
    r = r / zoom
    # Convert back to Cartesian (source coordinates)
    src_x = (r * np.cos(theta_in_segment) + cx).astype(np.float32)
    src_y = (r * np.sin(theta_in_segment) + cy).astype(np.float32)
    # Remap with reflected borders so out-of-range samples are not black.
    result = cv2.remap(frame, src_x, src_y,
                       cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)
    return result, state

View File

@@ -1,21 +0,0 @@
;; Kaleidoscope effect - radial mirror segments
;; Usage: python3 plan.py effects/kaleidoscope.sexp -p segments=6 -p rotation_speed=30 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   segments: number of mirror segments (2 to 16, default: 6)
;;   rotation_speed: rotation in degrees per second (0 to 180, default: 30)
(recipe "kaleidoscope"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect kaleidoscope :path "sexp_effects/effects/kaleidoscope.sexp")
  ;; Default parameters (overridden by -p segments=N -p rotation_speed=N)
  (def segments 6)
  (def rotation_speed 30)
  ;; Source clip: first 10 seconds of monday.webm.
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect kaleidoscope :segments segments :rotation_speed rotation_speed)))
  ;; Last expression is the recipe's rendered output.
  result)

View File

@@ -1,86 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect mirror
@version 1.0.0
@author artdag
@description
Creates mirror/reflection effects by duplicating and flipping portions
of the frame. Can create kaleidoscope-like symmetry effects.
@param mode string
@enum left right top bottom quad
@default left
Mirror mode:
- left: Left half mirrored to right
- right: Right half mirrored to left
- top: Top half mirrored to bottom
- bottom: Bottom half mirrored to top
- quad: Four-way symmetry (kaleidoscope)
@example
(effect mirror :mode "left")
@example
(effect mirror :mode "quad") ; kaleidoscope effect
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Create mirror/reflection effects.

    Bug fix: for odd frame dimensions the original code assigned a
    half-sized reflection (width ``w // 2``) into a larger target slice
    (width ``w - w // 2``), raising a broadcast ValueError in the "left",
    "top", and "quad" modes. The reflection is now written into the far
    half of equal size, leaving the center row/column untouched when the
    dimension is odd. Even-dimension behavior is unchanged.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - mode: "left" | "right" | "top" | "bottom" | "quad"
              (default "left"); "quad" is four-way symmetry from the
              top-left quadrant
        state: Persistent state dict (unused, passed through)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    mode = params.get("mode", "left")
    h, w = frame.shape[:2]
    half_w = w // 2
    half_h = h // 2
    result = frame.copy()

    if mode == "left":
        # Reflect the left half onto the rightmost half_w columns.
        result[:, w - half_w:] = np.flip(result[:, :half_w], axis=1)
    elif mode == "right":
        # Reflect the right half onto the leftmost half_w columns.
        result[:, :half_w] = np.flip(result[:, w - half_w:], axis=1)
    elif mode == "top":
        # Reflect the top half onto the bottommost half_h rows.
        result[h - half_h:, :] = np.flip(result[:half_h, :], axis=0)
    elif mode == "bottom":
        # Reflect the bottom half onto the topmost half_h rows.
        result[:half_h, :] = np.flip(result[h - half_h:, :], axis=0)
    elif mode == "quad":
        # Four-way symmetry from the top-left quadrant:
        # mirror it right, then mirror the (now symmetric) top half down.
        tl = result[:half_h, :half_w]
        result[:half_h, w - half_w:] = np.flip(tl, axis=1)
        result[h - half_h:, :] = np.flip(result[:half_h, :], axis=0)

    return np.ascontiguousarray(result), state

View File

@@ -1,19 +0,0 @@
;; Mirror effect - reflect image
;; Usage: python3 plan.py effects/mirror.sexp -p direction=horizontal | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   direction: mirror axis ("horizontal" or "vertical", default: "horizontal")
(recipe "mirror"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect mirror :path "sexp_effects/effects/mirror.sexp")
  ;; Default parameter (overridden by -p direction=horizontal|vertical)
  (def direction "horizontal")
  ;; Source clip: first 10 seconds of monday.webm.
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect mirror :direction direction)))
  ;; Last expression is the recipe's rendered output.
  result)

View File

@@ -1,122 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect motion_blur
@version 1.0.0
@author artdag
@description
Motion blur effect. Creates directional blur simulating camera or
subject motion. Great for speed effects.
@param amount int
@range 1 100
@default 20
Blur length in pixels.
@param angle float
@range 0 360
@default 0
Blur direction in degrees (0 = horizontal right).
@param zoom bool
@default false
Radial zoom blur instead of directional.
@param zoom_center_x float
@range 0 1
@default 0.5
Zoom blur center X.
@param zoom_center_y float
@range 0 1
@default 0.5
Zoom blur center Y.
@example
(effect motion_blur :amount 30 :angle 45)
@example
;; Zoom blur
(effect motion_blur :amount 20 :zoom true)
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply motion blur effect to a video frame.

    Two modes: directional blur via a line-shaped convolution kernel, or
    radial zoom blur via averaging progressively scaled copies of the frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - amount: blur length (default 20)
            - angle: blur direction (default 0)
            - zoom: radial zoom blur (default False)
            - zoom_center_x/y: zoom center (default 0.5)
        state: Persistent state dict
    Returns:
        Tuple of (processed_frame, new_state)
    """
    amount = max(1, int(params.get("amount", 20)))
    angle = params.get("angle", 0)
    zoom = params.get("zoom", False)
    zoom_center_x = params.get("zoom_center_x", 0.5)
    zoom_center_y = params.get("zoom_center_y", 0.5)
    if state is None:
        state = {}
    if amount <= 1:
        # A 1-pixel blur is a no-op.
        return frame, state
    h, w = frame.shape[:2]
    if zoom:
        # Radial zoom blur
        cx, cy = int(w * zoom_center_x), int(h * zoom_center_y)
        # Accumulate multiple scaled versions
        result = np.zeros_like(frame, dtype=np.float32)
        # Cap the sample count so large `amount` stays affordable.
        num_samples = min(amount, 20)
        for i in range(num_samples):
            # Each sample zooms slightly more; `amount` controls max zoom.
            scale = 1.0 + (i / num_samples) * (amount / 100.0)
            M = cv2.getRotationMatrix2D((cx, cy), 0, scale)
            scaled = cv2.warpAffine(frame, M, (w, h), borderMode=cv2.BORDER_REFLECT)
            # Equal-weight average of all samples.
            result += scaled.astype(np.float32) / num_samples
        return result.astype(np.uint8), state
    else:
        # Directional motion blur using a line kernel
        angle_rad = np.deg2rad(angle)
        dx = np.cos(angle_rad)
        dy = np.sin(angle_rad)
        # Create motion blur kernel
        kernel_size = amount
        kernel = np.zeros((kernel_size, kernel_size), dtype=np.float32)
        # Draw a line in the kernel
        cx, cy = kernel_size // 2, kernel_size // 2
        for i in range(kernel_size):
            # t sweeps -1..+1 across the kernel along the blur direction.
            t = (i - cx) / max(1, cx)
            x = int(cx + t * cx * dx)
            y = int(cy + t * cy * dy)
            if 0 <= x < kernel_size and 0 <= y < kernel_size:
                kernel[y, x] = 1
        # Normalize kernel so overall brightness is preserved.
        kernel_sum = kernel.sum()
        if kernel_sum > 0:
            kernel /= kernel_sum
        # Apply kernel (-1 keeps the source depth).
        result = cv2.filter2D(frame, -1, kernel)
        return result, state

View File

@@ -1,130 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect neon_glow
@version 1.0.0
@author artdag
@description
Neon edge glow effect. Detects edges and applies a glowing colored outline.
Great for cyberpunk/synthwave aesthetics synced to music.
@param glow_radius float
@range 0 50
@default 15
Blur radius for the glow. Bind to bass for pulsing glow.
@param glow_intensity float
@range 0.5 5
@default 2.0
Brightness multiplier for the glow.
@param edge_low float
@range 10 200
@default 50
Lower threshold for edge detection.
@param edge_high float
@range 50 300
@default 150
Upper threshold for edge detection.
@param color_r int
@range 0 255
@default 0
Red component of glow color.
@param color_g int
@range 0 255
@default 255
Green component of glow color.
@param color_b int
@range 0 255
@default 255
Blue component of glow color.
@param background float
@range 0 1
@default 0.3
How much of the original image shows through (0 = glow only).
@example
(effect neon_glow :glow_radius 20 :color_r 255 :color_g 0 :color_b 255)
@example
;; Pulsing cyan glow on bass
(effect neon_glow :glow_radius (bind bass :range [5 30] :transform sqrt))
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply neon glow effect to a video frame.

    Pipeline: Canny edge detection -> tint edges with the glow color ->
    Gaussian blur for the halo -> re-add sharp edges -> screen-blend a
    dimmed copy of the original underneath.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - glow_radius: blur radius (default 15)
            - glow_intensity: brightness (default 2.0)
            - edge_low: canny low threshold (default 50)
            - edge_high: canny high threshold (default 150)
            - color_r/g/b: glow color (default cyan 0,255,255)
            - background: original visibility (default 0.3)
        state: Persistent state dict (unused)
    Returns:
        Tuple of (processed_frame, new_state)
    """
    glow_radius = int(params.get("glow_radius", 15))
    glow_intensity = params.get("glow_intensity", 2.0)
    edge_low = int(params.get("edge_low", 50))
    edge_high = int(params.get("edge_high", 150))
    color_r = int(params.get("color_r", 0))
    color_g = int(params.get("color_g", 255))
    color_b = int(params.get("color_b", 255))
    background = params.get("background", 0.3)
    h, w = frame.shape[:2]
    color = np.array([color_r, color_g, color_b], dtype=np.float32)
    # Edge detection (pre-blur reduces noise edges).
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edges = cv2.Canny(blurred, edge_low, edge_high)
    # Create colored edge image
    glow_base = np.zeros((h, w, 3), dtype=np.float32)
    for c in range(3):
        glow_base[:, :, c] = edges.astype(np.float32) * (color[c] / 255.0)
    # Apply blur for glow (kernel size must be odd).
    if glow_radius > 0:
        ksize = glow_radius * 2 + 1
        glow = cv2.GaussianBlur(glow_base, (ksize, ksize), 0)
    else:
        glow = glow_base
    # Intensify
    glow = glow * glow_intensity
    # Add sharp edges on top so outlines stay crisp inside the halo.
    edge_layer = np.zeros((h, w, 3), dtype=np.float32)
    for c in range(3):
        edge_layer[:, :, c] = edges.astype(np.float32) * (color[c] / 255.0) * 255
    glow = np.maximum(glow, edge_layer)
    # Blend with original
    if background > 0:
        a = frame.astype(np.float32) / 255.0 * background
        b = glow / 255.0
        result = (1 - (1 - a) * (1 - b)) * 255  # Screen blend
    else:
        result = glow
    # Values can overshoot 255 before this final clip.
    return np.clip(result, 0, 255).astype(np.uint8), state

View File

@@ -1,21 +0,0 @@
;; Neon glow effect - glowing edges on dark background
;; Usage: python3 plan.py effects/neon_glow.sexp -p glow_radius=20 -p glow_intensity=2 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   glow_radius: glow spread in pixels (5 to 50, default: 20)
;;   glow_intensity: glow brightness (0.5 to 5, default: 2)
(recipe "neon_glow"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect neon_glow :path "sexp_effects/effects/neon_glow.sexp")
  ;; Default parameters (overridden by -p glow_radius=N -p glow_intensity=N)
  (def glow_radius 20)
  (def glow_intensity 2)
  ;; Source clip: first 10 seconds of monday.webm.
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect neon_glow :glow_radius glow_radius :glow_intensity glow_intensity)))
  ;; Last expression is the recipe's rendered output.
  result)

View File

@@ -1,137 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect noise
@version 1.0.0
@author artdag
@description
Noise effect. Adds various types of noise to the image including
static, gaussian, salt & pepper, and more.
@param intensity float
@range 0 1
@default 0.2
Noise intensity.
@param mode string
@enum gaussian uniform salt_pepper scanline
@default gaussian
Type of noise:
- gaussian: smooth normal distribution
- uniform: flat random noise
- salt_pepper: random black/white pixels
- scanline: horizontal line noise
@param colored bool
@default false
Use colored noise instead of monochrome.
@param animate bool
@default true
Different noise each frame.
@param seed int
@default 42
Random seed for reproducible noise.
@state rng DeterministicRNG
Random number generator.
@example
(effect noise :intensity 0.3 :mode "gaussian")
@example
;; Static TV noise
(effect noise :intensity 0.5 :mode "uniform" :animate true)
"""
import numpy as np
from pathlib import Path
import sys
# Import DeterministicRNG from same directory
_effects_dir = Path(__file__).parent
if str(_effects_dir) not in sys.path:
sys.path.insert(0, str(_effects_dir))
from random import DeterministicRNG
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply noise effect to a video frame.

    Noise is drawn from a DeterministicRNG so renders are reproducible for
    a given seed; the per-pixel Python-level sampling is deliberate — the
    noise pattern depends on the exact RNG draw order.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - intensity: noise intensity 0-1 (default 0.2)
            - mode: "gaussian" | "uniform" | "salt_pepper" | "scanline"
              (default "gaussian")
            - colored: use colored noise (default False)
            - animate: different noise each frame (default True)
            - seed: random seed (default 42)
        state: Persistent state dict
            - rng: DeterministicRNG instance
    Returns:
        Tuple of (processed_frame, new_state)
    """
    intensity = params.get("intensity", 0.2)
    mode = params.get("mode", "gaussian")
    colored = params.get("colored", False)
    # NOTE(review): `animate` is read but not referenced below — the RNG
    # advancing every frame already produces per-frame noise; confirm intent.
    animate = params.get("animate", True)
    seed = int(params.get("seed", 42))
    if state is None:
        state = {}
    if intensity <= 0:
        # No-op fast path.
        return frame, state
    # Initialize RNG lazily on the first frame so the seed is honored.
    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)
    rng = state["rng"]
    h, w = frame.shape[:2]
    result = frame.astype(np.float32)
    if mode == "gaussian":
        # Gaussian noise
        if colored:
            noise = np.array([[[rng.gaussian(0, intensity * 50) for _ in range(3)]
                               for _ in range(w)]
                              for _ in range(h)])
        else:
            # Monochrome: one 2-D field replicated across all channels.
            noise_2d = np.array([[rng.gaussian(0, intensity * 50)
                                  for _ in range(w)]
                                 for _ in range(h)])
            noise = np.stack([noise_2d, noise_2d, noise_2d], axis=-1)
        result = result + noise
    elif mode == "uniform":
        # Uniform random noise
        if colored:
            noise = np.array([[[rng.uniform(-intensity * 100, intensity * 100) for _ in range(3)]
                               for _ in range(w)]
                              for _ in range(h)])
        else:
            noise_2d = np.array([[rng.uniform(-intensity * 100, intensity * 100)
                                  for _ in range(w)]
                                 for _ in range(h)])
            noise = np.stack([noise_2d, noise_2d, noise_2d], axis=-1)
        result = result + noise
    elif mode == "salt_pepper":
        # Salt and pepper noise: random pure black/white pixels.
        # NOTE(review): assumes no-arg rng.uniform() yields [0, 1) — confirm
        # against DeterministicRNG in random.py.
        for y in range(h):
            for x in range(w):
                if rng.uniform() < intensity * 0.1:
                    if rng.uniform() < 0.5:
                        result[y, x] = [0, 0, 0]
                    else:
                        result[y, x] = [255, 255, 255]
    elif mode == "scanline":
        # Horizontal scanline noise: offset whole rows at random.
        for y in range(h):
            if rng.uniform() < intensity * 0.2:
                noise_val = rng.uniform(-intensity * 100, intensity * 100)
                result[y] = result[y] + noise_val
    return np.clip(result, 0, 255).astype(np.uint8), state

View File

@@ -1,19 +0,0 @@
;; Noise effect - random pixel noise
;; Usage: python3 plan.py effects/noise.sexp -p amount=40 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   amount: noise intensity (5 to 100, default: 40)
(recipe "noise"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect noise :path "sexp_effects/effects/noise.sexp")
  ;; Default parameter (overridden by -p amount=N)
  (def amount 40)
  ;; Source clip: first 10 seconds of monday.webm.
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect noise :amount amount)))
  ;; Last expression is the recipe's rendered output.
  result)

View File

@@ -1,111 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect outline
@version 1.0.0
@author artdag
@description
Outline / Toon Edges effect. Extracts and displays edges as outlines,
optionally with fill. Great for cartoon/comic book aesthetics.
@param edge_thickness int
@range 1 10
@default 2
Thickness of outlines in pixels.
@param threshold float
@range 20 300
@default 100
Edge detection sensitivity.
@param outline_color list
@default [0, 0, 0]
RGB color for outlines (default black).
@param fill_mode string
@enum original solid transparent
@default original
What to show in non-edge areas:
- original: keep source image
- solid: fill with solid color
- transparent: black background
@param fill_color list
@default [255, 255, 255]
RGB color for solid fill mode.
@example
(effect outline :edge_thickness 3 :threshold 80)
@example
;; White outlines on black
(effect outline :outline_color [255 255 255] :fill_mode "transparent")
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Render toon-style outlines over a video frame.

    Detects edges with Canny, optionally thickens them, and paints them
    in ``outline_color`` over a background chosen by ``fill_mode``.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - edge_thickness: outline width, clamped to 1..10 (default 2)
            - threshold: edge sensitivity (default 100)
            - outline_color: RGB triple for the edges (default [0, 0, 0])
            - fill_mode: original/solid/transparent (default original)
            - fill_color: RGB triple for solid fill (default [255, 255, 255])
        state: Persistent state dict

    Returns:
        Tuple of (processed_frame, new_state)
    """
    edge_px = min(max(int(params.get("edge_thickness", 2)), 1), 10)
    sensitivity = params.get("threshold", 100)
    line_rgb = params.get("outline_color", [0, 0, 0])
    fill_mode = params.get("fill_mode", "original")
    fill_rgb = params.get("fill_color", [255, 255, 255])

    state = {} if state is None else state
    height, width = frame.shape[:2]

    # Edge map from the luminance channel (Canny low = half of high).
    luma = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    edge_map = cv2.Canny(luma, int(sensitivity * 0.5), int(sensitivity))

    # Thicken the one-pixel Canny edges when requested.
    if edge_px > 1:
        edge_map = cv2.dilate(edge_map, np.ones((edge_px, edge_px), np.uint8), iterations=1)

    # Build the background according to fill mode.
    if fill_mode == "original":
        canvas = frame.copy()
    elif fill_mode == "solid":
        if isinstance(fill_rgb, (list, tuple)) and len(fill_rgb) >= 3:
            canvas = np.full((height, width, 3), fill_rgb[:3], dtype=np.uint8)
        else:
            canvas = np.full((height, width, 3), 255, dtype=np.uint8)
    else:  # transparent/none -> black background
        canvas = np.zeros((height, width, 3), dtype=np.uint8)

    # Paint the outline color wherever an edge was detected.
    if isinstance(line_rgb, (list, tuple)) and len(line_rgb) >= 3:
        stroke = np.array(line_rgb[:3], dtype=np.uint8)
    else:
        stroke = np.array([0, 0, 0], dtype=np.uint8)
    canvas[edge_map > 0] = stroke

    return canvas, state

View File

@@ -1,19 +0,0 @@
;; Outline effect - draw edges as lines
;; Usage: python3 plan.py effects/outline.sexp -p thickness=2 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   thickness: outline width in pixels (1 to 5, default: 2)
(recipe "outline"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  ;; Effect implementation loaded from the sexp_effects registry
  (effect outline :path "sexp_effects/effects/outline.sexp")
  ;; Default parameter (overridden by -p thickness=N)
  (def thickness 2)
  ;; Source: first 10 seconds of monday.webm, then the outline effect applied
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect outline :thickness thickness)))
  result)

View File

@@ -1,72 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect pixelate
@version 1.0.0
@author artdag
@description
Pixelate effect. Reduces resolution to create blocky, retro pixel art
look. Great for 8-bit aesthetics.
@param block_size int
@range 2 64
@default 8
Size of pixel blocks. Larger = more pixelated.
@param maintain_edges bool
@default false
Try to preserve edges while pixelating.
@example
(effect pixelate :block_size 16)
@example
;; Beat-reactive pixelation
(effect pixelate :block_size (bind bass :range [4 32]))
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply a blocky pixelation to a video frame.

    Downscales the frame by ``block_size`` with area averaging, then
    upscales with nearest-neighbour so each low-resolution pixel becomes
    one solid block. Optionally re-overlays the original pixels along
    detected edges.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - block_size: pixel block size, clamped to 2..64 (default 8)
            - maintain_edges: preserve edges from the source (default False)
        state: Persistent state dict

    Returns:
        Tuple of (processed_frame, new_state)
    """
    block = min(64, max(2, int(params.get("block_size", 8))))
    keep_edges = params.get("maintain_edges", False)
    state = {} if state is None else state

    height, width = frame.shape[:2]
    down_w = max(1, width // block)
    down_h = max(1, height // block)

    # Down-then-up resize produces the blocky look.
    reduced = cv2.resize(frame, (down_w, down_h), interpolation=cv2.INTER_AREA)
    output = cv2.resize(reduced, (width, height), interpolation=cv2.INTER_NEAREST)

    if keep_edges:
        # Copy original pixels back along slightly dilated Canny edges.
        luma = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        edge_map = cv2.dilate(cv2.Canny(luma, 50, 150), np.ones((2, 2), np.uint8))
        on_edge = edge_map > 0
        output[on_edge] = frame[on_edge]

    return output, state

View File

@@ -1,20 +0,0 @@
;; Pixelate effect - reduce resolution with blocky pixels
;; Usage: python3 plan.py effects/pixelate.sexp -p block_size=16 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   block_size: pixel block size (2 to 64, default: 16)
;;               higher = more pixelated
(recipe "pixelate"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  ;; Effect implementation loaded from the sexp_effects registry
  (effect pixelate :path "sexp_effects/effects/pixelate.sexp")
  ;; Default parameter (overridden by -p block_size=N)
  (def block_size 16)
  ;; Source: first 10 seconds of monday.webm, then the pixelate effect applied
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect pixelate :block_size block_size)))
  result)

View File

@@ -1,152 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect pixelsort
@version 1.0.0
@author artdag
@description
Pixel sorting glitch art effect. Sorts pixels within rows by brightness,
hue, or other properties. Creates distinctive streaked/melted aesthetics.
@param sort_by string
@enum lightness hue saturation red green blue
@default lightness
Property to sort pixels by.
@param threshold_low float
@range 0 255
@default 50
Pixels darker than this are not sorted.
@param threshold_high float
@range 0 255
@default 200
Pixels brighter than this are not sorted.
@param angle float
@range 0 180
@default 0
Sort direction: 0 = horizontal, 90 = vertical.
@param reverse bool
@default false
Reverse the sort order.
@example
(effect pixelsort)
@example
;; Vertical pixel sort
(effect pixelsort :angle 90)
@example
;; Sort by hue for rainbow streaks
(effect pixelsort :sort_by "hue" :threshold_low 20 :threshold_high 240)
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply pixel sorting to a video frame.

    Pixels whose sort value falls between the two thresholds are sorted
    within contiguous horizontal runs (vertical when angle is near 90,
    handled by transposing before and after).

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - sort_by: property to sort by (default "lightness")
            - threshold_low: min brightness to sort (default 50)
            - threshold_high: max brightness to sort (default 200)
            - angle: 0 = horizontal, 90 = vertical (default 0)
            - reverse: reverse sort order (default False)
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    sort_by = params.get("sort_by", "lightness")
    threshold_low = params.get("threshold_low", 50)
    threshold_high = params.get("threshold_high", 200)
    angle = params.get("angle", 0)
    reverse = params.get("reverse", False)
    # Fix: guarantee a dict state, consistent with the other effects in
    # this package (previously a None state was passed straight through).
    if state is None:
        state = {}
    h, w = frame.shape[:2]
    # Rotate for non-horizontal sorting: angles near vertical are handled
    # by transposing, sorting rows, then transposing back.
    if 45 <= (angle % 180) <= 135:
        frame = np.transpose(frame, (1, 0, 2))
        h, w = frame.shape[:2]
        rotated = True
    else:
        rotated = False
    result = frame.copy()
    # Per-pixel sort keys (brightness, hue, single channel, ...).
    sort_values = _get_sort_values(frame, sort_by)
    # Only pixels inside the threshold band participate in sorting.
    mask = (sort_values >= threshold_low) & (sort_values <= threshold_high)
    # Sort each row's contiguous masked runs independently.
    for y in range(h):
        row = result[y].copy()
        row_mask = mask[y]
        row_values = sort_values[y]
        segments = _find_segments(row_mask)
        for start, end in segments:
            if end - start > 1:
                segment_values = row_values[start:end]
                sort_indices = np.argsort(segment_values)
                if reverse:
                    sort_indices = sort_indices[::-1]
                row[start:end] = row[start:end][sort_indices]
        result[y] = row
    # Rotate back if needed
    if rotated:
        result = np.transpose(result, (1, 0, 2))
    return np.ascontiguousarray(result), state
def _get_sort_values(frame, sort_by):
    """Return a float32 (H, W) array of per-pixel sort keys for *sort_by*."""
    if sort_by in ("hue", "saturation"):
        hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
        channel = 0 if sort_by == "hue" else 1
        return hsv[:, :, channel].astype(np.float32)
    rgb_channels = {"red": 0, "green": 1, "blue": 2}
    if sort_by in rgb_channels:
        return frame[:, :, rgb_channels[sort_by]].astype(np.float32)
    # "lightness" and any unrecognised value fall back to grayscale.
    return cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY).astype(np.float32)
def _find_segments(mask):
"""Find contiguous True segments in mask."""
segments = []
start = None
for i, val in enumerate(mask):
if val and start is None:
start = i
elif not val and start is not None:
segments.append((start, i))
start = None
if start is not None:
segments.append((start, len(mask)))
return segments

View File

@@ -1,22 +0,0 @@
;; Pixelsort effect - glitch art pixel sorting
;; Usage: python3 plan.py effects/pixelsort.sexp -p threshold_low=30 -p threshold_high=220 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   threshold_low: lower brightness threshold (0 to 128, default: 30)
;;   threshold_high: upper brightness threshold (128 to 255, default: 220)
;;   pixels between thresholds get sorted
(recipe "pixelsort"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  ;; Effect implementation loaded from the sexp_effects registry
  (effect pixelsort :path "sexp_effects/effects/pixelsort.sexp")
  ;; Default parameters (overridden by -p threshold_low=N -p threshold_high=N)
  (def threshold_low 30)
  (def threshold_high 220)
  ;; Source: first 10 seconds of monday.webm, then the pixelsort effect applied
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect pixelsort :threshold_low threshold_low :threshold_high threshold_high)))
  result)

View File

@@ -1,79 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect posterize
@version 1.0.0
@author artdag
@description
Posterize / Color Quantization effect. Reduces the number of colors
to create a poster/cartoon look. Great for stylized visuals.
@param levels int
@range 2 32
@default 8
Number of color levels per channel. Lower = more stylized.
@param dither bool
@default false
Apply dithering to reduce color banding.
@example
(effect posterize :levels 4)
@example
;; Beat-reactive posterization
(effect posterize :levels (bind bass :range [2 16]))
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply posterize (color quantization) to a video frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - levels: color levels per channel, clamped to 2..256 (default 8)
            - dither: apply Floyd-Steinberg dithering (default False)
        state: Persistent state dict

    Returns:
        Tuple of (processed_frame, new_state)
    """
    levels = max(2, min(int(params.get("levels", 8)), 256))
    dither = params.get("dither", False)
    if state is None:
        state = {}
    step = 256 // levels
    if dither:
        # Floyd-Steinberg error diffusion. The loop stops one row/column
        # short of the borders so error can always be pushed to x-1, x+1
        # and y+1 without bounds checks.
        result = frame.astype(np.float32).copy()
        h, w = result.shape[:2]
        for y in range(h - 1):
            for x in range(1, w - 1):
                for c in range(3):
                    old_val = result[y, x, c]
                    new_val = np.round(old_val / step) * step
                    result[y, x, c] = new_val
                    error = old_val - new_val
                    # Distribute quantization error to unvisited neighbors.
                    result[y, x + 1, c] += error * 7 / 16
                    result[y + 1, x - 1, c] += error * 3 / 16
                    result[y + 1, x, c] += error * 5 / 16
                    result[y + 1, x + 1, c] += error * 1 / 16
        # Bug fix: the border pixels skipped by the loop above (last row,
        # first and last columns) were previously returned unquantized;
        # snap them to the palette as well (without error diffusion).
        result[-1, :, :] = np.round(result[-1, :, :] / step) * step
        result[:, 0, :] = np.round(result[:, 0, :] / step) * step
        result[:, -1, :] = np.round(result[:, -1, :] / step) * step
        return np.clip(result, 0, 255).astype(np.uint8), state
    else:
        # Simple per-channel quantization (floor to the nearest lower level).
        quantized = (frame // step) * step
        return quantized.astype(np.uint8), state

View File

@@ -1,20 +0,0 @@
;; Posterize effect - reduce color levels for poster-like look
;; Usage: python3 plan.py effects/posterize.sexp -p levels=6 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   levels: number of color levels per channel (2 to 32, default: 6)
;;           lower = more posterized, higher = more gradual
(recipe "posterize"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  ;; Effect implementation loaded from the sexp_effects registry
  (effect posterize :path "sexp_effects/effects/posterize.sexp")
  ;; Default parameter (overridden by -p levels=N)
  (def levels 6)
  ;; Source: first 10 seconds of monday.webm, then the posterize effect applied
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect posterize :levels levels)))
  result)

View File

@@ -1,178 +0,0 @@
;; Quick Test Recipe
;;
;; Cycles between three video pairs (monday, duel, ecstacy) with smooth zoom-based crossfade.
;; Each pair is two copies of the same source with opposite rotations.
;; Each pair rotates in its own direction (per-pair rotation via template).
;; Cycle: active pair plays -> crossfade -> new pair plays -> advance and repeat.
;; Ripple drops on the final combined output only.
(recipe "quick_test"
  :version "1.0"
  :description "Cycling crossfade between three video pairs"
  :minimal-primitives true
  :encoding (:codec "libx264" :crf 23 :preset "ultrafast" :audio-codec "aac" :fps 30)
  :params (
    (audio_start :type float :default 60 :range [0 300]
      :desc "Audio start time in seconds")
    (audio_duration :type float :default nil
      :desc "Audio duration (nil = full remaining)")
    (blend_opacity :type float :default 0.5 :range [0 1]
      :desc "Blend opacity within each pair")
    (seed :type int :default 42 :desc "Master random seed")
  )
  ;; Registry: effect and analyzer implementations used below
  (effect rotate :path "../sexp_effects/effects/rotate.sexp")
  (effect zoom :path "../sexp_effects/effects/zoom.sexp")
  (effect blend :path "../sexp_effects/effects/blend.sexp")
  (effect invert :path "../sexp_effects/effects/invert.sexp")
  (effect hue_shift :path "../sexp_effects/effects/hue_shift.sexp")
  (effect ascii_art :path "../sexp_effects/effects/ascii_art.sexp")
  (effect ripple :path "../sexp_effects/effects/ripple.sexp")
  (effect blend_multi :path "../sexp_effects/effects/blend_multi.sexp")
  (analyzer energy :path "../../artdag-analyzers/energy/analyzer.py")
  (analyzer beats :path "../../artdag-analyzers/beats/analyzer.py")
  ;; Sources
  (def video-1 (source :path "../1.mp4"))
  (def video-2 (source :path "../2.webm"))
  (def video-4 (source :path "../4.mp4"))
  (def video-5 (source :path "../5.mp4"))
  (def video-a (source :path "../monday.webm"))
  (def video-b (source :path "../escher.webm"))
  (def video-c (source :path "../dopple.webm"))
  (def video-d (source :path "../disruptors.webm"))
  (def video-e (source :path "../ecstacy.mp4"))
  (def audio (source :path "../dizzy.mp3"))
  ;; Templates: reusable video-pair processor and cycle-crossfade
  (include :path "../templates/process-pair.sexp")
  (include :path "../templates/cycle-crossfade.sexp")
  ;; Unified RNG: auto-derives unique seeds for all scans
  (def rng (make-rng seed))
  ;; Stage 1: Analysis - energy, beats, and global-level scans
  (stage :analyze
    :outputs [energy-data beat-data whole-spin
              ripple-gate ripple-cx ripple-cy]
    (def audio-clip (-> audio (segment :start audio_start :duration audio_duration)))
    (def energy-data (-> audio-clip (analyze energy)))
    (def beat-data (-> audio-clip (analyze beats)))
    ;; --- Whole-video continuous spin: cumulative rotation that reverses direction periodically ---
    ;; clen = beats per spin cycle; dir flips sign each time a cycle completes
    (def whole-spin (scan beat-data :rng rng
      :init (dict :beat 0 :clen 25 :dir 1 :angle 0)
      :step (if (< (+ beat 1) clen)
              (dict :beat (+ beat 1) :clen clen :dir dir
                    :angle (+ angle (* dir (/ 360 clen))))
              (dict :beat 0 :clen (rand-int 20 30) :dir (* dir -1)
                    :angle angle))
      :emit angle))
    ;; --- Ripple drops on final output ---
    ;; rem = beats remaining for the active ripple; 5% chance per beat to start one
    (def ripple (scan beat-data :rng rng
      :init (dict :rem 0 :cx 0.5 :cy 0.5)
      :step (if (> rem 0)
              (dict :rem (- rem 1) :cx cx :cy cy)
              (if (< (rand) 0.05)
                (dict :rem (rand-int 1 20) :cx (rand-range 0.1 0.9) :cy (rand-range 0.1 0.9))
                (dict :rem 0 :cx 0.5 :cy 0.5)))
      :emit {:gate (if (> rem 0) 1 0) :cx cx :cy cy})))
  ;; Stage 2: Process videos via template
  ;; Per-pair scans (inv/hue/ascii triggers, pair-mix, pair-rot) are now
  ;; defined inside the process-pair template using seed offsets.
  (stage :process
    :requires [:analyze]
    :inputs [energy-data beat-data whole-spin
             ripple-gate ripple-cx ripple-cy]
    :outputs [final-video audio-clip]
    ;; Re-segment audio for final mux
    (def audio-clip (-> audio (segment :start audio_start :duration audio_duration)))
    ;; --- Process each pair via template ---
    ;; rot-dir sets the pair's spin direction; rot/zoom ranges shape the motion
    (def monday-blend (process-pair
      :video video-a :energy energy-data :beat-data beat-data
      :rng rng :rot-dir -1
      :rot-a [0 45] :rot-b [0 -45]
      :zoom-a [1 1.5] :zoom-b [1 0.5]))
    (def escher-blend (process-pair
      :video video-b :energy energy-data :beat-data beat-data
      :rng rng :rot-dir 1
      :rot-a [0 45] :rot-b [0 -45]
      :zoom-a [1 1.5] :zoom-b [1 0.5]))
    (def duel-blend (process-pair
      :video video-d :energy energy-data :beat-data beat-data
      :rng rng :rot-dir -1
      :rot-a [0 -45] :rot-b [0 45]
      :zoom-a [1 0.5] :zoom-b [1 1.5]))
    (def blend-2 (process-pair
      :video video-2 :energy energy-data :beat-data beat-data
      :rng rng :rot-dir 1
      :rot-a [0 45] :rot-b [0 -45]
      :zoom-a [1 1.5] :zoom-b [1 0.5]))
    (def dopple-blend (process-pair
      :video video-c :energy energy-data :beat-data beat-data
      :rng rng :rot-dir -1
      :rot-a [0 -45] :rot-b [0 45]
      :zoom-a [1 0.5] :zoom-b [1 1.5]))
    (def blend-4 (process-pair
      :video video-4 :energy energy-data :beat-data beat-data
      :rng rng :rot-dir -1
      :rot-a [0 45] :rot-b [0 -45]
      :zoom-a [1 1.5] :zoom-b [1 0.5]))
    (def ext-blend (process-pair
      :video video-e :energy energy-data :beat-data beat-data
      :rng rng :rot-dir 1
      :rot-a [0 30] :rot-b [0 -30]
      :zoom-a [1 1.3] :zoom-b [1 0.7]))
    (def blend-5 (process-pair
      :video video-5 :energy energy-data :beat-data beat-data
      :rng rng :rot-dir 1
      :rot-a [0 45] :rot-b [0 -45]
      :zoom-a [1 1.5] :zoom-b [1 0.5]))
    ;; --- Cycle zoom + crossfade via template ---
    (def combined (cycle-crossfade
      :beat-data beat-data
      :input-videos [monday-blend escher-blend blend-2 duel-blend blend-4 ext-blend dopple-blend blend-5]
      :init-clen 60))
    ;; --- Final output: sporadic spin + ripple ---
    ;; Ripple amplitude is gated by the ripple scan and scaled by audio energy
    (def final-video (-> combined
      (effect rotate :angle (bind whole-spin values))
      (effect ripple
        :amplitude (* (bind ripple-gate values) (bind energy-data values :range [5 50]))
        :center_x (bind ripple-cx values)
        :center_y (bind ripple-cy values)
        :frequency 8
        :decay 2
        :speed 5))))
  ;; Stage 3: Output
  (stage :output
    :requires [:process]
    :inputs [final-video audio-clip]
    (mux final-video audio-clip)))

View File

@@ -0,0 +1,150 @@
;; Quick Test - Fully Explicit Streaming Version
;;
;; The interpreter is completely generic - knows nothing about video/audio.
;; All domain logic is explicit via primitives.
;;
;; Run with built-in sources/audio:
;;   python3 -m streaming.stream_sexp_generic effects/quick_test_explicit.sexp --fps 30
;;
;; Run with external config files:
;;   python3 -m streaming.stream_sexp_generic effects/quick_test_explicit.sexp \
;;     --sources configs/sources-default.sexp \
;;     --audio configs/audio-dizzy.sexp \
;;     --fps 30
(stream "quick_test_explicit"
  :fps 30
  :width 1920
  :height 1080
  :seed 42
  ;; Load standard primitives and effects
  (include :path "../templates/standard-primitives.sexp")
  (include :path "../templates/standard-effects.sexp")
  ;; Load reusable templates
  (include :path "../templates/stream-process-pair.sexp")
  (include :path "../templates/crossfade-zoom.sexp")
  ;; === SOURCES AS ARRAY ===
  ;; Index order must match pair-configs below
  (def sources [
    (streaming:make-video-source "monday.webm" 30)
    (streaming:make-video-source "escher.webm" 30)
    (streaming:make-video-source "2.webm" 30)
    (streaming:make-video-source "disruptors.webm" 30)
    (streaming:make-video-source "4.mp4" 30)
    (streaming:make-video-source "ecstacy.mp4" 30)
    (streaming:make-video-source "dopple.webm" 30)
    (streaming:make-video-source "5.mp4" 30)
  ])
  ;; Per-pair config: [rot-dir, rot-a-max, rot-b-max, zoom-a-max, zoom-b-max]
  ;; Pairs 3,6: reversed (negative rot-a, positive rot-b, shrink zoom-a, grow zoom-b)
  ;; Pair 5: smaller ranges
  (def pair-configs [
    {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5}   ;; 0: monday
    {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5}    ;; 1: escher
    {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5}    ;; 2: vid2
    {:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5}   ;; 3: disruptors (reversed)
    {:dir -1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5}   ;; 4: vid4
    {:dir 1 :rot-a 30 :rot-b -30 :zoom-a 1.3 :zoom-b 0.7}    ;; 5: ecstacy (smaller)
    {:dir -1 :rot-a -45 :rot-b 45 :zoom-a 0.5 :zoom-b 1.5}   ;; 6: dopple (reversed)
    {:dir 1 :rot-a 45 :rot-b -45 :zoom-a 1.5 :zoom-b 0.5}    ;; 7: vid5
  ])
  ;; Audio analyzer
  (def music (streaming:make-audio-analyzer "dizzy.mp3"))
  ;; Audio playback
  (audio-playback "../dizzy.mp3")
  ;; === GLOBAL SCANS ===
  ;; Cycle state: which source is active (recipe-specific)
  ;; clen = beats per source (8-24 beats = ~4-12 seconds)
  (scan cycle (streaming:audio-beat music t)
    :init {:active 0 :beat 0 :clen 16}
    :step (if (< (+ beat 1) clen)
            (dict :active active :beat (+ beat 1) :clen clen)
            (dict :active (mod (+ active 1) (len sources)) :beat 0
                  :clen (+ 8 (mod (* (streaming:audio-beat-count music t) 7) 17)))))
  ;; Reusable scans from templates (require 'music' to be defined)
  (include :path "../templates/scan-oscillating-spin.sexp")
  (include :path "../templates/scan-ripple-drops.sexp")
  ;; === PER-PAIR STATE (dynamically sized based on sources) ===
  ;; Each pair has: inv-a, inv-b, hue-a, hue-b, mix, rot-angle
  (scan pairs (streaming:audio-beat music t)
    :init {:states (map (core:range (len sources)) (lambda (_)
      {:inv-a 0 :inv-b 0 :hue-a 0 :hue-b 0 :hue-a-val 0 :hue-b-val 0 :mix 0.5 :mix-rem 5 :angle 0 :rot-beat 0 :rot-clen 25}))}
    :step (dict :states (map states (lambda (p)
      (let [;; Invert toggles (10% chance, lasts 1-4 beats)
            new-inv-a (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- (get p :inv-a) 1)))
            new-inv-b (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- (get p :inv-b) 1)))
            ;; Hue shifts (10% chance, lasts 1-4 beats) - use countdown like invert
            old-hue-a (get p :hue-a)
            old-hue-b (get p :hue-b)
            new-hue-a (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- old-hue-a 1)))
            new-hue-b (if (< (core:rand) 0.1) (+ 1 (core:rand-int 1 4)) (core:max 0 (- old-hue-b 1)))
            ;; Pick random hue value when triggering (stored separately)
            new-hue-a-val (if (> new-hue-a old-hue-a) (+ 30 (* (core:rand) 300)) (get p :hue-a-val))
            new-hue-b-val (if (> new-hue-b old-hue-b) (+ 30 (* (core:rand) 300)) (get p :hue-b-val))
            ;; Mix (holds for 1-10 beats, then picks 0, 0.5, or 1)
            mix-rem (get p :mix-rem)
            old-mix (get p :mix)
            new-mix-rem (if (> mix-rem 0) (- mix-rem 1) (+ 1 (core:rand-int 1 10)))
            new-mix (if (> mix-rem 0) old-mix (* (core:rand-int 0 2) 0.5))
            ;; Rotation (accumulates, reverses direction when cycle completes)
            rot-beat (get p :rot-beat)
            rot-clen (get p :rot-clen)
            old-angle (get p :angle)
            ;; Note: dir comes from pair-configs, but we store rotation state here
            new-rot-beat (if (< (+ rot-beat 1) rot-clen) (+ rot-beat 1) 0)
            new-rot-clen (if (< (+ rot-beat 1) rot-clen) rot-clen (+ 20 (core:rand-int 0 10)))
            new-angle (+ old-angle (/ 360 rot-clen))]
        (dict :inv-a new-inv-a :inv-b new-inv-b
              :hue-a new-hue-a :hue-b new-hue-b
              :hue-a-val new-hue-a-val :hue-b-val new-hue-b-val
              :mix new-mix :mix-rem new-mix-rem
              :angle new-angle :rot-beat new-rot-beat :rot-clen new-rot-clen))))))
  ;; === FRAME PIPELINE ===
  (frame
    (let [now t
          e (streaming:audio-energy music now)
          ;; Get cycle state
          active (bind cycle :active)
          beat-pos (bind cycle :beat)
          clen (bind cycle :clen)
          ;; Transition logic: last third of cycle crossfades to next
          phase3 (* beat-pos 3)
          fading (and (>= phase3 (* clen 2)) (< phase3 (* clen 3)))
          fade-amt (if fading (/ (- phase3 (* clen 2)) clen) 0)
          next-idx (mod (+ active 1) (len sources))
          ;; Get pair states array (required by process-pair macro)
          pair-states (bind pairs :states)
          ;; Process active pair using macro from template
          active-frame (process-pair active)
          ;; Crossfade with zoom during transition (using macro)
          result (if fading
                   (crossfade-zoom active-frame (process-pair next-idx) fade-amt)
                   active-frame)
          ;; Final: global spin + ripple
          spun (rotate result :angle (bind spin :angle))
          rip-gate (bind ripple-state :gate)
          rip-amp (* rip-gate (core:map-range e 0 1 5 50))]
      (ripple spun
        :amplitude rip-amp
        :center_x (bind ripple-state :cx)
        :center_y (bind ripple-state :cy)
        :frequency 8
        :decay 2
        :speed 5))))

View File

@@ -1,217 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect random
@version 1.0.0
@author artdag
@description
Deterministic random number generator for use in recipes and effects.
Given the same seed, produces the same sequence of values every time.
This effect doesn't modify the frame - it provides random values that
can be bound to other effect parameters. The random state persists
across frames for consistent sequences.
@param seed int
@default 42
Random seed for reproducibility. Same seed = same sequence.
@param min float
@default 0
Minimum output value.
@param max float
@default 1
Maximum output value.
@param mode string
@enum uniform gaussian integer choice
@default uniform
Distribution type:
- uniform: even distribution between min and max
- gaussian: normal distribution (min=mean, max=stddev)
- integer: random integers between min and max (inclusive)
- choice: randomly pick from a list (use choices param)
@param choices list
@default []
List of values to choose from (for mode=choice).
@param step_every int
@default 1
Only generate new value every N frames (1 = every frame).
@state rng RandomState
Numpy random state for deterministic sequence.
@state frame_count int
Tracks frames for step_every.
@state current_value float
Current random value (persists between steps).
@example
;; Random value 0-1 each frame
(bind (random :seed 123))
@example
;; Random integer 1-10, changes every 5 frames
(random :seed 42 :mode "integer" :min 1 :max 10 :step_every 5)
@example
;; Gaussian noise around 0.5
(random :mode "gaussian" :min 0.5 :max 0.1)
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Generate deterministic random values (frame passes through unchanged).

    The frame is returned untouched; the interesting output is
    ``state["value"]``, a reproducible random number derived from the
    seed. A new value is drawn only every ``step_every`` frames.

    Args:
        frame: Input frame (passed through unchanged)
        params: Effect parameters
            - seed: random seed (default 42)
            - min: minimum value (default 0)
            - max: maximum value (default 1)
            - mode: uniform/gaussian/integer/choice (default uniform)
            - choices: list for choice mode
            - step_every: frames between new values (default 1)
        state: Persistent state dict
            - rng: numpy RandomState
            - frame_count: frame counter
            - current_value: last generated value

    Returns:
        Tuple of (frame, state_with_random_value)
    """
    seed = int(params.get("seed", 42))
    lo = params.get("min", 0)
    hi = params.get("max", 1)
    mode = params.get("mode", "uniform")
    options = params.get("choices", [])
    step_every = max(1, int(params.get("step_every", 1)))

    state = {} if state is None else state

    # Lazily create the RNG so the sequence starts on first use.
    if "rng" not in state:
        state["rng"] = np.random.RandomState(seed)
        state["frame_count"] = 0
        state["current_value"] = None

    rng = state["rng"]
    tick = state["frame_count"]

    # Draw a fresh value on the configured cadence (and always on first call).
    if tick % step_every == 0 or state["current_value"] is None:
        if mode == "gaussian":
            drawn = rng.normal(lo, hi)  # min = mean, max = stddev
        elif mode == "integer":
            drawn = rng.randint(int(lo), int(hi) + 1)
        elif mode == "choice" and options:
            drawn = options[rng.randint(0, len(options))]
        else:
            # "uniform", unknown modes, and "choice" with an empty list.
            drawn = rng.uniform(lo, hi)
        state["current_value"] = drawn

    state["frame_count"] = tick + 1
    # Expose the current value for recipe access.
    state["value"] = state["current_value"]
    return frame, state
# Standalone RNG class for use in other effects
class DeterministicRNG:
    """
    Seeded random helper producing reproducible sequences for effects.

    Usage in effects:
        from effects.random import DeterministicRNG

        def process_frame(frame, params, state):
            if "rng" not in state:
                state["rng"] = DeterministicRNG(params.get("seed", 42))
            rng = state["rng"]
            value = rng.uniform(0, 1)
            integer = rng.randint(0, 10)
            choice = rng.choice(["a", "b", "c"])
    """

    def __init__(self, seed: int = 42):
        """Create the generator from an integer seed."""
        self._seed = seed
        self._rng = np.random.RandomState(seed)

    def seed(self, seed: int):
        """Re-seed, restarting the sequence from scratch."""
        self._seed = seed
        self._rng = np.random.RandomState(seed)

    def uniform(self, low: float = 0, high: float = 1) -> float:
        """Random float drawn uniformly from [low, high)."""
        return self._rng.uniform(low, high)

    def randint(self, low: int, high: int) -> int:
        """Random integer drawn from [low, high] inclusive."""
        return self._rng.randint(low, high + 1)

    def gaussian(self, mean: float = 0, stddev: float = 1) -> float:
        """Random float from a normal distribution."""
        return self._rng.normal(mean, stddev)

    def choice(self, items: list):
        """Pick one element at random; None for an empty list."""
        if not items:
            return None
        return items[self._rng.randint(0, len(items))]

    def shuffle(self, items: list) -> list:
        """Return a shuffled copy, leaving the input untouched."""
        copied = list(items)
        self._rng.shuffle(copied)
        return copied

    def sample(self, items: list, n: int) -> list:
        """Random sample of n items without replacement (all, shuffled, if n is too large)."""
        if n >= len(items):
            return self.shuffle(items)
        picked = self._rng.choice(len(items), n, replace=False)
        return [items[i] for i in picked]

    def weighted_choice(self, items: list, weights: list):
        """Pick one element with probability proportional to its weight."""
        if not items or not weights:
            return None
        probs = np.array(weights, dtype=float)
        probs /= probs.sum()
        return items[self._rng.choice(len(items), p=probs)]

    @property
    def state(self) -> dict:
        """Serializable snapshot of the seed plus internal numpy state."""
        return {"seed": self._seed, "state": self._rng.get_state()}

    @classmethod
    def from_state(cls, state: dict) -> 'DeterministicRNG':
        """Rebuild a generator from a snapshot produced by ``.state``."""
        restored = cls(state["seed"])
        restored._rng.set_state(state["state"])
        return restored

View File

@@ -1,106 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect rgb_split
@version 1.0.0
@author artdag
@description
Chromatic aberration / RGB channel separation. Offsets red and blue channels
in different directions, creating color fringing. Classic glitch aesthetic.
@param amount float
@range 0 50
@default 10
Offset amount in pixels. Bind to bass for punchy glitch effect.
@param angle float
@range 0 360
@default 0
Direction of split in degrees. 0 = horizontal, 90 = vertical.
@param red_offset float
@range -50 50
@default 0
Override: specific red channel X offset (ignores amount/angle if set).
@param blue_offset float
@range -50 50
@default 0
Override: specific blue channel X offset (ignores amount/angle if set).
@example
(effect rgb_split :amount 15)
@example
;; Bass-reactive chromatic aberration
(effect rgb_split :amount (bind bass :range [0 30] :transform sqrt))
@example
;; Vertical split
(effect rgb_split :amount 20 :angle 90)
"""
import numpy as np
import cv2
import math
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply RGB channel split (chromatic aberration) to a video frame.

    Shifts the red channel by an offset vector and the blue channel by
    the opposite vector, leaving green in place.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - amount: offset in pixels (default 10)
            - angle: split direction in degrees (default 0)
            - red_offset: override red X offset
            - blue_offset: override blue X offset
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    amount = params.get("amount", 10)
    angle = params.get("angle", 0)
    red_override = params.get("red_offset")
    blue_override = params.get("blue_offset")

    if red_override is None and blue_override is None:
        # Derive opposite red/blue offset vectors from amount + angle.
        theta = math.radians(angle)
        red_dx = int(amount * math.cos(theta))
        red_dy = int(amount * math.sin(theta))
        blue_dx, blue_dy = -red_dx, -red_dy
    else:
        # Explicit per-channel horizontal offsets take precedence.
        red_dx = int(red_override or 0)
        red_dy = 0
        blue_dx = int(blue_override or 0)
        blue_dy = 0

    # Nothing to shift -> frame passes through unchanged.
    if not any((red_dx, red_dy, blue_dx, blue_dy)):
        return frame, state

    h, w = frame.shape[:2]
    red, green, blue = frame[:, :, 0], frame[:, :, 1], frame[:, :, 2]

    shift_red = np.float32([[1, 0, red_dx], [0, 1, red_dy]])
    shift_blue = np.float32([[1, 0, blue_dx], [0, 1, blue_dy]])

    red_moved = cv2.warpAffine(red, shift_red, (w, h), borderMode=cv2.BORDER_REPLICATE)
    blue_moved = cv2.warpAffine(blue, shift_blue, (w, h), borderMode=cv2.BORDER_REPLICATE)

    return np.stack([red_moved, green, blue_moved], axis=-1), state

View File

@@ -1,19 +0,0 @@
;; RGB split effect - chromatic aberration
;; Pipeline: load monday.webm, take a 10 s segment from t=0, shift the
;; red/blue channels apart by offset_x pixels, encode with x264.
;; Usage: python3 plan.py effects/rgb_split.sexp -p offset_x=20 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   offset_x: horizontal color channel offset in pixels (2 to 50, default: 20)
(recipe "rgb_split"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  ;; Effect implementation lives in the referenced sexp file, not inline.
  (effect rgb_split :path "sexp_effects/effects/rgb_split.sexp")
  ;; Default parameter (overridden by -p offset_x=N)
  (def offset_x 20)
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect rgb_split :offset_x offset_x)))
  result)

View File

@@ -1,21 +0,0 @@
;; Ripple effect - concentric wave distortion from center
;; Pipeline: load monday.webm, take a 10 s segment from t=0, apply the
;; ripple distortion, encode with x264.
;; Usage: python3 plan.py effects/ripple.sexp -p amplitude=20 -p frequency=6 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   amplitude: ripple strength in pixels (5 to 50, default: 20)
;;   frequency: number of ripples (1 to 20, default: 6)
(recipe "ripple"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  ;; Effect implementation lives in the referenced sexp file, not inline.
  (effect ripple :path "sexp_effects/effects/ripple.sexp")
  ;; Default parameters (overridden by -p amplitude=N -p frequency=N)
  (def amplitude 20)
  (def frequency 6)
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect ripple :amplitude amplitude :frequency frequency)))
  result)

View File

@@ -1,110 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect rotate
@version 1.0.0
@author artdag
@description
Rotates the frame by a specified angle. Supports two modes:
- Static: rotate by fixed angle
- Continuous: rotate by speed (degrees per frame), accumulating over time
The effect uses state to track cumulative rotation for smooth continuous motion.
@param angle float
@range -360 360
@default 0
Rotation angle in degrees. Positive = counter-clockwise.
@param speed float
@range -30 30
@default 0
Rotation speed in degrees per frame (for continuous rotation).
When non-zero, angle is ignored and rotation accumulates.
@param auto_zoom bool
@default false
If true, automatically zooms to crop out black corners.
@state cumulative_angle float
Tracks total rotation for continuous mode. Persists across frames.
@example
(effect rotate :angle 45)
@example
;; Continuous spin at 5 degrees per frame
(effect rotate :speed 5)
@example
;; Beat-reactive rotation
(effect rotate :angle (bind bass :range [0 90]))
"""
import numpy as np
import cv2
import math
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Rotate a video frame.

    Supports a static angle or continuous rotation (speed accumulates in
    state across frames). Optionally zooms in so the rotated frame still
    covers the canvas with no black corners.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - angle: rotation angle in degrees (default 0)
            - speed: rotation speed per frame for continuous mode (default 0);
              when non-zero, angle is ignored and rotation accumulates
            - auto_zoom: zoom to hide black corners (default False)
        state: Persistent state dict
            - cumulative_angle: total rotation for continuous mode

    Returns:
        Tuple of (processed_frame, new_state)
    """
    angle = params.get("angle", 0)
    speed = params.get("speed", 0)
    auto_zoom = params.get("auto_zoom", False)

    if state is None:
        state = {}

    # Continuous mode: accumulate rotation, ignoring the static angle.
    if speed != 0:
        cumulative = state.get("cumulative_angle", 0) + speed
        state["cumulative_angle"] = cumulative
        angle = cumulative

    if angle == 0:
        return frame, state

    h, w = frame.shape[:2]
    center = (w / 2, h / 2)

    # Zoom factor so the rotated image still covers the whole canvas.
    scale = 1.0
    if auto_zoom:
        # Fold the angle into [0, 90] degrees: the bounding box of a
        # rotated rectangle repeats every 180 degrees and is symmetric
        # about 90 degrees. The previous abs(angle) % 90 collapsed
        # e.g. 90 -> 0 (no zoom at a quarter turn) and under-zoomed for
        # angles in (90, 180).
        a = math.radians(abs(angle)) % math.pi
        if a > math.pi / 2:
            a = math.pi - a
        aspect = w / h if w >= h else h / w
        scale = max(1.0, math.cos(a) + aspect * math.sin(a))

    # Get rotation matrix (positive angle = counter-clockwise in OpenCV).
    rotation_matrix = cv2.getRotationMatrix2D(center, angle, scale)

    # Apply rotation; uncovered corners (when not zoomed) become black.
    result = cv2.warpAffine(
        frame, rotation_matrix, (w, h),
        flags=cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_CONSTANT,
        borderValue=(0, 0, 0)
    )
    return result, state

View File

@@ -1,20 +0,0 @@
;; Rotate effect - rotate image around center
;; Pipeline: load monday.webm, take a 10 s segment from t=0, rotate by a
;; fixed angle, encode with x264.
;; Usage: python3 plan.py effects/rotate.sexp -p angle=15 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   angle: rotation in degrees (-180 to 180, default: 15)
;;          positive = clockwise
(recipe "rotate"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  ;; Effect implementation lives in the referenced sexp file, not inline.
  (effect rotate :path "sexp_effects/effects/rotate.sexp")
  ;; Default parameter (overridden by -p angle=N)
  (def angle 15)
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect rotate :angle angle)))
  result)

View File

@@ -1,61 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect saturation
@version 1.0.0
@author artdag
@description
Adjusts color saturation. 0 = grayscale, 1 = original, 2 = oversaturated.
Uses HSV color space for accurate saturation control.
@param factor float
@range 0 3
@default 1.0
Saturation multiplier. 0 = grayscale, 1 = unchanged, 2 = double saturation.
@example
(effect saturation :factor 0) ; grayscale
@example
(effect saturation :factor 1.5) ; vibrant colors
@example
;; Desaturate on quiet parts, saturate on loud
(effect saturation :factor (bind energy :range [0.5 2.0]))
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Adjust color saturation of a video frame via HSV scaling.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - factor: saturation multiplier (default 1.0);
              0 = grayscale, 1 = unchanged, >1 = more vivid
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    factor = params.get("factor", 1.0)

    # Identity factor: skip the color-space round trip entirely.
    if factor == 1.0:
        return frame, state

    # Work in HSV so only the saturation channel is touched.
    hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV).astype(np.float32)
    hsv[:, :, 1] = np.clip(hsv[:, :, 1] * factor, 0, 255)
    rgb = cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2RGB)
    return rgb, state

View File

@@ -1,20 +0,0 @@
;; Saturation effect - adjust color saturation
;; Pipeline: load monday.webm, take a 10 s segment from t=0, scale the
;; saturation, encode with x264.
;; Usage: python3 plan.py effects/saturation.sexp -p amount=2.0 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   amount: saturation multiplier (0.0 to 3.0, default: 2.0)
;;           0 = grayscale, 1 = normal, >1 = vivid colors
(recipe "saturation"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  ;; Effect implementation lives in the referenced sexp file, not inline.
  (effect saturation :path "sexp_effects/effects/saturation.sexp")
  ;; Default parameter (overridden by -p amount=N)
  (def amount 2.0)
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect saturation :amount amount)))
  result)

View File

@@ -1,116 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect scanlines
@version 1.0.0
@author artdag
@description
VHS/CRT-style scan line shift. Horizontally displaces alternating lines
to create analog video distortion. Great for retro/glitch aesthetics.
@param amplitude float
@range 0 100
@default 10
Maximum shift amount in pixels. Bind to bass for reactive glitch.
@param frequency float
@range 1 100
@default 10
Lines per cycle (affects pattern density).
@param randomness float
@range 0 1
@default 0.5
0 = regular sine pattern, 1 = fully random shifts.
@param line_gap int
@range 1 20
@default 1
Only shift every Nth line (1 = all lines).
@param seed int
@default 42
Random seed for deterministic patterns.
@state rng DeterministicRNG
Random number generator for reproducible results.
@example
(effect scanlines :amplitude 20)
@example
;; Heavy glitch on bass
(effect scanlines :amplitude (bind bass :range [0 50]) :randomness 0.8)
@example
;; Reproducible scanlines
(effect scanlines :amplitude 30 :randomness 0.7 :seed 999)
"""
import numpy as np
from pathlib import Path
import sys
# Import DeterministicRNG from same directory
_effects_dir = Path(__file__).parent
if str(_effects_dir) not in sys.path:
sys.path.insert(0, str(_effects_dir))
from random import DeterministicRNG
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply a VHS-style scan line shift to a video frame.

    Horizontally displaces rows (every `line_gap`-th line) using a blend
    of a sine pattern and deterministic random jitter.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - amplitude: max shift in pixels (default 10)
            - frequency: lines per cycle (default 10)
            - randomness: 0-1 random vs sine (default 0.5)
            - line_gap: shift every Nth line (default 1)
            - seed: random seed (default 42)
        state: Persistent state dict
            - rng: DeterministicRNG instance

    Returns:
        Tuple of (processed_frame, new_state)
    """
    amplitude = params.get("amplitude", 10)
    frequency = params.get("frequency", 10) or 1
    randomness = params.get("randomness", 0.5)
    line_gap = max(1, int(params.get("line_gap", 1)))
    seed = int(params.get("seed", 42))

    if state is None:
        state = {}
    # Create the deterministic RNG once so repeated runs reproduce.
    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)
    rng = state["rng"]

    if amplitude == 0:
        return frame, state

    height = frame.shape[0]
    output = frame.copy()

    for row in range(0, height, line_gap):
        if randomness >= 1.0:
            # Fully random jitter.
            shift = int(rng.uniform(-amplitude, amplitude))
        elif randomness <= 0:
            # Pure sine pattern.
            shift = int(amplitude * np.sin(2 * np.pi * row / frequency))
        else:
            # Weighted blend of sine pattern and random jitter.
            wave = amplitude * np.sin(2 * np.pi * row / frequency)
            jitter = rng.uniform(-amplitude, amplitude)
            shift = int(wave * (1 - randomness) + jitter * randomness)

        if shift:
            # Roll the row horizontally (axis 0 of the (W, 3) row slice).
            output[row] = np.roll(output[row], shift, axis=0)

    return output, state

View File

@@ -1,21 +0,0 @@
;; Scanlines effect - horizontal line overlay
;; Usage: python3 plan.py effects/scanlines.sexp -p spacing=3 -p intensity=0.4 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; spacing: pixels between lines (1 to 8, default: 3)
;; intensity: line darkness (0 to 1, default: 0.4)
(recipe "scanlines"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
(effect scanlines :path "sexp_effects/effects/scanlines.sexp")
;; Default parameters (overridden by -p spacing=N -p intensity=N)
(def spacing 3)
(def intensity 0.4)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect scanlines :spacing spacing :intensity intensity)))
result)

View File

@@ -1,142 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "scipy"]
# ///
"""
@effect scatter
@version 1.0.0
@author artdag
@description
Scatter effect. Randomly redistributes pixels in local area creating
a dissolving, dispersed look.
@param amount float
@range 0 100
@default 10
Scatter radius in pixels. Bind to energy for reactive dissolve.
@param randomize_per_frame bool
@default false
Different scatter pattern each frame (vs. static pattern).
@param grain float
@range 0 1
@default 0
Add film grain to scattered result.
@param seed int
@default 42
Random seed for reproducible patterns.
@state rng DeterministicRNG
Random number generator for displacement.
@state displacement_map ndarray
Cached displacement map for static mode.
@example
(effect scatter :amount 20)
@example
;; Reactive dissolve
(effect scatter :amount (bind energy :range [5 50]) :randomize_per_frame true)
"""
import numpy as np
from scipy import ndimage
from pathlib import Path
import sys
# Import DeterministicRNG from same directory
_effects_dir = Path(__file__).parent
if str(_effects_dir) not in sys.path:
sys.path.insert(0, str(_effects_dir))
from random import DeterministicRNG
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply scatter effect to a video frame.

    Randomly displaces each pixel within +/- `amount` pixels using a
    deterministic RNG, producing a dissolved look. The displacement map
    is cached in `state` unless randomize_per_frame is set.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - amount: scatter radius (default 10)
            - randomize_per_frame: animate pattern (default False)
            - grain: add film grain (default 0)
            - seed: random seed (default 42)
        state: Persistent state dict
            - rng: DeterministicRNG instance
            - displacement_map: cached (H, W, 2) float32 offsets
            - last_size: (H, W) the cached map was built for

    Returns:
        Tuple of (processed_frame, new_state)
    """
    amount = int(np.clip(params.get("amount", 10), 0, 100))
    randomize = params.get("randomize_per_frame", False)
    grain = params.get("grain", 0)
    seed = int(params.get("seed", 42))
    if state is None:
        state = {}
    if amount < 1:
        return frame, state
    # Initialize RNG (created once so the pattern is reproducible)
    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)
    rng = state["rng"]
    h, w = frame.shape[:2]
    # Generate or reuse displacement map.
    # NOTE(review): in static mode the cached map is only rebuilt on a
    # frame-size change, so a later change to `amount` keeps using the
    # old map — confirm this is intended.
    last_size = state.get("last_size")
    if randomize or "displacement_map" not in state or last_size != (h, w):
        # Generate new displacement map.
        # NOTE(review): per-pixel Python loop makes 2*H*W RNG calls per
        # map — slow on large frames, but vectorizing would change the
        # draw order and thus the deterministic pattern.
        displacement = np.zeros((h, w, 2), dtype=np.float32)
        for y in range(h):
            for x in range(w):
                displacement[y, x, 0] = rng.uniform(-amount, amount)
                displacement[y, x, 1] = rng.uniform(-amount, amount)
        state["displacement_map"] = displacement
        state["last_size"] = (h, w)
    displacement_map = state["displacement_map"]
    # Create coordinate grids
    y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32)
    # Apply displacement (channel 0 = y offset, channel 1 = x offset)
    new_y = y_coords + displacement_map[:, :, 0]
    new_x = x_coords + displacement_map[:, :, 1]
    # Clip to valid range
    new_y = np.clip(new_y, 0, h - 1)
    new_x = np.clip(new_x, 0, w - 1)
    # Sample from displaced positions, one channel at a time
    result = np.zeros_like(frame)
    for c in range(frame.shape[2] if len(frame.shape) > 2 else 1):
        if len(frame.shape) > 2:
            result[:, :, c] = ndimage.map_coordinates(
                frame[:, :, c],
                [new_y, new_x],
                order=1,
                mode='reflect'
            )
        else:
            result = ndimage.map_coordinates(
                frame,
                [new_y, new_x],
                order=1,
                mode='reflect'
            )
    # Add grain
    if grain > 0:
        # NOTE(review): nested comprehension draws 3*H*W random values in
        # Python — very slow on large frames.
        noise = np.array([[[rng.uniform(-grain * 20, grain * 20) for _ in range(3)]
                           for _ in range(w)]
                          for _ in range(h)])
        result = np.clip(result.astype(np.float32) + noise, 0, 255).astype(np.uint8)
    return result, state

View File

@@ -1,14 +0,0 @@
;; Sepia effect - vintage brown tone
;; Pipeline: load monday.webm, take a 10 s segment from t=0, apply sepia
;; toning, encode with x264.
;; Usage: python3 plan.py effects/sepia.sexp | python3 execute.py - -d . -o output.mp4
;; No parameters
(recipe "sepia"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  ;; Effect implementation lives in the referenced sexp file, not inline.
  (effect sepia :path "sexp_effects/effects/sepia.sexp")
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect sepia)))
  result)

View File

@@ -1,63 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect sharpen
@version 1.0.0
@author artdag
@description
Sharpening effect using unsharp mask technique. Enhances edges and detail.
Great for making footage pop on beats.
@param amount float
@range 0 5
@default 1.0
Sharpening intensity. 0 = no change, 1 = normal, 2+ = aggressive.
@param radius float
@range 0 10
@default 1
Radius of sharpening (affects edge thickness).
@example
(effect sharpen :amount 1.5)
@example
;; Sharpen on beats
(effect sharpen :amount (bind bass :range [0.5 2.0]))
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Sharpen a video frame with an unsharp mask.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - amount: sharpening intensity (default 1.0); <= 0 is a no-op
            - radius: edge radius (default 1)
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    amount = params.get("amount", 1.0)
    radius = params.get("radius", 1)

    if amount <= 0:
        return frame, state

    # Gaussian kernel size must be odd; derive it from the radius.
    kernel = max(1, int(radius)) * 2 + 1
    blurred = cv2.GaussianBlur(frame, (kernel, kernel), 0)

    # Unsharp mask: boost the difference between original and blur.
    src = frame.astype(np.float32)
    sharpened = src + amount * (src - blurred.astype(np.float32))
    return np.clip(sharpened, 0, 255).astype(np.uint8), state

View File

@@ -1,20 +0,0 @@
;; Sharpen effect - enhance edge detail
;; Usage: python3 plan.py effects/sharpen.sexp -p amount=2 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; amount: sharpening strength (0.5 to 5, default: 2)
;; higher = sharper edges
(recipe "sharpen"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
(effect sharpen :path "sexp_effects/effects/sharpen.sexp")
;; Default parameter (overridden by -p amount=N)
(def amount 2)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect sharpen :amount amount)))
result)

View File

@@ -1,233 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "scipy"]
# ///
"""
@effect shatter
@version 1.0.0
@author artdag
@description
Shatter effect. Explodes the image into flying pieces that move
outward from a center point. Great for beat drops and transitions.
@param intensity float
@range 0 1
@default 0.5
Explosion force (0 = no effect, 1 = full explosion).
@param num_pieces int
@range 10 200
@default 50
Number of shatter pieces.
@param center_x float
@range 0 1
@default 0.5
Explosion center X position.
@param center_y float
@range 0 1
@default 0.5
Explosion center Y position.
@param rotation_speed float
@range 0 5
@default 1.0
How fast pieces rotate as they fly.
@param gravity float
@range 0 2
@default 0.3
Downward pull on pieces.
@param fade_out bool
@default true
Fade pieces as they fly away.
@param seed int
@default 42
Random seed for piece positions.
@state pieces list
List of piece positions and velocities.
@example
(effect shatter :intensity 0.7 :num_pieces 80)
@example
;; Beat-reactive explosion
(effect shatter :intensity (bind onset :range [0 1]) :gravity 0.5)
"""
import numpy as np
from scipy import ndimage
from pathlib import Path
import sys
# Import DeterministicRNG from same directory
_effects_dir = Path(__file__).parent
if str(_effects_dir) not in sys.path:
sys.path.insert(0, str(_effects_dir))
from random import DeterministicRNG
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply shatter effect to a video frame.

    Splits the frame into grid pieces (initialized on first call) and
    flies each piece away from the explosion center with simple physics
    (velocity, gravity, rotation), compositing onto a black background
    and cross-fading with the original by `intensity`.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - intensity: explosion force 0-1 (default 0.5)
            - num_pieces: number of pieces 10-200 (default 50)
            - center_x / center_y: explosion center 0-1 (default 0.5)
            - rotation_speed: piece spin rate (default 1.0)
            - gravity: downward pull (default 0.3)
            - fade_out: fade pieces with distance (default True)
            - seed: random seed for piece layout (default 42)
            - _time: current time, injected by the executor
        state: Persistent state dict
            - pieces: piece tuples from _init_pieces
            - start_time: time when the pieces were initialized

    Returns:
        Tuple of (processed_frame, new_state)
    """
    intensity = np.clip(params.get("intensity", 0.5), 0, 1)
    num_pieces = max(10, min(int(params.get("num_pieces", 50)), 200))
    center_x = params.get("center_x", 0.5)
    center_y = params.get("center_y", 0.5)
    rotation_speed = params.get("rotation_speed", 1.0)
    gravity = params.get("gravity", 0.3)
    fade_out = params.get("fade_out", True)
    seed = int(params.get("seed", 42))
    t = params.get("_time", 0)
    if state is None:
        state = {}
    if intensity < 0.01:
        return frame, state
    h, w = frame.shape[:2]
    cx, cy = int(center_x * w), int(center_y * h)
    # Initialize pieces (re-initialized if num_pieces changes)
    if "pieces" not in state or len(state["pieces"]) != num_pieces:
        state["pieces"] = _init_pieces(w, h, num_pieces, cx, cy, seed)
        state["start_time"] = t
    pieces = state["pieces"]
    start_time = state.get("start_time", t)
    # Time elapsed since the explosion started
    effect_t = t - start_time
    # Create output with black background
    result = np.zeros_like(frame)
    # Single scalar driving all piece motion this frame
    time_factor = effect_t * intensity * 3.0
    # Draw each piece
    for piece in pieces:
        px, py, pw, ph, vx, vy, rot = piece
        if pw <= 0 or ph <= 0:
            continue
        # Calculate current position with physics (gravity is quadratic in time)
        curr_x = px + vx * time_factor * w * 0.5
        curr_y = py + vy * time_factor * h * 0.5 + gravity * time_factor ** 2 * h * 0.2
        curr_rot = rot * rotation_speed * time_factor * 180
        # Calculate alpha (fade out over distance travelled)
        distance = np.sqrt((curr_x - px)**2 + (curr_y - py)**2)
        alpha = 1.0 - (distance / max(w, h)) if fade_out else 1.0
        alpha = max(0, min(1, alpha))
        if alpha < 0.05:
            continue
        # Extract piece from original frame (clamped to frame bounds)
        px1, py1 = max(0, int(px)), max(0, int(py))
        px2, py2 = min(w, int(px + pw)), min(h, int(py + ph))
        if px2 <= px1 or py2 <= py1:
            continue
        piece_img = frame[py1:py2, px1:px2].copy()
        # Rotate piece (skipped for tiny rotations to save work)
        if abs(curr_rot) > 1:
            piece_img = ndimage.rotate(piece_img, curr_rot, reshape=False, mode='constant', cval=0)
        # Calculate destination
        dest_x = int(curr_x)
        dest_y = int(curr_y)
        piece_h, piece_w = piece_img.shape[:2]
        # Clip source/destination rectangles to frame bounds
        src_x1, src_y1 = 0, 0
        src_x2, src_y2 = piece_w, piece_h
        dst_x1, dst_y1 = dest_x, dest_y
        dst_x2, dst_y2 = dest_x + piece_w, dest_y + piece_h
        if dst_x1 < 0:
            src_x1 = -dst_x1
            dst_x1 = 0
        if dst_y1 < 0:
            src_y1 = -dst_y1
            dst_y1 = 0
        if dst_x2 > w:
            src_x2 -= (dst_x2 - w)
            dst_x2 = w
        if dst_y2 > h:
            src_y2 -= (dst_y2 - h)
            dst_y2 = h
        if dst_x2 <= dst_x1 or dst_y2 <= dst_y1:
            continue
        if src_x2 <= src_x1 or src_y2 <= src_y1:
            continue
        # Blend piece onto result with its alpha
        piece_region = piece_img[src_y1:src_y2, src_x1:src_x2]
        if piece_region.size == 0:
            continue
        result_region = result[dst_y1:dst_y2, dst_x1:dst_x2]
        if result_region.shape != piece_region.shape:
            continue
        result[dst_y1:dst_y2, dst_x1:dst_x2] = (
            result_region * (1 - alpha) + piece_region * alpha
        ).astype(np.uint8)
    # Blend with original based on intensity
    final = ((1 - intensity) * frame + intensity * result).astype(np.uint8)
    return final, state
def _init_pieces(w: int, h: int, num_pieces: int, cx: int, cy: int, seed: int) -> list:
    """Initialize shatter pieces with random positions and velocities.

    Lays pieces out on a grid covering the frame; each piece gets a
    velocity pointing away from (cx, cy) plus deterministic jitter and a
    random rotation rate. Returns (x, y, w, h, vx, vy, rot) tuples.
    """
    rng = DeterministicRNG(seed)

    # Grid dimensions roughly matching the frame aspect ratio.
    cols = max(1, int(np.sqrt(num_pieces * w / h)))
    rows = max(1, int(num_pieces / cols))
    cell_w = w // cols
    cell_h = h // rows

    pieces = []
    for row in range(rows):
        for col in range(cols):
            x = col * cell_w
            y = row * cell_h
            # Last column/row absorbs the integer-division remainder.
            width = cell_w + (w % cols if col == cols - 1 else 0)
            height = cell_h + (h % rows if row == rows - 1 else 0)

            # Unit vector away from the explosion center, plus jitter.
            mid_x = x + width // 2
            mid_y = y + height // 2
            dx = mid_x - cx
            dy = mid_y - cy
            dist = max(1, np.sqrt(dx * dx + dy * dy))
            vx = dx / dist + rng.uniform(-0.3, 0.3)
            vy = dy / dist + rng.uniform(-0.3, 0.3)
            spin = rng.uniform(-2, 2)

            pieces.append((x, y, width, height, vx, vy, spin))
    return pieces

View File

@@ -1,90 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect strobe
@version 1.0.0
@author artdag
@description
Strobe / Posterize Time effect. Locks video to a reduced frame rate,
creating a choppy, stop-motion look. Also known as frame hold.
@param frame_rate float
@range 1 60
@default 12
Target frame rate in fps. Lower = choppier.
@param sync_to_beat bool
@default false
If true, hold frames until next beat (overrides frame_rate).
@param beat_divisor int
@range 1 8
@default 1
Hold for 1/N beats when sync_to_beat is true.
@state held_frame ndarray
Currently held frame.
@state held_until float
Time until which to hold the frame.
@example
(effect strobe :frame_rate 8)
@example
;; Very choppy at 4 fps
(effect strobe :frame_rate 4)
@example
;; Beat-synced frame hold
(effect strobe :sync_to_beat true :beat_divisor 2)
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply a strobe / posterize-time effect to a video frame.

    Holds each captured frame for 1/frame_rate seconds (based on the
    `_time` value supplied in params), producing a choppy reduced-fps
    look. NOTE(review): sync_to_beat and beat_divisor are parsed but the
    beat-hold path is not implemented here — only frame_rate is used.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - frame_rate: target fps 1-60 (default 12)
            - sync_to_beat: use beat timing (default False, currently unused)
            - beat_divisor: beat fraction (default 1, currently unused)
        state: Persistent state dict
            - held_frame: currently held frame
            - held_until: hold expiry time

    Returns:
        Tuple of (processed_frame, new_state)
    """
    target_fps = max(1, min(params.get("frame_rate", 12), 60))
    sync_to_beat = params.get("sync_to_beat", False)
    beat_divisor = max(1, int(params.get("beat_divisor", 1)))

    # Current playback time, injected by the executor.
    now = params.get("_time", 0)

    if state is None:
        state = {}
    if "held_frame" not in state:
        state.update(held_frame=None, held_until=0.0, last_beat=-1)

    # Capture a fresh frame once the previous hold expires.
    hold_length = 1.0 / target_fps
    if now >= state["held_until"]:
        state["held_frame"] = frame.copy()
        state["held_until"] = now + hold_length

    held = state["held_frame"]
    return (held if held is not None else frame), state

View File

@@ -1,20 +0,0 @@
;; Strobe effect - flashing/blinking
;; Usage: python3 plan.py effects/strobe.sexp -p frequency=4 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; frequency: flashes per second (1 to 15, default: 4)
;; WARNING: high values may cause discomfort
(recipe "strobe"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
(effect strobe :path "sexp_effects/effects/strobe.sexp")
;; Default parameter (overridden by -p frequency=N)
(def frequency 4)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect strobe :frequency frequency)))
result)

View File

@@ -1,121 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect swirl
@version 1.0.0
@author artdag
@description
Spiral/vortex distortion that twists the image around a center point.
Creates whirlpool-like effects. Great for psychedelic/hypnotic visuals.
@param strength float
@range -10 10
@default 1.0
Swirl strength in radians. Positive = counter-clockwise, negative = clockwise.
@param radius float
@range 0.1 2
@default 0.5
Effect radius as fraction of image size. Larger = wider swirl.
@param center_x float
@range 0 1
@default 0.5
Horizontal center of swirl (0 = left, 1 = right).
@param center_y float
@range 0 1
@default 0.5
Vertical center of swirl (0 = top, 1 = bottom).
@param falloff string
@enum linear quadratic gaussian
@default quadratic
How swirl strength decreases from center:
- linear: constant decrease
- quadratic: sharper center, softer edges
- gaussian: smooth bell curve
@example
(effect swirl :strength 2)
@example
;; Reactive swirl
(effect swirl :strength (bind energy :range [0 5]))
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply a swirl (vortex) distortion to a video frame.

    Rotates each pixel around a configurable center by an amount that
    falls off with distance, then resamples the frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - strength: swirl amount in radians (default 1.0)
            - radius: effect radius as fraction of max dimension (default 0.5)
            - center_x: horizontal center 0-1 (default 0.5)
            - center_y: vertical center 0-1 (default 0.5)
            - falloff: linear/quadratic/gaussian (default quadratic)
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    strength = params.get("strength", 1.0)
    radius_frac = params.get("radius", 0.5)
    center_x = params.get("center_x", 0.5)
    center_y = params.get("center_y", 0.5)
    falloff = params.get("falloff", "quadratic")

    if strength == 0:
        return frame, state

    h, w = frame.shape[:2]
    cx, cy = w * center_x, h * center_y
    radius = max(w, h) * radius_frac

    # Polar coordinates of every pixel relative to the swirl center.
    ys, xs = np.mgrid[0:h, 0:w].astype(np.float64)
    dx = xs - cx
    dy = ys - cy
    dist = np.sqrt(dx**2 + dy**2)
    theta = np.arctan2(dy, dx)

    # Falloff weight: strongest at the center, zero at/beyond the radius.
    norm = dist / radius
    if falloff == "linear":
        weight = np.maximum(0, 1 - norm)
    elif falloff == "gaussian":
        weight = np.exp(-norm**2 * 2)
    else:  # quadratic (default)
        weight = np.maximum(0, 1 - norm**2)

    # Twist the angle and map back to cartesian sample coordinates.
    twisted = theta + strength * weight
    map_x = (cx + dist * np.cos(twisted)).astype(np.float32)
    map_y = (cy + dist * np.sin(twisted)).astype(np.float32)

    result = cv2.remap(
        frame, map_x, map_y,
        cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_REFLECT
    )
    return result, state

View File

@@ -1,20 +0,0 @@
;; Swirl effect - spiral distortion from center
;; Usage: python3 plan.py effects/swirl.sexp -p strength=3 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; strength: swirl intensity (-10 to 10, default: 3)
;; positive = clockwise, negative = counter-clockwise
(recipe "swirl"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
(effect swirl :path "sexp_effects/effects/swirl.sexp")
;; Default parameter (overridden by -p strength=N)
(def strength 3)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect swirl :strength strength)))
result)

View File

@@ -1,85 +0,0 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect threshold
@version 1.0.0
@author artdag
@description
Threshold effect. Converts to high-contrast black and white.
Creates stark, graphic look by converting grayscale to pure
black/white based on a threshold value.
@param level int
@range 0 255
@default 128
Threshold level. Pixels above = white, below = black.
@param invert bool
@default false
Swap black and white.
@param color_mode string
@enum bw color
@default bw
Output mode:
- bw: pure black and white
- color: keep original colors where above threshold
@example
(effect threshold :level 100)
@example
;; Beat-reactive threshold
(effect threshold :level (bind bass :range [80 180]) :invert true)
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply a threshold effect to a video frame.

    Compares per-pixel luminance (channel mean) against `level` and
    outputs either pure black/white or the original colors masked to
    black below the threshold.

    Args:
        frame: Input frame as numpy array, (H, W, 3) RGB uint8 or (H, W)
            grayscale uint8
        params: Effect parameters
            - level: threshold 0-255 (default 128)
            - invert: swap black/white (default False)
            - color_mode: bw or color (default bw)
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state); output shape matches input.
    """
    level = int(np.clip(params.get("level", 128), 0, 255))
    invert = params.get("invert", False)
    color_mode = params.get("color_mode", "bw")

    if state is None:
        state = {}

    # Luminance proxy: mean across channels (or the plane itself if 2D).
    if frame.ndim == 3:
        gray = np.mean(frame, axis=2)
    else:
        gray = frame

    # Apply threshold
    mask = gray > level
    if invert:
        mask = ~mask

    # Broadcast the 2D mask over the channel axis only when one exists.
    # (Previously a (H, W, 1) mask was combined with a (H, W) frame in
    # color mode, which broadcast incorrectly for grayscale input; bw
    # mode also returned a trailing singleton axis for 2D input.)
    mask_b = mask[:, :, np.newaxis] if frame.ndim == 3 else mask

    if color_mode == "bw":
        # Pure black and white
        result = np.where(mask_b, 255, 0).astype(np.uint8)
        if frame.ndim == 3:
            result = np.repeat(result, frame.shape[2], axis=2)
    else:
        # Keep original colors above the threshold, black below
        result = np.where(mask_b, frame, 0).astype(np.uint8)

    return result, state

View File

@@ -1,20 +0,0 @@
;; Threshold effect - convert to pure black and white
;; Usage: python3 plan.py effects/threshold.sexp -p level=128 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; level: threshold cutoff (0 to 255, default: 128)
;; pixels below = black, above = white
(recipe "threshold"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
(effect threshold :path "sexp_effects/effects/threshold.sexp")
;; Default parameter (overridden by -p level=N)
(def level 128)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect threshold :level level)))
result)

Some files were not shown because too many files have changed in this diff Show More