Initial commit: video effects processing system

Add S-expression based video effects pipeline with modular effect
definitions, constructs, and recipe files.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
gilesb
2026-01-19 12:34:45 +00:00
commit 406cc7c0c7
171 changed files with 13406 additions and 0 deletions

205
effects/ascii_art.py Normal file
View File

@@ -0,0 +1,205 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect ascii_art
@version 1.0.0
@author artdag
@description
ASCII art effect - converts video to ASCII character representation.
Renders the video using ASCII characters based on brightness,
creating a retro terminal aesthetic.
@param char_size int
@range 4 32
@default 8
Size of each character cell in pixels. Sync to bass for reactive sizing.
@param alphabet string
@enum standard blocks cyrillic greek japanese braille
@default standard
Character set to use for rendering. Each has different visual density.
@param color_mode string
@enum mono color invert
@default color
Color rendering mode:
- mono: white on black
- color: preserve source colors
- invert: dark text on colored background
@param contrast_boost float
@range 1 3
@default 1.5
Enhance contrast for better character separation.
@param background list
@default [0, 0, 0]
Background color RGB.
@example
(effect ascii_art :char_size 8 :color_mode "color")
@example
;; Japanese characters, reactive sizing
(effect ascii_art :alphabet "japanese" :char_size (bind bass :range [6 16]))
@example
;; Braille pattern for high detail
(effect ascii_art :alphabet "braille" :char_size 4)
"""
import numpy as np
import cv2
# Character sets ordered by visual density (light to dark). Index 0 must be
# the lightest glyph (space): process_frame maps cell brightness linearly
# onto the index, so brighter cells pick denser glyphs.
ALPHABETS = {
    # Classic ASCII gradient
    "standard": " .`'^\",:;Il!i><~+_-?][}{1)(|/tfjrxnuvczXYUJCLQ0OZmwqpdbkhao*#MW&8%B@$",
    # Unicode block elements - naturally ordered by fill
    "blocks": " ░▒▓█",
    # Cyrillic - ordered by visual complexity
    "cyrillic": " ·гтпрсьоеаилнкчуцбдвжзмъыюяфщшэ",
    # Greek - ordered by visual weight
    "greek": " ·ιτορεαηυικλνσςπμβγδζθξφψωΣΩΨΦ",
    # Japanese Katakana - ordered by stroke complexity
    "japanese": " ·ノ一ヘイコニハヒフホメヨワヲンリルレロカキクケサシスセソタチツテト",
    # Braille patterns - high detail
    # NOTE(review): Hershey fonts may not render braille glyphs; the atlas
    # builder leaves unrenderable glyphs as empty masks - confirm intended.
    "braille": " ⠁⠂⠃⠄⠅⠆⠇⠈⠉⠊⠋⠌⠍⠎⠏⠐⠑⠒⠓⠔⠕⠖⠗⠘⠙⠚⠛⠜⠝⠞⠟⠠⠡⠢⠣⠤⠥⠦⠧⠨⠩⠪⠫⠬⠭⠮⠯⠰⠱⠲⠳⠴⠵⠶⠷⠸⠹⠺⠻⠼⠽⠾⠿",
}
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply ASCII art effect to a video frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - char_size: character cell size (default 8, clamped to 4-32)
            - alphabet: character set name (default "standard")
            - color_mode: mono/color/invert (default "color")
            - contrast_boost: enhance contrast (default 1.5, clamped to 1-3)
            - background: RGB list (default [0, 0, 0])
        state: Persistent state dict (caches the rendered glyph atlas)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    char_size = max(4, min(int(params.get("char_size", 8)), 32))
    alphabet_name = params.get("alphabet", "standard")
    color_mode = params.get("color_mode", "color")
    contrast_boost = max(1.0, min(params.get("contrast_boost", 1.5), 3.0))
    background = params.get("background", [0, 0, 0])
    if state is None:
        state = {}
    # Get character set (unknown names fall back to "standard")
    char_set = ALPHABETS.get(alphabet_name, ALPHABETS["standard"])
    # Get or create character atlas cache.
    # BUG FIX: the original condition tested `"atlas_cache" not in state`,
    # but the atlas is stored under the key "atlas", so the test was always
    # true and the atlas was re-rendered on every single frame. Checking
    # "atlas" makes the cache actually hit when char_size/alphabet are stable.
    cache_key = f"{char_size}_{alphabet_name}"
    if "atlas" not in state or state.get("cache_key") != cache_key:
        state["atlas"] = _create_char_atlas(char_size, char_set)
        state["cache_key"] = cache_key
    atlas = state["atlas"]
    h, w = frame.shape[:2]
    cols = w // char_size
    rows = h // char_size
    if cols < 1 or rows < 1:
        # Frame smaller than one character cell: nothing to render.
        return frame, state
    # Crop frame to fit the character grid
    grid_h, grid_w = rows * char_size, cols * char_size
    frame_cropped = frame[:grid_h, :grid_w]
    # Downsample to get average color per cell
    reshaped = frame_cropped.reshape(rows, char_size, cols, char_size, 3)
    cell_colors = reshaped.mean(axis=(1, 3)).astype(np.uint8)
    # Convert to grayscale for brightness mapping (Rec. 601 weights)
    cell_gray = 0.299 * cell_colors[:, :, 0] + 0.587 * cell_colors[:, :, 1] + 0.114 * cell_colors[:, :, 2]
    # Apply contrast boost around mid-gray
    if contrast_boost > 1:
        cell_gray = (cell_gray - 128) * contrast_boost + 128
        cell_gray = np.clip(cell_gray, 0, 255)
    # Map brightness to character indices (bright cells -> dense glyphs)
    char_indices = ((cell_gray / 255) * (len(char_set) - 1)).astype(np.int32)
    char_indices = np.clip(char_indices, 0, len(char_set) - 1)
    # Create output frame filled with the background color
    if isinstance(background, (list, tuple)) and len(background) >= 3:
        bg = background[:3]
    else:
        bg = [0, 0, 0]
    result = np.full((grid_h, grid_w, 3), bg, dtype=np.uint8)
    # Render characters cell by cell
    for row in range(rows):
        for col in range(cols):
            char_idx = char_indices[row, col]
            char = char_set[char_idx]
            char_mask = atlas.get(char)
            if char_mask is None:
                continue
            y1, x1 = row * char_size, col * char_size
            if color_mode == "mono":
                color = np.array([255, 255, 255], dtype=np.uint8)
            elif color_mode == "invert":
                # Colored background, dark text
                result[y1:y1+char_size, x1:x1+char_size] = cell_colors[row, col]
                color = np.array([0, 0, 0], dtype=np.uint8)
            else:  # color
                color = cell_colors[row, col]
            # Apply character mask (space has an empty mask, skip it)
            if char != ' ':
                mask = char_mask > 0
                result[y1:y1+char_size, x1:x1+char_size][mask] = color
    # Pad back to original size if the grid did not cover the frame exactly
    if result.shape[0] < h or result.shape[1] < w:
        padded = np.full((h, w, 3), bg, dtype=np.uint8)
        padded[:grid_h, :grid_w] = result
        result = padded
    return result, state
def _create_char_atlas(char_size: int, char_set: str) -> dict:
    """Pre-render every character in *char_set* as a char_size x char_size mask.

    Returns:
        Dict mapping each character to a uint8 grayscale image (0 background,
        anti-aliased glyph values up to 255) used as a per-cell stencil.
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    # Scale chosen so glyphs roughly fill the cell; stroke grows with cell size.
    font_scale = char_size / 20.0
    thickness = max(1, int(char_size / 10))
    atlas = {}
    for char in char_set:
        char_img = np.zeros((char_size, char_size), dtype=np.uint8)
        if char != ' ':
            try:
                (text_w, text_h), baseline = cv2.getTextSize(char, font, font_scale, thickness)
                text_x = (char_size - text_w) // 2
                text_y = (char_size + text_h) // 2
                cv2.putText(char_img, char, (text_x, text_y), font, font_scale,
                            255, thickness, cv2.LINE_AA)
            except Exception:
                # BUG FIX: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit. Glyphs the Hershey font cannot
                # render are deliberately left as empty masks (best-effort).
                pass
        atlas[char] = char_img
    return atlas

34
effects/ascii_art.sexp Normal file
View File

@@ -0,0 +1,34 @@
;; ASCII art effect - render as text characters (music reactive)
;; Usage: python3 plan.py effects/ascii_art.sexp | python3 execute.py - -d . -o - | mpv --fs -
;;
;; Parameters:
;;   color_mode: coloring mode ("mono", "color", "invert", default: "color")
;;               (these are the modes declared by the ascii_art effect)
;;   char_size is bound to energy (wobbles with overall loudness)
(recipe "ascii_art"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
(effect ascii_art :path "sexp_effects/effects/ascii_art.sexp")
(analyzer energy :path "../artdag-analyzers/energy/analyzer.py")
;; Default parameter
(def color_mode "color")
(def video (source :path "monday.webm"))
(def audio (source :path "dizzy.mp3"))
(def clip (-> video (segment :start 0 :duration 10)))
;; Audio from 60s where it's louder
(def audio-clip (-> audio (segment :start 60 :duration 10)))
;; Analyze just the audio clip - times are 0-10s relative to clip
(def energy-data (-> audio-clip (analyze energy)))
;; char_size bound to energy - no offset needed since analysis is on clip
;; NOTE(review): the effect clamps char_size to [4, 32], so the lower bound
;; of this bind range (2) is clamped up to 4 at runtime - confirm intended.
(def result (-> clip
(effect ascii_art
:char_size (bind energy-data values :range [2 32])
:color_mode color_mode)))
(mux result audio-clip))

33
effects/ascii_zones.sexp Normal file
View File

@@ -0,0 +1,33 @@
;; ASCII zones effect - region-based ASCII rendering (music reactive)
;; Usage: python3 plan.py effects/ascii_zones.sexp -p char_size=10 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   char_size: character cell size in pixels (4 to 20, default: 10)
(recipe "ascii_zones"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
(effect ascii_zones :path "sexp_effects/effects/ascii_zones.sexp")
(analyzer energy :path "../artdag-analyzers/energy/analyzer.py")
;; Default parameter (overridden by -p char_size=N)
;; NOTE(review): `char_size` defined here is never referenced below - the
;; bind expression replaces it, so the -p override has no effect. Confirm.
(def char_size 10)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def audio (source :path "dizzy.mp3"))
;; Audio from 60s where the track is louder
(def audio-clip (-> audio (segment :start 60 :duration 10)))
;; Analyze just the audio clip - times are 0-10s relative to clip
(def energy-data (-> audio-clip (analyze energy)))
;; char_size bound to energy - no offset needed since analysis is on clip
(def result (-> clip
(effect ascii_zones
:char_size (bind energy-data values :range [2 32]))))
(mux result audio-clip))

164
effects/beam.py Normal file
View File

@@ -0,0 +1,164 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect beam
@version 1.0.0
@author artdag
@description
Beam effect. Creates animated light beams / lasers from a starting
point to an ending point with glow effect.
@param start_x float
@range 0 1
@default 0
Beam start X position (0-1).
@param start_y float
@range 0 1
@default 0.5
Beam start Y position (0-1).
@param end_x float
@range 0 1
@default 1
Beam end X position (0-1).
@param end_y float
@range 0 1
@default 0.5
Beam end Y position (0-1).
@param thickness float
@range 1 50
@default 5
Beam core thickness in pixels.
@param glow_radius float
@range 0 100
@default 20
Outer glow size in pixels.
@param color list
@default [0, 255, 255]
Beam color RGB (default cyan).
@param intensity float
@range 0 2
@default 1.0
Beam brightness.
@param pulse bool
@default false
Enable pulsing animation.
@param pulse_speed float
@range 0.1 10
@default 2.0
Pulse animation speed.
@example
(effect beam :start_x 0 :start_y 0.5 :end_x 1 :end_y 0.5)
@example
;; Reactive laser
(effect beam :intensity (bind bass :range [0.5 2]) :color [255 0 0])
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
"""
Apply beam effect to a video frame.
Args:
frame: Input frame as numpy array (H, W, 3) RGB uint8
params: Effect parameters
state: Persistent state dict
Returns:
Tuple of (processed_frame, new_state)
"""
start_x = params.get("start_x", 0)
start_y = params.get("start_y", 0.5)
end_x = params.get("end_x", 1)
end_y = params.get("end_y", 0.5)
thickness = params.get("thickness", 5)
glow_radius = params.get("glow_radius", 20)
color = params.get("color", [0, 255, 255])
intensity = params.get("intensity", 1.0)
pulse = params.get("pulse", False)
pulse_speed = params.get("pulse_speed", 2.0)
t = params.get("_time", 0)
if state is None:
state = {}
h, w = frame.shape[:2]
result = frame.copy().astype(np.float32)
# Calculate beam endpoints in pixels
x1, y1 = int(start_x * w), int(start_y * h)
x2, y2 = int(end_x * w), int(end_y * h)
# Apply pulse modulation
if pulse:
pulse_mod = 0.5 + 0.5 * np.sin(t * pulse_speed * 2 * np.pi)
intensity = intensity * pulse_mod
# Create coordinate grids
y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32)
# Calculate distance from each pixel to the line segment
line_vec = np.array([x2 - x1, y2 - y1], dtype=np.float32)
line_len = np.sqrt(line_vec[0]**2 + line_vec[1]**2)
if line_len < 1:
return frame, state
line_unit = line_vec / line_len
# Vector from start to each pixel
px = x_coords - x1
py = y_coords - y1
# Project onto line
proj_len = px * line_unit[0] + py * line_unit[1]
proj_len = np.clip(proj_len, 0, line_len)
# Closest point on line
closest_x = x1 + proj_len * line_unit[0]
closest_y = y1 + proj_len * line_unit[1]
# Distance to closest point
dist = np.sqrt((x_coords - closest_x)**2 + (y_coords - closest_y)**2)
# Get beam color
if isinstance(color, (list, tuple)) and len(color) >= 3:
beam_color = np.array(color[:3], dtype=np.float32)
else:
beam_color = np.array([0, 255, 255], dtype=np.float32)
# Core beam (bright center)
core_mask = dist < thickness
core_intensity = intensity * (1 - dist[core_mask] / max(1, thickness))
for c in range(3):
result[core_mask, c] = np.clip(
result[core_mask, c] + beam_color[c] * core_intensity,
0, 255
)
# Glow (fading outer region)
glow_mask = (dist >= thickness) & (dist < thickness + glow_radius)
glow_dist = dist[glow_mask] - thickness
glow_intensity = intensity * 0.5 * (1 - glow_dist / max(1, glow_radius)) ** 2
for c in range(3):
result[glow_mask, c] = np.clip(
result[glow_mask, c] + beam_color[c] * glow_intensity,
0, 255
)
return result.astype(np.uint8), state

108
effects/bloom.py Normal file
View File

@@ -0,0 +1,108 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect bloom
@version 1.0.0
@author artdag
@description
Bloom effect. Creates a soft glow around bright areas, simulating
camera lens bloom. Great for dreamy or ethereal looks.
@param intensity float
@range 0 2
@default 0.5
Bloom brightness.
@param threshold int
@range 0 255
@default 200
Brightness threshold for bloom (pixels above this glow).
@param radius int
@range 1 50
@default 15
Blur radius for the glow.
@param soft_threshold bool
@default true
Use soft threshold (gradual) vs hard threshold.
@param color_tint list
@default [255, 255, 255]
Tint color for the bloom.
@example
(effect bloom :intensity 0.7 :threshold 180)
@example
;; Warm bloom
(effect bloom :intensity 0.6 :color_tint [255 200 150])
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Add a soft glow (bloom) around the bright areas of a video frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - intensity: bloom brightness (default 0.5); <= 0 disables
            - threshold: brightness cutoff 0-255 (default 200)
            - radius: Gaussian blur radius in pixels (default 15)
            - soft_threshold: gradual vs hard cutoff (default True)
            - color_tint: RGB tint applied to the glow (default white)
        state: Persistent state dict (unused, passed through)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    intensity = params.get("intensity", 0.5)
    threshold = int(params.get("threshold", 200))
    radius = max(1, int(params.get("radius", 15)))
    soft = params.get("soft_threshold", True)
    tint_param = params.get("color_tint", [255, 255, 255])

    state = {} if state is None else state
    if intensity <= 0:
        # Nothing to add; return the frame untouched.
        return frame, state

    base = frame.astype(np.float32)

    # Per-pixel luminance (Rec. 601 weights).
    lum = 0.299 * base[:, :, 0] + 0.587 * base[:, :, 1] + 0.114 * base[:, :, 2]

    # Bright-area mask: gradual ramp above the threshold, or a binary cut.
    if soft:
        mask = np.clip((lum - threshold) / (255 - threshold + 1e-6), 0, 1)
    else:
        mask = (lum > threshold).astype(np.float32)

    # Isolate the bright regions and blur them into a glow.
    glow = base * mask[:, :, np.newaxis]
    kernel = radius * 2 + 1
    glow = cv2.GaussianBlur(glow, (kernel, kernel), 0)

    # Optional color tint applied to the glow only.
    if isinstance(tint_param, (list, tuple)) and len(tint_param) >= 3:
        glow *= np.array(tint_param[:3], dtype=np.float32) / 255.0

    # Additive blend back onto the source.
    blended = base + glow * intensity
    return np.clip(blended, 0, 255).astype(np.uint8), state

21
effects/bloom.sexp Normal file
View File

@@ -0,0 +1,21 @@
;; Bloom effect - soft glow around bright areas
;; Usage: python3 plan.py effects/bloom.sexp -p intensity=0.6 -p radius=20 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   intensity: bloom strength (0.0 to 2.0, default: 0.6)
;;   radius: bloom spread in pixels (5 to 50, default: 20)
(recipe "bloom"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
(effect bloom :path "sexp_effects/effects/bloom.sexp")
;; Default parameters (overridden by -p intensity=N -p radius=N)
(def intensity 0.6)
(def radius 20)
(def video (source :path "monday.webm"))
;; First 10 seconds of the source video
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect bloom :intensity intensity :radius radius)))
result)

54
effects/blur.py Normal file
View File

@@ -0,0 +1,54 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect blur
@version 1.0.0
@author artdag
@description
Gaussian blur effect. Softens the image by averaging neighboring pixels.
Useful for dreamy effects or reducing detail on quiet parts.
@param radius float
@range 0 50
@default 5
Blur radius in pixels. Higher = more blur. Must be positive.
@example
(effect blur :radius 10)
@example
;; Blur on quiet, sharp on loud
(effect blur :radius (bind energy :range [20 0]))
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply a Gaussian blur to a video frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - radius: blur radius in pixels (default 5); <= 0 disables
        state: Persistent state dict (unused, passed through)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    radius = params.get("radius", 5)
    # Non-positive radius is a no-op.
    if radius <= 0:
        return frame, state
    # OpenCV requires an odd kernel size.
    kernel = int(radius) * 2 + 1
    blurred = cv2.GaussianBlur(frame, (kernel, kernel), 0)
    return blurred, state

20
effects/blur.sexp Normal file
View File

@@ -0,0 +1,20 @@
;; Blur effect - gaussian blur
;; Usage: python3 plan.py effects/blur.sexp -p radius=10 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   radius: blur radius in pixels (1 to 50, default: 10)
;;           higher = more blurry
(recipe "blur"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
(effect blur :path "sexp_effects/effects/blur.sexp")
;; Default parameter (overridden by -p radius=N)
(def radius 10)
(def video (source :path "monday.webm"))
;; First 10 seconds of the source video
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect blur :radius radius)))
result)

52
effects/brightness.py Normal file
View File

@@ -0,0 +1,52 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect brightness
@version 1.0.0
@author artdag
@description
Adjusts the brightness of each frame by multiplying pixel values.
Values > 1.0 brighten, < 1.0 darken. Useful for pulse effects synced to music.
@param factor float
@range 0 3
@default 1.0
Brightness multiplier. 0 = black, 1 = unchanged, 2 = double brightness.
Bind to bass/energy for reactive brightness pulses.
@example
(effect brightness :factor 1.5)
@example
;; Pulse brighter on bass hits
(effect brightness :factor (bind bass :range [1.0 2.0] :transform sqrt))
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Scale the brightness of a video frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - factor: brightness multiplier (default 1.0); 0 = black,
              1 = unchanged, 2 = double brightness
        state: Persistent state dict (unused, passed through)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    factor = params.get("factor", 1.0)
    # Fast path: a factor of exactly 1.0 leaves the frame untouched.
    if factor == 1.0:
        return frame, state
    # Scale in float, then clip back into the valid uint8 range.
    scaled = frame.astype(np.float32) * factor
    return np.clip(scaled, 0, 255).astype(np.uint8), state

20
effects/brightness.sexp Normal file
View File

@@ -0,0 +1,20 @@
;; Brightness effect - scale image brightness
;; Usage: python3 plan.py effects/brightness.sexp [-p factor=1.3] | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   factor: brightness multiplier (0.0 to 3.0, default: 1.3)
;;           <1 = darker, 1 = unchanged, >1 = brighter
(recipe "brightness"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
(effect brightness :path "sexp_effects/effects/brightness.sexp")
;; Default parameter (overridden by -p factor=N)
;; BUG FIX: the brightness effect declares :factor (a multiplier, default
;; 1.0); the previous :amount keyword does not exist on the effect, so it
;; was silently ignored and the recipe was a no-op.
(def factor 1.3)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect brightness :factor factor)))
result)

117
effects/cartoon.py Normal file
View File

@@ -0,0 +1,117 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "scipy"]
# ///
"""
@effect cartoon
@version 1.0.0
@author artdag
@description
Cartoon / Cel-shaded effect. Simplifies colors into flat regions
and adds dark edge outlines for a hand-drawn cartoon appearance.
@param detail float
@range 0.1 1.0
@default 0.5
Edge detection sensitivity. Higher = more edges detected.
@param edge_thickness int
@range 1 5
@default 1
Outline thickness in pixels.
@param color_levels int
@range 2 32
@default 6
Number of color levels per channel.
@param edge_color list
@default [0, 0, 0]
RGB color for edges (default black).
@param blur_size int
@range 0 10
@default 2
Pre-blur for smoother color regions.
@example
(effect cartoon :detail 0.6 :color_levels 4)
@example
;; Thick outlines, fewer colors
(effect cartoon :edge_thickness 3 :color_levels 3 :blur_size 4)
"""
import numpy as np
from scipy import ndimage
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
"""
Apply cartoon effect to a video frame.
Args:
frame: Input frame as numpy array (H, W, 3) RGB uint8
params: Effect parameters
- detail: edge sensitivity 0.1-1.0 (default 0.5)
- edge_thickness: outline thickness (default 1)
- color_levels: posterization levels (default 6)
- edge_color: RGB tuple (default [0,0,0])
- blur_size: pre-blur amount (default 2)
state: Persistent state dict
Returns:
Tuple of (processed_frame, new_state)
"""
detail = np.clip(params.get("detail", 0.5), 0.1, 1.0)
edge_thickness = max(1, min(int(params.get("edge_thickness", 1)), 5))
color_levels = max(2, min(int(params.get("color_levels", 6)), 32))
edge_color = params.get("edge_color", [0, 0, 0])
blur_size = max(0, int(params.get("blur_size", 2)))
if state is None:
state = {}
h, w = frame.shape[:2]
result = frame.copy().astype(np.float32)
# Step 1: Blur to reduce noise and create smoother regions
if blur_size > 0:
for c in range(3):
result[:, :, c] = ndimage.uniform_filter(result[:, :, c], size=blur_size)
# Step 2: Posterize colors (reduce to N levels)
step = 256 / color_levels
result = (np.floor(result / step) * step).astype(np.uint8)
# Step 3: Detect edges using Sobel
gray = np.mean(frame, axis=2).astype(np.float32)
sobel_x = ndimage.sobel(gray, axis=1)
sobel_y = ndimage.sobel(gray, axis=0)
edges = np.sqrt(sobel_x**2 + sobel_y**2)
# Normalize and threshold
edge_max = edges.max()
if edge_max > 0:
edges = edges / edge_max
edge_threshold = 1.0 - detail
edge_mask = edges > edge_threshold
# Dilate edges for thickness
if edge_thickness > 1:
struct = ndimage.generate_binary_structure(2, 1)
for _ in range(edge_thickness - 1):
edge_mask = ndimage.binary_dilation(edge_mask, structure=struct)
# Step 4: Apply edge color
if isinstance(edge_color, (list, tuple)) and len(edge_color) >= 3:
color = np.array(edge_color[:3], dtype=np.uint8)
else:
color = np.array([0, 0, 0], dtype=np.uint8)
result[edge_mask] = color
return result, state

115
effects/chromatic.py Normal file
View File

@@ -0,0 +1,115 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect chromatic
@version 1.0.0
@author artdag
@description
Chromatic aberration effect. Creates color fringing by offsetting
RGB channels radially from the center (lens distortion simulation).
@param strength float
@range 0 50
@default 10
Aberration strength. Bind to bass for reactive effect.
@param center_x float
@range 0 1
@default 0.5
Aberration center X.
@param center_y float
@range 0 1
@default 0.5
Aberration center Y.
@param radial bool
@default true
If true, aberration increases from center to edges.
@example
(effect chromatic :strength 20)
@example
;; Beat-reactive chromatic aberration
(effect chromatic :strength (bind bass :range [0 30]))
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply chromatic aberration effect to a video frame.

    In radial mode the red channel is displaced outward and the blue channel
    inward along the direction from the chosen center (green stays put),
    producing the color fringing of an imperfect lens. In non-radial mode a
    uniform horizontal channel shift is used instead.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - strength: aberration amount in pixels (default 10)
            - center_x: center X 0-1 (default 0.5)
            - center_y: center Y 0-1 (default 0.5)
            - radial: increase from center (default True)
        state: Persistent state dict (unused, passed through)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    strength = params.get("strength", 10)
    center_x = params.get("center_x", 0.5)
    center_y = params.get("center_y", 0.5)
    radial = params.get("radial", True)
    if state is None:
        state = {}
    if strength == 0:
        # No displacement requested: pass the frame through untouched.
        return frame, state
    h, w = frame.shape[:2]
    r, g, b = frame[:, :, 0], frame[:, :, 1], frame[:, :, 2]
    if radial:
        # Create distance-from-center map
        y_coords, x_coords = np.ogrid[:h, :w]
        cx, cy = w * center_x, h * center_y
        dist = np.sqrt((x_coords - cx)**2 + (y_coords - cy)**2)
        # NOTE(review): max_dist is the distance to the top-left corner only;
        # with an off-center setting the farthest corner may be a different
        # one, so dist_normalized can exceed 1 there - confirm intended.
        max_dist = np.sqrt(cx**2 + cy**2)
        dist_normalized = (dist / max_dist).astype(np.float32)
        # Create coordinate maps for remapping
        map_x = np.tile(np.arange(w, dtype=np.float32), (h, 1))
        map_y = np.tile(np.arange(h, dtype=np.float32).reshape(-1, 1), (1, w))
        # Unit direction from center (epsilon avoids divide-by-zero at center)
        dx = (map_x - cx) / (dist + 1e-6)
        dy = (map_y - cy) / (dist + 1e-6)
        # Apply radial offset to red channel (outward)
        r_offset = strength * dist_normalized
        r_map_x = (map_x + dx * r_offset).astype(np.float32)
        r_map_y = (map_y + dy * r_offset).astype(np.float32)
        r_shifted = cv2.remap(r, r_map_x, r_map_y,
                              cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
        # Apply radial offset to blue channel (inward)
        b_offset = -strength * dist_normalized
        b_map_x = (map_x + dx * b_offset).astype(np.float32)
        b_map_y = (map_y + dy * b_offset).astype(np.float32)
        b_shifted = cv2.remap(b, b_map_x, b_map_y,
                              cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
        return np.stack([r_shifted, g, b_shifted], axis=-1).astype(np.uint8), state
    else:
        # Simple uniform offset: red shifted right, blue shifted left
        offset = int(strength)
        M_r = np.float32([[1, 0, offset], [0, 1, 0]])
        M_b = np.float32([[1, 0, -offset], [0, 1, 0]])
        r_shifted = cv2.warpAffine(r, M_r, (w, h), borderMode=cv2.BORDER_REPLICATE)
        b_shifted = cv2.warpAffine(b, M_b, (w, h), borderMode=cv2.BORDER_REPLICATE)
        return np.stack([r_shifted, g, b_shifted], axis=-1).astype(np.uint8), state

21
effects/color-adjust.sexp Normal file
View File

@@ -0,0 +1,21 @@
;; Color adjust effect - combined brightness and contrast
;; Usage: python3 plan.py effects/color-adjust.sexp -p brightness=20 -p contrast=1.2 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   brightness: brightness adjustment (-255 to 255, default: 20)
;;   contrast: contrast multiplier (0.0 to 3.0, default: 1.2)
;; NOTE(review): the color-adjust effect implementation is not in this repo
;; view; confirm its parameters really are :brightness and :contrast.
(recipe "color-adjust"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
(effect color-adjust :path "sexp_effects/effects/color-adjust.sexp")
;; Default parameters (overridden by -p brightness=N -p contrast=N)
(def brightness 20)
(def contrast 1.2)
(def video (source :path "monday.webm"))
;; First 10 seconds of the source video
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect color-adjust :brightness brightness :contrast contrast)))
result)

104
effects/color_cycle.py Normal file
View File

@@ -0,0 +1,104 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect color_cycle
@version 1.0.0
@author artdag
@description
Color cycle effect. Shifts all hues over time creating psychedelic
rainbow cycling. Great for trippy visuals.
@param speed float
@range 0 10
@default 1
Cycle speed (rotations per second).
@param offset float
@range 0 360
@default 0
Initial hue offset in degrees.
@param saturation_boost float
@range 0 2
@default 1
Saturation multiplier.
@param mode string
@enum all highlights shadows midtones
@default all
Which tones to affect.
@example
(effect color_cycle :speed 0.5)
@example
;; Beat-synced color shift
(effect color_cycle :offset (bind beat_position :range [0 360]))
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply color cycle effect to a video frame.

    Rotates hues over time via an HSV hue shift, optionally restricted to
    highlights, shadows, or midtones using luminance masks.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - speed: rotations per second (default 1)
            - offset: initial hue offset in degrees (default 0)
            - saturation_boost: saturation multiplier (default 1, clamped 0-2)
            - mode: all/highlights/shadows/midtones (default all)
            - _time: frame timestamp in seconds, supplied by the pipeline
        state: Persistent state dict (unused, passed through)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    speed = params.get("speed", 1)
    offset = params.get("offset", 0)
    saturation_boost = max(0, min(params.get("saturation_boost", 1), 2))
    mode = params.get("mode", "all")
    t = params.get("_time", 0)
    if state is None:
        state = {}
    # Hue shift in degrees: initial offset plus `speed` rotations per second.
    hue_shift = int((offset + speed * t * 360) % 360)
    # Convert to HSV (OpenCV uses BGR, our frame is RGB)
    frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    hsv = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV).astype(np.float32)
    if mode == "all":
        # Shift all hues (OpenCV's hue channel spans 0-179, hence degrees / 2)
        hsv[:, :, 0] = (hsv[:, :, 0] + hue_shift / 2) % 180
        hsv[:, :, 1] = np.clip(hsv[:, :, 1] * saturation_boost, 0, 255)
    else:
        # Calculate luminance mask from the HSV value channel
        lum = hsv[:, :, 2] / 255.0
        if mode == "highlights":
            mask = np.clip((lum - 0.67) * 3, 0, 1)
        elif mode == "shadows":
            mask = np.clip(1 - lum * 3, 0, 1)
        else:  # midtones
            shadow_mask = np.clip(1 - lum * 3, 0, 1)
            highlight_mask = np.clip((lum - 0.67) * 3, 0, 1)
            mask = 1 - shadow_mask - highlight_mask
        # Apply selective hue shift, blending by the mask.
        # NOTE(review): saturation_boost is only applied in "all" mode -
        # confirm that skipping it for the selective modes is intended.
        shifted_hue = (hsv[:, :, 0] + hue_shift / 2) % 180
        hsv[:, :, 0] = hsv[:, :, 0] * (1 - mask) + shifted_hue * mask
    # Convert back
    hsv = np.clip(hsv, 0, 255).astype(np.uint8)
    result_bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    result = cv2.cvtColor(result_bgr, cv2.COLOR_BGR2RGB)
    return result, state

20
effects/color_cycle.sexp Normal file
View File

@@ -0,0 +1,20 @@
;; Color cycle effect - animated hue rotation over time
;; Usage: python3 plan.py effects/color_cycle.sexp -p speed=2 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   speed: rotation speed in hue rotations per second (0.1 to 10, default: 2)
;;          higher = faster color cycling
(recipe "color_cycle"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
(effect color_cycle :path "sexp_effects/effects/color_cycle.sexp")
;; Default parameter (overridden by -p speed=N)
(def speed 2)
(def video (source :path "monday.webm"))
;; First 10 seconds of the source video
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect color_cycle :speed speed)))
result)

139
effects/color_grade.py Normal file
View File

@@ -0,0 +1,139 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect color_grade
@version 1.0.0
@author artdag
@description
Color grading effect. Applies cinematic color adjustments including
shadows/midtones/highlights tinting, lift/gamma/gain, and temperature.
@param shadows list
@default [0, 0, 0]
RGB tint for dark areas.
@param midtones list
@default [0, 0, 0]
RGB tint for middle tones.
@param highlights list
@default [0, 0, 0]
RGB tint for bright areas.
@param lift float
@range -0.5 0.5
@default 0
Raise/lower shadow levels.
@param gamma float
@range 0.5 2
@default 1
Midtone brightness curve.
@param gain float
@range 0.5 2
@default 1
Highlight intensity.
@param temperature float
@range -100 100
@default 0
Color temperature (-100 = cool/blue, +100 = warm/orange).
@param tint float
@range -100 100
@default 0
Green/magenta tint (-100 = green, +100 = magenta).
@example
(effect color_grade :temperature 30 :shadows [0 0 20])
@example
;; Cinematic teal-orange look
(effect color_grade :shadows [0 10 20] :highlights [20 10 0])
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
"""
Apply color grading to a video frame.
Args:
frame: Input frame as numpy array (H, W, 3) RGB uint8
params: Effect parameters
state: Persistent state dict
Returns:
Tuple of (processed_frame, new_state)
"""
shadows = params.get("shadows", [0, 0, 0])
midtones = params.get("midtones", [0, 0, 0])
highlights = params.get("highlights", [0, 0, 0])
lift = params.get("lift", 0)
gamma = max(0.5, min(params.get("gamma", 1), 2))
gain = max(0.5, min(params.get("gain", 1), 2))
temperature = params.get("temperature", 0)
tint = params.get("tint", 0)
if state is None:
state = {}
result = frame.astype(np.float32) / 255.0
# Apply lift (shadows)
result = result + lift
# Apply gamma (midtones)
result = np.power(np.clip(result, 0.001, 1), 1 / gamma)
# Apply gain (highlights)
result = result * gain
# Convert tints to float
if isinstance(shadows, (list, tuple)) and len(shadows) >= 3:
shadows = np.array(shadows[:3], dtype=np.float32) / 255.0
else:
shadows = np.zeros(3, dtype=np.float32)
if isinstance(midtones, (list, tuple)) and len(midtones) >= 3:
midtones = np.array(midtones[:3], dtype=np.float32) / 255.0
else:
midtones = np.zeros(3, dtype=np.float32)
if isinstance(highlights, (list, tuple)) and len(highlights) >= 3:
highlights = np.array(highlights[:3], dtype=np.float32) / 255.0
else:
highlights = np.zeros(3, dtype=np.float32)
# Calculate luminance for zone-based grading
lum = 0.299 * result[:, :, 0] + 0.587 * result[:, :, 1] + 0.114 * result[:, :, 2]
# Create zone masks
shadow_mask = np.clip(1 - lum * 3, 0, 1)[:, :, np.newaxis]
highlight_mask = np.clip((lum - 0.67) * 3, 0, 1)[:, :, np.newaxis]
midtone_mask = 1 - shadow_mask - highlight_mask
# Apply zone tints
for c in range(3):
result[:, :, c] += shadows[c] * shadow_mask[:, :, 0]
result[:, :, c] += midtones[c] * midtone_mask[:, :, 0]
result[:, :, c] += highlights[c] * highlight_mask[:, :, 0]
# Apply temperature (blue <-> orange)
if temperature != 0:
temp_shift = temperature / 100.0
result[:, :, 0] += temp_shift * 0.3 # Red
result[:, :, 2] -= temp_shift * 0.3 # Blue
# Apply tint (green <-> magenta)
if tint != 0:
tint_shift = tint / 100.0
result[:, :, 1] -= tint_shift * 0.2 # Green
return (np.clip(result, 0, 1) * 255).astype(np.uint8), state

52
effects/contrast.py Normal file
View File

@@ -0,0 +1,52 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect contrast
@version 1.0.0
@author artdag
@description
Adjusts contrast by scaling pixel values around the midpoint (128).
Higher values increase contrast, lower values flatten the image.
@param factor float
@range 0 3
@default 1.0
Contrast multiplier. 0.5 = low contrast, 1 = unchanged, 2 = high contrast.
@example
(effect contrast :factor 1.5)
@example
;; Dramatic contrast on energy peaks
(effect contrast :factor (bind energy :range [1.0 2.0]))
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Scale pixel contrast around the 128 midpoint.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8.
        params: Effect parameters; "factor" is the contrast multiplier
            (default 1.0 = unchanged).
        state: Persistent state dict (passed through untouched).

    Returns:
        Tuple of (processed_frame, state).
    """
    factor = params.get("factor", 1.0)
    # Identity factor: skip the float round-trip entirely.
    if factor == 1.0:
        return frame, state
    # y = 128 + factor * (x - 128), computed in float32 to avoid uint8 wrap.
    scaled = (frame.astype(np.float32) - 128.0) * factor + 128.0
    return np.clip(scaled, 0, 255).astype(np.uint8), state

20
effects/contrast.sexp Normal file
View File

@@ -0,0 +1,20 @@
;; Contrast effect - adjust image contrast
;; Usage: python3 plan.py effects/contrast.sexp -p amount=1.5 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   amount: contrast multiplier (0.0 to 3.0, default: 1.5)
;;           <1 = less contrast, >1 = more contrast
(recipe "contrast"
  :version "1.0"
  ;; Output encoding settings for the rendered video.
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect contrast :path "sexp_effects/effects/contrast.sexp")
  ;; Default parameter (overridden by -p amount=N)
  (def amount 1.5)
  ;; NOTE(review): source clip is hard-coded; presumably substituted by the planner - confirm.
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect contrast :amount amount)))
  result)

193
effects/crt.py Normal file
View File

@@ -0,0 +1,193 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect crt
@version 1.0.0
@author artdag
@description
CRT / Scanlines effect. Simulates CRT monitor aesthetics with visible
scan lines, optional RGB subpixels, barrel distortion, and vignette.
@param line_spacing int
@range 1 10
@default 2
Pixels between scanlines.
@param line_opacity float
@range 0 1
@default 0.3
Darkness of scanlines.
@param rgb_subpixels bool
@default false
Show RGB subpixel pattern.
@param curvature float
@range 0 0.5
@default 0
Barrel distortion amount for curved screen look.
@param vignette float
@range 0 1
@default 0
Dark corners effect.
@param bloom float
@range 0 1
@default 0
Glow/blur on bright areas.
@param flicker float
@range 0 0.3
@default 0
Brightness variation.
@param seed int
@default 42
Random seed for flicker.
@state rng DeterministicRNG
Random number generator for flicker.
@example
(effect crt :line_spacing 2 :line_opacity 0.4)
@example
;; Full retro CRT look
(effect crt :curvature 0.2 :vignette 0.3 :rgb_subpixels true :bloom 0.2)
"""
import numpy as np
import cv2
from pathlib import Path
import sys
# Import DeterministicRNG from same directory
_effects_dir = Path(__file__).parent
if str(_effects_dir) not in sys.path:
sys.path.insert(0, str(_effects_dir))
from random import DeterministicRNG
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply a CRT-monitor look: optional barrel curvature, bloom, scanlines,
    RGB subpixel striping, vignette, and brightness flicker.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters (line_spacing, line_opacity, rgb_subpixels,
            curvature, vignette, bloom, flicker, seed)
        state: Persistent state dict; "rng" holds the DeterministicRNG used
            for flicker so seeded runs are reproducible

    Returns:
        Tuple of (processed_frame, new_state)
    """
    line_spacing = max(1, int(params.get("line_spacing", 2)))
    line_opacity = params.get("line_opacity", 0.3)
    rgb_subpixels = params.get("rgb_subpixels", False)
    curvature = params.get("curvature", 0)
    vignette = params.get("vignette", 0)
    bloom = params.get("bloom", 0)
    flicker = params.get("flicker", 0)
    seed = int(params.get("seed", 42))
    if state is None:
        state = {}
    # Initialize RNG lazily so repeated runs flicker identically per seed.
    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)
    rng = state["rng"]
    h, w = frame.shape[:2]
    result = frame.astype(np.float32).copy()
    # Apply barrel distortion (curved-glass look)
    if curvature > 0:
        result = _apply_curvature(result, curvature)
    # Apply bloom (glow on bright areas)
    if bloom > 0:
        result = _apply_bloom(result, bloom)
    # Scanlines: darken every line_spacing-th row. Vectorized strided slice
    # replaces the former per-row Python loop (same rows, same factor).
    if line_opacity > 0:
        result[::line_spacing] *= (1 - line_opacity)
    # RGB subpixel pattern: boost one channel and dim the other two per
    # column, cycling R/G/B. Strided slices replace the per-column loop.
    if rgb_subpixels:
        result[:, 0::3] *= np.array([1.2, 0.8, 0.8], dtype=np.float32)
        result[:, 1::3] *= np.array([0.8, 1.2, 0.8], dtype=np.float32)
        result[:, 2::3] *= np.array([0.8, 0.8, 1.2], dtype=np.float32)
    # Radial vignette darkening toward the corners
    if vignette > 0:
        y_coords, x_coords = np.ogrid[:h, :w]
        center_x, center_y = w / 2, h / 2
        dist = np.sqrt((x_coords - center_x)**2 + (y_coords - center_y)**2)
        max_dist = np.sqrt(center_x**2 + center_y**2)
        vignette_mask = 1 - (dist / max_dist) * vignette
        vignette_mask = np.clip(vignette_mask, 0, 1)
        result = result * vignette_mask[:, :, np.newaxis]
    # Per-frame brightness flicker drawn from the deterministic RNG
    if flicker > 0:
        flicker_amount = 1.0 + rng.uniform(-flicker, flicker)
        result = result * flicker_amount
    return np.clip(result, 0, 255).astype(np.uint8), state
def _apply_curvature(frame: np.ndarray, strength: float) -> np.ndarray:
    """Warp the frame with barrel distortion of the given strength."""
    h, w = frame.shape[:2]
    half_w, half_h = w / 2, h / 2
    yy, xx = np.mgrid[0:h, 0:w].astype(np.float32)
    # Coordinates normalized to [-1, 1] around the frame center.
    nx = (xx - half_w) / half_w
    ny = (yy - half_h) / half_h
    radius = np.sqrt(nx**2 + ny**2)
    # Barrel model: r' = r * (1 + k * r^2).
    warped = radius * (1 + strength * radius**2)
    # Avoid 0/0 at the exact center pixel.
    scale = np.where(radius > 0, warped / radius, 1)
    src_x = (nx * scale * half_w + half_w).astype(np.float32)
    src_y = (ny * scale * half_h + half_h).astype(np.float32)
    remapped = cv2.remap(frame.astype(np.uint8), src_x, src_y,
                         cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT,
                         borderValue=(0, 0, 0))
    return remapped.astype(np.float32)
def _apply_bloom(frame: np.ndarray, strength: float) -> np.ndarray:
    """Add a soft glow around pixels brighter than the 200 luma threshold."""
    frame_u8 = frame.astype(np.uint8)
    luma = cv2.cvtColor(frame_u8, cv2.COLOR_RGB2GRAY)
    _, highlights = cv2.threshold(luma, 200, 255, cv2.THRESH_BINARY)
    glow = cv2.GaussianBlur(highlights, (21, 21), 0)
    glow_rgb = cv2.cvtColor(glow, cv2.COLOR_GRAY2RGB).astype(np.float32)
    # Half-strength additive blend keeps highlights from blowing out instantly.
    return frame + glow_rgb * strength * 0.5

21
effects/crt.sexp Normal file
View File

@@ -0,0 +1,21 @@
;; CRT effect - old TV/monitor look with scanlines and vignette
;; Usage: python3 plan.py effects/crt.sexp -p line_spacing=3 -p vignette_amount=0.3 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   line_spacing: pixels between scanlines (1 to 8, default: 3)
;;   vignette_amount: edge darkening strength (0 to 1, default: 0.3)
(recipe "crt"
  :version "1.0"
  ;; Output encoding settings for the rendered video.
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect crt :path "sexp_effects/effects/crt.sexp")
  ;; Default parameters (overridden by -p line_spacing=N -p vignette_amount=N)
  (def line_spacing 3)
  (def vignette_amount 0.3)
  ;; NOTE(review): source clip is hard-coded; presumably substituted by the planner - confirm.
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect crt :line_spacing line_spacing :vignette_amount vignette_amount)))
  result)

141
effects/datamosh.py Normal file
View File

@@ -0,0 +1,141 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect datamosh
@version 1.0.0
@author artdag
@description
Digital corruption / glitch block effect. Randomly corrupts rectangular
blocks by shifting, swapping, or duplicating from previous frames.
Simulates video compression artifacts.
@param block_size int
@range 8 128
@default 32
Size of corruption blocks in pixels.
@param corruption float
@range 0 1
@default 0.3
Probability of corrupting each block. Bind to energy for reactive glitch.
@param max_offset int
@range 0 200
@default 50
Maximum pixel offset when shifting blocks.
@param color_corrupt bool
@default true
Also apply color channel shifts to blocks.
@param seed int
@default 42
Random seed for deterministic glitch patterns.
@state previous_frame ndarray
Stores previous frame for frame-blending corruption.
@state rng DeterministicRNG
Random number generator for reproducible results.
@example
(effect datamosh :corruption 0.4)
@example
;; Heavy glitch on energy peaks
(effect datamosh :corruption (bind energy :range [0 0.8]) :block_size 16)
@example
;; Reproducible glitch with seed
(effect datamosh :corruption 0.5 :seed 12345)
"""
import numpy as np
from pathlib import Path
import sys
# Import DeterministicRNG from same directory
_effects_dir = Path(__file__).parent
if str(_effects_dir) not in sys.path:
sys.path.insert(0, str(_effects_dir))
from random import DeterministicRNG
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply datamosh/glitch block effect to a video frame.

    Walks the frame in block_size tiles; each tile is corrupted with
    probability `corruption` by one of four strategies: copying from a
    randomly offset source ("shift"), reusing the previous frame's tile
    ("duplicate"), rolling one color channel ("color"), or exchanging with
    a random other tile ("swap").

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - block_size: corruption block size, clamped to 8..128 (default 32)
            - corruption: probability 0-1 (default 0.3)
            - max_offset: max shift in pixels (default 50)
            - color_corrupt: apply color shifts (default True)
            - seed: RNG seed for reproducible glitches (default 42)
        state: Persistent state dict
            - previous_frame: last frame, used by the "duplicate" strategy
            - rng: DeterministicRNG instance, created on first call

    Returns:
        Tuple of (processed_frame, new_state)

    NOTE: the per-tile RNG draw order defines the glitch pattern; do not
    reorder the rng calls below or seeded output will change.
    """
    block_size = max(8, min(int(params.get("block_size", 32)), 128))
    corruption = max(0, min(params.get("corruption", 0.3), 1))
    max_offset = int(params.get("max_offset", 50))
    color_corrupt = params.get("color_corrupt", True)
    seed = int(params.get("seed", 42))
    if state is None:
        state = {}
    # Initialize RNG once so the glitch sequence is reproducible per seed.
    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)
    rng = state["rng"]
    if corruption == 0:
        # Still record the frame so "duplicate" has history when re-enabled.
        state["previous_frame"] = frame.copy()
        return frame, state
    h, w = frame.shape[:2]
    result = frame.copy()
    prev_frame = state.get("previous_frame")
    # Process blocks
    for by in range(0, h, block_size):
        for bx in range(0, w, block_size):
            # Edge tiles may be smaller than block_size.
            bh = min(block_size, h - by)
            bw = min(block_size, w - bx)
            if rng.uniform() < corruption:
                corruption_type = rng.choice(["shift", "duplicate", "color", "swap"])
                if corruption_type == "shift" and max_offset > 0:
                    # Copy the tile from a randomly offset source, clamped in-bounds.
                    ox = rng.randint(-max_offset, max_offset)
                    oy = rng.randint(-max_offset, max_offset)
                    src_x = max(0, min(bx + ox, w - bw))
                    src_y = max(0, min(by + oy, h - bh))
                    result[by:by+bh, bx:bx+bw] = frame[src_y:src_y+bh, src_x:src_x+bw]
                elif corruption_type == "duplicate" and prev_frame is not None:
                    # Reuse the previous frame's tile (only if resolution matches).
                    if prev_frame.shape == frame.shape:
                        result[by:by+bh, bx:bx+bw] = prev_frame[by:by+bh, bx:bx+bw]
                elif corruption_type == "color" and color_corrupt:
                    # Roll one color channel vertically within the tile.
                    block = result[by:by+bh, bx:bx+bw].copy()
                    shift = rng.randint(1, 3)
                    channel = rng.randint(0, 2)
                    block[:, :, channel] = np.roll(block[:, :, channel], shift, axis=0)
                    result[by:by+bh, bx:bx+bw] = block
                elif corruption_type == "swap":
                    # Exchange this tile with a random same-sized tile elsewhere.
                    other_bx = rng.randint(0, max(0, w - bw - 1))
                    other_by = rng.randint(0, max(0, h - bh - 1))
                    temp = result[by:by+bh, bx:bx+bw].copy()
                    result[by:by+bh, bx:bx+bw] = frame[other_by:other_by+bh, other_bx:other_bx+bw]
                    result[other_by:other_by+bh, other_bx:other_bx+bw] = temp
    state["previous_frame"] = frame.copy()
    return result, state

21
effects/datamosh.sexp Normal file
View File

@@ -0,0 +1,21 @@
;; Datamosh effect - glitch art block corruption
;; Usage: python3 plan.py effects/datamosh.sexp -p corruption=0.5 -p block_size=24 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   corruption: corruption intensity (0.1 to 1.0, default: 0.5)
;;   block_size: size of corrupted blocks (8 to 64, default: 24)
(recipe "datamosh"
  :version "1.0"
  ;; Output encoding settings for the rendered video.
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect datamosh :path "sexp_effects/effects/datamosh.sexp")
  ;; Default parameters (overridden by -p corruption=N -p block_size=N)
  (def corruption 0.5)
  (def block_size 24)
  ;; NOTE(review): source clip is hard-coded; presumably substituted by the planner - confirm.
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect datamosh :corruption corruption :block_size block_size)))
  result)

170
effects/displacement.py Normal file
View File

@@ -0,0 +1,170 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect displacement
@version 1.0.0
@author artdag
@description
Displacement effect. Warps the image based on a pattern (sine waves,
noise, or radial). Creates flowing, liquid-like distortions.
@param amount float
@range 0 100
@default 20
Displacement strength in pixels.
@param pattern string
@enum sine noise radial turbulence
@default sine
Displacement pattern type:
- sine: smooth sine wave
- noise: random displacement
- radial: swirl from center
- turbulence: multi-scale noise
@param frequency float
@range 1 50
@default 10
Pattern frequency (waves per frame width).
@param speed float
@range 0 10
@default 1
Animation speed.
@param direction string
@enum horizontal vertical both
@default both
Displacement direction.
@param seed int
@default 42
Random seed for noise patterns.
@state rng DeterministicRNG
Random number generator.
@example
(effect displacement :amount 30 :pattern "sine" :frequency 5)
@example
;; Reactive turbulence
(effect displacement :amount (bind energy :range [10 50]) :pattern "turbulence")
"""
import numpy as np
import cv2
from pathlib import Path
import sys
# Import DeterministicRNG from same directory
_effects_dir = Path(__file__).parent
if str(_effects_dir) not in sys.path:
sys.path.insert(0, str(_effects_dir))
from random import DeterministicRNG
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Warp the frame through a pattern-driven displacement field.

    Builds per-pixel source-coordinate maps (map_x, map_y) from one of four
    patterns (sine, noise, radial, turbulence), then samples the frame via
    cv2.remap with reflected borders.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - amount: displacement strength in pixels (default 20)
            - pattern: "sine" | "noise" | "radial" | "turbulence" (default "sine")
            - frequency: pattern frequency, clamped to >= 1 (default 10)
            - speed: animation speed multiplier (default 1)
            - direction: "horizontal" | "vertical" | "both" (default "both")
            - seed: RNG seed for noise patterns (default 42)
            - _time: current time, injected by the pipeline (default 0)
        state: Persistent state dict
            - rng: DeterministicRNG instance
            - noise_x / noise_y / noise_size: cached field for "noise" pattern

    Returns:
        Tuple of (processed_frame, new_state)
    """
    amount = params.get("amount", 20)
    pattern = params.get("pattern", "sine")
    frequency = max(1, params.get("frequency", 10))
    speed = params.get("speed", 1)
    direction = params.get("direction", "both")
    seed = int(params.get("seed", 42))
    t = params.get("_time", 0)
    if state is None:
        state = {}
    # Zero amount is the identity transform; skip all map construction.
    if amount == 0:
        return frame, state
    # Initialize RNG
    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)
    h, w = frame.shape[:2]
    # Create base (identity) coordinate maps
    map_x = np.tile(np.arange(w, dtype=np.float32), (h, 1))
    map_y = np.tile(np.arange(h, dtype=np.float32).reshape(-1, 1), (1, w))
    # Generate displacement based on pattern
    if pattern == "sine":
        # Sine wave displacement animated by time
        phase = t * speed * 2 * np.pi
        if direction in ["horizontal", "both"]:
            map_x = map_x + amount * np.sin(2 * np.pi * map_y / h * frequency + phase)
        if direction in ["vertical", "both"]:
            # NOTE(review): in "both" mode this reads the already-displaced
            # map_x, compounding the warp - presumably intentional; confirm.
            map_y = map_y + amount * np.sin(2 * np.pi * map_x / w * frequency + phase)
    elif pattern == "noise":
        # Random per-pixel displacement; field is built once per resolution
        # and cached in state, so this pattern is static over time.
        rng = state["rng"]
        if "noise_x" not in state or state.get("noise_size") != (h, w):
            state["noise_x"] = np.array([[rng.uniform(-1, 1) for _ in range(w)] for _ in range(h)], dtype=np.float32)
            state["noise_y"] = np.array([[rng.uniform(-1, 1) for _ in range(w)] for _ in range(h)], dtype=np.float32)
            state["noise_size"] = (h, w)
        if direction in ["horizontal", "both"]:
            map_x = map_x + amount * state["noise_x"]
        if direction in ["vertical", "both"]:
            map_y = map_y + amount * state["noise_y"]
    elif pattern == "radial":
        # Radial/swirl displacement around the frame center
        cx, cy = w / 2, h / 2
        y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32)
        dx = x_coords - cx
        dy = y_coords - cy
        # Epsilon keeps the center pixel's radius nonzero.
        dist = np.sqrt(dx**2 + dy**2) + 1e-6
        angle = np.arctan2(dy, dx)
        # Swirl amount varies with distance and time
        swirl = amount * 0.01 * np.sin(dist / (w / frequency) + t * speed * 2 * np.pi)
        new_angle = angle + swirl
        if direction in ["horizontal", "both"]:
            map_x = cx + dist * np.cos(new_angle)
        if direction in ["vertical", "both"]:
            map_y = cy + dist * np.sin(new_angle)
    elif pattern == "turbulence":
        # Multi-scale noise summed over octaves 1, 2, 4.
        # NOTE(review): unlike "noise", these fields are regenerated every
        # frame from the ongoing rng stream, so the turbulence animates.
        rng = state["rng"]
        disp_x = np.zeros((h, w), dtype=np.float32)
        disp_y = np.zeros((h, w), dtype=np.float32)
        for scale in [1, 2, 4]:
            sh, sw = h // scale, w // scale
            noise_x = np.array([[rng.uniform(-1, 1) for _ in range(sw)] for _ in range(sh)], dtype=np.float32)
            noise_y = np.array([[rng.uniform(-1, 1) for _ in range(sw)] for _ in range(sh)], dtype=np.float32)
            if scale > 1:
                noise_x = cv2.resize(noise_x, (w, h))
                noise_y = cv2.resize(noise_y, (w, h))
            # Coarser octaves contribute less (1/scale weighting).
            disp_x += noise_x / scale
            disp_y += noise_y / scale
        if direction in ["horizontal", "both"]:
            map_x = map_x + amount * disp_x
        if direction in ["vertical", "both"]:
            map_y = map_y + amount * disp_y
    # Sample the frame through the displacement maps
    result = cv2.remap(frame, map_x, map_y, cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)
    return result, state

139
effects/echo.py Normal file
View File

@@ -0,0 +1,139 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect echo
@version 1.0.0
@author artdag
@description
Motion trail / echo effect. Blends current frame with previous frames
to create ghosting/trailing effects. Great for fast movement scenes.
Uses a frame buffer in state to store recent frames for blending.
@param num_echoes int
@range 1 20
@default 4
Number of trailing frames to blend.
@param decay float
@range 0 1
@default 0.5
Opacity ratio between successive echoes. 0.5 = each echo half as bright.
@param blend_mode string
@enum blend add screen maximum
@default blend
How to combine echoes:
- blend: weighted average
- add: sum (can overexpose)
- screen: like add but resists overexposure
- maximum: brightest pixel wins
@state frame_buffer list
Circular buffer of recent frames.
@example
(effect echo :num_echoes 6 :decay 0.6)
@example
;; More echoes on energy
(effect echo :num_echoes (bind energy :range [2 10]))
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Blend the current frame with recent frames to produce motion trails.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8.
        params: Effect parameters:
            - num_echoes: trailing frames to blend, clamped to 1..20 (default 4)
            - decay: per-echo opacity ratio, clamped to 0..1 (default 0.5)
            - blend_mode: "blend" | "add" | "screen" | "maximum" (default "blend")
        state: Persistent state dict; "frame_buffer" holds recent frames.

    Returns:
        Tuple of (processed_frame, new_state).
    """
    num_echoes = max(1, min(int(params.get("num_echoes", 4)), 20))
    decay = max(0, min(params.get("decay", 0.5), 1))
    blend_mode = params.get("blend_mode", "blend")
    state = {} if state is None else state
    history = state.setdefault("frame_buffer", [])
    history.append(frame.copy())
    # Keep a little slack beyond num_echoes so the count can grow mid-clip.
    overflow = len(history) - (num_echoes + 5)
    if overflow > 0:
        del history[:overflow]
    # Newest frame first at full intensity; each older echo fades by `decay`.
    frames = []
    intensities = []
    weight = 1.0
    for echo in reversed(history[-(num_echoes + 1):]):
        frames.append(echo.astype(np.float32))
        intensities.append(weight)
        weight *= decay
    if not frames:
        return frame, state
    blended = _blend_frames(frames, intensities, blend_mode)
    return np.clip(blended, 0, 255).astype(np.uint8), state
def _blend_frames(frames, intensities, blend_mode):
    """
    Combine weighted frames according to blend_mode.

    Args:
        frames: list of float32 frames, newest first.
        intensities: per-frame weights, same length as frames.
        blend_mode: "add", "screen", "maximum"; anything else is the
            default weighted average ("blend").

    Returns:
        Blended float32 frame (not yet clipped to 0-255).

    Raises:
        ValueError: if frames is empty.
    """
    # Bug fix: the previous guard returned frames[0] for an empty list,
    # which raised IndexError; fail explicitly instead.
    if not frames:
        raise ValueError("frames must not be empty")
    if blend_mode == "add":
        # Plain weighted sum; may exceed 255 (caller clips).
        result = np.zeros_like(frames[0])
        for frame, intensity in zip(frames, intensities):
            result += frame * intensity
        return result
    elif blend_mode == "screen":
        # Screen blend: invert, multiply, invert - resists overexposure.
        result = np.zeros_like(frames[0])
        for frame, intensity in zip(frames, intensities):
            weighted = (frame / 255.0) * intensity
            result = 255 * (1 - (1 - result / 255.0) * (1 - weighted))
        return result
    elif blend_mode == "maximum":
        # Brightest weighted pixel wins.
        result = frames[0] * intensities[0]
        for frame, intensity in zip(frames[1:], intensities[1:]):
            result = np.maximum(result, frame * intensity)
        return result
    else:  # blend - weighted average normalized by total intensity
        total = sum(intensities)
        if total == 0:
            return frames[0]
        result = np.zeros_like(frames[0])
        for frame, intensity in zip(frames, intensities):
            result += frame * (intensity / total)
        return result

21
effects/echo.sexp Normal file
View File

@@ -0,0 +1,21 @@
;; Echo effect - temporal ghosting/trails
;; Usage: python3 plan.py effects/echo.sexp -p num_echoes=4 -p decay=0.5 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   num_echoes: number of ghost frames (1 to 10, default: 4)
;;   decay: fade rate per echo (0.2 to 0.9, default: 0.5)
(recipe "echo"
  :version "1.0"
  ;; Output encoding settings for the rendered video.
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect echo :path "sexp_effects/effects/echo.sexp")
  ;; Default parameters (overridden by -p num_echoes=N -p decay=N)
  (def num_echoes 4)
  (def decay 0.5)
  ;; NOTE(review): source clip is hard-coded; presumably substituted by the planner - confirm.
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect echo :num_echoes num_echoes :decay decay)))
  result)

22
effects/edge_detect.sexp Normal file
View File

@@ -0,0 +1,22 @@
;; Edge detect effect - Canny edge detection
;; Usage: python3 plan.py effects/edge_detect.sexp -p low=50 -p high=150 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   low: lower threshold (10 to 100, default: 50)
;;   high: upper threshold (100 to 300, default: 150)
;;   edges between low-high are kept if connected to strong edges
(recipe "edge_detect"
  :version "1.0"
  ;; Output encoding settings for the rendered video.
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect edge_detect :path "sexp_effects/effects/edge_detect.sexp")
  ;; Default parameters (overridden by -p low=N -p high=N)
  (def low 50)
  (def high 150)
  ;; NOTE(review): source clip is hard-coded; presumably substituted by the planner - confirm.
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect edge_detect :low low :high high)))
  result)

88
effects/emboss.py Normal file
View File

@@ -0,0 +1,88 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect emboss
@version 1.0.0
@author artdag
@description
Emboss / relief effect. Creates a 3D raised appearance by highlighting
edges from a simulated light direction. Great for sculptural looks.
@param strength float
@range 0.5 3
@default 1.0
Emboss intensity.
@param direction float
@range 0 360
@default 135
Light direction in degrees. Bind to beat for rotating light.
@param blend float
@range 0 1
@default 0.3
Blend with original (0 = full emboss, 1 = original).
@example
(effect emboss :strength 1.5)
@example
;; Rotating light direction
(effect emboss :direction (bind beat_position :range [0 360]))
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply an emboss/relief effect lit from a configurable direction.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8.
        params: Effect parameters:
            - strength: emboss intensity (default 1.0)
            - direction: light angle in degrees (default 135)
            - blend: mix with original, 0 = full emboss (default 0.3)
        state: Persistent state dict (unused).

    Returns:
        Tuple of (processed_frame, new_state).
    """
    strength = params.get("strength", 1.0)
    direction = params.get("direction", 135)
    blend = params.get("blend", 0.3)
    # Light-direction components drive the kernel's asymmetry.
    theta = np.deg2rad(direction)
    sx = strength * np.cos(theta)
    sy = strength * np.sin(theta)
    # 3x3 emboss kernel: negative weights opposite the light, positive toward it.
    kernel = np.array(
        [[-sy - sx, -sy, -sy + sx],
         [-sx, 1, sx],
         [sy - sx, sy, sy + sx]],
        dtype=np.float32,
    )
    # Emboss operates on luminance; re-center around mid gray afterwards.
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY).astype(np.float32)
    relief = np.clip(cv2.filter2D(gray, -1, kernel) + 128, 0, 255)
    relief_rgb = cv2.cvtColor(relief.astype(np.uint8), cv2.COLOR_GRAY2RGB)
    # Mix back toward the original frame if requested.
    if blend > 0:
        mixed = frame.astype(np.float32) * blend + relief_rgb.astype(np.float32) * (1 - blend)
        return np.clip(mixed, 0, 255).astype(np.uint8), state
    return relief_rgb, state

19
effects/emboss.sexp Normal file
View File

@@ -0,0 +1,19 @@
;; Emboss effect - 3D raised surface look
;; Usage: python3 plan.py effects/emboss.sexp -p strength=1.5 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   strength: emboss intensity (0.5 to 3.0, default: 1.5)
(recipe "emboss"
  :version "1.0"
  ;; Output encoding settings for the rendered video.
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect emboss :path "sexp_effects/effects/emboss.sexp")
  ;; Default parameter (overridden by -p strength=N)
  (def strength 1.5)
  ;; NOTE(review): source clip is hard-coded; presumably substituted by the planner - confirm.
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect emboss :strength strength)))
  result)

148
effects/film_grain.py Normal file
View File

@@ -0,0 +1,148 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect film_grain
@version 1.0.0
@author artdag
@description
Film grain / Noise effect. Adds realistic film grain texture.
Great for vintage aesthetics and subtle texture.
@param intensity float
@range 0 1
@default 0.2
Noise intensity. Bind to energy for reactive grain.
@param grain_size float
@range 0.5 5
@default 1.0
Size of grain particles. Larger = coarser grain.
@param colored bool
@default false
Use colored noise instead of monochrome.
@param temporal_variation float
@range 0 1
@default 1.0
How much grain changes frame-to-frame. 0 = static, 1 = full animation.
@param seed int
@default 42
Random seed for reproducible grain.
@state rng DeterministicRNG
Random number generator for consistent grain.
@example
(effect film_grain :intensity 0.3)
@example
;; Colored grain, reactive to energy
(effect film_grain :intensity (bind energy :range [0.1 0.5]) :colored true)
"""
import numpy as np
import cv2
from pathlib import Path
import sys
# Import DeterministicRNG from same directory
_effects_dir = Path(__file__).parent
if str(_effects_dir) not in sys.path:
sys.path.insert(0, str(_effects_dir))
from random import DeterministicRNG
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply film grain effect to a video frame.

    Generates Gaussian noise (per-pixel draws from the deterministic RNG),
    optionally at a coarser resolution for larger grain, and adds it to the
    frame. temporal_variation blends between a cached "static" noise field
    (captured on the first frame) and freshly generated noise.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - intensity: noise intensity 0-1 (default 0.2)
            - grain_size: grain particle size, min 0.5 (default 1.0)
            - colored: use independent per-channel noise (default False)
            - temporal_variation: grain animation 0-1 (default 1.0)
            - seed: random seed (default 42)
        state: Persistent state dict
            - rng: DeterministicRNG instance
            - frame_count: frame counter (bookkeeping; not otherwise read here)
            - static_noise: cached noise field when temporal_variation < 1

    Returns:
        Tuple of (processed_frame, new_state)

    NOTE: the per-pixel rng draw order is part of the deterministic output;
    do not reorder or vectorize the generation loops without accounting
    for that.
    """
    intensity = params.get("intensity", 0.2)
    grain_size = max(0.5, params.get("grain_size", 1.0))
    colored = params.get("colored", False)
    temporal_var = params.get("temporal_variation", 1.0)
    seed = int(params.get("seed", 42))
    if state is None:
        state = {}
    # Zero intensity is a no-op; skip noise generation entirely.
    if intensity <= 0:
        return frame, state
    # Initialize RNG and bookkeeping on first call
    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)
        state["frame_count"] = 0
        state["static_noise"] = None
    rng = state["rng"]
    frame_count = state["frame_count"]
    h, w = frame.shape[:2]
    # Coarse grain is generated at reduced resolution and upscaled later.
    if grain_size > 1:
        noise_h = max(1, int(h / grain_size))
        noise_w = max(1, int(w / grain_size))
    else:
        noise_h, noise_w = h, w
    # Generate noise based on temporal variation
    if temporal_var >= 1.0 or state.get("static_noise") is None:
        # Generate new noise (first frame always lands here)
        if colored:
            noise = np.array([[[rng.gaussian(0, 1) for _ in range(3)]
                               for _ in range(noise_w)]
                              for _ in range(noise_h)]) * intensity * 50
        else:
            # Monochrome: one 2D field replicated across the 3 channels.
            noise_2d = np.array([[rng.gaussian(0, 1)
                                  for _ in range(noise_w)]
                                 for _ in range(noise_h)]) * intensity * 50
            noise = np.stack([noise_2d, noise_2d, noise_2d], axis=-1)
        if temporal_var < 1.0:
            # Capture as the static base; it is never refreshed afterwards.
            state["static_noise"] = noise
    else:
        # Blend the cached static field with freshly generated noise.
        # NOTE(review): when temporal_variation == 0 this still generates a
        # full fresh field only to weight it by zero - wasteful, but the rng
        # draws are part of the deterministic stream; confirm before removing.
        static = state["static_noise"]
        if colored:
            new_noise = np.array([[[rng.gaussian(0, 1) for _ in range(3)]
                                   for _ in range(noise_w)]
                                  for _ in range(noise_h)]) * intensity * 50
        else:
            noise_2d = np.array([[rng.gaussian(0, 1)
                                  for _ in range(noise_w)]
                                 for _ in range(noise_h)]) * intensity * 50
            new_noise = np.stack([noise_2d, noise_2d, noise_2d], axis=-1)
        noise = static * (1 - temporal_var) + new_noise * temporal_var
    # Upscale coarse noise back to frame resolution
    if grain_size > 1:
        noise = cv2.resize(noise.astype(np.float32), (w, h), interpolation=cv2.INTER_LINEAR)
    # Add noise to frame
    result = frame.astype(np.float32) + noise
    state["frame_count"] = frame_count + 1
    return np.clip(result, 0, 255).astype(np.uint8), state

19
effects/film_grain.sexp Normal file
View File

@@ -0,0 +1,19 @@
;; Film grain effect - analog film noise
;; Usage: python3 plan.py effects/film_grain.sexp -p intensity=0.25 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;;   intensity: grain amount (0.05 to 0.5, default: 0.25)
(recipe "film_grain"
  :version "1.0"
  ;; Output encoding settings for the rendered video.
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect film_grain :path "sexp_effects/effects/film_grain.sexp")
  ;; Default parameter (overridden by -p intensity=N)
  (def intensity 0.25)
  ;; NOTE(review): source clip is hard-coded; presumably substituted by the planner - confirm.
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect film_grain :intensity intensity)))
  result)

116
effects/fisheye.py Normal file
View File

@@ -0,0 +1,116 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect fisheye
@version 1.0.0
@author artdag
@description
Barrel/fisheye lens distortion. Positive values bulge outward (fisheye),
negative values pinch inward (pincushion). Great for emphasis effects.
@param strength float
@range -1 1
@default 0.3
Distortion strength. Positive = fisheye bulge, negative = pincushion pinch.
@param center_x float
@range 0 1
@default 0.5
Horizontal center of distortion (0 = left, 1 = right).
@param center_y float
@range 0 1
@default 0.5
Vertical center of distortion (0 = top, 1 = bottom).
@param zoom_correction bool
@default true
Automatically zoom to hide black edges on fisheye.
@example
(effect fisheye :strength 0.5)
@example
;; Pulse fisheye on bass
(effect fisheye :strength (bind bass :range [0 0.8] :transform sqrt))
@example
;; Pincushion effect
(effect fisheye :strength -0.3)
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply barrel (fisheye) or pincushion distortion around a movable center.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8.
        params: Effect parameters:
            - strength: -1..1; >0 bulges outward, <0 pinches (default 0.3)
            - center_x / center_y: distortion center in 0..1 (default 0.5)
            - zoom_correction: zoom in on fisheye to hide edges (default True)
        state: Persistent state dict (unused).

    Returns:
        Tuple of (processed_frame, new_state).
    """
    strength = params.get("strength", 0.3)
    center_x = params.get("center_x", 0.5)
    center_y = params.get("center_y", 0.5)
    zoom_correction = params.get("zoom_correction", True)
    # Zero strength is the identity transform; skip the remap.
    if strength == 0:
        return frame, state
    h, w = frame.shape[:2]
    # Distortion center in pixel coordinates
    cx = w * center_x
    cy = h * center_y
    yy, xx = np.mgrid[0:h, 0:w].astype(np.float32)
    # Normalize so the half-width/half-height map to 1.
    nx = (xx - cx) / (w / 2)
    ny = (yy - cy) / (h / 2)
    r = np.sqrt(nx**2 + ny**2)
    if strength > 0:
        # Barrel (fisheye): radius grows with the cubic term.
        r_distorted = r * (1 + strength * r**2)
    else:
        # Pincushion: divide instead; epsilon keeps the denominator safe.
        r_distorted = r / (1 - strength * r**2 + 0.001)
    # Avoid 0/0 at the exact center pixel.
    scale = np.where(r > 0, r_distorted / r, 1)
    if zoom_correction and strength > 0:
        # Zoom in proportionally to hide the black border the bulge creates.
        scale = scale / (1 + strength * 0.5)
    src_x = (nx * scale * (w / 2) + cx).astype(np.float32)
    src_y = (ny * scale * (h / 2) + cy).astype(np.float32)
    warped = cv2.remap(
        frame, src_x, src_y,
        cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_REFLECT
    )
    return warped, state

20
effects/fisheye.sexp Normal file
View File

@@ -0,0 +1,20 @@
;; Fisheye effect - barrel/pincushion distortion
;; Usage: python3 plan.py effects/fisheye.sexp -p strength=0.5 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; strength: distortion amount (-1.0 to 1.0, default: 0.5)
;; positive = barrel (bulge), negative = pincushion
(recipe "fisheye"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  ;; NOTE(review): this :path points at the recipe file itself; the effect
  ;; implementation lives in effects/fisheye.py — confirm plan.py resolves
  ;; the implementation from this registration.
  (effect fisheye :path "sexp_effects/effects/fisheye.sexp")
  ;; Default parameter (overridden by -p strength=N)
  ;; :strength matches the parameter declared by effects/fisheye.py (-1..1)
  (def strength 0.5)
  (def video (source :path "monday.webm"))
  ;; Take the first 10 seconds of the source
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect fisheye :strength strength)))
  result)

64
effects/flip.py Normal file
View File

@@ -0,0 +1,64 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect flip
@version 1.0.0
@author artdag
@description
Flips the frame horizontally and/or vertically.
Can be used for mirror effects or beat-triggered flips.
@param horizontal bool
@default false
Flip horizontally (left-right mirror).
@param vertical bool
@default false
Flip vertically (top-bottom mirror).
@example
(effect flip :horizontal true)
@example
(effect flip :vertical true)
@example
(effect flip :horizontal true :vertical true) ; 180 degree rotation
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Flip a video frame horizontally and/or vertically.

    Args:
        frame: Input frame, (H, W, 3) RGB uint8 numpy array.
        params: Effect parameters:
            - horizontal: mirror left-right (default False).
            - vertical: mirror top-bottom (default False).
        state: Persistent state dict (unused by this effect).

    Returns:
        Tuple of (processed_frame, state).
    """
    flip_h = params.get("horizontal", False)
    flip_v = params.get("vertical", False)
    # Nothing requested: hand the original frame back untouched.
    if not (flip_h or flip_v):
        return frame, state
    out = frame
    if flip_h:
        out = out[:, ::-1]  # mirror left-right
    if flip_v:
        out = out[::-1, :]  # mirror top-bottom
    # Reversed slices are views with negative strides; return a
    # contiguous copy for downstream consumers.
    return np.ascontiguousarray(out), state

19
effects/flip.sexp Normal file
View File

@@ -0,0 +1,19 @@
;; Flip effect - flip image horizontally or vertically
;; Usage: python3 plan.py effects/flip.sexp -p direction=horizontal | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; direction: flip axis ("horizontal" or "vertical", default: "horizontal")
(recipe "flip"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect flip :path "sexp_effects/effects/flip.sexp")
  ;; Default parameter (overridden by -p direction=horizontal|vertical)
  (def direction "horizontal")
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  ;; NOTE(review): effects/flip.py declares boolean params :horizontal and
  ;; :vertical, not :direction — unless plan.py translates this key, it is
  ;; silently ignored and the clip passes through unflipped. Confirm.
  (def result (-> clip (effect flip :direction direction)))
  result)

14
effects/grayscale.sexp Normal file
View File

@@ -0,0 +1,14 @@
;; Grayscale effect - converts to black and white
;; Usage: python3 plan.py effects/grayscale.sexp | python3 execute.py - -d . -o output.mp4
;; No parameters
(recipe "grayscale"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  ;; Register the grayscale effect implementation
  (effect grayscale :path "sexp_effects/effects/grayscale.sexp")
  (def video (source :path "monday.webm"))
  ;; First 10 seconds of the source, converted with no parameters
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect grayscale)))
  result)

59
effects/hue_shift.py Normal file
View File

@@ -0,0 +1,59 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect hue_shift
@version 1.0.0
@author artdag
@description
Rotates the hue of all colors by a specified amount (0-360 degrees).
Creates rainbow cycling effects when animated over time.
@param shift float
@range 0 360
@default 0
Hue rotation in degrees. 0/360 = unchanged, 180 = complementary colors.
@example
(effect hue_shift :shift 180) ; complementary colors
@example
;; Rainbow cycling based on time (use with frame counter in state)
(effect hue_shift :shift (bind beat_position :range [0 360]))
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Shift hue of a video frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - shift: hue rotation in degrees (default 0)
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    shift = params.get("shift", 0)
    # Fast path: no rotation — also skips the (slightly lossy)
    # RGB -> HSV -> RGB round-trip. Note shift == 360 still takes the
    # conversion path even though the effective rotation is 0.
    if shift == 0:
        return frame, state
    # Convert RGB to HSV (OpenCV uses H: 0-179, S: 0-255, V: 0-255)
    hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
    # Shift hue (OpenCV hue is 0-179, so divide degrees by 2)
    hue_shift = int((shift % 360) / 2)
    # Widen to int16 before adding so the sum can't wrap in uint8, then
    # wrap around the 180-unit hue circle.
    hsv[:, :, 0] = (hsv[:, :, 0].astype(np.int16) + hue_shift) % 180
    # Convert back to RGB
    result = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
    return result, state

20
effects/hue_shift.sexp Normal file
View File

@@ -0,0 +1,20 @@
;; Hue shift effect - rotate colors around color wheel
;; Usage: python3 plan.py effects/hue_shift.sexp -p degrees=90 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; degrees: hue rotation in degrees (0 to 360, default: 90)
;; 90 = quarter rotation, 180 = complementary colors
(recipe "hue_shift"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect hue_shift :path "sexp_effects/effects/hue_shift.sexp")
  ;; Default parameter (overridden by -p degrees=N)
  (def degrees 90)
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  ;; effects/hue_shift.py reads its rotation from the :shift key (0-360
  ;; degrees, same scale as "degrees"); the previous :degrees keyword was
  ;; silently ignored by the effect. CLI parameter name is unchanged.
  (def result (-> clip (effect hue_shift :shift degrees)))
  result)

62
effects/invert.py Normal file
View File

@@ -0,0 +1,62 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect invert
@version 1.0.0
@author artdag
@description
Inverts the colors of each frame. Can be partial (blended with original)
based on intensity parameter. Useful for beat-reactive flashing effects.
@param intensity float
@range 0 100
@default 100
Intensity of the inversion effect (0 = original, 100 = fully inverted).
Bind to audio analysis for reactive effects.
@example
(effect invert)
@example
(effect invert :intensity 50)
@example
;; Beat-reactive inversion
(effect invert :intensity (bind bass :range [0 100] :transform sqrt))
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Invert the colors of a video frame, optionally blended with the original.

    Args:
        frame: Input frame, (H, W, 3) RGB uint8 numpy array.
        params: Effect parameters:
            - intensity: 0-100; 0 leaves the frame untouched, 100 fully
              inverts, in-between values blend linearly (default 100).
        state: Persistent state dict (unused by this effect).

    Returns:
        Tuple of (processed_frame, state).
    """
    # Normalize the 0-100 intensity to a 0-1 blend weight.
    weight = params.get("intensity", 100) / 100.0
    # Fast paths: untouched frame, or full inversion with no blending.
    if weight <= 0:
        return frame, state
    if weight >= 1:
        return (255 - frame), state
    # Partial inversion: linear blend original*(1-w) + inverted*w,
    # truncated back to uint8.
    inverted = 255 - frame
    blended = (frame * (1 - weight) + inverted * weight).astype(np.uint8)
    return blended, state

14
effects/invert.sexp Normal file
View File

@@ -0,0 +1,14 @@
;; Invert effect - inverts all colors
;; Usage: python3 plan.py effects/invert.sexp | python3 execute.py - -d . -o output.mp4
;; No parameters
(recipe "invert"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  ;; Register the invert effect implementation
  (effect invert :path "sexp_effects/effects/invert.sexp")
  (def video (source :path "monday.webm"))
  ;; First 10 seconds; invert's :intensity defaults to 100 (full inversion)
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect invert)))
  result)

137
effects/kaleidoscope.py Normal file
View File

@@ -0,0 +1,137 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect kaleidoscope
@version 1.0.0
@author artdag
@description
Kaleidoscope effect. Creates mesmerizing mandala-like patterns by
dividing the frame into pie-slice segments and reflecting them.
Great for psychedelic visuals.
@param segments int
@range 3 16
@default 6
Number of symmetry segments.
@param rotation float
@range 0 360
@default 0
Base rotation angle in degrees.
@param rotation_speed float
@range -180 180
@default 0
Continuous rotation speed in degrees/second.
@param center_x float
@range 0 1
@default 0.5
Center X position (0-1).
@param center_y float
@range 0 1
@default 0.5
Center Y position (0-1).
@param zoom float
@range 0.5 3.0
@default 1.0
Zoom factor for the source region.
@state cumulative_rotation float
Tracks rotation over time.
@example
(effect kaleidoscope :segments 8 :rotation_speed 30)
@example
;; Beat-reactive segments
(effect kaleidoscope :segments (bind bass :range [4 12]) :zoom 1.5)
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply kaleidoscope effect to a video frame.

    Converts each output pixel to polar coordinates around the chosen
    center, folds its angle into one pie-slice segment (mirroring every
    other segment), and samples the source there via cv2.remap.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - segments: number of segments 3-16 (default 6)
            - rotation: base rotation degrees (default 0)
            - rotation_speed: degrees per second (default 0)
            - center_x: center X 0-1 (default 0.5)
            - center_y: center Y 0-1 (default 0.5)
            - zoom: zoom factor 0.5-3 (default 1.0)
        state: Persistent state dict

    Returns:
        Tuple of (processed_frame, new_state)
    """
    # Clamp parameters to their documented ranges
    segments = max(3, min(int(params.get("segments", 6)), 16))
    rotation = params.get("rotation", 0)
    rotation_speed = params.get("rotation_speed", 0)
    center_x = params.get("center_x", 0.5)
    center_y = params.get("center_y", 0.5)
    zoom = max(0.5, min(params.get("zoom", 1.0), 3.0))
    # Get time for animation (injected by the runner as "_time")
    t = params.get("_time", 0)
    if state is None:
        state = {}
    h, w = frame.shape[:2]
    # Calculate center in pixels
    cx = int(w * center_x)
    cy = int(h * center_y)
    # Total rotation including time-based animation
    total_rotation = rotation + rotation_speed * t
    # Calculate the angle per segment
    segment_angle = 2 * np.pi / segments
    # Create coordinate maps
    y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32)
    # Translate to center
    x_centered = x_coords - cx
    y_centered = y_coords - cy
    # Convert to polar coordinates
    r = np.sqrt(x_centered**2 + y_centered**2)
    theta = np.arctan2(y_centered, x_centered)
    # Apply rotation
    theta = theta - np.deg2rad(total_rotation)
    # Fold angle into first segment and mirror
    theta_normalized = theta % (2 * np.pi)
    segment_idx = (theta_normalized / segment_angle).astype(int)
    theta_in_segment = theta_normalized - segment_idx * segment_angle
    # Mirror alternating segments so neighbors reflect seamlessly
    mirror_mask = (segment_idx % 2) == 1
    theta_in_segment = np.where(mirror_mask, segment_angle - theta_in_segment, theta_in_segment)
    # Apply zoom (dividing the sample radius magnifies the source region)
    r = r / zoom
    # Convert back to Cartesian (source coordinates to sample from)
    src_x = (r * np.cos(theta_in_segment) + cx).astype(np.float32)
    src_y = (r * np.sin(theta_in_segment) + cy).astype(np.float32)
    # Remap; BORDER_REFLECT fills samples that fall outside the frame
    result = cv2.remap(frame, src_x, src_y,
                       cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)
    return result, state

21
effects/kaleidoscope.sexp Normal file
View File

@@ -0,0 +1,21 @@
;; Kaleidoscope effect - radial mirror segments
;; Usage: python3 plan.py effects/kaleidoscope.sexp -p segments=6 -p rotation_speed=30 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; segments: number of mirror segments (3 to 16, default: 6)
;; rotation_speed: rotation in degrees per second (0 to 180, default: 30)
(recipe "kaleidoscope"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect kaleidoscope :path "sexp_effects/effects/kaleidoscope.sexp")
  ;; Default parameters (overridden by -p segments=N -p rotation_speed=N)
  ;; Note: effects/kaleidoscope.py clamps segments to the range 3-16.
  (def segments 6)
  (def rotation_speed 30)
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect kaleidoscope :segments segments :rotation_speed rotation_speed)))
  result)

86
effects/mirror.py Normal file
View File

@@ -0,0 +1,86 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect mirror
@version 1.0.0
@author artdag
@description
Creates mirror/reflection effects by duplicating and flipping portions
of the frame. Can create kaleidoscope-like symmetry effects.
@param mode string
@enum left right top bottom quad
@default left
Mirror mode:
- left: Left half mirrored to right
- right: Right half mirrored to left
- top: Top half mirrored to bottom
- bottom: Bottom half mirrored to top
- quad: Four-way symmetry (kaleidoscope)
@example
(effect mirror :mode "left")
@example
(effect mirror :mode "quad") ; kaleidoscope effect
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Create mirror/reflection effects.

    Bug fix: the reflected source region is now sized to exactly match the
    destination region (w - w//2 columns, h - h//2 rows). Previously, odd
    widths/heights made the "left", "top" and "quad" modes assign a source
    narrower than the destination, raising a broadcast ValueError. For even
    dimensions the output is byte-identical to before; for odd dimensions
    the center row/column reflects onto itself.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - mode: "left", "right", "top", "bottom" or "quad"
              (default "left")
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    mode = params.get("mode", "left")
    h, w = frame.shape[:2]
    half_w = w // 2
    half_h = h // 2
    result = frame.copy()
    if mode == "left":
        # Reflect the left side onto the right. Source width w - half_w
        # equals the destination width, so odd widths work too.
        result[:, half_w:] = np.flip(result[:, :w - half_w], axis=1)
    elif mode == "right":
        # Reflect the right side onto the left.
        result[:, :half_w] = np.flip(result[:, half_w:], axis=1)[:, :half_w]
    elif mode == "top":
        # Reflect the top onto the bottom.
        result[half_h:, :] = np.flip(result[:h - half_h, :], axis=0)
    elif mode == "bottom":
        # Reflect the bottom onto the top.
        result[:half_h, :] = np.flip(result[half_h:, :], axis=0)[:half_h, :]
    elif mode == "quad":
        # Four-way symmetry from the top-left quadrant: mirror it across
        # the vertical axis, then mirror the whole top half downward.
        result[:half_h, half_w:] = np.flip(result[:half_h, :w - half_w], axis=1)
        result[half_h:, :] = np.flip(result[:h - half_h, :], axis=0)
    # Unknown modes fall through and return an unmodified copy.
    return np.ascontiguousarray(result), state

19
effects/mirror.sexp Normal file
View File

@@ -0,0 +1,19 @@
;; Mirror effect - reflect image
;; Usage: python3 plan.py effects/mirror.sexp -p direction=horizontal | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; direction: mirror axis ("horizontal" or "vertical", default: "horizontal")
(recipe "mirror"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect mirror :path "sexp_effects/effects/mirror.sexp")
  ;; Default parameter (overridden by -p direction=horizontal|vertical)
  (def direction "horizontal")
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  ;; NOTE(review): effects/mirror.py declares a :mode param with values
  ;; left/right/top/bottom/quad, not :direction — unless plan.py maps it,
  ;; this key is ignored and the default "left" mirror is always applied.
  (def result (-> clip (effect mirror :direction direction)))
  result)

122
effects/motion_blur.py Normal file
View File

@@ -0,0 +1,122 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect motion_blur
@version 1.0.0
@author artdag
@description
Motion blur effect. Creates directional blur simulating camera or
subject motion. Great for speed effects.
@param amount int
@range 1 100
@default 20
Blur length in pixels.
@param angle float
@range 0 360
@default 0
Blur direction in degrees (0 = horizontal right).
@param zoom bool
@default false
Radial zoom blur instead of directional.
@param zoom_center_x float
@range 0 1
@default 0.5
Zoom blur center X.
@param zoom_center_y float
@range 0 1
@default 0.5
Zoom blur center Y.
@example
(effect motion_blur :amount 30 :angle 45)
@example
;; Zoom blur
(effect motion_blur :amount 20 :zoom true)
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply motion blur effect to a video frame.

    Two modes: a radial zoom blur (average of progressively scaled copies
    around a center) or a directional blur (convolution with a line-shaped
    kernel).

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - amount: blur length (default 20)
            - angle: blur direction (default 0)
            - zoom: radial zoom blur (default False)
            - zoom_center_x/y: zoom center (default 0.5)
        state: Persistent state dict

    Returns:
        Tuple of (processed_frame, new_state)
    """
    amount = max(1, int(params.get("amount", 20)))
    angle = params.get("angle", 0)
    zoom = params.get("zoom", False)
    zoom_center_x = params.get("zoom_center_x", 0.5)
    zoom_center_y = params.get("zoom_center_y", 0.5)
    if state is None:
        state = {}
    # amount == 1 would be a 1x1 kernel (identity); skip the work
    if amount <= 1:
        return frame, state
    h, w = frame.shape[:2]
    if zoom:
        # Radial zoom blur
        cx, cy = int(w * zoom_center_x), int(h * zoom_center_y)
        # Accumulate multiple scaled versions (equal weights, float32
        # accumulator to avoid uint8 overflow)
        result = np.zeros_like(frame, dtype=np.float32)
        # Cap the sample count so cost stays bounded for large amounts
        num_samples = min(amount, 20)
        for i in range(num_samples):
            # Scale grows from 1.0 toward 1 + amount/100
            scale = 1.0 + (i / num_samples) * (amount / 100.0)
            M = cv2.getRotationMatrix2D((cx, cy), 0, scale)
            scaled = cv2.warpAffine(frame, M, (w, h), borderMode=cv2.BORDER_REFLECT)
            result += scaled.astype(np.float32) / num_samples
        return result.astype(np.uint8), state
    else:
        # Directional motion blur using a line kernel
        angle_rad = np.deg2rad(angle)
        dx = np.cos(angle_rad)
        dy = np.sin(angle_rad)
        # Create motion blur kernel (amount x amount, mostly zeros)
        kernel_size = amount
        kernel = np.zeros((kernel_size, kernel_size), dtype=np.float32)
        # Draw a line through the kernel center along (dx, dy);
        # t sweeps -1..1 across the kernel
        cx, cy = kernel_size // 2, kernel_size // 2
        for i in range(kernel_size):
            t = (i - cx) / max(1, cx)
            x = int(cx + t * cx * dx)
            y = int(cy + t * cy * dy)
            if 0 <= x < kernel_size and 0 <= y < kernel_size:
                kernel[y, x] = 1
        # Normalize kernel so overall brightness is preserved
        kernel_sum = kernel.sum()
        if kernel_sum > 0:
            kernel /= kernel_sum
        # Apply kernel (-1 keeps the source depth, uint8)
        result = cv2.filter2D(frame, -1, kernel)
        return result, state

130
effects/neon_glow.py Normal file
View File

@@ -0,0 +1,130 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect neon_glow
@version 1.0.0
@author artdag
@description
Neon edge glow effect. Detects edges and applies a glowing colored outline.
Great for cyberpunk/synthwave aesthetics synced to music.
@param glow_radius float
@range 0 50
@default 15
Blur radius for the glow. Bind to bass for pulsing glow.
@param glow_intensity float
@range 0.5 5
@default 2.0
Brightness multiplier for the glow.
@param edge_low float
@range 10 200
@default 50
Lower threshold for edge detection.
@param edge_high float
@range 50 300
@default 150
Upper threshold for edge detection.
@param color_r int
@range 0 255
@default 0
Red component of glow color.
@param color_g int
@range 0 255
@default 255
Green component of glow color.
@param color_b int
@range 0 255
@default 255
Blue component of glow color.
@param background float
@range 0 1
@default 0.3
How much of the original image shows through (0 = glow only).
@example
(effect neon_glow :glow_radius 20 :color_r 255 :color_g 0 :color_b 255)
@example
;; Pulsing cyan glow on bass
(effect neon_glow :glow_radius (bind bass :range [5 30] :transform sqrt))
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply neon glow effect to a video frame.

    Pipeline: Canny edge detection -> tint edges with the glow color ->
    Gaussian-blur for the halo -> overlay sharp edge cores -> screen-blend
    with a dimmed copy of the original.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - glow_radius: blur radius (default 15)
            - glow_intensity: brightness (default 2.0)
            - edge_low: canny low threshold (default 50)
            - edge_high: canny high threshold (default 150)
            - color_r/g/b: glow color (default cyan 0,255,255)
            - background: original visibility (default 0.3)
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    glow_radius = int(params.get("glow_radius", 15))
    glow_intensity = params.get("glow_intensity", 2.0)
    edge_low = int(params.get("edge_low", 50))
    edge_high = int(params.get("edge_high", 150))
    color_r = int(params.get("color_r", 0))
    color_g = int(params.get("color_g", 255))
    color_b = int(params.get("color_b", 255))
    background = params.get("background", 0.3)
    h, w = frame.shape[:2]
    color = np.array([color_r, color_g, color_b], dtype=np.float32)
    # Edge detection (pre-blur reduces noise edges)
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edges = cv2.Canny(blurred, edge_low, edge_high)
    # Create colored edge image (edges are 0/255, scaled per channel)
    glow_base = np.zeros((h, w, 3), dtype=np.float32)
    for c in range(3):
        glow_base[:, :, c] = edges.astype(np.float32) * (color[c] / 255.0)
    # Apply blur for glow (kernel size must be odd)
    if glow_radius > 0:
        ksize = glow_radius * 2 + 1
        glow = cv2.GaussianBlur(glow_base, (ksize, ksize), 0)
    else:
        glow = glow_base
    # Intensify
    glow = glow * glow_intensity
    # Add sharp edges on top.
    # NOTE(review): edges is already 0/255, so the extra "* 255" here
    # scales edge cores up to ~65k; after the screen blend below they clip
    # to fully saturated color. Possibly intentional (guarantees bright
    # cores) — confirm before simplifying, since dropping the factor would
    # change the look.
    edge_layer = np.zeros((h, w, 3), dtype=np.float32)
    for c in range(3):
        edge_layer[:, :, c] = edges.astype(np.float32) * (color[c] / 255.0) * 255
    glow = np.maximum(glow, edge_layer)
    # Blend with original
    if background > 0:
        a = frame.astype(np.float32) / 255.0 * background
        b = glow / 255.0
        result = (1 - (1 - a) * (1 - b)) * 255  # Screen blend
    else:
        result = glow
    return np.clip(result, 0, 255).astype(np.uint8), state

21
effects/neon_glow.sexp Normal file
View File

@@ -0,0 +1,21 @@
;; Neon glow effect - glowing edges on dark background
;; Usage: python3 plan.py effects/neon_glow.sexp -p glow_radius=20 -p glow_intensity=2 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; glow_radius: glow spread in pixels (5 to 50, default: 20)
;; glow_intensity: glow brightness (0.5 to 5, default: 2)
(recipe "neon_glow"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect neon_glow :path "sexp_effects/effects/neon_glow.sexp")
  ;; Default parameters (overridden by -p glow_radius=N -p glow_intensity=N)
  ;; Both keys match the parameters declared by effects/neon_glow.py
  (def glow_radius 20)
  (def glow_intensity 2)
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect neon_glow :glow_radius glow_radius :glow_intensity glow_intensity)))
  result)

137
effects/noise.py Normal file
View File

@@ -0,0 +1,137 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect noise
@version 1.0.0
@author artdag
@description
Noise effect. Adds various types of noise to the image including
static, gaussian, salt & pepper, and more.
@param intensity float
@range 0 1
@default 0.2
Noise intensity.
@param mode string
@enum gaussian uniform salt_pepper scanline
@default gaussian
Type of noise:
- gaussian: smooth normal distribution
- uniform: flat random noise
- salt_pepper: random black/white pixels
- scanline: horizontal line noise
@param colored bool
@default false
Use colored noise instead of monochrome.
@param animate bool
@default true
Different noise each frame.
@param seed int
@default 42
Random seed for reproducible noise.
@state rng DeterministicRNG
Random number generator.
@example
(effect noise :intensity 0.3 :mode "gaussian")
@example
;; Static TV noise
(effect noise :intensity 0.5 :mode "uniform" :animate true)
"""
import numpy as np
from pathlib import Path
import sys

# Import DeterministicRNG from same directory.
# NOTE(review): inserting the effects dir at sys.path[0] and importing from
# a module named "random" shadows Python's stdlib `random` for any later
# import in this process — consider renaming the local module (e.g.
# deterministic_rng.py). Confirm the local file is effects/random.py
# before changing anything.
_effects_dir = Path(__file__).parent
if str(_effects_dir) not in sys.path:
    sys.path.insert(0, str(_effects_dir))
from random import DeterministicRNG
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply noise effect to a video frame.

    Noise values are drawn one-by-one from a persistent DeterministicRNG
    stored in state, so the draw order itself defines the output pattern —
    do not vectorize without accounting for that.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters (intensity, mode, colored, animate, seed)
        state: Persistent state dict; holds "rng" across frames

    Returns:
        Tuple of (processed_frame, new_state)
    """
    intensity = params.get("intensity", 0.2)
    mode = params.get("mode", "gaussian")
    colored = params.get("colored", False)
    # NOTE(review): "animate" is read here but never referenced below —
    # the per-frame behavior comes solely from the persistent RNG stream.
    # Confirm whether a fixed-noise (animate=False) branch was intended.
    animate = params.get("animate", True)
    seed = int(params.get("seed", 42))
    if state is None:
        state = {}
    if intensity <= 0:
        return frame, state
    # Initialize RNG once; it persists in state so the stream continues
    # across frames instead of restarting at the seed
    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)
    rng = state["rng"]
    h, w = frame.shape[:2]
    # Work in float32 so additive noise can go below 0 / above 255
    result = frame.astype(np.float32)
    if mode == "gaussian":
        # Gaussian noise, one draw per channel (colored) or per pixel (mono)
        if colored:
            noise = np.array([[[rng.gaussian(0, intensity * 50) for _ in range(3)]
                               for _ in range(w)]
                              for _ in range(h)])
        else:
            noise_2d = np.array([[rng.gaussian(0, intensity * 50)
                                  for _ in range(w)]
                                 for _ in range(h)])
            noise = np.stack([noise_2d, noise_2d, noise_2d], axis=-1)
        result = result + noise
    elif mode == "uniform":
        # Uniform random noise in [-intensity*100, +intensity*100]
        if colored:
            noise = np.array([[[rng.uniform(-intensity * 100, intensity * 100) for _ in range(3)]
                               for _ in range(w)]
                              for _ in range(h)])
        else:
            noise_2d = np.array([[rng.uniform(-intensity * 100, intensity * 100)
                                  for _ in range(w)]
                                 for _ in range(h)])
            noise = np.stack([noise_2d, noise_2d, noise_2d], axis=-1)
        result = result + noise
    elif mode == "salt_pepper":
        # Salt and pepper: each pixel flips to black or white with
        # probability intensity * 0.1
        for y in range(h):
            for x in range(w):
                if rng.uniform() < intensity * 0.1:
                    if rng.uniform() < 0.5:
                        result[y, x] = [0, 0, 0]
                    else:
                        result[y, x] = [255, 255, 255]
    elif mode == "scanline":
        # Horizontal scanline noise: whole rows get a single offset
        for y in range(h):
            if rng.uniform() < intensity * 0.2:
                noise_val = rng.uniform(-intensity * 100, intensity * 100)
                result[y] = result[y] + noise_val
    return np.clip(result, 0, 255).astype(np.uint8), state

19
effects/noise.sexp Normal file
View File

@@ -0,0 +1,19 @@
;; Noise effect - random pixel noise
;; Usage: python3 plan.py effects/noise.sexp -p amount=40 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; amount: noise intensity (5 to 100, default: 40)
(recipe "noise"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect noise :path "sexp_effects/effects/noise.sexp")
  ;; Default parameter (overridden by -p amount=N)
  (def amount 40)
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  ;; NOTE(review): effects/noise.py declares :intensity on a 0-1 scale,
  ;; not :amount (5-100) — unless plan.py maps and rescales this key, it
  ;; is ignored and the default intensity 0.2 is used. Confirm.
  (def result (-> clip (effect noise :amount amount)))
  result)

111
effects/outline.py Normal file
View File

@@ -0,0 +1,111 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect outline
@version 1.0.0
@author artdag
@description
Outline / Toon Edges effect. Extracts and displays edges as outlines,
optionally with fill. Great for cartoon/comic book aesthetics.
@param edge_thickness int
@range 1 10
@default 2
Thickness of outlines in pixels.
@param threshold float
@range 20 300
@default 100
Edge detection sensitivity.
@param outline_color list
@default [0, 0, 0]
RGB color for outlines (default black).
@param fill_mode string
@enum original solid transparent
@default original
What to show in non-edge areas:
- original: keep source image
- solid: fill with solid color
- transparent: black background
@param fill_color list
@default [255, 255, 255]
RGB color for solid fill mode.
@example
(effect outline :edge_thickness 3 :threshold 80)
@example
;; White outlines on black
(effect outline :outline_color [255 255 255] :fill_mode "transparent")
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Draw toon-style outlines from detected edges over a chosen background.

    Args:
        frame: Input frame, (H, W, 3) RGB uint8 numpy array.
        params: Effect parameters:
            - edge_thickness: outline width, clamped to 1-10 (default 2).
            - threshold: edge sensitivity (default 100).
            - outline_color: RGB triple (default [0, 0, 0]).
            - fill_mode: "original", "solid" or "transparent"
              (default "original").
            - fill_color: RGB triple for solid fill (default [255, 255, 255]).
        state: Persistent state dict.

    Returns:
        Tuple of (processed_frame, state).
    """
    thickness = max(1, min(int(params.get("edge_thickness", 2)), 10))
    threshold = params.get("threshold", 100)
    outline_color = params.get("outline_color", [0, 0, 0])
    fill_mode = params.get("fill_mode", "original")
    fill_color = params.get("fill_color", [255, 255, 255])
    if state is None:
        state = {}
    h, w = frame.shape[:2]
    # Canny edges; low threshold is half the high threshold.
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    edges = cv2.Canny(gray, int(threshold * 0.5), int(threshold))
    # Fatten the 1px edges by dilating with a thickness-sized square.
    if thickness > 1:
        edges = cv2.dilate(edges, np.ones((thickness, thickness), np.uint8), iterations=1)
    # Build the background the outlines are drawn over.
    if fill_mode == "original":
        canvas = frame.copy()
    elif fill_mode == "solid":
        has_rgb = isinstance(fill_color, (list, tuple)) and len(fill_color) >= 3
        canvas = np.full((h, w, 3), fill_color[:3] if has_rgb else 255, dtype=np.uint8)
    else:  # transparent/none: plain black
        canvas = np.zeros((h, w, 3), dtype=np.uint8)
    # Paint outline pixels, falling back to black for malformed colors.
    has_rgb = isinstance(outline_color, (list, tuple)) and len(outline_color) >= 3
    pen = np.array(outline_color[:3] if has_rgb else [0, 0, 0], dtype=np.uint8)
    canvas[edges > 0] = pen
    return canvas, state

19
effects/outline.sexp Normal file
View File

@@ -0,0 +1,19 @@
;; Outline effect - draw edges as lines
;; Usage: python3 plan.py effects/outline.sexp -p thickness=2 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; thickness: outline width in pixels (1 to 5, default: 2)
(recipe "outline"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect outline :path "sexp_effects/effects/outline.sexp")
  ;; Default parameter (overridden by -p thickness=N)
  (def thickness 2)
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  ;; effects/outline.py reads outline width from :edge_thickness (1-10);
  ;; the previous :thickness keyword was silently ignored by the effect.
  ;; CLI parameter name is unchanged.
  (def result (-> clip (effect outline :edge_thickness thickness)))
  result)

72
effects/pixelate.py Normal file
View File

@@ -0,0 +1,72 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect pixelate
@version 1.0.0
@author artdag
@description
Pixelate effect. Reduces resolution to create blocky, retro pixel art
look. Great for 8-bit aesthetics.
@param block_size int
@range 2 64
@default 8
Size of pixel blocks. Larger = more pixelated.
@param maintain_edges bool
@default false
Try to preserve edges while pixelating.
@example
(effect pixelate :block_size 16)
@example
;; Beat-reactive pixelation
(effect pixelate :block_size (bind bass :range [4 32]))
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Pixelate a frame by downscaling then upscaling with nearest-neighbor.

    Args:
        frame: Input frame, (H, W, 3) RGB uint8 numpy array.
        params: Effect parameters:
            - block_size: pixel block size, clamped to 2-64 (default 8).
            - maintain_edges: keep original pixels on detected edges
              (default False).
        state: Persistent state dict.

    Returns:
        Tuple of (processed_frame, state).
    """
    block = min(max(int(params.get("block_size", 8)), 2), 64)
    keep_edges = params.get("maintain_edges", False)
    if state is None:
        state = {}
    h, w = frame.shape[:2]
    # Shrink with area averaging, then blow back up with nearest-neighbor
    # so each low-res sample becomes one solid block.
    down_size = (max(1, w // block), max(1, h // block))
    coarse = cv2.resize(frame, down_size, interpolation=cv2.INTER_AREA)
    out = cv2.resize(coarse, (w, h), interpolation=cv2.INTER_NEAREST)
    if keep_edges:
        # Restore original pixels along (slightly dilated) Canny edges.
        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        mask = cv2.dilate(cv2.Canny(gray, 50, 150), np.ones((2, 2), np.uint8)) > 0
        out[mask] = frame[mask]
    return out, state

20
effects/pixelate.sexp Normal file
View File

@@ -0,0 +1,20 @@
;; Pixelate effect - reduce resolution with blocky pixels
;; Usage: python3 plan.py effects/pixelate.sexp -p block_size=16 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; block_size: pixel block size (2 to 64, default: 16)
;; higher = more pixelated
(recipe "pixelate"
  :version "1.0"
  :encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
  (effect pixelate :path "sexp_effects/effects/pixelate.sexp")
  ;; Default parameter (overridden by -p block_size=N)
  ;; :block_size matches the parameter declared by effects/pixelate.py (2-64)
  (def block_size 16)
  (def video (source :path "monday.webm"))
  (def clip (-> video (segment :start 0 :duration 10)))
  (def result (-> clip (effect pixelate :block_size block_size)))
  result)

152
effects/pixelsort.py Normal file
View File

@@ -0,0 +1,152 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect pixelsort
@version 1.0.0
@author artdag
@description
Pixel sorting glitch art effect. Sorts pixels within rows by brightness,
hue, or other properties. Creates distinctive streaked/melted aesthetics.
@param sort_by string
@enum lightness hue saturation red green blue
@default lightness
Property to sort pixels by.
@param threshold_low float
@range 0 255
@default 50
Pixels darker than this are not sorted.
@param threshold_high float
@range 0 255
@default 200
Pixels brighter than this are not sorted.
@param angle float
@range 0 180
@default 0
Sort direction: 0 = horizontal, 90 = vertical.
@param reverse bool
@default false
Reverse the sort order.
@example
(effect pixelsort)
@example
;; Vertical pixel sort
(effect pixelsort :angle 90)
@example
;; Sort by hue for rainbow streaks
(effect pixelsort :sort_by "hue" :threshold_low 20 :threshold_high 240)
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """Sort runs of pixels within rows (or columns) by a chosen property.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8.
        params: Effect parameters:
            - sort_by: property to sort by (default "lightness")
            - threshold_low / threshold_high: only pixels whose sort value
              falls inside [low, high] take part in sorting
            - angle: 0 = horizontal rows, 90 = vertical columns (default 0)
            - reverse: sort descending instead of ascending (default False)
        state: Persistent state dict (passed through unchanged).

    Returns:
        Tuple of (processed_frame, state).
    """
    prop = params.get("sort_by", "lightness")
    lo = params.get("threshold_low", 50)
    hi = params.get("threshold_high", 200)
    direction = params.get("angle", 0)
    descending = params.get("reverse", False)

    # Near-vertical angles are handled by transposing the frame so the
    # row loop below always sorts along the second axis.
    transposed = 45 <= (direction % 180) <= 135
    if transposed:
        frame = np.transpose(frame, (1, 0, 2))
    rows = frame.shape[0]

    out = frame.copy()
    values = _get_sort_values(frame, prop)
    # Only pixels whose sort value lies inside the threshold band move.
    active = (values >= lo) & (values <= hi)

    for y in range(rows):
        line = out[y].copy()
        for start, end in _find_segments(active[y]):
            if end - start <= 1:
                continue
            order = np.argsort(values[y][start:end])
            if descending:
                order = order[::-1]
            line[start:end] = line[start:end][order]
        out[y] = line

    # Undo the transpose for vertical mode.
    if transposed:
        out = np.transpose(out, (1, 0, 2))
    return np.ascontiguousarray(out), state
def _get_sort_values(frame, sort_by):
"""Get values to sort pixels by."""
if sort_by == "lightness":
return cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY).astype(np.float32)
elif sort_by == "hue":
hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
return hsv[:, :, 0].astype(np.float32)
elif sort_by == "saturation":
hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
return hsv[:, :, 1].astype(np.float32)
elif sort_by == "red":
return frame[:, :, 0].astype(np.float32)
elif sort_by == "green":
return frame[:, :, 1].astype(np.float32)
elif sort_by == "blue":
return frame[:, :, 2].astype(np.float32)
return cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY).astype(np.float32)
def _find_segments(mask):
"""Find contiguous True segments in mask."""
segments = []
start = None
for i, val in enumerate(mask):
if val and start is None:
start = i
elif not val and start is not None:
segments.append((start, i))
start = None
if start is not None:
segments.append((start, len(mask)))
return segments

22
effects/pixelsort.sexp Normal file
View File

@@ -0,0 +1,22 @@
;; Pixelsort effect - glitch art pixel sorting
;; Usage: python3 plan.py effects/pixelsort.sexp -p threshold_low=30 -p threshold_high=220 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; threshold_low: lower brightness threshold (0 to 128, default: 30)
;; threshold_high: upper brightness threshold (128 to 255, default: 220)
;; pixels between thresholds get sorted
;;
;; Pipeline: first 10 seconds of monday.webm -> pixelsort -> libx264/CRF 20.
(recipe "pixelsort"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
;; Effect implementation is loaded from the :path below.
(effect pixelsort :path "sexp_effects/effects/pixelsort.sexp")
;; Default parameters (overridden by -p threshold_low=N -p threshold_high=N)
(def threshold_low 30)
(def threshold_high 220)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect pixelsort :threshold_low threshold_low :threshold_high threshold_high)))
;; The final expression is the recipe's output stream.
result)

79
effects/posterize.py Normal file
View File

@@ -0,0 +1,79 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect posterize
@version 1.0.0
@author artdag
@description
Posterize / Color Quantization effect. Reduces the number of colors
to create a poster/cartoon look. Great for stylized visuals.
@param levels int
@range 2 32
@default 8
Number of color levels per channel. Lower = more stylized.
@param dither bool
@default false
Apply dithering to reduce color banding.
@example
(effect posterize :levels 4)
@example
;; Beat-reactive posterization
(effect posterize :levels (bind bass :range [2 16]))
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply posterize (color quantization) to a video frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - levels: color levels per channel, clamped to [2, 256] (default 8)
            - dither: apply Floyd-Steinberg dithering (default False)
        state: Persistent state dict (passed through unchanged)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    levels = max(2, min(int(params.get("levels", 8)), 256))
    dither = params.get("dither", False)
    if state is None:
        state = {}
    step = 256 // levels
    if dither:
        # Floyd-Steinberg error-diffusion dithering.
        # NOTE: pure-Python per-pixel loop; slow on large frames.
        result = frame.astype(np.float32).copy()
        h, w = result.shape[:2]
        for y in range(h):
            for x in range(w):
                for c in range(3):
                    old_val = result[y, x, c]
                    new_val = np.round(old_val / step) * step
                    result[y, x, c] = new_val
                    error = old_val - new_val
                    # Diffuse the quantization error to unvisited neighbors,
                    # guarding the borders. (The previous loop bounds skipped
                    # the last row and the first/last columns entirely,
                    # leaving unposterized edge pixels.)
                    if x + 1 < w:
                        result[y, x + 1, c] += error * 7 / 16
                    if y + 1 < h:
                        if x > 0:
                            result[y + 1, x - 1, c] += error * 3 / 16
                        result[y + 1, x, c] += error * 5 / 16
                        if x + 1 < w:
                            result[y + 1, x + 1, c] += error * 1 / 16
        return np.clip(result, 0, 255).astype(np.uint8), state
    else:
        # Simple floor quantization to the nearest lower level boundary.
        quantized = (frame // step) * step
        return quantized.astype(np.uint8), state

20
effects/posterize.sexp Normal file
View File

@@ -0,0 +1,20 @@
;; Posterize effect - reduce color levels for poster-like look
;; Usage: python3 plan.py effects/posterize.sexp -p levels=6 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; levels: number of color levels per channel (2 to 32, default: 6)
;; lower = more posterized, higher = more gradual
;;
;; Pipeline: first 10 seconds of monday.webm -> posterize -> libx264/CRF 20.
(recipe "posterize"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
;; Effect implementation is loaded from the :path below.
(effect posterize :path "sexp_effects/effects/posterize.sexp")
;; Default parameter (overridden by -p levels=N)
(def levels 6)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect posterize :levels levels)))
;; The final expression is the recipe's output stream.
result)

217
effects/random.py Normal file
View File

@@ -0,0 +1,217 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect random
@version 1.0.0
@author artdag
@description
Deterministic random number generator for use in recipes and effects.
Given the same seed, produces the same sequence of values every time.
This effect doesn't modify the frame - it provides random values that
can be bound to other effect parameters. The random state persists
across frames for consistent sequences.
@param seed int
@default 42
Random seed for reproducibility. Same seed = same sequence.
@param min float
@default 0
Minimum output value.
@param max float
@default 1
Maximum output value.
@param mode string
@enum uniform gaussian integer choice
@default uniform
Distribution type:
- uniform: even distribution between min and max
- gaussian: normal distribution (min=mean, max=stddev)
- integer: random integers between min and max (inclusive)
- choice: randomly pick from a list (use choices param)
@param choices list
@default []
List of values to choose from (for mode=choice).
@param step_every int
@default 1
Only generate new value every N frames (1 = every frame).
@state rng RandomState
Numpy random state for deterministic sequence.
@state frame_count int
Tracks frames for step_every.
@state current_value float
Current random value (persists between steps).
@example
;; Random value 0-1 each frame
(bind (random :seed 123))
@example
;; Random integer 1-10, changes every 5 frames
(random :seed 42 :mode "integer" :min 1 :max 10 :step_every 5)
@example
;; Gaussian noise around 0.5
(random :mode "gaussian" :min 0.5 :max 0.1)
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Generate deterministic random values without altering the frame.

    The frame is passed through untouched; the generated value is
    published in state["value"] so the recipe/executor can bind it to
    other effect parameters.

    Args:
        frame: Input frame (returned unchanged).
        params: Effect parameters
            - seed: random seed (default 42)
            - min / max: output bounds (defaults 0 and 1); for gaussian
              mode min acts as the mean and max as the stddev
            - mode: uniform/gaussian/integer/choice (default uniform)
            - choices: candidate list for choice mode (default [])
            - step_every: frames between new values (default 1)
        state: Persistent state dict
            - rng: numpy RandomState
            - frame_count: frame counter
            - current_value: last generated value

    Returns:
        Tuple of (frame, updated state).
    """
    seed = int(params.get("seed", 42))
    lo = params.get("min", 0)
    hi = params.get("max", 1)
    mode = params.get("mode", "uniform")
    options = params.get("choices", [])
    step_every = max(1, int(params.get("step_every", 1)))

    if state is None:
        state = {}

    # Lazily create the generator so the sequence restarts per effect chain.
    if "rng" not in state:
        state["rng"] = np.random.RandomState(seed)
        state["frame_count"] = 0
        state["current_value"] = None

    rng = state["rng"]
    count = state["frame_count"]

    # Refresh the value every step_every frames (and always on first use).
    if count % step_every == 0 or state["current_value"] is None:
        if mode == "gaussian":
            sample = rng.normal(lo, hi)
        elif mode == "integer":
            sample = rng.randint(int(lo), int(hi) + 1)
        elif mode == "choice" and options:
            sample = options[rng.randint(0, len(options))]
        else:
            # "uniform", plus the fallback for unrecognized modes and
            # choice mode with an empty list.
            sample = rng.uniform(lo, hi)
        state["current_value"] = sample

    state["frame_count"] = count + 1
    # Expose the value where the recipe/executor expects it.
    state["value"] = state["current_value"]
    return frame, state
# Standalone RNG class for use in other effects
class DeterministicRNG:
"""
Deterministic random number generator for use in effects.
Usage in effects:
from effects.random import DeterministicRNG
def process_frame(frame, params, state):
if "rng" not in state:
state["rng"] = DeterministicRNG(params.get("seed", 42))
rng = state["rng"]
value = rng.uniform(0, 1)
integer = rng.randint(0, 10)
choice = rng.choice(["a", "b", "c"])
"""
def __init__(self, seed: int = 42):
"""Initialize with seed for reproducibility."""
self._rng = np.random.RandomState(seed)
self._seed = seed
def seed(self, seed: int):
"""Reset with new seed."""
self._rng = np.random.RandomState(seed)
self._seed = seed
def uniform(self, low: float = 0, high: float = 1) -> float:
"""Random float in [low, high)."""
return self._rng.uniform(low, high)
def randint(self, low: int, high: int) -> int:
"""Random integer in [low, high]."""
return self._rng.randint(low, high + 1)
def gaussian(self, mean: float = 0, stddev: float = 1) -> float:
"""Random float from normal distribution."""
return self._rng.normal(mean, stddev)
def choice(self, items: list):
"""Random choice from list."""
if not items:
return None
return items[self._rng.randint(0, len(items))]
def shuffle(self, items: list) -> list:
"""Return shuffled copy of list."""
result = list(items)
self._rng.shuffle(result)
return result
def sample(self, items: list, n: int) -> list:
"""Random sample of n items without replacement."""
if n >= len(items):
return self.shuffle(items)
indices = self._rng.choice(len(items), n, replace=False)
return [items[i] for i in indices]
def weighted_choice(self, items: list, weights: list):
"""Random choice with weights."""
if not items or not weights:
return None
weights = np.array(weights, dtype=float)
weights /= weights.sum()
idx = self._rng.choice(len(items), p=weights)
return items[idx]
@property
def state(self) -> dict:
"""Get RNG state for serialization."""
return {"seed": self._seed, "state": self._rng.get_state()}
@classmethod
def from_state(cls, state: dict) -> 'DeterministicRNG':
"""Restore RNG from serialized state."""
rng = cls(state["seed"])
rng._rng.set_state(state["state"])
return rng

106
effects/rgb_split.py Normal file
View File

@@ -0,0 +1,106 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect rgb_split
@version 1.0.0
@author artdag
@description
Chromatic aberration / RGB channel separation. Offsets red and blue channels
in different directions, creating color fringing. Classic glitch aesthetic.
@param amount float
@range 0 50
@default 10
Offset amount in pixels. Bind to bass for punchy glitch effect.
@param angle float
@range 0 360
@default 0
Direction of split in degrees. 0 = horizontal, 90 = vertical.
@param red_offset float
@range -50 50
@default 0
Override: specific red channel X offset (ignores amount/angle if set).
@param blue_offset float
@range -50 50
@default 0
Override: specific blue channel X offset (ignores amount/angle if set).
@example
(effect rgb_split :amount 15)
@example
;; Bass-reactive chromatic aberration
(effect rgb_split :amount (bind bass :range [0 30] :transform sqrt))
@example
;; Vertical split
(effect rgb_split :amount 20 :angle 90)
"""
import numpy as np
import cv2
import math
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply RGB channel separation (chromatic aberration) to a frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - amount: offset in pixels (default 10)
            - angle: split direction in degrees (default 0)
            - red_offset / blue_offset: explicit per-channel X offsets;
              when either is given, amount/angle are ignored and the
              Y offsets are zero
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    amount = params.get("amount", 10)
    angle = params.get("angle", 0)
    red_override = params.get("red_offset")
    blue_override = params.get("blue_offset")

    if red_override is None and blue_override is None:
        # Derive opposite red/blue displacements from amount + angle.
        theta = math.radians(angle)
        r_dx = int(amount * math.cos(theta))
        r_dy = int(amount * math.sin(theta))
        b_dx, b_dy = -r_dx, -r_dy
    else:
        # Explicit horizontal offsets only (Y stays zero).
        r_dx, r_dy = int(red_override or 0), 0
        b_dx, b_dy = int(blue_override or 0), 0

    # Nothing to shift: skip the warps entirely.
    if not any((r_dx, r_dy, b_dx, b_dy)):
        return frame, state

    h, w = frame.shape[:2]
    red, green, blue = frame[:, :, 0], frame[:, :, 1], frame[:, :, 2]

    # Translate red and blue with edge replication to avoid black borders.
    shift_red = np.float32([[1, 0, r_dx], [0, 1, r_dy]])
    shift_blue = np.float32([[1, 0, b_dx], [0, 1, b_dy]])
    red = cv2.warpAffine(red, shift_red, (w, h), borderMode=cv2.BORDER_REPLICATE)
    blue = cv2.warpAffine(blue, shift_blue, (w, h), borderMode=cv2.BORDER_REPLICATE)

    # Green stays put; recombine the three planes.
    return np.stack([red, green, blue], axis=-1), state

19
effects/rgb_split.sexp Normal file
View File

@@ -0,0 +1,19 @@
;; RGB split effect - chromatic aberration
;; Usage: python3 plan.py effects/rgb_split.sexp -p offset_x=20 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; offset_x: horizontal color channel offset in pixels (2 to 50, default: 20)
;;
;; Pipeline: first 10 seconds of monday.webm -> rgb_split -> libx264/CRF 20.
(recipe "rgb_split"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
;; Effect implementation is loaded from the :path below.
;; NOTE(review): the Python rgb_split effect reads :amount / :red_offset /
;; :blue_offset - confirm the sexp implementation accepts :offset_x.
(effect rgb_split :path "sexp_effects/effects/rgb_split.sexp")
;; Default parameter (overridden by -p offset_x=N)
(def offset_x 20)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect rgb_split :offset_x offset_x)))
result)

21
effects/ripple.sexp Normal file
View File

@@ -0,0 +1,21 @@
;; Ripple effect - concentric wave distortion from center
;; Usage: python3 plan.py effects/ripple.sexp -p amplitude=20 -p frequency=6 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; amplitude: ripple strength in pixels (5 to 50, default: 20)
;; frequency: number of ripples (1 to 20, default: 6)
;;
;; Pipeline: first 10 seconds of monday.webm -> ripple -> libx264/CRF 20.
(recipe "ripple"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
;; Effect implementation is loaded from the :path below.
(effect ripple :path "sexp_effects/effects/ripple.sexp")
;; Default parameters (overridden by -p amplitude=N -p frequency=N)
(def amplitude 20)
(def frequency 6)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect ripple :amplitude amplitude :frequency frequency)))
;; The final expression is the recipe's output stream.
result)

110
effects/rotate.py Normal file
View File

@@ -0,0 +1,110 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect rotate
@version 1.0.0
@author artdag
@description
Rotates the frame by a specified angle. Supports two modes:
- Static: rotate by fixed angle
- Continuous: rotate by speed (degrees per frame), accumulating over time
The effect uses state to track cumulative rotation for smooth continuous motion.
@param angle float
@range -360 360
@default 0
Rotation angle in degrees. Positive = counter-clockwise.
@param speed float
@range -30 30
@default 0
Rotation speed in degrees per frame (for continuous rotation).
When non-zero, angle is ignored and rotation accumulates.
@param auto_zoom bool
@default false
If true, automatically zooms to crop out black corners.
@state cumulative_angle float
Tracks total rotation for continuous mode. Persists across frames.
@example
(effect rotate :angle 45)
@example
;; Continuous spin at 5 degrees per frame
(effect rotate :speed 5)
@example
;; Beat-reactive rotation
(effect rotate :angle (bind bass :range [0 90]))
"""
import numpy as np
import cv2
import math
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Rotate a video frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - angle: rotation angle in degrees (default 0)
            - speed: degrees per frame for continuous mode (default 0);
              when non-zero, angle is ignored and rotation accumulates
            - auto_zoom: zoom so the rotated frame covers the viewport
              with no black corners (default False)
        state: Persistent state dict
            - cumulative_angle: accumulated rotation for continuous mode

    Returns:
        Tuple of (processed_frame, new_state)
    """
    angle = params.get("angle", 0)
    speed = params.get("speed", 0)
    auto_zoom = params.get("auto_zoom", False)

    if state is None:
        state = {}

    # Continuous mode: accumulate rotation across frames.
    if speed != 0:
        cumulative = state.get("cumulative_angle", 0) + speed
        state["cumulative_angle"] = cumulative
        angle = cumulative

    if angle == 0:
        return frame, state

    h, w = frame.shape[:2]
    center = (w / 2, h / 2)

    # Scale factor so the rotated frame fully covers the original viewport.
    scale = 1.0
    if auto_zoom:
        # Fold the angle into [0, 90]: the required cover scale has period
        # 180 degrees and is symmetric about 90. (The previous "% 90"
        # wrapped 90 deg to 0, producing no zoom at a quarter turn on
        # non-square frames, and mis-folded angles past 90.)
        folded = abs(angle) % 180
        if folded > 90:
            folded = 180 - folded
        angle_rad = math.radians(folded)
        if w >= h:
            scale = math.cos(angle_rad) + (w / h) * math.sin(angle_rad)
        else:
            scale = math.cos(angle_rad) + (h / w) * math.sin(angle_rad)
        scale = max(1.0, scale)

    # cv2 convention: positive angle rotates counter-clockwise.
    rotation_matrix = cv2.getRotationMatrix2D(center, angle, scale)
    result = cv2.warpAffine(
        frame, rotation_matrix, (w, h),
        flags=cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_CONSTANT,
        borderValue=(0, 0, 0)
    )
    return result, state

20
effects/rotate.sexp Normal file
View File

@@ -0,0 +1,20 @@
;; Rotate effect - rotate image around center
;; Usage: python3 plan.py effects/rotate.sexp -p angle=15 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; angle: rotation in degrees (-180 to 180, default: 15)
;; positive = clockwise
;; NOTE(review): the OpenCV-based rotate effect treats positive angles as
;; counter-clockwise - confirm the "clockwise" claim above against the
;; implementation actually used.
(recipe "rotate"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
;; Effect implementation is loaded from the :path below.
(effect rotate :path "sexp_effects/effects/rotate.sexp")
;; Default parameter (overridden by -p angle=N)
(def angle 15)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect rotate :angle angle)))
result)

61
effects/saturation.py Normal file
View File

@@ -0,0 +1,61 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect saturation
@version 1.0.0
@author artdag
@description
Adjusts color saturation. 0 = grayscale, 1 = original, 2 = oversaturated.
Uses HSV color space for accurate saturation control.
@param factor float
@range 0 3
@default 1.0
Saturation multiplier. 0 = grayscale, 1 = unchanged, 2 = double saturation.
@example
(effect saturation :factor 0) ; grayscale
@example
(effect saturation :factor 1.5) ; vibrant colors
@example
;; Desaturate on quiet parts, saturate on loud
(effect saturation :factor (bind energy :range [0.5 2.0]))
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Scale the color saturation of a frame via the HSV color space.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - factor: saturation multiplier (default 1.0);
              0 = grayscale, 1 = unchanged, >1 = more vivid
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    factor = params.get("factor", 1.0)

    # Identity factor: skip the color-space round trip entirely.
    if factor == 1.0:
        return frame, state

    # Work in HSV so only the saturation plane (index 1) is touched.
    hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV).astype(np.float32)
    hsv[..., 1] = np.clip(hsv[..., 1] * factor, 0, 255)
    return cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2RGB), state

20
effects/saturation.sexp Normal file
View File

@@ -0,0 +1,20 @@
;; Saturation effect - adjust color saturation
;; Usage: python3 plan.py effects/saturation.sexp -p amount=2.0 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; amount: saturation multiplier (0.0 to 3.0, default: 2.0)
;; 0 = grayscale, 1 = normal, >1 = vivid colors
;;
;; Pipeline: first 10 seconds of monday.webm -> saturation -> libx264/CRF 20.
(recipe "saturation"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
;; Effect implementation is loaded from the :path below.
;; NOTE(review): the Python saturation effect reads :factor - confirm the
;; sexp implementation accepts :amount.
(effect saturation :path "sexp_effects/effects/saturation.sexp")
;; Default parameter (overridden by -p amount=N)
(def amount 2.0)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect saturation :amount amount)))
result)

116
effects/scanlines.py Normal file
View File

@@ -0,0 +1,116 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect scanlines
@version 1.0.0
@author artdag
@description
VHS/CRT-style scan line shift. Horizontally displaces alternating lines
to create analog video distortion. Great for retro/glitch aesthetics.
@param amplitude float
@range 0 100
@default 10
Maximum shift amount in pixels. Bind to bass for reactive glitch.
@param frequency float
@range 1 100
@default 10
Lines per cycle (affects pattern density).
@param randomness float
@range 0 1
@default 0.5
0 = regular sine pattern, 1 = fully random shifts.
@param line_gap int
@range 1 20
@default 1
Only shift every Nth line (1 = all lines).
@param seed int
@default 42
Random seed for deterministic patterns.
@state rng DeterministicRNG
Random number generator for reproducible results.
@example
(effect scanlines :amplitude 20)
@example
;; Heavy glitch on bass
(effect scanlines :amplitude (bind bass :range [0 50]) :randomness 0.8)
@example
;; Reproducible scanlines
(effect scanlines :amplitude 30 :randomness 0.7 :seed 999)
"""
import numpy as np
from pathlib import Path
import sys
# Import DeterministicRNG from same directory
_effects_dir = Path(__file__).parent
if str(_effects_dir) not in sys.path:
sys.path.insert(0, str(_effects_dir))
from random import DeterministicRNG
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Horizontally displace scan lines for a CRT/VHS glitch look.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - amplitude: max shift in pixels (default 10)
            - frequency: lines per sine cycle (default 10; 0 coerced to 1)
            - randomness: 0 = pure sine, 1 = pure random (default 0.5)
            - line_gap: shift every Nth line (default 1)
            - seed: random seed (default 42)
        state: Persistent state dict
            - rng: DeterministicRNG instance (created on first frame)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    amplitude = params.get("amplitude", 10)
    frequency = params.get("frequency", 10) or 1  # guard divide-by-zero
    randomness = params.get("randomness", 0.5)
    line_gap = max(1, int(params.get("line_gap", 1)))
    seed = int(params.get("seed", 42))

    if state is None:
        state = {}
    # Create the deterministic RNG once so the pattern replays per seed.
    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)
    rng = state["rng"]

    if amplitude == 0:
        return frame, state

    height = frame.shape[0]
    shifted = frame.copy()
    for y in range(0, height, line_gap):
        # Blend a sine pattern with random jitter according to randomness.
        if randomness >= 1.0:
            offset = int(rng.uniform(-amplitude, amplitude))
        elif randomness <= 0:
            offset = int(amplitude * np.sin(2 * np.pi * y / frequency))
        else:
            wave = amplitude * np.sin(2 * np.pi * y / frequency)
            jitter = rng.uniform(-amplitude, amplitude)
            offset = int(wave * (1 - randomness) + jitter * randomness)
        if offset:
            # Roll this row horizontally (axis 0 of the (W, 3) row slice).
            shifted[y] = np.roll(shifted[y], offset, axis=0)
    return shifted, state

21
effects/scanlines.sexp Normal file
View File

@@ -0,0 +1,21 @@
;; Scanlines effect - horizontal line overlay
;; Usage: python3 plan.py effects/scanlines.sexp -p spacing=3 -p intensity=0.4 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; spacing: pixels between lines (1 to 8, default: 3)
;; intensity: line darkness (0 to 1, default: 0.4)
;;
;; Pipeline: first 10 seconds of monday.webm -> scanlines -> libx264/CRF 20.
(recipe "scanlines"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
;; Effect implementation is loaded from the :path below.
;; NOTE(review): the Python scanlines effect is a line-shift glitch reading
;; :amplitude/:frequency - confirm the sexp implementation accepts
;; :spacing/:intensity as described above.
(effect scanlines :path "sexp_effects/effects/scanlines.sexp")
;; Default parameters (overridden by -p spacing=N -p intensity=N)
(def spacing 3)
(def intensity 0.4)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect scanlines :spacing spacing :intensity intensity)))
result)

142
effects/scatter.py Normal file
View File

@@ -0,0 +1,142 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "scipy"]
# ///
"""
@effect scatter
@version 1.0.0
@author artdag
@description
Scatter effect. Randomly redistributes pixels in local area creating
a dissolving, dispersed look.
@param amount float
@range 0 100
@default 10
Scatter radius in pixels. Bind to energy for reactive dissolve.
@param randomize_per_frame bool
@default false
Different scatter pattern each frame (vs. static pattern).
@param grain float
@range 0 1
@default 0
Add film grain to scattered result.
@param seed int
@default 42
Random seed for reproducible patterns.
@state rng DeterministicRNG
Random number generator for displacement.
@state displacement_map ndarray
Cached displacement map for static mode.
@example
(effect scatter :amount 20)
@example
;; Reactive dissolve
(effect scatter :amount (bind energy :range [5 50]) :randomize_per_frame true)
"""
import numpy as np
from scipy import ndimage
from pathlib import Path
import sys
# Import DeterministicRNG from same directory
_effects_dir = Path(__file__).parent
if str(_effects_dir) not in sys.path:
sys.path.insert(0, str(_effects_dir))
from random import DeterministicRNG
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply scatter (local pixel dispersion) to a video frame.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - amount: scatter radius in pixels, clipped to [0, 100] (default 10)
            - randomize_per_frame: new pattern each frame (default False)
            - grain: film-grain strength 0-1 (default 0)
            - seed: random seed (default 42)
        state: Persistent state dict
            - rng: DeterministicRNG for displacement
            - displacement_map: cached map for static mode
            - last_key: (h, w, amount) the cached map was built for

    Returns:
        Tuple of (processed_frame, new_state)
    """
    amount = int(np.clip(params.get("amount", 10), 0, 100))
    randomize = params.get("randomize_per_frame", False)
    grain = params.get("grain", 0)
    seed = int(params.get("seed", 42))

    if state is None:
        state = {}
    if amount < 1:
        return frame, state

    # Initialize RNG on first use
    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)
    rng = state["rng"]

    h, w = frame.shape[:2]

    # Rebuild the displacement map when animating, on first use, or when
    # the frame size OR scatter radius changed. (Previously only the size
    # was in the cache key, so a bound/animated `amount` was silently
    # ignored in static mode after the first frame.)
    cache_key = (h, w, amount)
    if randomize or "displacement_map" not in state or state.get("last_key") != cache_key:
        # Per-pixel scalar draws keep the DeterministicRNG sequence
        # reproducible; NOTE(review): vectorizing this would change the
        # random sequence, at the price of a slow Python loop here.
        displacement = np.zeros((h, w, 2), dtype=np.float32)
        for y in range(h):
            for x in range(w):
                displacement[y, x, 0] = rng.uniform(-amount, amount)
                displacement[y, x, 1] = rng.uniform(-amount, amount)
        state["displacement_map"] = displacement
        state["last_key"] = cache_key
    displacement_map = state["displacement_map"]

    # Source coordinates = identity grid + displacement, clamped in-bounds.
    y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32)
    new_y = np.clip(y_coords + displacement_map[:, :, 0], 0, h - 1)
    new_x = np.clip(x_coords + displacement_map[:, :, 1], 0, w - 1)

    # Bilinear sampling at the displaced positions, channel by channel.
    result = np.zeros_like(frame)
    if len(frame.shape) > 2:
        for c in range(frame.shape[2]):
            result[:, :, c] = ndimage.map_coordinates(
                frame[:, :, c],
                [new_y, new_x],
                order=1,
                mode='reflect'
            )
    else:
        result = ndimage.map_coordinates(
            frame,
            [new_y, new_x],
            order=1,
            mode='reflect'
        )

    # Optional film grain on top of the scattered image.
    if grain > 0:
        noise = np.array([[[rng.uniform(-grain * 20, grain * 20) for _ in range(3)]
                           for _ in range(w)]
                          for _ in range(h)])
        result = np.clip(result.astype(np.float32) + noise, 0, 255).astype(np.uint8)
    return result, state

14
effects/sepia.sexp Normal file
View File

@@ -0,0 +1,14 @@
;; Sepia effect - vintage brown tone
;; Usage: python3 plan.py effects/sepia.sexp | python3 execute.py - -d . -o output.mp4
;; No parameters
;;
;; Pipeline: first 10 seconds of monday.webm -> sepia -> libx264/CRF 20.
(recipe "sepia"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
;; Effect implementation is loaded from the :path below.
(effect sepia :path "sexp_effects/effects/sepia.sexp")
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect sepia)))
;; The final expression is the recipe's output stream.
result)

63
effects/sharpen.py Normal file
View File

@@ -0,0 +1,63 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect sharpen
@version 1.0.0
@author artdag
@description
Sharpening effect using unsharp mask technique. Enhances edges and detail.
Great for making footage pop on beats.
@param amount float
@range 0 5
@default 1.0
Sharpening intensity. 0 = no change, 1 = normal, 2+ = aggressive.
@param radius float
@range 0 10
@default 1
Radius of sharpening (affects edge thickness).
@example
(effect sharpen :amount 1.5)
@example
;; Sharpen on beats
(effect sharpen :amount (bind bass :range [0.5 2.0]))
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Sharpen a frame using an unsharp mask.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - amount: sharpening intensity (default 1.0); <= 0 is a no-op
            - radius: blur radius controlling edge thickness (default 1)
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    amount = params.get("amount", 1.0)
    radius = params.get("radius", 1)

    if amount <= 0:
        return frame, state

    # Gaussian kernel size must be odd; derive it from the radius.
    kernel = 2 * max(1, int(radius)) + 1
    soft = cv2.GaussianBlur(frame, (kernel, kernel), 0).astype(np.float32)
    base = frame.astype(np.float32)
    # Unsharp mask: boost the difference between the image and its blur.
    sharpened = base + amount * (base - soft)
    return np.clip(sharpened, 0, 255).astype(np.uint8), state

20
effects/sharpen.sexp Normal file
View File

@@ -0,0 +1,20 @@
;; Sharpen effect - enhance edge detail
;; Usage: python3 plan.py effects/sharpen.sexp -p amount=2 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; amount: sharpening strength (0.5 to 5, default: 2)
;; higher = sharper edges
;;
;; Pipeline: first 10 seconds of monday.webm -> sharpen -> libx264/CRF 20.
(recipe "sharpen"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
;; Effect implementation is loaded from the :path below.
(effect sharpen :path "sexp_effects/effects/sharpen.sexp")
;; Default parameter (overridden by -p amount=N)
(def amount 2)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect sharpen :amount amount)))
result)

233
effects/shatter.py Normal file
View File

@@ -0,0 +1,233 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "scipy"]
# ///
"""
@effect shatter
@version 1.0.0
@author artdag
@description
Shatter effect. Explodes the image into flying pieces that move
outward from a center point. Great for beat drops and transitions.
@param intensity float
@range 0 1
@default 0.5
Explosion force (0 = no effect, 1 = full explosion).
@param num_pieces int
@range 10 200
@default 50
Number of shatter pieces.
@param center_x float
@range 0 1
@default 0.5
Explosion center X position.
@param center_y float
@range 0 1
@default 0.5
Explosion center Y position.
@param rotation_speed float
@range 0 5
@default 1.0
How fast pieces rotate as they fly.
@param gravity float
@range 0 2
@default 0.3
Downward pull on pieces.
@param fade_out bool
@default true
Fade pieces as they fly away.
@param seed int
@default 42
Random seed for piece positions.
@state pieces list
List of piece positions and velocities.
@example
(effect shatter :intensity 0.7 :num_pieces 80)
@example
;; Beat-reactive explosion
(effect shatter :intensity (bind onset :range [0 1]) :gravity 0.5)
"""
import numpy as np
from scipy import ndimage
from pathlib import Path
import sys
# Import DeterministicRNG from same directory
_effects_dir = Path(__file__).parent
if str(_effects_dir) not in sys.path:
sys.path.insert(0, str(_effects_dir))
from random import DeterministicRNG
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply shatter effect to a video frame.

    Explodes the frame into grid pieces that fly outward from a center
    point with simple ballistic motion (velocity + gravity + rotation).

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters (intensity, num_pieces, center_x,
            center_y, rotation_speed, gravity, fade_out, seed, _time)
        state: Persistent state dict (pieces, requested_pieces, start_time)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    intensity = np.clip(params.get("intensity", 0.5), 0, 1)
    num_pieces = max(10, min(int(params.get("num_pieces", 50)), 200))
    center_x = params.get("center_x", 0.5)
    center_y = params.get("center_y", 0.5)
    rotation_speed = params.get("rotation_speed", 1.0)
    gravity = params.get("gravity", 0.3)
    fade_out = params.get("fade_out", True)
    seed = int(params.get("seed", 42))
    t = params.get("_time", 0)
    if state is None:
        state = {}
    if intensity < 0.01:
        return frame, state
    h, w = frame.shape[:2]
    cx, cy = int(center_x * w), int(center_y * h)
    # Initialize pieces.
    # BUGFIX: _init_pieces builds a rows*cols grid, which is usually NOT
    # exactly num_pieces, so the previous check
    # `len(state["pieces"]) != num_pieces` re-seeded the pieces and reset
    # start_time on *every* frame, freezing the animation at t=0. Track the
    # requested count instead and only re-init when it changes.
    if "pieces" not in state or state.get("requested_pieces") != num_pieces:
        state["pieces"] = _init_pieces(w, h, num_pieces, cx, cy, seed)
        state["requested_pieces"] = num_pieces
        state["start_time"] = t
    pieces = state["pieces"]
    start_time = state.get("start_time", t)
    effect_t = t - start_time
    # Pieces are composited over a black background.
    result = np.zeros_like(frame)
    time_factor = effect_t * intensity * 3.0
    # Draw each piece
    for piece in pieces:
        px, py, pw, ph, vx, vy, rot = piece
        if pw <= 0 or ph <= 0:
            continue
        # Ballistic position: linear velocity plus quadratic gravity term.
        curr_x = px + vx * time_factor * w * 0.5
        curr_y = py + vy * time_factor * h * 0.5 + gravity * time_factor ** 2 * h * 0.2
        curr_rot = rot * rotation_speed * time_factor * 180
        # Alpha fades with distance travelled (when fade_out is set).
        distance = np.sqrt((curr_x - px)**2 + (curr_y - py)**2)
        alpha = 1.0 - (distance / max(w, h)) if fade_out else 1.0
        alpha = max(0, min(1, alpha))
        if alpha < 0.05:
            continue
        # Extract piece from original frame (clipped to frame bounds).
        px1, py1 = max(0, int(px)), max(0, int(py))
        px2, py2 = min(w, int(px + pw)), min(h, int(py + ph))
        if px2 <= px1 or py2 <= py1:
            continue
        piece_img = frame[py1:py2, px1:px2].copy()
        # Rotate piece; tiny rotations are skipped to save work.
        if abs(curr_rot) > 1:
            piece_img = ndimage.rotate(piece_img, curr_rot, reshape=False, mode='constant', cval=0)
        # Destination rectangle, clipped against the frame on all sides.
        dest_x = int(curr_x)
        dest_y = int(curr_y)
        piece_h, piece_w = piece_img.shape[:2]
        src_x1, src_y1 = 0, 0
        src_x2, src_y2 = piece_w, piece_h
        dst_x1, dst_y1 = dest_x, dest_y
        dst_x2, dst_y2 = dest_x + piece_w, dest_y + piece_h
        if dst_x1 < 0:
            src_x1 = -dst_x1
            dst_x1 = 0
        if dst_y1 < 0:
            src_y1 = -dst_y1
            dst_y1 = 0
        if dst_x2 > w:
            src_x2 -= (dst_x2 - w)
            dst_x2 = w
        if dst_y2 > h:
            src_y2 -= (dst_y2 - h)
            dst_y2 = h
        if dst_x2 <= dst_x1 or dst_y2 <= dst_y1:
            continue
        if src_x2 <= src_x1 or src_y2 <= src_y1:
            continue
        # Alpha-blend the clipped piece onto the accumulator.
        piece_region = piece_img[src_y1:src_y2, src_x1:src_x2]
        if piece_region.size == 0:
            continue
        result_region = result[dst_y1:dst_y2, dst_x1:dst_x2]
        if result_region.shape != piece_region.shape:
            continue
        result[dst_y1:dst_y2, dst_x1:dst_x2] = (
            result_region * (1 - alpha) + piece_region * alpha
        ).astype(np.uint8)
    # Cross-fade between the untouched frame and the shattered render.
    final = ((1 - intensity) * frame + intensity * result).astype(np.uint8)
    return final, state
def _init_pieces(w: int, h: int, num_pieces: int, cx: int, cy: int, seed: int) -> list:
    """Build the initial shatter pieces on a grid.

    Lays a rows*cols grid over the frame (so the actual piece count is
    rows*cols, approximately num_pieces) and assigns every piece an
    outward velocity from (cx, cy) plus deterministic jitter and spin.
    """
    rng = DeterministicRNG(seed)

    # Grid dimensions chosen so tiles are roughly square for the aspect ratio.
    cols = max(1, int(np.sqrt(num_pieces * w / h)))
    rows = max(1, int(num_pieces / cols))
    tile_w = w // cols
    tile_h = h // rows

    pieces = []
    for r in range(rows):
        for c in range(cols):
            left = c * tile_w
            top = r * tile_h
            # The last column/row absorbs the integer-division remainder.
            width = tile_w + (w % cols if c == cols - 1 else 0)
            height = tile_h + (h % rows if r == rows - 1 else 0)

            # Unit vector pointing away from the explosion center, jittered.
            mid_x = left + width // 2
            mid_y = top + height // 2
            off_x = mid_x - cx
            off_y = mid_y - cy
            length = max(1, np.sqrt(off_x * off_x + off_y * off_y))
            vel_x = off_x / length + rng.uniform(-0.3, 0.3)
            vel_y = off_y / length + rng.uniform(-0.3, 0.3)
            spin = rng.uniform(-2, 2)

            pieces.append((left, top, width, height, vel_x, vel_y, spin))
    return pieces

90
effects/strobe.py Normal file
View File

@@ -0,0 +1,90 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect strobe
@version 1.0.0
@author artdag
@description
Strobe / Posterize Time effect. Locks video to a reduced frame rate,
creating a choppy, stop-motion look. Also known as frame hold.
@param frame_rate float
@range 1 60
@default 12
Target frame rate in fps. Lower = choppier.
@param sync_to_beat bool
@default false
If true, hold frames until next beat (overrides frame_rate).
@param beat_divisor int
@range 1 8
@default 1
Hold for 1/N beats when sync_to_beat is true.
@state held_frame ndarray
Currently held frame.
@state held_until float
Time until which to hold the frame.
@example
(effect strobe :frame_rate 8)
@example
;; Very choppy at 4 fps
(effect strobe :frame_rate 4)
@example
;; Beat-synced frame hold
(effect strobe :sync_to_beat true :beat_divisor 2)
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply strobe/posterize-time: hold each captured frame for a fixed
    duration so the output plays back at a reduced effective frame rate.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - frame_rate: target fps 1-60 (default 12)
            - sync_to_beat: read but not implemented in this body
            - beat_divisor: read but not implemented in this body
        state: Persistent state dict
            - held_frame: currently held frame (None until first capture)
            - held_until: timestamp at which the hold expires
            - last_beat: reserved for beat syncing

    Returns:
        Tuple of (processed_frame, new_state)
    """
    fps = max(1, min(params.get("frame_rate", 12), 60))
    # These two are accepted for API compatibility; no beat data is
    # consumed in this implementation.
    sync_to_beat = params.get("sync_to_beat", False)
    beat_divisor = max(1, int(params.get("beat_divisor", 1)))

    # Timeline position supplied by the executor.
    now = params.get("_time", 0)

    state = {} if state is None else state
    if "held_frame" not in state:
        state.update(held_frame=None, held_until=0.0, last_beat=-1)

    # Capture a fresh frame once the previous hold period has elapsed.
    hold_period = 1.0 / fps
    if now >= state["held_until"]:
        state["held_frame"] = frame.copy()
        state["held_until"] = now + hold_period

    held = state["held_frame"]
    return (held if held is not None else frame), state

20
effects/strobe.sexp Normal file
View File

@@ -0,0 +1,20 @@
;; Strobe effect - flashing/blinking
;; Usage: python3 plan.py effects/strobe.sexp -p frequency=4 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; frequency: flashes per second (1 to 15, default: 4)
;; WARNING: high values may cause discomfort
(recipe "strobe"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
;; NOTE(review): this file lives at effects/strobe.sexp but :path references
;; sexp_effects/effects/strobe.sexp — confirm against the loader's search root.
;; NOTE(review): this recipe passes :frequency, while the Python strobe effect
;; takes frame_rate — verify the sexp effect at :path accepts :frequency.
(effect strobe :path "sexp_effects/effects/strobe.sexp")
;; Default parameter (overridden by -p frequency=N)
(def frequency 4)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect strobe :frequency frequency)))
result)

121
effects/swirl.py Normal file
View File

@@ -0,0 +1,121 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect swirl
@version 1.0.0
@author artdag
@description
Spiral/vortex distortion that twists the image around a center point.
Creates whirlpool-like effects. Great for psychedelic/hypnotic visuals.
@param strength float
@range -10 10
@default 1.0
Swirl strength in radians. Positive = counter-clockwise, negative = clockwise.
@param radius float
@range 0.1 2
@default 0.5
Effect radius as fraction of image size. Larger = wider swirl.
@param center_x float
@range 0 1
@default 0.5
Horizontal center of swirl (0 = left, 1 = right).
@param center_y float
@range 0 1
@default 0.5
Vertical center of swirl (0 = top, 1 = bottom).
@param falloff string
@enum linear quadratic gaussian
@default quadratic
How swirl strength decreases from center:
- linear: constant decrease
- quadratic: sharper center, softer edges
- gaussian: smooth bell curve
@example
(effect swirl :strength 2)
@example
;; Reactive swirl
(effect swirl :strength (bind energy :range [0 5]))
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply swirl (vortex) distortion to a video frame.

    Rotates each pixel's sampling position around a center point by an
    angle that falls off with distance from that center.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - strength: swirl amount in radians (default 1.0)
            - radius: effect radius as fraction (default 0.5)
            - center_x: horizontal center 0-1 (default 0.5)
            - center_y: vertical center 0-1 (default 0.5)
            - falloff: linear/quadratic/gaussian (default quadratic)
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    twist = params.get("strength", 1.0)
    radius_frac = params.get("radius", 0.5)
    cx_frac = params.get("center_x", 0.5)
    cy_frac = params.get("center_y", 0.5)
    falloff = params.get("falloff", "quadratic")

    if twist == 0:
        return frame, state

    h, w = frame.shape[:2]
    cx = w * cx_frac
    cy = h * cy_frac
    radius = max(w, h) * radius_frac

    # Polar coordinates of every pixel relative to the swirl center.
    ys, xs = np.mgrid[0:h, 0:w].astype(np.float64)
    dx = xs - cx
    dy = ys - cy
    dist = np.sqrt(dx**2 + dy**2)
    angle = np.arctan2(dy, dx)
    norm_dist = dist / radius

    # Strength falloff with normalized distance from the center.
    if falloff == "linear":
        factor = np.maximum(0, 1 - norm_dist)
    elif falloff == "gaussian":
        factor = np.exp(-norm_dist**2 * 2)
    else:  # quadratic
        factor = np.maximum(0, 1 - norm_dist**2)

    # Rotate the sampling angle, then convert back to cartesian maps.
    twisted = angle + twist * factor
    map_x = (cx + dist * np.cos(twisted)).astype(np.float32)
    map_y = (cy + dist * np.sin(twisted)).astype(np.float32)

    warped = cv2.remap(
        frame, map_x, map_y,
        cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_REFLECT
    )
    return warped, state

20
effects/swirl.sexp Normal file
View File

@@ -0,0 +1,20 @@
;; Swirl effect - spiral distortion from center
;; Usage: python3 plan.py effects/swirl.sexp -p strength=3 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; strength: swirl intensity (-10 to 10, default: 3)
;; positive = clockwise, negative = counter-clockwise
;; NOTE(review): the Python swirl effect documents positive = counter-clockwise,
;; the opposite of the comment above — confirm which is correct on screen.
(recipe "swirl"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
;; NOTE(review): this file lives at effects/swirl.sexp but :path references
;; sexp_effects/effects/swirl.sexp — confirm against the loader's search root.
(effect swirl :path "sexp_effects/effects/swirl.sexp")
;; Default parameter (overridden by -p strength=N)
(def strength 3)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect swirl :strength strength)))
result)

85
effects/threshold.py Normal file
View File

@@ -0,0 +1,85 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect threshold
@version 1.0.0
@author artdag
@description
Threshold effect. Converts to high-contrast black and white.
Creates stark, graphic look by converting grayscale to pure
black/white based on a threshold value.
@param level int
@range 0 255
@default 128
Threshold level. Pixels above = white, below = black.
@param invert bool
@default false
Swap black and white.
@param color_mode string
@enum bw color
@default bw
Output mode:
- bw: pure black and white
- color: keep original colors where above threshold
@example
(effect threshold :level 100)
@example
;; Beat-reactive threshold
(effect threshold :level (bind bass :range [80 180]) :invert true)
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply threshold effect to a video frame.

    Args:
        frame: Input frame as numpy array, (H, W, 3) RGB uint8 or (H, W)
            grayscale uint8
        params: Effect parameters
            - level: threshold 0-255 (default 128)
            - invert: swap black/white (default False)
            - color_mode: bw or color (default bw)
        state: Persistent state dict

    Returns:
        Tuple of (processed_frame, new_state); output shape matches input.
    """
    level = int(np.clip(params.get("level", 128), 0, 255))
    invert = params.get("invert", False)
    color_mode = params.get("color_mode", "bw")
    if state is None:
        state = {}
    # Convert to grayscale for threshold comparison
    if len(frame.shape) == 3:
        gray = np.mean(frame, axis=2)
    else:
        gray = frame
    # Apply threshold
    mask = gray > level
    if invert:
        mask = ~mask
    if color_mode == "bw":
        # Pure black and white
        if len(frame.shape) == 3:
            result = np.where(mask[:, :, np.newaxis], 255, 0).astype(np.uint8)
            result = np.repeat(result, frame.shape[2], axis=2)
        else:
            # FIX: 2-D input previously came back as (H, W, 1); keep it 2-D.
            result = np.where(mask, 255, 0).astype(np.uint8)
    else:
        # Keep original colors where above threshold
        if len(frame.shape) == 3:
            result = np.where(mask[:, :, np.newaxis], frame, 0).astype(np.uint8)
        else:
            # FIX: 2-D input previously broadcast (H, W, 1) against (H, W),
            # producing a bogus (H, W, W) array.
            result = np.where(mask, frame, 0).astype(np.uint8)
    return result, state

20
effects/threshold.sexp Normal file
View File

@@ -0,0 +1,20 @@
;; Threshold effect - convert to pure black and white
;; Usage: python3 plan.py effects/threshold.sexp -p level=128 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; level: threshold cutoff (0 to 255, default: 128)
;; pixels below = black, above = white
(recipe "threshold"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
;; NOTE(review): this file lives at effects/threshold.sexp but :path references
;; sexp_effects/effects/threshold.sexp — confirm against the loader's search root.
(effect threshold :path "sexp_effects/effects/threshold.sexp")
;; Default parameter (overridden by -p level=N)
(def level 128)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect threshold :level level)))
result)

134
effects/tile_grid.py Normal file
View File

@@ -0,0 +1,134 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect tile_grid
@version 1.0.0
@author artdag
@description
Tile Grid effect. Repeats the frame in a grid pattern creating
a mosaic by tiling scaled-down copies. Great for psychedelic visuals.
@param rows int
@range 1 10
@default 2
Number of rows in grid.
@param cols int
@range 1 10
@default 2
Number of columns in grid.
@param gap int
@range 0 50
@default 0
Gap between tiles in pixels.
@param gap_color list
@default [0, 0, 0]
RGB color for gaps.
@param rotation_per_tile float
@range -180 180
@default 0
Rotation increment per tile in degrees.
@param alternate_flip bool
@default false
Flip alternating tiles horizontally.
@example
(effect tile_grid :rows 3 :cols 3)
@example
;; Rotating tiles
(effect tile_grid :rows 2 :cols 2 :rotation_per_tile 90)
@example
;; Beat-reactive grid
(effect tile_grid :rows (bind bass :range [2 6]) :cols (bind bass :range [2 6]))
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Tile the frame as a grid of scaled-down copies.

    Supports optional per-tile rotation, alternating horizontal flips,
    and colored gaps between tiles.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - rows / cols: grid size (default 2x2, clamped 1-10)
            - gap: gap between tiles in pixels (default 0)
            - gap_color: RGB list for gaps (default [0,0,0])
            - rotation_per_tile: rotation increment in degrees (default 0)
            - alternate_flip: flip alternating tiles (default False)
        state: Persistent state dict

    Returns:
        Tuple of (processed_frame, new_state)
    """
    rows = max(1, min(int(params.get("rows", 2)), 10))
    cols = max(1, min(int(params.get("cols", 2)), 10))
    gap = max(0, int(params.get("gap", 0)))
    gap_color = params.get("gap_color", [0, 0, 0])
    step_angle = params.get("rotation_per_tile", 0)
    alternate_flip = params.get("alternate_flip", False)

    if state is None:
        state = {}

    h, w = frame.shape[:2]

    # Tile size after reserving space for inter-tile gaps.
    tile_w = (w - gap * (cols - 1)) // cols
    tile_h = (h - gap * (rows - 1)) // rows
    if tile_w <= 0 or tile_h <= 0:
        return frame, state

    # One shared scaled-down copy; per-tile transforms start from it.
    base_tile = cv2.resize(frame, (tile_w, tile_h), interpolation=cv2.INTER_LINEAR)

    # Canvas pre-filled with the gap color.
    if isinstance(gap_color, (list, tuple)) and len(gap_color) >= 3:
        canvas = np.full((h, w, 3), gap_color[:3], dtype=np.uint8)
    else:
        canvas = np.zeros((h, w, 3), dtype=np.uint8)

    for row in range(rows):
        for col in range(cols):
            idx = row * cols + col
            y0 = row * (tile_h + gap)
            x0 = col * (tile_w + gap)

            tile = base_tile.copy()
            # Rotation grows with the tile index.
            if step_angle != 0:
                pivot = (tile_w // 2, tile_h // 2)
                M = cv2.getRotationMatrix2D(pivot, step_angle * idx, 1.0)
                tile = cv2.warpAffine(tile, M, (tile_w, tile_h),
                                      borderMode=cv2.BORDER_REFLECT)
            # Checkerboard flip pattern.
            if alternate_flip and (row + col) % 2 == 1:
                tile = cv2.flip(tile, 1)

            # Crop the tile if it overhangs the right/bottom edge.
            y1 = min(y0 + tile_h, h)
            x1 = min(x0 + tile_w, w)
            canvas[y0:y1, x0:x1] = tile[:y1 - y0, :x1 - x0]

    return canvas, state

21
effects/tile_grid.sexp Normal file
View File

@@ -0,0 +1,21 @@
;; Tile grid effect - repeat image in grid
;; Usage: python3 plan.py effects/tile_grid.sexp -p rows=2 -p cols=2 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; rows: number of rows (1 to 8, default: 2)
;; cols: number of columns (1 to 8, default: 2)
(recipe "tile_grid"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
;; NOTE(review): this file lives at effects/tile_grid.sexp but :path references
;; sexp_effects/effects/tile_grid.sexp — confirm against the loader's search root.
(effect tile_grid :path "sexp_effects/effects/tile_grid.sexp")
;; Default parameters (overridden by -p rows=N -p cols=N)
(def rows 2)
(def cols 2)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect tile_grid :rows rows :cols cols)))
result)

95
effects/trails.py Normal file
View File

@@ -0,0 +1,95 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect trails
@version 1.0.0
@author artdag
@description
Trails effect. Creates persistent motion trails by blending current
frame with previous frames. Like echo but with configurable blend.
@param persistence float
@range 0 0.99
@default 0.8
How much of previous frame remains (0 = none, 0.99 = very long trails).
@param blend_mode string
@enum blend add screen lighten darken
@default blend
How to combine frames.
@param fade_color list
@default [0, 0, 0]
Color to fade toward.
@state trail_buffer ndarray
Accumulated trail buffer.
@example
(effect trails :persistence 0.85)
@example
;; Long bright trails
(effect trails :persistence 0.9 :blend_mode "add")
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply motion trails by feeding back an accumulated frame buffer.

    Each frame, the buffer decays toward fade_color by `persistence`,
    is combined with the incoming frame per `blend_mode`, and the result
    becomes both the output and the next buffer.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - persistence: trail length 0-0.99 (default 0.8)
            - blend_mode: blend/add/screen/lighten/darken (default blend)
            - fade_color: RGB color to fade toward (default black)
        state: Persistent state dict
            - trail_buffer: float32 accumulation buffer

    Returns:
        Tuple of (processed_frame, new_state)
    """
    persistence = max(0, min(params.get("persistence", 0.8), 0.99))
    mode = params.get("blend_mode", "blend")
    fade_spec = params.get("fade_color", [0, 0, 0])

    state = {} if state is None else state

    # (Re)seed the buffer with the current frame on first use or resize.
    if "trail_buffer" not in state or state["trail_buffer"].shape != frame.shape:
        state["trail_buffer"] = frame.astype(np.float32)

    prev = state["trail_buffer"]
    cur = frame.astype(np.float32)

    # Resolve the fade target color; fall back to black on bad input.
    if isinstance(fade_spec, (list, tuple)) and len(fade_spec) >= 3:
        fade = np.array(fade_spec[:3], dtype=np.float32)
    else:
        fade = np.array([0, 0, 0], dtype=np.float32)

    # Decay the buffer toward the fade color.
    faded = prev * persistence + fade * (1 - persistence)

    # Combine with the current frame; unknown modes fall back to "blend".
    blend_ops = {
        "add": lambda a, b: a + b,
        "screen": lambda a, b: 255 - ((255 - a) * (255 - b) / 255),
        "lighten": np.maximum,
        "darken": np.minimum,
    }
    combine = blend_ops.get(mode, lambda a, b: a + b * (1 - persistence))
    mixed = combine(faded, cur)

    state["trail_buffer"] = np.clip(mixed, 0, 255)
    return np.clip(mixed, 0, 255).astype(np.uint8), state

20
effects/trails.sexp Normal file
View File

@@ -0,0 +1,20 @@
;; Trails effect - motion blur/persistence
;; Usage: python3 plan.py effects/trails.sexp -p persistence=0.7 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; persistence: trail strength (0.3 to 0.95, default: 0.7)
;; higher = longer trails
(recipe "trails"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
;; NOTE(review): this file lives at effects/trails.sexp but :path references
;; sexp_effects/effects/trails.sexp — confirm against the loader's search root.
(effect trails :path "sexp_effects/effects/trails.sexp")
;; Default parameter (overridden by -p persistence=N)
(def persistence 0.7)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect trails :persistence persistence)))
result)

163
effects/vhs.py Normal file
View File

@@ -0,0 +1,163 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect vhs
@version 1.0.0
@author artdag
@description
VHS / Analog Video effect. Complete VHS tape simulation combining
tracking errors, color bleeding, noise, and scan line distortion.
@param tracking_error float
@range 0 50
@default 5
Horizontal displacement amount. Sync to onset for glitches.
@param color_bleed int
@range 0 20
@default 3
Horizontal color smearing (typical VHS artifact).
@param noise_intensity float
@range 0 1
@default 0.2
Static noise amount.
@param chroma_shift int
@range 0 15
@default 2
Color channel offset (VHS color alignment issues).
@param head_switching bool
@default true
Bottom-of-frame distortion.
@param tape_crease_prob float
@range 0 0.5
@default 0.05
Probability of random tape crease distortion per frame.
@param blur_amount float
@range 0 5
@default 1
VHS softness blur.
@param seed int
@default 42
Random seed for deterministic artifacts.
@state rng DeterministicRNG
Random number generator for artifacts.
@example
(effect vhs :tracking_error 10 :color_bleed 5)
@example
;; Reactive VHS glitch
(effect vhs :tracking_error (bind onset :range [0 30]) :tape_crease_prob 0.1)
"""
import numpy as np
import cv2
from pathlib import Path
import sys
# Import DeterministicRNG from same directory
_effects_dir = Path(__file__).parent
if str(_effects_dir) not in sys.path:
sys.path.insert(0, str(_effects_dir))
from random import DeterministicRNG
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply VHS effect to a video frame.

    Layers analog-tape artifacts in a fixed sequence: softness blur,
    horizontal color bleed, chroma mis-alignment, per-scanline tracking
    error, occasional tape-crease bands, head-switching distortion at the
    bottom of the frame, and full-frame static noise. The RNG draw order
    is part of the deterministic output, so the artifact steps must not
    be reordered.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters (tracking_error, color_bleed,
            noise_intensity, chroma_shift, head_switching,
            tape_crease_prob, blur_amount, seed)
        state: Persistent state dict holding "rng"
    Returns:
        Tuple of (processed_frame, new_state)
    """
    tracking_error = params.get("tracking_error", 5)
    color_bleed = int(params.get("color_bleed", 3))
    noise_intensity = params.get("noise_intensity", 0.2)
    chroma_shift = int(params.get("chroma_shift", 2))
    head_switching = params.get("head_switching", True)
    tape_crease_prob = params.get("tape_crease_prob", 0.05)
    blur_amount = params.get("blur_amount", 1)
    seed = int(params.get("seed", 42))
    if state is None:
        state = {}
    # Initialize RNG; kept in state so the artifact sequence continues
    # deterministically across frames instead of restarting from the seed.
    if "rng" not in state:
        state["rng"] = DeterministicRNG(seed)
    rng = state["rng"]
    h, w = frame.shape[:2]
    result = frame.astype(np.float32).copy()
    # Apply slight blur (VHS softness); kernel is horizontal-only (ksize x 1).
    if blur_amount > 0:
        ksize = int(blur_amount * 2) * 2 + 1
        result = cv2.GaussianBlur(result, (ksize, 1), 0)
    # Apply color bleed (horizontal color smearing) on channels 0 and 2 only.
    if color_bleed > 0:
        ksize = color_bleed * 2 + 1
        result[:, :, 0] = cv2.blur(result[:, :, 0], (ksize, 1))
        result[:, :, 2] = cv2.blur(result[:, :, 2], (ksize, 1))
    # Apply chroma shift (color channel misalignment): channels 0 and 2 are
    # translated horizontally in opposite directions by chroma_shift pixels.
    if chroma_shift > 0:
        shifted = np.zeros_like(result)
        M_r = np.float32([[1, 0, chroma_shift], [0, 1, 0]])
        M_b = np.float32([[1, 0, -chroma_shift], [0, 1, 0]])
        shifted[:, :, 0] = cv2.warpAffine(result[:, :, 0], M_r, (w, h), borderMode=cv2.BORDER_REPLICATE)
        shifted[:, :, 1] = result[:, :, 1]
        shifted[:, :, 2] = cv2.warpAffine(result[:, :, 2], M_b, (w, h), borderMode=cv2.BORDER_REPLICATE)
        result = shifted
    # Apply tracking error (horizontal line displacement). result[y] has
    # shape (w, 3), so np.roll with axis=0 shifts the row along the width.
    if tracking_error > 0:
        for y in range(h):
            sine_shift = np.sin(y * 0.05)
            rand_shift = rng.uniform(-0.3, 0.3)
            displacement = int(tracking_error * (sine_shift + rand_shift))
            if displacement != 0:
                result[y] = np.roll(result[y], displacement, axis=0)
    # Apply tape crease: at most one randomly placed band of displaced,
    # brightness-jittered scanlines per frame.
    if tape_crease_prob > 0 and rng.uniform() < tape_crease_prob:
        band_start = rng.randint(0, max(1, h - 20))
        band_height = rng.randint(5, 20)
        for y in range(band_start, min(band_start + band_height, h)):
            displacement = rng.randint(-50, 50)
            result[y] = np.roll(result[y], displacement, axis=0)
            result[y] = result[y] * rng.uniform(0.5, 1.5)
    # Apply head switching noise (bottom of frame distortion); displacement
    # and noise amplitude grow with `factor` toward the last row.
    if head_switching:
        switch_height = rng.randint(5, 15)
        for y in range(h - switch_height, h):
            factor = (y - (h - switch_height)) / switch_height
            displacement = int(factor * 30 * rng.uniform(0.5, 1.5))
            result[y] = np.roll(result[y], displacement, axis=0)
            noise = np.array([[rng.gaussian(0, 20 * factor) for _ in range(3)] for _ in range(w)])
            result[y] = result[y] + noise
    # Apply static noise.
    # NOTE(review): this draws h*w*3 samples in nested Python loops and is
    # very slow per frame; vectorizing would change the RNG draw order (and
    # thus the deterministic output), so it is left as-is — confirm whether
    # the sequence must stay bit-stable before optimizing.
    if noise_intensity > 0:
        noise = np.array([[[rng.gaussian(0, noise_intensity * 30) for _ in range(3)]
                          for _ in range(w)]
                         for _ in range(h)])
        result = result + noise
    return np.clip(result, 0, 255).astype(np.uint8), state

116
effects/vignette.py Normal file
View File

@@ -0,0 +1,116 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy"]
# ///
"""
@effect vignette
@version 1.0.0
@author artdag
@description
Vignette effect. Darkens the corners of the frame, drawing focus
to the center. Classic cinematic look.
@param strength float
@range 0 1
@default 0.5
How dark the corners get (0 = none, 1 = black corners).
@param radius float
@range 0.5 2
@default 1.0
Size of the bright center area. Smaller = more vignette.
@param softness float
@range 0.1 1
@default 0.5
How gradual the falloff is.
@param center_x float
@range 0 1
@default 0.5
Center X position.
@param center_y float
@range 0 1
@default 0.5
Center Y position.
@param color list
@default [0, 0, 0]
Vignette color (default black).
@example
(effect vignette :strength 0.6)
@example
;; Off-center vignette
(effect vignette :center_x 0.3 :center_y 0.3 :strength 0.7)
"""
import numpy as np
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply vignette effect to a video frame.

    Blends pixels toward `color` based on distance from a configurable
    center, with adjustable radius and softness of the falloff band.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - strength: darkness 0-1 (default 0.5)
            - radius: center size 0.5-2 (default 1.0)
            - softness: falloff gradient (default 0.5)
            - center_x: center X 0-1 (default 0.5)
            - center_y: center Y 0-1 (default 0.5)
            - color: RGB tuple (default [0,0,0])
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    strength = np.clip(params.get("strength", 0.5), 0, 1)
    radius = max(0.5, min(params.get("radius", 1.0), 2))
    softness = max(0.1, min(params.get("softness", 0.5), 1))
    center_x = params.get("center_x", 0.5)
    center_y = params.get("center_y", 0.5)
    color = params.get("color", [0, 0, 0])
    if state is None:
        state = {}
    if strength <= 0:
        return frame, state
    h, w = frame.shape[:2]
    # Calculate center in pixels
    cx = w * center_x
    cy = h * center_y
    # Distance of every pixel from the center (ogrid broadcasts lazily,
    # avoiding a full meshgrid allocation).
    y_coords, x_coords = np.ogrid[:h, :w]
    dist = np.sqrt((x_coords - cx)**2 + (y_coords - cy)**2)
    # Normalize so the falloff is resolution-independent.
    max_dist = np.sqrt(cx**2 + cy**2) * radius
    normalized_dist = dist / max_dist
    # Mask is 1.0 in the bright center, decreasing toward the edges;
    # softness sets how wide the transition band is.
    vignette_mask = 1 - np.clip((normalized_dist - (1 - softness)) / softness, 0, 1) * strength
    if isinstance(color, (list, tuple)) and len(color) >= 3:
        vignette_color = np.array(color[:3], dtype=np.float32)
    else:
        vignette_color = np.array([0, 0, 0], dtype=np.float32)
    # Blend toward the vignette color in one broadcasted step
    # (replaces the per-channel Python loop; numerically identical).
    mask3 = vignette_mask[:, :, np.newaxis]
    result = frame.astype(np.float32) * mask3 + vignette_color * (1 - mask3)
    return np.clip(result, 0, 255).astype(np.uint8), state

19
effects/vignette.sexp Normal file
View File

@@ -0,0 +1,19 @@
;; Vignette effect - darken edges
;; Usage: python3 plan.py effects/vignette.sexp -p strength=0.6 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; strength: edge darkening amount (0.1 to 1.0, default: 0.6)
(recipe "vignette"
:version "1.0"
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
;; NOTE(review): this file lives at effects/vignette.sexp but :path references
;; sexp_effects/effects/vignette.sexp — confirm against the loader's search root.
(effect vignette :path "sexp_effects/effects/vignette.sexp")
;; Default parameter (overridden by -p strength=N)
(def strength 0.6)
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect vignette :strength strength)))
result)

108
effects/wave.py Normal file
View File

@@ -0,0 +1,108 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect wave
@version 1.0.0
@author artdag
@description
Sine wave displacement distortion. Creates wavy, liquid-like warping.
Great for psychedelic and underwater effects synced to music.
@param amplitude float
@range 0 100
@default 10
Wave height in pixels. Bind to bass for punchy distortion.
@param wavelength float
@range 10 500
@default 50
Distance between wave peaks in pixels.
@param speed float
@range 0 10
@default 1
Wave animation speed. Uses state to track phase over time.
@param direction string
@enum horizontal vertical both
@default horizontal
Wave direction:
- horizontal: waves move left-right
- vertical: waves move up-down
- both: waves in both directions
@state phase float
Tracks wave animation phase across frames.
@example
(effect wave :amplitude 20 :wavelength 100)
@example
;; Bass-reactive waves
(effect wave :amplitude (bind bass :range [0 50] :transform sqrt))
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply sine-wave displacement distortion to a video frame.

    Builds remap coordinate grids, offsets them with animated sine
    waves, and resamples the frame. The animation phase persists in
    `state` across frames.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - amplitude: wave height in pixels (default 10)
            - wavelength: distance between peaks (default 50)
            - speed: animation speed (default 1)
            - direction: horizontal/vertical/both (default horizontal)
        state: Persistent state dict
            - phase: current wave phase

    Returns:
        Tuple of (processed_frame, new_state)
    """
    amplitude = params.get("amplitude", 10)
    wavelength = params.get("wavelength", 50)
    speed = params.get("speed", 1)
    direction = params.get("direction", "horizontal")

    state = {} if state is None else state
    if amplitude == 0:
        return frame, state

    h, w = frame.shape[:2]

    # Advance the animation phase (persisted across frames).
    phase = state.get("phase", 0) + speed * 0.1
    state["phase"] = phase

    # Identity sampling grids.
    map_x = np.tile(np.arange(w, dtype=np.float32), (h, 1))
    map_y = np.tile(np.arange(h, dtype=np.float32).reshape(-1, 1), (1, w))

    if direction in ("horizontal", "both"):
        # X displaced by a wave running down the rows.
        map_x = map_x + np.sin(2 * np.pi * map_y / wavelength + phase) * amplitude
    if direction in ("vertical", "both"):
        # Y displaced by a wave across X. In "both" mode this reads the
        # already-displaced map_x, matching the original behavior.
        map_y = map_y + np.sin(2 * np.pi * map_x / wavelength + phase) * amplitude

    warped = cv2.remap(
        frame, map_x, map_y,
        cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_REFLECT
    )
    return warped, state

21
effects/wave.sexp Normal file
View File

@@ -0,0 +1,21 @@
;; Wave effect - sinusoidal distortion
;; Usage: python3 plan.py effects/wave.sexp -p amplitude=30 -p wavelength=60 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; amplitude: wave height in pixels (5 to 100, default: 30)
;; wavelength: wave period in pixels (20 to 200, default: 60)
(recipe "wave"
:version "1.0"
;; Output encoding: H.264 (CRF 20, medium preset), AAC audio, 30 fps.
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
;; Effect registration for this recipe.
;; NOTE(review): :path references this recipe file itself; confirm it
;; shouldn't point at the Python implementation (effects/wave.py) instead.
(effect wave :path "sexp_effects/effects/wave.sexp")
;; Default parameters (overridden by -p amplitude=N -p wavelength=N)
(def amplitude 30)
(def wavelength 60)
;; Pipeline: take the first 10 seconds of the source, apply the wave,
;; and return the result as the recipe's output.
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect wave :amplitude amplitude :wavelength wavelength)))
result)

91
effects/zoom.py Normal file
View File

@@ -0,0 +1,91 @@
# /// script
# requires-python = ">=3.10"
# dependencies = ["numpy", "opencv-python"]
# ///
"""
@effect zoom
@version 1.0.0
@author artdag
@description
Zooms toward a configurable point in the frame (center by default).
Values > 1 zoom in (magnify), values < 1 zoom out (shrink with black borders).
@param factor float
@range 0.1 5
@default 1.0
Zoom factor. 1 = unchanged, 2 = 2x magnification, 0.5 = half size.
@param center_x float
@range 0 1
@default 0.5
Horizontal center of zoom (0 = left, 1 = right).
@param center_y float
@range 0 1
@default 0.5
Vertical center of zoom (0 = top, 1 = bottom).
@example
(effect zoom :factor 1.5)
@example
;; Pulse zoom on bass
(effect zoom :factor (bind bass :range [1.0 1.5] :transform sqrt))
@example
;; Zoom to corner
(effect zoom :factor 2 :center_x 0 :center_y 0)
"""
import numpy as np
import cv2
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Zoom a video frame toward a configurable center point.

    factor > 1 magnifies (crop a sub-region, then upscale); factor < 1
    shrinks the frame and composites it onto a black canvas, positioned
    by center_x/center_y.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - factor: zoom multiplier (default 1.0)
            - center_x: horizontal center 0-1 (default 0.5)
            - center_y: vertical center 0-1 (default 0.5)
        state: Persistent state dict (unused)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    factor = params.get("factor", 1.0)
    center_x = params.get("center_x", 0.5)
    center_y = params.get("center_y", 0.5)

    # Guard against missing or degenerate values (e.g. an unbound parameter).
    if factor is None or factor <= 0.01:
        factor = 1.0
    if factor == 1.0:
        return frame, state

    h, w = frame.shape[:2]

    if factor < 1.0:
        # Zoom out: shrink the frame and paste it onto a black background.
        # (The previous crop-based code was a silent no-op for factor < 1:
        # the crop region exceeded the frame bounds, numpy slicing clamped
        # it to the whole frame, and the resize restored the original size.)
        small_w = max(1, int(w * factor))
        small_h = max(1, int(h * factor))
        small = cv2.resize(frame, (small_w, small_h), interpolation=cv2.INTER_LINEAR)
        x_off = max(0, min(int((w - small_w) * center_x), w - small_w))
        y_off = max(0, min(int((h - small_h) * center_y), h - small_h))
        canvas = np.zeros_like(frame)
        canvas[y_off:y_off + small_h, x_off:x_off + small_w] = small
        return canvas, state

    # Zoom in: crop a smaller region around the requested center, then
    # resize it back up to the original dimensions.
    new_w = int(w / factor)
    new_h = int(h / factor)
    if new_w <= 0 or new_h <= 0:
        return frame, state
    x_start = int((w - new_w) * center_x)
    y_start = int((h - new_h) * center_y)
    # Clamp so the crop window stays fully inside the frame.
    x_start = max(0, min(x_start, w - new_w))
    y_start = max(0, min(y_start, h - new_h))
    cropped = frame[y_start:y_start + new_h, x_start:x_start + new_w]
    result = cv2.resize(cropped, (w, h), interpolation=cv2.INTER_LINEAR)
    return result, state

20
effects/zoom.sexp Normal file
View File

@@ -0,0 +1,20 @@
;; Zoom effect - scale from center
;; Usage: python3 plan.py effects/zoom.sexp -p factor=1.2 | python3 execute.py - -d . -o output.mp4
;;
;; Parameters:
;; factor: zoom multiplier (0.5 to 3.0, default: 1.2)
;; <1 = zoom out, >1 = zoom in
(recipe "zoom"
:version "1.0"
;; Output encoding: H.264 (CRF 20, medium preset), AAC audio, 30 fps.
:encoding (:codec "libx264" :crf 20 :preset "medium" :audio-codec "aac" :fps 30)
;; Effect registration for this recipe.
;; NOTE(review): :path references this recipe file itself; confirm it
;; shouldn't point at the Python implementation (effects/zoom.py) instead.
(effect zoom :path "sexp_effects/effects/zoom.sexp")
;; Default parameter (overridden by -p factor=N)
(def factor 1.2)
;; Pipeline: take the first 10 seconds of the source, apply the zoom,
;; and return the result as the recipe's output.
(def video (source :path "monday.webm"))
(def clip (-> video (segment :start 0 :duration 10)))
(def result (-> clip (effect zoom :factor factor)))
result)