Add S-expression based video effects pipeline with modular effect definitions, constructs, and recipe files. Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
123 lines
3.1 KiB
Python
123 lines
3.1 KiB
Python
# /// script
|
|
# requires-python = ">=3.10"
|
|
# dependencies = ["numpy", "opencv-python"]
|
|
# ///
|
|
"""
|
|
@effect motion_blur
|
|
@version 1.0.0
|
|
@author artdag
|
|
|
|
@description
|
|
Motion blur effect. Creates directional blur simulating camera or
|
|
subject motion. Great for speed effects.
|
|
|
|
@param amount int
|
|
@range 1 100
|
|
@default 20
|
|
Blur length in pixels.
|
|
|
|
@param angle float
|
|
@range 0 360
|
|
@default 0
|
|
Blur direction in degrees (0 = horizontal right).
|
|
|
|
@param zoom bool
|
|
@default false
|
|
Radial zoom blur instead of directional.
|
|
|
|
@param zoom_center_x float
|
|
@range 0 1
|
|
@default 0.5
|
|
Zoom blur center X.
|
|
|
|
@param zoom_center_y float
|
|
@range 0 1
|
|
@default 0.5
|
|
Zoom blur center Y.
|
|
|
|
@example
|
|
(effect motion_blur :amount 30 :angle 45)
|
|
|
|
@example
|
|
;; Zoom blur
|
|
(effect motion_blur :amount 20 :zoom true)
|
|
"""
|
|
|
|
import numpy as np
|
|
import cv2
|
|
|
|
|
|
def process_frame(frame: np.ndarray, params: dict, state: dict) -> tuple:
    """
    Apply motion blur effect to a video frame.

    The blur is either directional (convolution with a line-shaped kernel)
    or a radial zoom blur (average of progressively scaled frame copies),
    selected by the ``zoom`` parameter.

    Args:
        frame: Input frame as numpy array (H, W, 3) RGB uint8
        params: Effect parameters
            - amount: blur length in pixels, clamped to the declared
              1..100 range (default 20)
            - angle: blur direction in degrees, 0 = horizontal right
              (default 0; ignored when zoom is True)
            - zoom: radial zoom blur instead of directional (default False)
            - zoom_center_x / zoom_center_y: zoom center as a fraction of
              frame size, clamped to 0..1 (default 0.5)
        state: Persistent state dict (unused by this effect; passed through)

    Returns:
        Tuple of (processed_frame, new_state)
    """
    # Clamp every parameter to the range declared in the module docstring
    # (@range tags) so a malformed recipe cannot, e.g., request a gigantic
    # amount x amount kernel allocation or an off-frame zoom center.
    amount = min(100, max(1, int(params.get("amount", 20))))
    angle = params.get("angle", 0)
    zoom = params.get("zoom", False)
    zoom_center_x = min(1.0, max(0.0, params.get("zoom_center_x", 0.5)))
    zoom_center_y = min(1.0, max(0.0, params.get("zoom_center_y", 0.5)))

    if state is None:
        state = {}

    # amount == 1 is a no-op blur: return the frame untouched.
    if amount <= 1:
        return frame, state

    if zoom:
        return _zoom_blur(frame, amount, zoom_center_x, zoom_center_y), state
    return _directional_blur(frame, amount, angle), state


def _zoom_blur(frame: np.ndarray, amount: int, center_x: float, center_y: float) -> np.ndarray:
    """Radial zoom blur: average up to 20 progressively scaled copies of the frame."""
    h, w = frame.shape[:2]
    cx, cy = int(w * center_x), int(h * center_y)

    # Accumulate scaled versions in float32 to avoid uint8 rounding per sample.
    result = np.zeros_like(frame, dtype=np.float32)
    num_samples = min(amount, 20)  # cap per-frame work

    for i in range(num_samples):
        # Scale grows from 1.0 toward 1.0 + amount/100, anchored at (cx, cy).
        scale = 1.0 + (i / num_samples) * (amount / 100.0)
        M = cv2.getRotationMatrix2D((cx, cy), 0, scale)
        scaled = cv2.warpAffine(frame, M, (w, h), borderMode=cv2.BORDER_REFLECT)
        result += scaled.astype(np.float32) / num_samples

    return result.astype(np.uint8)


def _directional_blur(frame: np.ndarray, amount: int, angle: float) -> np.ndarray:
    """Directional motion blur: convolve with a normalized line-shaped kernel."""
    angle_rad = np.deg2rad(angle)
    dx = np.cos(angle_rad)
    dy = np.sin(angle_rad)

    kernel_size = amount
    kernel = np.zeros((kernel_size, kernel_size), dtype=np.float32)

    # Rasterize a line through the kernel center in the (dx, dy) direction.
    # Steps are <= 1 px since |dx|, |dy| <= 1, so the line has no gaps.
    cx, cy = kernel_size // 2, kernel_size // 2
    for i in range(kernel_size):
        t = (i - cx) / max(1, cx)
        x = int(cx + t * cx * dx)
        y = int(cy + t * cy * dy)
        if 0 <= x < kernel_size and 0 <= y < kernel_size:
            kernel[y, x] = 1

    # Normalize so the convolution preserves overall brightness.
    kernel_sum = kernel.sum()
    if kernel_sum > 0:
        kernel /= kernel_sum

    return cv2.filter2D(frame, -1, kernel)
|