Files
test/sexp_effects/primitives.py
gilesb 6ceaa37ab6 Add composable ASCII art with per-cell effects and explicit effect loading
Implements ascii_fx_zone effect that allows applying arbitrary sexp effects
to each character cell via cell_effect lambdas. Each cell is rendered as a
small image that effects can operate on.

Key changes:
- New ascii_fx_zone effect with cell_effect parameter for per-cell transforms
- Zone context (row, col, lum, sat, hue, etc.) available in cell_effect lambdas
- Effects are now loaded explicitly from recipe declarations, not auto-loaded
- Added effects_registry to plan for explicit effect dependency tracking
- Updated effect definition syntax across all sexp effects
- New run_staged.py for executing staged recipes
- Example recipes demonstrating alternating rotation and blur/rgb_split patterns

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-19 21:58:05 +00:00

3003 lines
98 KiB
Python

"""
Safe Primitives for S-Expression Effects
These are the building blocks that user-defined effects can use.
All primitives operate only on image data - no filesystem, network, etc.
"""
import numpy as np
import cv2
from typing import Any, Callable, Dict, List, Tuple, Optional
from dataclasses import dataclass
import math
@dataclass
class ZoneContext:
    """Context for a single cell/zone in ASCII art grid.

    One instance describes a single character cell, letting per-cell
    effect expressions vary behavior by grid position and by the cell's
    average color.
    """
    row: int          # Cell row index in the grid (0-based)
    col: int          # Cell column index in the grid (0-based)
    row_norm: float   # Normalized row position 0-1
    col_norm: float   # Normalized col position 0-1
    luminance: float  # Cell luminance 0-1
    saturation: float # Cell saturation 0-1
    hue: float        # Cell hue 0-360
    r: float          # Red component 0-1
    g: float          # Green component 0-1
    b: float          # Blue component 0-1
class DeterministicRNG:
    """Reproducible random source backed by a seeded numpy RandomState.

    Note: the `_rng` attribute is accessed directly by prim_add_noise,
    so its name is part of this class's de-facto interface.
    """

    def __init__(self, seed: int = 42):
        # Legacy RandomState guarantees a stable stream per seed.
        self._rng = np.random.RandomState(seed)

    def random(self, low: float = 0, high: float = 1) -> float:
        """Uniform float drawn from [low, high)."""
        value = self._rng.uniform(low, high)
        return value

    def randint(self, low: int, high: int) -> int:
        """Integer drawn from [low, high], both endpoints inclusive."""
        return self._rng.randint(low, high + 1)

    def gaussian(self, mean: float = 0, std: float = 1) -> float:
        """Normally distributed float with the given mean and std."""
        return self._rng.normal(mean, std)
# Global RNG instance (reset per frame with seed param)
_rng = DeterministicRNG()


def reset_rng(seed: int):
    """Reset the global RNG with a new seed.

    Re-seeds the module-level `_rng` that prim_random / prim_randint /
    prim_gaussian and prim_add_noise draw from, so output is reproducible
    for a given seed.
    """
    global _rng
    _rng = DeterministicRNG(seed)
# =============================================================================
# Color Names (FFmpeg/X11 compatible)
# =============================================================================
# X11/FFmpeg-compatible color names mapped to (r, g, b) tuples, 0-255.
# Values follow the X11 rgb.txt convention, including its quirks
# (e.g. "darkgray" is *lighter* than "gray" — that is historical X11
# behavior, not a typo).
NAMED_COLORS = {
    # Basic colors
    "black": (0, 0, 0),
    "white": (255, 255, 255),
    "red": (255, 0, 0),
    "green": (0, 128, 0),
    "blue": (0, 0, 255),
    "yellow": (255, 255, 0),
    "cyan": (0, 255, 255),
    "magenta": (255, 0, 255),
    # Grays
    "gray": (128, 128, 128),
    "grey": (128, 128, 128),
    "darkgray": (169, 169, 169),
    "darkgrey": (169, 169, 169),
    "lightgray": (211, 211, 211),
    "lightgrey": (211, 211, 211),
    "dimgray": (105, 105, 105),
    "dimgrey": (105, 105, 105),
    "silver": (192, 192, 192),
    # Reds
    "darkred": (139, 0, 0),
    "firebrick": (178, 34, 34),
    "crimson": (220, 20, 60),
    "indianred": (205, 92, 92),
    "lightcoral": (240, 128, 128),
    "salmon": (250, 128, 114),
    "darksalmon": (233, 150, 122),
    "lightsalmon": (255, 160, 122),
    "tomato": (255, 99, 71),
    "orangered": (255, 69, 0),
    "coral": (255, 127, 80),
    # Oranges
    "orange": (255, 165, 0),
    "darkorange": (255, 140, 0),
    # Yellows
    "gold": (255, 215, 0),
    "lightyellow": (255, 255, 224),
    "lemonchiffon": (255, 250, 205),
    "papayawhip": (255, 239, 213),
    "moccasin": (255, 228, 181),
    "peachpuff": (255, 218, 185),
    "palegoldenrod": (238, 232, 170),
    "khaki": (240, 230, 140),
    "darkkhaki": (189, 183, 107),
    # Greens
    "lime": (0, 255, 0),
    "limegreen": (50, 205, 50),
    "forestgreen": (34, 139, 34),
    "darkgreen": (0, 100, 0),
    "seagreen": (46, 139, 87),
    "mediumseagreen": (60, 179, 113),
    "springgreen": (0, 255, 127),
    "mediumspringgreen": (0, 250, 154),
    "lightgreen": (144, 238, 144),
    "palegreen": (152, 251, 152),
    "darkseagreen": (143, 188, 143),
    "greenyellow": (173, 255, 47),
    "chartreuse": (127, 255, 0),
    "lawngreen": (124, 252, 0),
    "olivedrab": (107, 142, 35),
    "olive": (128, 128, 0),
    "darkolivegreen": (85, 107, 47),
    "yellowgreen": (154, 205, 50),
    # Cyans/Teals
    "aqua": (0, 255, 255),
    "teal": (0, 128, 128),
    "darkcyan": (0, 139, 139),
    "lightcyan": (224, 255, 255),
    "aquamarine": (127, 255, 212),
    "mediumaquamarine": (102, 205, 170),
    "paleturquoise": (175, 238, 238),
    "turquoise": (64, 224, 208),
    "mediumturquoise": (72, 209, 204),
    "darkturquoise": (0, 206, 209),
    "cadetblue": (95, 158, 160),
    # Blues
    "navy": (0, 0, 128),
    "darkblue": (0, 0, 139),
    "mediumblue": (0, 0, 205),
    "royalblue": (65, 105, 225),
    "cornflowerblue": (100, 149, 237),
    "steelblue": (70, 130, 180),
    "dodgerblue": (30, 144, 255),
    "deepskyblue": (0, 191, 255),
    "lightskyblue": (135, 206, 250),
    "skyblue": (135, 206, 235),
    "lightsteelblue": (176, 196, 222),
    "lightblue": (173, 216, 230),
    "powderblue": (176, 224, 230),
    "slateblue": (106, 90, 205),
    "mediumslateblue": (123, 104, 238),
    "darkslateblue": (72, 61, 139),
    "midnightblue": (25, 25, 112),
    # Purples/Violets
    "purple": (128, 0, 128),
    "darkmagenta": (139, 0, 139),
    "darkviolet": (148, 0, 211),
    "blueviolet": (138, 43, 226),
    "darkorchid": (153, 50, 204),
    "mediumorchid": (186, 85, 211),
    "orchid": (218, 112, 214),
    "violet": (238, 130, 238),
    "plum": (221, 160, 221),
    "thistle": (216, 191, 216),
    "lavender": (230, 230, 250),
    "indigo": (75, 0, 130),
    "mediumpurple": (147, 112, 219),
    "fuchsia": (255, 0, 255),
    "hotpink": (255, 105, 180),
    "deeppink": (255, 20, 147),
    "mediumvioletred": (199, 21, 133),
    "palevioletred": (219, 112, 147),
    # Pinks
    "pink": (255, 192, 203),
    "lightpink": (255, 182, 193),
    "mistyrose": (255, 228, 225),
    # Browns
    "brown": (165, 42, 42),
    "maroon": (128, 0, 0),
    "saddlebrown": (139, 69, 19),
    "sienna": (160, 82, 45),
    "chocolate": (210, 105, 30),
    "peru": (205, 133, 63),
    "sandybrown": (244, 164, 96),
    "burlywood": (222, 184, 135),
    "tan": (210, 180, 140),
    "rosybrown": (188, 143, 143),
    "goldenrod": (218, 165, 32),
    "darkgoldenrod": (184, 134, 11),
    # Whites
    "snow": (255, 250, 250),
    "honeydew": (240, 255, 240),
    "mintcream": (245, 255, 250),
    "azure": (240, 255, 255),
    "aliceblue": (240, 248, 255),
    "ghostwhite": (248, 248, 255),
    "whitesmoke": (245, 245, 245),
    "seashell": (255, 245, 238),
    "beige": (245, 245, 220),
    "oldlace": (253, 245, 230),
    "floralwhite": (255, 250, 240),
    "ivory": (255, 255, 240),
    "antiquewhite": (250, 235, 215),
    "linen": (250, 240, 230),
    "lavenderblush": (255, 240, 245),
    "wheat": (245, 222, 179),
    "cornsilk": (255, 248, 220),
    "blanchedalmond": (255, 235, 205),
    "bisque": (255, 228, 196),
    "navajowhite": (255, 222, 173),
    # Special
    "transparent": (0, 0, 0),  # Note: no alpha support, just black
}
def parse_color(color_spec: str) -> Optional[Tuple[int, int, int]]:
    """
    Parse a color specification into an RGB tuple.

    Supports:
        - Named colors: "red", "green", "lime", "navy", etc.
        - Hex colors: "#FF0000", "#f00", "0xFF0000"
        - Special modes: "color", "mono", "invert" return None (handled separately)

    Returns:
        RGB tuple (r, g, b), or None for special modes / unknown specs
        (callers then keep the original colors).
    """
    if color_spec is None:
        return None
    spec = str(color_spec).strip().lower()
    # Special modes are resolved by the caller, not here.
    if spec in ("color", "mono", "invert"):
        return None
    named = NAMED_COLORS.get(spec)
    if named is not None:
        return named
    # Strip a hex prefix, or accept a bare 3/6-digit hex string.
    if spec.startswith("#"):
        digits = spec[1:]
    elif spec.startswith("0x"):
        digits = spec[2:]
    elif len(spec) in (3, 6) and all(ch in "0123456789abcdef" for ch in spec):
        digits = spec
    else:
        digits = None
    if digits:
        try:
            if len(digits) == 3:
                # Short form #RGB: double each digit -> #RRGGBB.
                return tuple(int(d * 2, 16) for d in digits)
            if len(digits) == 6:
                return tuple(int(digits[i:i + 2], 16) for i in range(0, 6, 2))
        except ValueError:
            pass
    # Unknown color specification.
    return None
# =============================================================================
# Image Primitives
# =============================================================================
def prim_width(img: np.ndarray) -> int:
    """Image width in pixels."""
    return img.shape[1]


def prim_height(img: np.ndarray) -> int:
    """Image height in pixels."""
    return img.shape[0]


def prim_make_image(w: int, h: int, color: List[int]) -> np.ndarray:
    """Create a new h-by-w RGB image filled with color (black when falsy)."""
    canvas = np.zeros((int(h), int(w), 3), dtype=np.uint8)
    if color:
        canvas[:, :] = color[:3]
    return canvas


def prim_copy(img: np.ndarray) -> np.ndarray:
    """Return an independent copy of the image."""
    return img.copy()
def prim_pixel(img: np.ndarray, x: int, y: int) -> List[int]:
    """Read pixel (x, y) as [r, g, b]; out-of-bounds reads return black."""
    h, w = img.shape[:2]
    xi, yi = int(x), int(y)
    if not (0 <= xi < w) or not (0 <= yi < h):
        return [0, 0, 0]
    return list(img[yi, xi])


def prim_set_pixel(img: np.ndarray, x: int, y: int, color: List[int]) -> np.ndarray:
    """Write color at (x, y) in place; out-of-bounds writes are ignored."""
    h, w = img.shape[:2]
    xi, yi = int(x), int(y)
    if 0 <= xi < w and 0 <= yi < h:
        img[yi, xi] = color[:3]
    return img
def prim_sample(img: np.ndarray, x: float, y: float) -> List[float]:
    """Bilinearly sample the image at float coordinates, clamped to bounds."""
    h, w = img.shape[:2]
    x = np.clip(x, 0, w - 1)
    y = np.clip(y, 0, h - 1)
    x0, y0 = int(x), int(y)
    x1 = min(x0 + 1, w - 1)
    y1 = min(y0 + 1, h - 1)
    fx = x - x0
    fy = y - y0
    # Blend horizontally along the top and bottom texel rows, then vertically.
    top = img[y0, x0].astype(float) * (1 - fx) * (1 - fy)
    top = top + img[y0, x1].astype(float) * fx * (1 - fy)
    bot = img[y1, x0].astype(float) * (1 - fx) * fy
    bot = bot + img[y1, x1].astype(float) * fx * fy
    return list(top + bot)
def prim_channel(img: np.ndarray, c: int) -> np.ndarray:
    """Copy out a single channel as a 2D array."""
    idx = int(c)
    return img[:, :, idx].copy()


def prim_merge_channels(r: np.ndarray, g: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Stack three 2D planes into one RGB uint8 image."""
    merged = np.stack([r, g, b], axis=-1)
    return merged.astype(np.uint8)
def prim_resize(img: np.ndarray, w: int, h: int, mode: str = "linear") -> np.ndarray:
    """Resize image. Mode: linear, nearest, area.

    Degenerate target sizes (< 1 pixel) return the input unchanged rather
    than raising inside cv2; unknown modes fall back to linear.
    """
    w, h = int(w), int(h)
    if w < 1 or h < 1:
        return img
    # Map mode names onto cv2 interpolation flags.
    interp = {
        "linear": cv2.INTER_LINEAR,
        "nearest": cv2.INTER_NEAREST,
        "area": cv2.INTER_AREA,
    }.get(mode, cv2.INTER_LINEAR)
    return cv2.resize(img, (w, h), interpolation=interp)
def prim_crop(img: np.ndarray, x: int, y: int, w: int, h: int) -> np.ndarray:
    """Copy a rectangular region out of the image, clamped to valid bounds."""
    ih, iw = img.shape[:2]
    x, y, w, h = int(x), int(y), int(w), int(h)
    # Clamp the origin into the image, then shrink the extent to fit.
    x = min(max(x, 0), iw)
    y = min(max(y, 0), ih)
    w = min(max(w, 0), iw - x)
    h = min(max(h, 0), ih - y)
    return img[y:y + h, x:x + w].copy()
def prim_paste(dst: np.ndarray, src: np.ndarray, x: int, y: int) -> np.ndarray:
    """Blit src into dst at (x, y) in place, clipping at dst's edges."""
    dh, dw = dst.shape[:2]
    sh, sw = src.shape[:2]
    x, y = int(x), int(y)
    # Portion of src that survives clipping against dst's borders.
    src_x0, src_y0 = max(0, -x), max(0, -y)
    src_x1, src_y1 = min(sw, dw - x), min(sh, dh - y)
    # Matching destination rectangle.
    dst_x0, dst_y0 = max(0, x), max(0, y)
    dst_x1 = dst_x0 + (src_x1 - src_x0)
    dst_y1 = dst_y0 + (src_y1 - src_y0)
    if dst_x1 > dst_x0 and dst_y1 > dst_y0:
        dst[dst_y0:dst_y1, dst_x0:dst_x1] = src[src_y0:src_y1, src_x0:src_x1]
    return dst
# =============================================================================
# Color Primitives
# =============================================================================
def prim_rgb(r: float, g: float, b: float) -> List[int]:
    """Build an [r, g, b] color with each channel clamped to 0-255."""
    return [int(np.clip(channel, 0, 255)) for channel in (r, g, b)]


def prim_red(c: List[int]) -> int:
    """Red channel, or 0 for an empty color."""
    return c[0] if c else 0


def prim_green(c: List[int]) -> int:
    """Green channel, or 0 when absent."""
    return c[1] if len(c) > 1 else 0


def prim_blue(c: List[int]) -> int:
    """Blue channel, or 0 when absent."""
    return c[2] if len(c) > 2 else 0


def prim_luminance(c: List[int]) -> float:
    """Rec. 601 luma of an [r, g, b] color (0 for an empty color)."""
    if not c:
        return 0
    return 0.299 * c[0] + 0.587 * c[1] + 0.114 * c[2]
def prim_rgb_to_hsv(c: List[int]) -> List[float]:
    """Convert [r, g, b] (0-255) to [h 0-360, s 0-100, v 0-100]."""
    r, g, b = c[0] / 255, c[1] / 255, c[2] / 255
    mx = max(r, g, b)
    mn = min(r, g, b)
    diff = mx - mn
    # Hue depends on which channel dominates.
    if diff == 0:
        h = 0
    elif mx == r:
        h = (60 * ((g - b) / diff) + 360) % 360
    elif mx == g:
        h = (60 * ((b - r) / diff) + 120) % 360
    else:
        h = (60 * ((r - g) / diff) + 240) % 360
    s = 0 if mx == 0 else diff / mx
    return [h, s * 100, mx * 100]
def prim_hsv_to_rgb(hsv: List[float]) -> List[int]:
    """Convert [h 0-360, s 0-100, v 0-100] to [r, g, b] (0-255)."""
    h = hsv[0]
    s = hsv[1] / 100
    v = hsv[2] / 100
    c = v * s  # chroma
    x = c * (1 - abs((h / 60) % 2 - 1))
    m = v - c
    # (r, g, b) base pattern for each 60-degree hue sector; the index is
    # clamped so h < 0 behaves like sector 0 and h >= 360 like sector 5,
    # matching the original if/elif chain.
    sectors = [(c, x, 0), (x, c, 0), (0, c, x), (0, x, c), (x, 0, c), (c, 0, x)]
    idx = min(max(int(h // 60), 0), 5)
    r, g, b = sectors[idx]
    return [int((r + m) * 255), int((g + m) * 255), int((b + m) * 255)]
def prim_blend_color(c1: List[int], c2: List[int], alpha: float) -> List[int]:
    """Linear mix of two colors: alpha 0 gives c1, alpha 1 gives c2."""
    alpha = np.clip(alpha, 0, 1)
    inv = 1 - alpha
    return [int(c1[i] * inv + c2[i] * alpha) for i in range(3)]


def prim_average_color(img: np.ndarray) -> List[int]:
    """Mean color of an image or region as [r, g, b] ints."""
    means = img.mean(axis=(0, 1))
    return [int(m) for m in means]
# =============================================================================
# Image Operations (Bulk)
# =============================================================================
def prim_map_pixels(img: np.ndarray, fn: Callable) -> np.ndarray:
    """Apply fn(x, y, [r, g, b]) -> [r, g, b] to every pixel.

    fn may return None to leave a pixel unchanged. The input image is not
    modified; a transformed copy is returned.
    """
    out = img.copy()
    h, w = img.shape[:2]
    for yy in range(h):
        for xx in range(w):
            replacement = fn(xx, yy, list(img[yy, xx]))
            if replacement is not None:
                out[yy, xx] = replacement[:3]
    return out
def prim_map_rows(img: np.ndarray, fn: Callable) -> np.ndarray:
    """Apply fn(y, row) -> row to each row; returning None keeps the row."""
    out = img.copy()
    for yy in range(img.shape[0]):
        replacement = fn(yy, img[yy].copy())
        if replacement is not None:
            out[yy] = replacement
    return out
def prim_for_grid(img: np.ndarray, cell_size: int, fn: Callable) -> np.ndarray:
    """Call fn(gx, gy, cell) for every full grid cell; returns img.

    Cells are views into img, so fn can mutate pixels in place. Partial
    cells at the right/bottom edges are skipped.
    """
    step = max(1, int(cell_size))
    h, w = img.shape[:2]
    for gy in range(h // step):
        for gx in range(w // step):
            top, left = gy * step, gx * step
            fn(gx, gy, img[top:top + step, left:left + step])
    return img
def prim_fold_pixels(img: np.ndarray, init: Any, fn: Callable) -> Any:
    """Left-fold fn(acc, x, y, [r, g, b]) over pixels in row-major order."""
    acc = init
    h, w = img.shape[:2]
    for yy in range(h):
        for xx in range(w):
            acc = fn(acc, xx, yy, list(img[yy, xx]))
    return acc
# =============================================================================
# Convolution / Filters
# =============================================================================
def prim_convolve(img: np.ndarray, kernel: List[List[float]]) -> np.ndarray:
    """Apply an arbitrary 2D convolution kernel.

    ddepth=-1 keeps the input's uint8 depth; cv2 saturates the results.
    """
    k = np.array(kernel, dtype=np.float32)
    return cv2.filter2D(img, -1, k)


def prim_blur(img: np.ndarray, radius: int) -> np.ndarray:
    """Gaussian blur with an odd kernel of 2*radius + 1 (radius floored at 1)."""
    radius = max(1, int(radius))
    ksize = radius * 2 + 1
    # sigma=0 lets cv2 derive sigma from the kernel size.
    return cv2.GaussianBlur(img, (ksize, ksize), 0)


def prim_box_blur(img: np.ndarray, radius: int) -> np.ndarray:
    """Box blur (faster than Gaussian), same 2*radius + 1 kernel sizing."""
    radius = max(1, int(radius))
    ksize = radius * 2 + 1
    return cv2.blur(img, (ksize, ksize))
def prim_edges(img: np.ndarray, low: int = 50, high: int = 150) -> np.ndarray:
    """Canny edge detection; returns white edges as a 3-channel image.

    low/high are the hysteresis thresholds passed straight to cv2.Canny.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    edges = cv2.Canny(gray, int(low), int(high))
    # Expand back to RGB so the result composes with other primitives.
    return cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)


def prim_sobel(img: np.ndarray) -> np.ndarray:
    """Sobel gradient magnitude as a 3-channel image (clipped to 0-255)."""
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY).astype(np.float32)
    sx = cv2.Sobel(gray, cv2.CV_32F, 1, 0)  # horizontal derivative
    sy = cv2.Sobel(gray, cv2.CV_32F, 0, 1)  # vertical derivative
    magnitude = np.sqrt(sx ** 2 + sy ** 2)
    magnitude = np.clip(magnitude, 0, 255).astype(np.uint8)
    return cv2.cvtColor(magnitude, cv2.COLOR_GRAY2RGB)
def prim_dilate(img: np.ndarray, size: int = 1) -> np.ndarray:
    """Morphological dilation with a size x size all-ones kernel.

    size is coerced to an int >= 1: recipe expressions can pass floats or
    non-positive values, and np.ones((0, 0)) / float shapes would otherwise
    raise before reaching cv2.
    """
    size = max(1, int(size))
    kernel = np.ones((size, size), np.uint8)
    return cv2.dilate(img, kernel, iterations=1)


def prim_erode(img: np.ndarray, size: int = 1) -> np.ndarray:
    """Morphological erosion with a size x size all-ones kernel.

    size is sanitized the same way as in prim_dilate so the two primitives
    stay symmetric.
    """
    size = max(1, int(size))
    kernel = np.ones((size, size), np.uint8)
    return cv2.erode(img, kernel, iterations=1)
# =============================================================================
# Geometric Transforms
# =============================================================================
def prim_translate(img: np.ndarray, dx: float, dy: float) -> np.ndarray:
    """Shift the image by (dx, dy) pixels; uncovered edges fill by reflection."""
    h, w = img.shape[:2]
    M = np.float32([[1, 0, dx], [0, 1, dy]])
    return cv2.warpAffine(img, M, (w, h), borderMode=cv2.BORDER_REFLECT)


def prim_rotate(img: np.ndarray, angle: float, cx: float = None, cy: float = None) -> np.ndarray:
    """Rotate by `angle` degrees about (cx, cy), defaulting to the image center.

    Positive angles rotate counter-clockwise (cv2.getRotationMatrix2D
    convention); uncovered corners fill by reflection.
    """
    h, w = img.shape[:2]
    if cx is None:
        cx = w / 2
    if cy is None:
        cy = h / 2
    M = cv2.getRotationMatrix2D((cx, cy), angle, 1.0)
    return cv2.warpAffine(img, M, (w, h), borderMode=cv2.BORDER_REFLECT)


def prim_scale(img: np.ndarray, sx: float, sy: float, cx: float = None, cy: float = None) -> np.ndarray:
    """Scale about (cx, cy) (defaults to center), keeping the canvas size."""
    h, w = img.shape[:2]
    if cx is None:
        cx = w / 2
    if cy is None:
        cy = h / 2
    # Affine that keeps (cx, cy) fixed: x' = sx*x + cx*(1 - sx).
    M = np.float32([
        [sx, 0, cx * (1 - sx)],
        [0, sy, cy * (1 - sy)]
    ])
    return cv2.warpAffine(img, M, (w, h), borderMode=cv2.BORDER_REFLECT)


def prim_flip_h(img: np.ndarray) -> np.ndarray:
    """Mirror left-right."""
    return cv2.flip(img, 1)


def prim_flip_v(img: np.ndarray) -> np.ndarray:
    """Mirror top-bottom."""
    return cv2.flip(img, 0)
def prim_remap(img: np.ndarray, map_x: np.ndarray, map_y: np.ndarray) -> np.ndarray:
    """Resample img so output pixel (x, y) reads source (map_x[y, x], map_y[y, x]).

    Maps are cast to float32 as cv2.remap requires; out-of-range lookups
    fill by reflection.
    """
    return cv2.remap(img, map_x.astype(np.float32), map_y.astype(np.float32),
                     cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)
def prim_make_coords(w: int, h: int) -> Tuple[np.ndarray, np.ndarray]:
    """Identity sampling grids (map_x, map_y) for use with remap."""
    xs = np.arange(w, dtype=np.float32)
    ys = np.arange(h, dtype=np.float32)
    map_x, map_y = np.meshgrid(xs, ys)
    return map_x, map_y
# =============================================================================
# Blending
# =============================================================================
def prim_blend_images(a: np.ndarray, b: np.ndarray, alpha: float) -> np.ndarray:
    """Crossfade from a (alpha=0) to b (alpha=1); alpha is clamped to [0, 1].

    If the two images differ in size, b is resized to match a first.
    """
    alpha = np.clip(alpha, 0, 1)
    if a.shape[:2] != b.shape[:2]:
        b = cv2.resize(b, (a.shape[1], a.shape[0]), interpolation=cv2.INTER_LINEAR)
    mixed = a.astype(float) * (1 - alpha) + b.astype(float) * alpha
    return mixed.astype(np.uint8)
def prim_blend_mode(a: np.ndarray, b: np.ndarray, mode: str) -> np.ndarray:
    """Blend two images with a named Photoshop-style mode.

    Modes: add, multiply, screen, overlay, difference, lighten, darken.
    Unknown modes return a unchanged. b is resized to a's geometry first.
    """
    if a.shape[:2] != b.shape[:2]:
        b = cv2.resize(b, (a.shape[1], a.shape[0]), interpolation=cv2.INTER_LINEAR)
    af = a.astype(float) / 255
    bf = b.astype(float) / 255
    # Dispatch table of blend formulas over normalized 0-1 values.
    formulas = {
        "add": lambda: af + bf,
        "multiply": lambda: af * bf,
        "screen": lambda: 1 - (1 - af) * (1 - bf),
        "overlay": lambda: np.where(af < 0.5, 2 * af * bf,
                                    1 - 2 * (1 - af) * (1 - bf)),
        "difference": lambda: np.abs(af - bf),
        "lighten": lambda: np.maximum(af, bf),
        "darken": lambda: np.minimum(af, bf),
    }
    result = formulas[mode]() if mode in formulas else af
    return (np.clip(result, 0, 1) * 255).astype(np.uint8)
def prim_mask(img: np.ndarray, mask_img: np.ndarray) -> np.ndarray:
    """Multiply img by a mask: white keeps pixels, black zeroes them.

    A 3-channel mask is first converted to grayscale via cv2.
    """
    if mask_img.ndim == 3:
        gray = cv2.cvtColor(mask_img, cv2.COLOR_RGB2GRAY)
    else:
        gray = mask_img
    weights = gray.astype(float) / 255
    weighted = img.astype(float) * weights[:, :, np.newaxis]
    return weighted.astype(np.uint8)
# =============================================================================
# Drawing
# =============================================================================
# Simple font (5x7 bitmap characters)
# Each glyph is 7 rows; each row is presumably a 5-bit column mask — TODO
# confirm bit order if a bitmap renderer is added. NOTE(review): the
# drawing primitives below render with cv2's Hershey font, so this table
# is currently unused by them.
FONT_5X7 = {
    ' ': [0, 0, 0, 0, 0, 0, 0],
    '.': [0, 0, 0, 0, 0, 0, 4],
    ':': [0, 0, 4, 0, 4, 0, 0],
    '-': [0, 0, 0, 14, 0, 0, 0],
    '=': [0, 0, 14, 0, 14, 0, 0],
    '+': [0, 4, 4, 31, 4, 4, 0],
    '*': [0, 4, 21, 14, 21, 4, 0],
    '#': [10, 31, 10, 10, 31, 10, 0],
    '%': [19, 19, 4, 8, 25, 25, 0],
    '@': [14, 17, 23, 21, 23, 16, 14],
    '0': [14, 17, 19, 21, 25, 17, 14],
    '1': [4, 12, 4, 4, 4, 4, 14],
    '2': [14, 17, 1, 2, 4, 8, 31],
    '3': [31, 2, 4, 2, 1, 17, 14],
    '4': [2, 6, 10, 18, 31, 2, 2],
    '5': [31, 16, 30, 1, 1, 17, 14],
    '6': [6, 8, 16, 30, 17, 17, 14],
    '7': [31, 1, 2, 4, 8, 8, 8],
    '8': [14, 17, 17, 14, 17, 17, 14],
    '9': [14, 17, 17, 15, 1, 2, 12],
}
# Add uppercase letters (blank placeholders — no glyph data yet)
for i, c in enumerate('ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
    FONT_5X7[c] = [0] * 7  # Placeholder
def prim_draw_char(img: np.ndarray, char: str, x: int, y: int,
                   size: int, color: List[int]) -> np.ndarray:
    """Draw a single character at (x, y).

    Thin wrapper over prim_draw_text: the two previously duplicated the
    same cv2.putText call verbatim, so delegating keeps them in sync.
    """
    return prim_draw_text(img, char, x, y, size, color)


def prim_draw_text(img: np.ndarray, text: str, x: int, y: int,
                   size: int, color: List[int]) -> np.ndarray:
    """Draw text on img in place using cv2's Hershey simplex font.

    (x, y) is the top of the text; cv2.putText anchors at the baseline,
    hence the `y + size` offset. Scale and thickness are derived from the
    requested pixel size.
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    scale = size / 20.0
    thickness = max(1, int(size / 10))
    cv2.putText(img, text, (int(x), int(y + size)), font, scale, tuple(color[:3]), thickness)
    return img
def prim_fill_rect(img: np.ndarray, x: int, y: int, w: int, h: int,
                   color: List[int]) -> np.ndarray:
    """Fill an axis-aligned rectangle in place, clipped to the image.

    Previously a negative x or y flowed straight into the slice and was
    interpreted as wrap-around indexing from the far edge, painting the
    wrong region; coordinates are now clamped so off-canvas rectangles
    are cropped (or skipped entirely) instead.
    """
    ih, iw = img.shape[:2]
    x, y, w, h = int(x), int(y), int(w), int(h)
    # Clip the rectangle against the image bounds.
    x0 = max(0, x)
    y0 = max(0, y)
    x1 = min(iw, x + w)
    y1 = min(ih, y + h)
    if x1 > x0 and y1 > y0:
        img[y0:y1, x0:x1] = color[:3]
    return img
def prim_draw_line(img: np.ndarray, x1: int, y1: int, x2: int, y2: int,
                   color: List[int], thickness: int = 1) -> np.ndarray:
    """Draw a straight line from (x1, y1) to (x2, y2) on img in place."""
    cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)), tuple(color[:3]), int(thickness))
    return img
# =============================================================================
# Math Primitives
# =============================================================================
def prim_sin(x: float) -> float:
    """Sine of x (radians)."""
    return math.sin(x)


def prim_cos(x: float) -> float:
    """Cosine of x (radians)."""
    return math.cos(x)


def prim_tan(x: float) -> float:
    """Tangent of x (radians)."""
    return math.tan(x)


def prim_atan2(y: float, x: float) -> float:
    """Angle of the vector (x, y) in radians."""
    return math.atan2(y, x)


def prim_sqrt(x: float) -> float:
    """Square root; negative inputs are treated as 0 instead of raising."""
    return math.sqrt(max(0, x))


def prim_pow(x: float, y: float) -> float:
    """x raised to the power y."""
    return math.pow(x, y)


def prim_abs(x: float) -> float:
    """Absolute value."""
    return abs(x)


def prim_floor(x: float) -> int:
    """Largest integer <= x."""
    return int(math.floor(x))


def prim_ceil(x: float) -> int:
    """Smallest integer >= x."""
    return int(math.ceil(x))


def prim_round(x: float) -> int:
    """x rounded to the nearest integer (Python's round-half-to-even on ties)."""
    return int(round(x))


def prim_min(*args) -> float:
    """Smallest of the arguments."""
    return min(args)


def prim_max(*args) -> float:
    """Largest of the arguments."""
    return max(args)


def prim_clamp(x: float, lo: float, hi: float) -> float:
    """x clamped into [lo, hi]."""
    return max(lo, min(hi, x))


def prim_lerp(a: float, b: float, t: float) -> float:
    """Linear interpolation: a at t=0, b at t=1."""
    return a + (b - a) * t


def prim_mod(a: float, b: float) -> float:
    """a modulo b (Python semantics: result takes b's sign)."""
    return a % b


def prim_random(lo: float = 0, hi: float = 1) -> float:
    """Random number from global RNG."""
    return _rng.random(lo, hi)


def prim_randint(lo: int, hi: int) -> int:
    """Random integer from global RNG (both endpoints inclusive)."""
    return _rng.randint(lo, hi)


def prim_gaussian(mean: float = 0, std: float = 1) -> float:
    """Gaussian random from global RNG."""
    return _rng.gaussian(mean, std)


def prim_assert(condition, message: str = "Assertion failed"):
    """Assert that condition is true, raise error with message if false."""
    if not condition:
        raise RuntimeError(f"Assertion error: {message}")
    return True
# =============================================================================
# Array/List Primitives
# =============================================================================
def prim_length(seq) -> int:
    """Number of elements in seq."""
    return len(seq)


def prim_nth(seq, i: int):
    """Element at index i, or None when out of range (no negative wrap)."""
    idx = int(i)
    return seq[idx] if 0 <= idx < len(seq) else None


def prim_first(seq):
    """First element, or None for an empty sequence."""
    if not seq:
        return None
    return seq[0]


def prim_rest(seq):
    """Everything after the first element ([] for an empty sequence)."""
    if not seq:
        return []
    return seq[1:]


def prim_take(seq, n: int):
    """First n elements."""
    return seq[:int(n)]


def prim_drop(seq, n: int):
    """All elements after the first n."""
    return seq[int(n):]


def prim_cons(x, seq):
    """New list with x prepended to seq."""
    out = [x]
    out.extend(seq)
    return out


def prim_append(*seqs):
    """Concatenate any number of sequences into one list."""
    return [item for s in seqs for item in s]


def prim_reverse(seq):
    """Reversed copy of seq as a list."""
    return list(reversed(seq))


def prim_range(start: int, end: int, step: int = 1) -> List[int]:
    """Integers from start up to (but excluding) end, by step."""
    return list(range(int(start), int(end), int(step)))


def prim_roll(arr: np.ndarray, shift: int, axis: int = 0) -> np.ndarray:
    """Circularly shift an array along an axis."""
    return np.roll(arr, int(shift), axis=int(axis))


def prim_list(*args) -> list:
    """Create a list from the arguments."""
    return list(args)
# =============================================================================
# Primitive Registry
# =============================================================================
def prim_add(*args):
    """Sum of all arguments (0 for none)."""
    return sum(args)


def prim_sub(a, b=None):
    """a - b, or unary negation when b is omitted."""
    return -a if b is None else a - b


def prim_mul(*args):
    """Product of all arguments (1 for none)."""
    product = 1
    for value in args:
        product *= value
    return product


def prim_div(a, b):
    """a / b; division by zero yields 0 instead of raising."""
    if b == 0:
        return 0
    return a / b


def prim_lt(a, b):
    """a < b."""
    return a < b


def prim_gt(a, b):
    """a > b."""
    return a > b


def prim_le(a, b):
    """a <= b."""
    return a <= b


def prim_ge(a, b):
    """a >= b."""
    return a >= b


def prim_eq(a, b):
    """Equality that tolerates None and numpy arrays.

    None equals only None; arrays compare element-wise against other
    arrays and are never equal to non-arrays.
    """
    if a is None or b is None:
        return a is None and b is None
    a_is_arr = isinstance(a, np.ndarray)
    b_is_arr = isinstance(b, np.ndarray)
    if a_is_arr or b_is_arr:
        if a_is_arr and b_is_arr:
            return np.array_equal(a, b)
        return False
    return a == b


def prim_ne(a, b):
    """Negation of prim_eq."""
    return not prim_eq(a, b)
# =============================================================================
# Vectorized Bulk Operations (true primitives for composing effects)
# =============================================================================
def prim_color_matrix(img: np.ndarray, matrix: List[List[float]]) -> np.ndarray:
    """Apply a 3x3 color transform to every pixel (matrix rows -> output channels)."""
    m = np.array(matrix, dtype=np.float32)
    transformed = img.astype(np.float32) @ m.T
    return np.clip(transformed, 0, 255).astype(np.uint8)


def prim_adjust(img: np.ndarray, brightness: float = 0, contrast: float = 1) -> np.ndarray:
    """Brightness/contrast adjustment pivoting around mid-gray 128.

    brightness: -255 to 255 offset; contrast: multiplier, 0 to 3+.
    """
    scaled = (img.astype(np.float32) - 128) * contrast + 128 + brightness
    return np.clip(scaled, 0, 255).astype(np.uint8)


def prim_mix_gray(img: np.ndarray, amount: float) -> np.ndarray:
    """Desaturate toward Rec. 601 grayscale: amount 0 = original, 1 = gray."""
    gray = 0.299 * img[:, :, 0] + 0.587 * img[:, :, 1] + 0.114 * img[:, :, 2]
    gray_rgb = np.stack([gray] * 3, axis=-1)
    mixed = img.astype(np.float32) * (1 - amount) + gray_rgb * amount
    return np.clip(mixed, 0, 255).astype(np.uint8)


def prim_invert_img(img: np.ndarray) -> np.ndarray:
    """Photographic negative of the image."""
    return (255 - img).astype(np.uint8)
def prim_add_noise(img: np.ndarray, amount: float) -> np.ndarray:
    """Add zero-mean Gaussian noise (std = amount) to every channel.

    Draws from the module-level seeded RNG so frames are reproducible
    after reset_rng(); reaches into its private RandomState to get a
    whole-array draw in one call.
    """
    noise = _rng._rng.normal(0, amount, img.shape)
    result = img.astype(np.float32) + noise
    return np.clip(result, 0, 255).astype(np.uint8)
def prim_quantize(img: np.ndarray, levels: int) -> np.ndarray:
    """Posterize to `levels` values per channel (minimum 2).

    Each value snaps to its bucket, then shifts to the bucket midpoint.
    """
    levels = max(2, int(levels))
    step = 256 / levels
    quantized = (img // step) * step + step // 2
    return np.clip(quantized, 0, 255).astype(np.uint8)
def prim_shift_hsv(img: np.ndarray, h: float = 0, s: float = 1, v: float = 1) -> np.ndarray:
    """Shift HSV: h=degrees offset, s/v=multipliers.

    cv2 stores hue as 0-179 (degrees / 2) in uint8 images, hence the
    h / 2 and the wrap at 180 below.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV).astype(np.float32)
    hsv[:, :, 0] = (hsv[:, :, 0] + h / 2) % 180
    hsv[:, :, 1] = np.clip(hsv[:, :, 1] * s, 0, 255)
    hsv[:, :, 2] = np.clip(hsv[:, :, 2] * v, 0, 255)
    return cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2RGB)
# =============================================================================
# Array Math Primitives (vectorized operations on coordinate arrays)
# =============================================================================
def prim_arr_add(a: np.ndarray, b) -> np.ndarray:
    """Element-wise addition; b may be an array or a scalar."""
    total = np.asarray(a) + np.asarray(b)
    return total.astype(np.float32)


def prim_arr_sub(a: np.ndarray, b) -> np.ndarray:
    """Element-wise subtraction; b may be an array or a scalar."""
    diff = np.asarray(a) - np.asarray(b)
    return diff.astype(np.float32)


def prim_arr_mul(a: np.ndarray, b) -> np.ndarray:
    """Element-wise multiplication; b may be an array or a scalar."""
    product = np.asarray(a) * np.asarray(b)
    return product.astype(np.float32)


def prim_arr_div(a: np.ndarray, b) -> np.ndarray:
    """Element-wise division; zeros in b are replaced with a tiny epsilon."""
    b = np.asarray(b)
    safe_b = np.where(b == 0, 1e-10, b)
    with np.errstate(divide='ignore', invalid='ignore'):
        quotient = np.asarray(a) / safe_b
    return quotient.astype(np.float32)


def prim_arr_mod(a: np.ndarray, b) -> np.ndarray:
    """Element-wise modulo."""
    remainder = np.asarray(a) % np.asarray(b)
    return remainder.astype(np.float32)


def prim_arr_sin(a: np.ndarray) -> np.ndarray:
    """Element-wise sine."""
    return np.sin(np.asarray(a)).astype(np.float32)


def prim_arr_cos(a: np.ndarray) -> np.ndarray:
    """Element-wise cosine."""
    return np.cos(np.asarray(a)).astype(np.float32)


def prim_arr_tan(a: np.ndarray) -> np.ndarray:
    """Element-wise tangent."""
    return np.tan(np.asarray(a)).astype(np.float32)


def prim_arr_sqrt(a: np.ndarray) -> np.ndarray:
    """Element-wise square root; negatives are floored to 0 first."""
    non_negative = np.maximum(0, np.asarray(a))
    return np.sqrt(non_negative).astype(np.float32)


def prim_arr_pow(a: np.ndarray, b) -> np.ndarray:
    """Element-wise power."""
    raised = np.power(np.asarray(a), np.asarray(b))
    return raised.astype(np.float32)


def prim_arr_abs(a: np.ndarray) -> np.ndarray:
    """Element-wise absolute value."""
    return np.abs(np.asarray(a)).astype(np.float32)


def prim_arr_neg(a: np.ndarray) -> np.ndarray:
    """Element-wise negation."""
    return (-np.asarray(a)).astype(np.float32)


def prim_arr_exp(a: np.ndarray) -> np.ndarray:
    """Element-wise exponential."""
    return np.exp(np.asarray(a)).astype(np.float32)


def prim_arr_atan2(y: np.ndarray, x: np.ndarray) -> np.ndarray:
    """Element-wise atan2(y, x)."""
    angle = np.arctan2(np.asarray(y), np.asarray(x))
    return angle.astype(np.float32)


def prim_arr_min(a: np.ndarray, b) -> np.ndarray:
    """Element-wise minimum."""
    smallest = np.minimum(np.asarray(a), np.asarray(b))
    return smallest.astype(np.float32)


def prim_arr_max(a: np.ndarray, b) -> np.ndarray:
    """Element-wise maximum."""
    largest = np.maximum(np.asarray(a), np.asarray(b))
    return largest.astype(np.float32)


def prim_arr_clip(a: np.ndarray, lo, hi) -> np.ndarray:
    """Element-wise clip into [lo, hi]."""
    return np.clip(np.asarray(a), lo, hi).astype(np.float32)


def prim_arr_where(cond: np.ndarray, a, b) -> np.ndarray:
    """Element-wise select: a where cond is true, otherwise b."""
    chosen = np.where(np.asarray(cond), np.asarray(a), np.asarray(b))
    return chosen.astype(np.float32)


def prim_arr_floor(a: np.ndarray) -> np.ndarray:
    """Element-wise floor."""
    return np.floor(np.asarray(a)).astype(np.float32)


def prim_arr_lerp(a: np.ndarray, b: np.ndarray, t) -> np.ndarray:
    """Element-wise linear interpolation from a to b by t."""
    start = np.asarray(a)
    delta = np.asarray(b) - start
    return (start + delta * t).astype(np.float32)
# =============================================================================
# Coordinate Transformation Primitives
# =============================================================================
def prim_polar_from_center(img_or_w, h_or_cx=None, cx=None, cy=None) -> Tuple[np.ndarray, np.ndarray]:
    """
    Create polar coordinates (r, theta) from image center.
    Usage:
        (polar-from-center img)        ; center of image
        (polar-from-center img cx cy)  ; custom center
        (polar-from-center w h cx cy)  ; explicit dimensions
    Returns: (r, theta) tuple of arrays
    """
    if isinstance(img_or_w, np.ndarray):
        h, w = img_or_w.shape[:2]
        if h_or_cx is None:
            # (img): default to the geometric center.
            cx, cy = w / 2, h / 2
        else:
            # (img cx cy): positional args shift left by one, so h_or_cx
            # actually holds cx and the cx parameter holds cy.
            cx, cy = h_or_cx, cx if cx is not None else h / 2
    else:
        # (w h [cx [cy]]): explicit dimensions, parameters line up normally.
        w = int(img_or_w)
        h = int(h_or_cx)
        cx = cx if cx is not None else w / 2
        cy = cy if cy is not None else h / 2
    y_coords, x_coords = np.mgrid[0:h, 0:w].astype(np.float32)
    dx = x_coords - cx
    dy = y_coords - cy
    r = np.sqrt(dx**2 + dy**2)
    theta = np.arctan2(dy, dx)
    return (r, theta)
def prim_cart_from_polar(r: np.ndarray, theta: np.ndarray, cx: float, cy: float) -> Tuple[np.ndarray, np.ndarray]:
    """Map polar (r, theta) arrays back to Cartesian (x, y) about (cx, cy).

    Args:
        r: radius array
        theta: angle array (radians)
        cx, cy: center point
    Returns: (x, y) tuple of float32 coordinate arrays
    """
    xs = (cx + r * np.cos(theta)).astype(np.float32)
    ys = (cy + r * np.sin(theta)).astype(np.float32)
    return (xs, ys)
def prim_normalize_coords(img_or_w, h_or_cx=None, cx=None, cy=None) -> Tuple[np.ndarray, np.ndarray]:
    """
    Create normalized coordinates (-1 to 1) from center.
    Returns: (x_norm, y_norm) tuple of arrays where center is (0,0)
    """
    if isinstance(img_or_w, np.ndarray):
        # Image form: remaining args are shifted (h_or_cx holds cx, cx holds cy).
        h, w = img_or_w.shape[:2]
        if h_or_cx is None:
            center_x = w / 2
            center_y = h / 2
        else:
            center_x = h_or_cx
            center_y = cx if cx is not None else h / 2
    else:
        # Explicit-dimension form: (w, h, cx, cy).
        w, h = int(img_or_w), int(h_or_cx)
        center_x = w / 2 if cx is None else cx
        center_y = h / 2 if cy is None else cy
    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    x_norm = (xs - center_x) / (w / 2)
    y_norm = (ys - center_y) / (h / 2)
    return (x_norm, y_norm)
def prim_coords_x(coords: Tuple[np.ndarray, np.ndarray]) -> np.ndarray:
    """Return the first (x) component of a coordinate tuple."""
    return coords[0]
def prim_coords_y(coords: Tuple[np.ndarray, np.ndarray]) -> np.ndarray:
    """Return the second (y) component of a coordinate tuple."""
    return coords[1]
def prim_make_coords_centered(w: int, h: int, cx: float = None, cy: float = None) -> Tuple[np.ndarray, np.ndarray]:
    """
    Create coordinate grids centered at (cx, cy).
    Like make-coords but returns coordinates relative to center.
    """
    w, h = int(w), int(h)
    center_x = w / 2 if cx is None else cx
    center_y = h / 2 if cy is None else cy
    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    return (xs - center_x, ys - center_y)
# =============================================================================
# Specialized Distortion Primitives
# =============================================================================
def prim_wave_displace(w: int, h: int, axis: str, freq: float, amp: float, phase: float = 0) -> Tuple[np.ndarray, np.ndarray]:
    """
    Create wave displacement maps.
    Args:
        w, h: dimensions
        axis: "x" (horizontal waves) or "y" (vertical waves), or "both"
        freq: wave frequency (waves per image width/height)
        amp: wave amplitude in pixels
        phase: phase offset in radians
    Returns: (map_x, map_y) for use with remap; identity maps for an unknown axis.
    """
    w, h = int(w), int(h)
    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    map_x, map_y = xs, ys
    two_pi = 2 * np.pi
    if axis in ("x", "horizontal"):
        # Horizontal waves: x displacement driven by the row position.
        map_x = xs + np.sin(two_pi * freq * ys / h + phase) * amp
    elif axis in ("y", "vertical"):
        # Vertical waves: y displacement driven by the column position.
        map_y = ys + np.sin(two_pi * freq * xs / w + phase) * amp
    elif axis == "both":
        # Both waves are computed from the undisplaced base grids.
        map_x = xs + np.sin(two_pi * freq * ys / h + phase) * amp
        map_y = ys + np.sin(two_pi * freq * xs / w + phase) * amp
    return (map_x, map_y)
def prim_swirl_displace(w: int, h: int, strength: float, radius: float = 0.5,
                        cx: float = None, cy: float = None, falloff: str = "quadratic") -> Tuple[np.ndarray, np.ndarray]:
    """
    Create swirl displacement maps.
    Args:
        w, h: dimensions
        strength: swirl strength in radians
        radius: effect radius as fraction of max dimension
        cx, cy: center (defaults to image center)
        falloff: "linear", "quadratic", or "gaussian"
    Returns: (map_x, map_y) for use with remap
    """
    w, h = int(w), int(h)
    center_x = w / 2 if cx is None else cx
    center_y = h / 2 if cy is None else cy
    max_radius = max(w, h) * radius
    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    off_x = xs - center_x
    off_y = ys - center_y
    dist = np.sqrt(off_x**2 + off_y**2)
    base_angle = np.arctan2(off_y, off_x)
    # Distance normalized by the effect radius drives the falloff weight.
    norm_dist = dist / max_radius
    if falloff == "linear":
        weight = np.maximum(0, 1 - norm_dist)
    elif falloff == "gaussian":
        weight = np.exp(-norm_dist**2 * 2)
    else:  # quadratic
        weight = np.maximum(0, 1 - norm_dist**2)
    # Rotate each sample around the center by a falloff-weighted angle.
    twisted = base_angle + strength * weight
    map_x = (center_x + dist * np.cos(twisted)).astype(np.float32)
    map_y = (center_y + dist * np.sin(twisted)).astype(np.float32)
    return (map_x, map_y)
def prim_fisheye_displace(w: int, h: int, strength: float, cx: float = None, cy: float = None,
                          zoom_correct: bool = True) -> Tuple[np.ndarray, np.ndarray]:
    """
    Create fisheye/barrel distortion displacement maps.
    Args:
        w, h: dimensions
        strength: distortion strength (-1 to 1, positive=bulge, negative=pinch)
        cx, cy: center (defaults to image center)
        zoom_correct: auto-zoom to hide black edges
    Returns: (map_x, map_y) for use with remap
    """
    w, h = int(w), int(h)
    center_x = w / 2 if cx is None else cx
    center_y = h / 2 if cy is None else cy
    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    # Work in normalized coordinates so strength is resolution-independent.
    nx = (xs - center_x) / (w / 2)
    ny = (ys - center_y) / (h / 2)
    rad = np.sqrt(nx**2 + ny**2)
    if strength > 0:
        # Barrel: push samples outward with a cubic term.
        warped = rad * (1 + strength * rad**2)
    else:
        # Pincushion: pull samples inward; small epsilon guards the denominator.
        warped = rad / (1 - strength * rad**2 + 0.001)
    # Per-pixel radial scale; center (rad == 0) stays fixed.
    with np.errstate(divide='ignore', invalid='ignore'):
        scale = np.where(rad > 0, warped / rad, 1)
    if zoom_correct and strength > 0:
        scale = scale / (1 + strength * 0.5)
    map_x = (nx * scale * (w / 2) + center_x).astype(np.float32)
    map_y = (ny * scale * (h / 2) + center_y).astype(np.float32)
    return (map_x, map_y)
def prim_kaleidoscope_displace(w: int, h: int, segments: int, rotation: float = 0,
                               cx: float = None, cy: float = None, zoom: float = 1.0) -> Tuple[np.ndarray, np.ndarray]:
    """
    Create kaleidoscope displacement maps.
    Args:
        w, h: dimensions
        segments: number of symmetry segments (clamped to 3-16)
        rotation: rotation angle in degrees
        cx, cy: center (defaults to image center)
        zoom: zoom factor
    Returns: (map_x, map_y) for use with remap
    """
    w, h = int(w), int(h)
    segments = max(3, min(int(segments), 16))
    center_x = w / 2 if cx is None else cx
    center_y = h / 2 if cy is None else cy
    wedge = 2 * np.pi / segments
    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    off_x = xs - center_x
    off_y = ys - center_y
    # Polar form around the center, rotated by the requested amount.
    rad = np.sqrt(off_x**2 + off_y**2)
    ang = np.arctan2(off_y, off_x)
    ang = ang - np.deg2rad(rotation)
    # Fold every angle into its wedge...
    wrapped = ang % (2 * np.pi)
    seg_idx = (wrapped / wedge).astype(int)
    local = wrapped - seg_idx * wedge
    # ...mirroring every other wedge so the pattern reflects seamlessly.
    odd = (seg_idx % 2) == 1
    local = np.where(odd, wedge - local, local)
    rad = rad / zoom
    map_x = (rad * np.cos(local) + center_x).astype(np.float32)
    map_y = (rad * np.sin(local) + center_y).astype(np.float32)
    return (map_x, map_y)
# =============================================================================
# Character/ASCII Art Primitives
# =============================================================================
# Character sets ordered by visual density (light to dark)
# Named character ramps; each string runs light -> dark so luminance maps
# directly to an index. Unknown alphabet names are treated as literal ramps.
CHAR_ALPHABETS = {
    # Classic dense ASCII ramp (~70 glyphs).
    "standard": " .`'^\",:;Il!i><~+_-?][}{1)(|/tfjrxnuvczXYUJCLQ0OZmwqpdbkhao*#MW&8%B@$",
    # Unicode block elements: five coarse density steps.
    "blocks": " ░▒▓█",
    "simple": " .-:=+*#%@",
    "digits": " 0123456789",
}
# Global atlas cache: maps "alphabet_cellsize" -> {char: glyph mask}.
# Grows unbounded, but the set of (alphabet, cell_size) pairs is small in practice.
_char_atlas_cache = {}
def _get_char_atlas(alphabet: str, cell_size: int) -> dict:
    """Get or create a character atlas for *alphabet*.

    Args:
        alphabet: CHAR_ALPHABETS key, or a literal character string
        cell_size: square glyph size in pixels
    Returns: dict mapping each character to a (cell_size, cell_size) uint8 mask
             (255 where the glyph is drawn, 0 elsewhere; space stays blank)
    """
    cache_key = f"{alphabet}_{cell_size}"
    if cache_key in _char_atlas_cache:
        return _char_atlas_cache[cache_key]
    chars = CHAR_ALPHABETS.get(alphabet, alphabet)  # Use as literal if not found
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = cell_size / 20.0
    thickness = max(1, int(cell_size / 10))
    atlas = {}
    for char in chars:
        char_img = np.zeros((cell_size, cell_size), dtype=np.uint8)
        if char != ' ':
            try:
                # Center the glyph in its cell using the measured text extent.
                (text_w, text_h), baseline = cv2.getTextSize(char, font, font_scale, thickness)
                text_x = max(0, (cell_size - text_w) // 2)
                text_y = (cell_size + text_h) // 2
                cv2.putText(char_img, char, (text_x, text_y), font, font_scale, 255, thickness, cv2.LINE_AA)
            except Exception:
                # cv2 may fail on glyphs it cannot render; leave the cell blank.
                # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
                pass
        atlas[char] = char_img
    _char_atlas_cache[cache_key] = atlas
    return atlas
def prim_cell_sample(img: np.ndarray, cell_size: int) -> Tuple[np.ndarray, np.ndarray]:
    """
    Sample image into cell grid, returning average colors and luminances.
    Args:
        img: source image
        cell_size: size of each cell in pixels
    Returns: (colors, luminances) tuple
    - colors: (rows, cols, 3) array of average RGB per cell
    - luminances: (rows, cols) array of average brightness 0-255
    """
    cell_size = max(1, int(cell_size))
    h, w = img.shape[:2]
    rows, cols = h // cell_size, w // cell_size
    # Image smaller than one cell: return a single empty cell.
    if rows < 1 or cols < 1:
        empty_colors = np.zeros((1, 1, 3), dtype=np.uint8)
        empty_lum = np.zeros((1, 1), dtype=np.float32)
        return (empty_colors, empty_lum)
    # Trim any partial cells at the right/bottom edges.
    trimmed = img[:rows * cell_size, :cols * cell_size]
    # View each cell as its own block, then average within each block.
    blocks = trimmed.reshape(rows, cell_size, cols, cell_size, 3)
    colors = blocks.mean(axis=(1, 3)).astype(np.uint8)
    # BT.601 luma weights for perceptual brightness.
    luminances = (0.299 * colors[:, :, 0] +
                  0.587 * colors[:, :, 1] +
                  0.114 * colors[:, :, 2]).astype(np.float32)
    return (colors, luminances)
def cell_sample_extended(img: np.ndarray, cell_size: int) -> Tuple[np.ndarray, np.ndarray, List[List[ZoneContext]]]:
    """
    Sample image into cell grid, returning colors, luminances, and full zone contexts.
    Args:
        img: source image (RGB)
        cell_size: size of each cell in pixels
    Returns: (colors, luminances, zone_contexts) tuple
    - colors: (rows, cols, 3) array of average RGB per cell
    - luminances: (rows, cols) array of average brightness 0-255
    - zone_contexts: 2D list of ZoneContext objects with full cell data
      (positions normalized 0-1, luminance/sat/rgb 0-1, hue 0-360)
    """
    cell_size = max(1, int(cell_size))
    h, w = img.shape[:2]
    rows = h // cell_size
    cols = w // cell_size
    # Degenerate case: image smaller than one cell -> single all-zero zone.
    if rows < 1 or cols < 1:
        return (np.zeros((1, 1, 3), dtype=np.uint8),
                np.zeros((1, 1), dtype=np.float32),
                [[ZoneContext(0, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)]])
    # Crop to grid (drop partial cells at right/bottom edges)
    grid_h, grid_w = rows * cell_size, cols * cell_size
    cropped = img[:grid_h, :grid_w]
    # Reshape so each cell is its own block, then average per block
    reshaped = cropped.reshape(rows, cell_size, cols, cell_size, 3)
    colors = reshaped.mean(axis=(1, 3)).astype(np.uint8)
    # Compute luminance (0-255) using BT.601 luma weights
    luminances = (0.299 * colors[:, :, 0] +
                  0.587 * colors[:, :, 1] +
                  0.114 * colors[:, :, 2]).astype(np.float32)
    # Normalize colors to 0-1 for HSV/saturation calculations
    colors_float = colors.astype(np.float32) / 255.0
    # Compute HSV values for each cell (vectorized RGB->HSV)
    max_c = colors_float.max(axis=2)
    min_c = colors_float.min(axis=2)
    diff = max_c - min_c
    # Saturation: chroma over value; 0 where the cell is black
    saturation = np.where(max_c > 0, diff / max_c, 0)
    # Hue (0-360); gray cells (diff == 0) keep hue 0
    hue = np.zeros((rows, cols), dtype=np.float32)
    # Avoid division by zero
    mask = diff > 0
    r, g, b = colors_float[:, :, 0], colors_float[:, :, 1], colors_float[:, :, 2]
    # Red is max
    red_max = mask & (max_c == r)
    hue[red_max] = 60 * (((g[red_max] - b[red_max]) / diff[red_max]) % 6)
    # Green is max
    green_max = mask & (max_c == g)
    hue[green_max] = 60 * ((b[green_max] - r[green_max]) / diff[green_max] + 2)
    # Blue is max
    # NOTE: when two channels tie for max, more than one mask matches and the
    # later assignment wins (blue over green over red) -- order matters here.
    blue_max = mask & (max_c == b)
    hue[blue_max] = 60 * ((r[blue_max] - g[blue_max]) / diff[blue_max] + 4)
    # Ensure hue is in 0-360 range
    hue = hue % 360
    # Build zone contexts (one ZoneContext per cell, Python-level loop)
    zone_contexts = []
    for row in range(rows):
        row_contexts = []
        for col in range(cols):
            ctx = ZoneContext(
                row=row,
                col=col,
                # Single-row/column grids get the midpoint 0.5 instead of 0/0
                row_norm=row / max(1, rows - 1) if rows > 1 else 0.5,
                col_norm=col / max(1, cols - 1) if cols > 1 else 0.5,
                luminance=luminances[row, col] / 255.0,  # Normalize to 0-1
                saturation=float(saturation[row, col]),
                hue=float(hue[row, col]),
                r=float(colors_float[row, col, 0]),
                g=float(colors_float[row, col, 1]),
                b=float(colors_float[row, col, 2]),
            )
            row_contexts.append(ctx)
        zone_contexts.append(row_contexts)
    return (colors, luminances, zone_contexts)
def prim_luminance_to_chars(luminances: np.ndarray, alphabet: str, contrast: float = 1.0) -> List[List[str]]:
    """
    Map luminance values to characters from alphabet.
    Args:
        luminances: (rows, cols) array of brightness values 0-255
        alphabet: character set name or literal string (light to dark)
        contrast: contrast boost factor
    Returns: 2D list of single-character strings
    """
    chars = CHAR_ALPHABETS.get(alphabet, alphabet)
    num_chars = len(chars)
    lum = luminances.astype(np.float32)
    if contrast != 1.0:
        # Expand/compress around mid-gray, then clamp back into byte range.
        lum = (lum - 128) * contrast + 128
        lum = np.clip(lum, 0, 255)
    # Scale brightness onto alphabet indices and clamp defensively.
    indices = ((lum / 255) * (num_chars - 1)).astype(np.int32)
    indices = np.clip(indices, 0, num_chars - 1)
    # One character per cell, light-to-dark following the alphabet order.
    return [[chars[idx] for idx in row] for row in indices]
def prim_render_char_grid(img: np.ndarray, chars: List[List[str]], colors: np.ndarray,
                          cell_size: int, color_mode: str = "color",
                          background_color: str = "black",
                          invert_colors: bool = False) -> np.ndarray:
    """
    Render a grid of characters onto an image.
    Args:
        img: source image (for dimensions)
        chars: 2D list of single characters
        colors: (rows, cols, 3) array of colors per cell
        cell_size: size of each cell
        color_mode: "color" (original colors), "mono" (white), "invert",
                    or any color name/hex value ("green", "lime", "#00ff00")
        background_color: background color name/hex ("black", "navy", "#001100")
        invert_colors: if True, swap foreground and background colors
    Returns: rendered image
    """
    # Parse color_mode - may be a named color or hex value.
    # NOTE(review): the mode-keyword branches below imply parse_color returns
    # None for "color"/"mono"/"invert" -- confirm against parse_color.
    fg_color = parse_color(color_mode)
    # Parse background_color
    if isinstance(background_color, (list, tuple)):
        # Legacy: accept RGB list
        bg_color = tuple(int(c) for c in background_color[:3])
    else:
        bg_color = parse_color(background_color)
    if bg_color is None:
        bg_color = (0, 0, 0)  # Default to black
    # Handle invert_colors - swap fg and bg
    if invert_colors and fg_color is not None:
        fg_color, bg_color = bg_color, fg_color
    cell_size = max(1, int(cell_size))
    # Empty grid: nothing to draw, return the source unchanged.
    if not chars or not chars[0]:
        return img.copy()
    rows = len(chars)
    cols = len(chars[0])
    h, w = rows * cell_size, cols * cell_size
    bg = list(bg_color)
    result = np.full((h, w, 3), bg, dtype=np.uint8)
    # Collect all unique characters to build minimal atlas
    unique_chars = set()
    for row in chars:
        for ch in row:
            unique_chars.add(ch)
    # Build atlas for unique chars (one grayscale glyph mask per character)
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = cell_size / 20.0
    thickness = max(1, int(cell_size / 10))
    atlas = {}
    for char in unique_chars:
        char_img = np.zeros((cell_size, cell_size), dtype=np.uint8)
        if char and char != ' ':
            try:
                # Center the glyph using the measured text extent.
                (text_w, text_h), _ = cv2.getTextSize(char, font, font_scale, thickness)
                text_x = max(0, (cell_size - text_w) // 2)
                text_y = (cell_size + text_h) // 2
                cv2.putText(char_img, char, (text_x, text_y), font, font_scale, 255, thickness, cv2.LINE_AA)
            except:
                pass
        atlas[char] = char_img
    # Render characters: blit each glyph mask in its per-cell color
    for r in range(rows):
        for c in range(cols):
            char = chars[r][c]
            if not char or char == ' ':
                continue
            y1, x1 = r * cell_size, c * cell_size
            char_mask = atlas.get(char)
            if char_mask is None:
                continue
            if fg_color is not None:
                # Use fixed color (named color or hex value)
                color = np.array(fg_color, dtype=np.uint8)
            elif color_mode == "mono":
                color = np.array([255, 255, 255], dtype=np.uint8)
            elif color_mode == "invert":
                # Fill the cell with the sampled color, draw the glyph in black
                result[y1:y1+cell_size, x1:x1+cell_size] = colors[r, c]
                color = np.array([0, 0, 0], dtype=np.uint8)
            else:  # color
                color = colors[r, c]
            mask = char_mask > 0
            result[y1:y1+cell_size, x1:x1+cell_size][mask] = color
    # Resize to match original if needed: pad (or crop) with the background
    # color so the output always matches the source dimensions.
    orig_h, orig_w = img.shape[:2]
    if result.shape[0] != orig_h or result.shape[1] != orig_w:
        padded = np.full((orig_h, orig_w, 3), bg, dtype=np.uint8)
        copy_h = min(h, orig_h)
        copy_w = min(w, orig_w)
        padded[:copy_h, :copy_w] = result[:copy_h, :copy_w]
        result = padded
    return result
def prim_render_char_grid_fx(img: np.ndarray, chars: List[List[str]], colors: np.ndarray,
                             luminances: np.ndarray, cell_size: int,
                             color_mode: str = "color",
                             background_color: str = "black",
                             invert_colors: bool = False,
                             char_jitter: float = 0.0,
                             char_scale: float = 1.0,
                             char_rotation: float = 0.0,
                             char_hue_shift: float = 0.0,
                             jitter_source: str = "none",
                             scale_source: str = "none",
                             rotation_source: str = "none",
                             hue_source: str = "none") -> np.ndarray:
    """
    Render a grid of characters with per-character effects.
    Args:
        img: source image (for dimensions)
        chars: 2D list of single characters
        colors: (rows, cols, 3) array of colors per cell
        luminances: (rows, cols) array of luminance values (0-255)
        cell_size: size of each cell
        color_mode: "color", "mono", "invert", or any color name/hex
        background_color: background color name/hex
        invert_colors: if True, swap foreground and background colors
        char_jitter: base jitter amount in pixels
        char_scale: base scale factor (1.0 = normal)
        char_rotation: base rotation in degrees
        char_hue_shift: base hue shift in degrees (0-360)
        jitter_source: source for jitter modulation ("none", "luminance", "position", "random")
        scale_source: source for scale modulation
        rotation_source: source for rotation modulation
        hue_source: source for hue modulation
    Per-character effect sources:
        "none" - use base value only
        "luminance" - modulate by cell luminance (0-1)
        "inv_luminance" - modulate by inverse luminance (dark = high)
        "saturation" - modulate by cell color saturation
        "position_x" - modulate by horizontal position (0-1)
        "position_y" - modulate by vertical position (0-1)
        "position_diag" - modulate by diagonal position
        "random" - random per-cell value (deterministic from position)
        "center_dist" - distance from center (0=center, 1=corner)
    Returns: rendered image
    """
    # Parse colors (parse_color returns None for mode keywords, handled below)
    fg_color = parse_color(color_mode)
    if isinstance(background_color, (list, tuple)):
        bg_color = tuple(int(c) for c in background_color[:3])
    else:
        bg_color = parse_color(background_color)
    if bg_color is None:
        bg_color = (0, 0, 0)
    if invert_colors and fg_color is not None:
        fg_color, bg_color = bg_color, fg_color
    cell_size = max(1, int(cell_size))
    if not chars or not chars[0]:
        return img.copy()
    rows = len(chars)
    cols = len(chars[0])
    h, w = rows * cell_size, cols * cell_size
    bg = list(bg_color)
    result = np.full((h, w, 3), bg, dtype=np.uint8)
    # Normalize luminances to 0-1
    lum_normalized = luminances.astype(np.float32) / 255.0
    # Compute saturation from colors (chroma over value, 0 for black cells)
    colors_float = colors.astype(np.float32) / 255.0
    max_c = colors_float.max(axis=2)
    min_c = colors_float.min(axis=2)
    saturation = np.where(max_c > 0, (max_c - min_c) / max_c, 0)
    # Helper to get modulation value for a cell (0-1 range per source docs)
    def get_mod_value(source: str, r: int, c: int) -> float:
        if source == "none":
            return 1.0
        elif source == "luminance":
            return lum_normalized[r, c]
        elif source == "inv_luminance":
            return 1.0 - lum_normalized[r, c]
        elif source == "saturation":
            return saturation[r, c]
        elif source == "position_x":
            return c / max(1, cols - 1) if cols > 1 else 0.5
        elif source == "position_y":
            return r / max(1, rows - 1) if rows > 1 else 0.5
        elif source == "position_diag":
            px = c / max(1, cols - 1) if cols > 1 else 0.5
            py = r / max(1, rows - 1) if rows > 1 else 0.5
            return (px + py) / 2.0
        elif source == "random":
            # Deterministic random based on position (LCG-style hash)
            seed = (r * 1000 + c) % 10000
            return ((seed * 9301 + 49297) % 233280) / 233280.0
        elif source == "center_dist":
            cx, cy = (cols - 1) / 2.0, (rows - 1) / 2.0
            dx = (c - cx) / max(1, cx) if cx > 0 else 0
            dy = (r - cy) / max(1, cy) if cy > 0 else 0
            return min(1.0, math.sqrt(dx*dx + dy*dy))
        else:
            return 1.0
    # Build character atlas at base size
    font = cv2.FONT_HERSHEY_SIMPLEX
    base_font_scale = cell_size / 20.0
    thickness = max(1, int(cell_size / 10))
    unique_chars = set()
    for row in chars:
        for ch in row:
            unique_chars.add(ch)
    # For rotation/scale, we need to render characters larger then transform
    max_scale = max(1.0, char_scale * 1.5)  # Allow headroom for scaling
    atlas_size = int(cell_size * max_scale * 1.5)
    atlas = {}
    for char in unique_chars:
        if char and char != ' ':
            try:
                char_img = np.zeros((atlas_size, atlas_size), dtype=np.uint8)
                scaled_font = base_font_scale * max_scale
                (text_w, text_h), _ = cv2.getTextSize(char, font, scaled_font, thickness)
                text_x = max(0, (atlas_size - text_w) // 2)
                text_y = (atlas_size + text_h) // 2
                cv2.putText(char_img, char, (text_x, text_y), font, scaled_font, 255, thickness, cv2.LINE_AA)
                atlas[char] = char_img
            except Exception:
                # Unrenderable glyph: skip it at blit time.
                # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
                atlas[char] = None
        else:
            atlas[char] = None
    # Render characters with effects
    for r in range(rows):
        for c in range(cols):
            char = chars[r][c]
            if not char or char == ' ':
                continue
            char_img = atlas.get(char)
            if char_img is None:
                continue
            # Get per-cell modulation values
            jitter_mod = get_mod_value(jitter_source, r, c)
            scale_mod = get_mod_value(scale_source, r, c)
            rot_mod = get_mod_value(rotation_source, r, c)
            hue_mod = get_mod_value(hue_source, r, c)
            # Compute effective values
            eff_jitter = char_jitter * jitter_mod
            eff_scale = char_scale * (0.5 + 0.5 * scale_mod) if scale_source != "none" else char_scale
            eff_rotation = char_rotation * (rot_mod * 2 - 1)  # -1 to 1 range
            eff_hue_shift = char_hue_shift * hue_mod
            # Apply transformations
            transformed = char_img.copy()
            # Rotation (skip sub-half-degree rotations: visually negligible)
            if abs(eff_rotation) > 0.5:
                center = (atlas_size // 2, atlas_size // 2)
                rot_matrix = cv2.getRotationMatrix2D(center, eff_rotation, 1.0)
                transformed = cv2.warpAffine(transformed, rot_matrix, (atlas_size, atlas_size))
            # Scale - resize to target size
            target_size = max(1, int(cell_size * eff_scale))
            if target_size != atlas_size:
                transformed = cv2.resize(transformed, (target_size, target_size), interpolation=cv2.INTER_LINEAR)
            # Compute position with jitter
            base_y = r * cell_size
            base_x = c * cell_size
            if eff_jitter > 0:
                # Deterministic jitter based on position
                jx = ((r * 7 + c * 13) % 100) / 100.0 - 0.5
                jy = ((r * 11 + c * 17) % 100) / 100.0 - 0.5
                base_x += int(jx * eff_jitter * 2)
                base_y += int(jy * eff_jitter * 2)
            # Center the character in the cell
            offset = (target_size - cell_size) // 2
            y1 = base_y - offset
            x1 = base_x - offset
            # Determine color
            if fg_color is not None:
                color = np.array(fg_color, dtype=np.uint8)
            elif color_mode == "mono":
                color = np.array([255, 255, 255], dtype=np.uint8)
            elif color_mode == "invert":
                # Fill cell with source color first
                cy1 = max(0, r * cell_size)
                cy2 = min(h, (r + 1) * cell_size)
                cx1 = max(0, c * cell_size)
                cx2 = min(w, (c + 1) * cell_size)
                result[cy1:cy2, cx1:cx2] = colors[r, c]
                color = np.array([0, 0, 0], dtype=np.uint8)
            else:  # color mode
                color = colors[r, c].copy()
            # Apply hue shift
            if abs(eff_hue_shift) > 0.5 and color_mode not in ("mono", "invert") and fg_color is None:
                # Convert to HSV, shift hue, convert back
                color_hsv = cv2.cvtColor(color.reshape(1, 1, 3), cv2.COLOR_RGB2HSV)
                # Cast to int to avoid uint8 overflow, then back to uint8
                new_hue = (int(color_hsv[0, 0, 0]) + int(eff_hue_shift * 180 / 360)) % 180
                color_hsv[0, 0, 0] = np.uint8(new_hue)
                color = cv2.cvtColor(color_hsv, cv2.COLOR_HSV2RGB).flatten()
            # Blit character to result: vectorized masked copy with boundary
            # clipping (replaces the previous per-pixel Python loop, which was
            # O(glyph area) interpreted work per character).
            mask = transformed > 0
            th, tw = transformed.shape[:2]
            ry1, rx1 = max(0, y1), max(0, x1)
            ry2, rx2 = min(h, y1 + th), min(w, x1 + tw)
            if ry2 > ry1 and rx2 > rx1:
                sub_mask = mask[ry1 - y1:ry2 - y1, rx1 - x1:rx2 - x1]
                result[ry1:ry2, rx1:rx2][sub_mask] = color
    # Resize to match original if needed (pad/crop with the background color)
    orig_h, orig_w = img.shape[:2]
    if result.shape[0] != orig_h or result.shape[1] != orig_w:
        padded = np.full((orig_h, orig_w, 3), bg, dtype=np.uint8)
        copy_h = min(h, orig_h)
        copy_w = min(w, orig_w)
        padded[:copy_h, :copy_w] = result[:copy_h, :copy_w]
        result = padded
    return result
def _render_with_cell_effect(
    frame: np.ndarray,
    chars: List[List[str]],
    colors: np.ndarray,
    luminances: np.ndarray,
    zone_contexts: List[List['ZoneContext']],
    cell_size: int,
    bg_color: tuple,
    fg_color: tuple,
    color_mode: str,
    cell_effect,  # Lambda or callable: (cell_image, zone_dict) -> cell_image
    extra_params: dict,
    interp,
    env,
    result: np.ndarray,
) -> np.ndarray:
    """
    Render ASCII art using a cell_effect lambda for arbitrary per-cell transforms.
    Each character is rendered to a cell image, the cell_effect is called with
    (cell_image, zone_dict), and the returned cell is composited into result.
    This allows arbitrary effects (rotate, blur, etc.) to be applied per-character.

    Args:
        frame: source frame, used only for the final output dimensions
        chars/colors/luminances/zone_contexts: per-cell grid data
        bg_color/fg_color: parsed RGB tuples (fg_color may be None for mode keywords)
        cell_effect: Python callable or artdag Lambda applied to each cell
        extra_params: extra key/values merged into each zone dict
        interp: effect interpreter; used to expose loaded effects to lambdas
        env: evaluation environment (currently unused here)
        result: preallocated output canvas, mutated in place then returned
    """
    grid_rows = len(chars)
    grid_cols = len(chars[0]) if chars else 0
    out_h, out_w = result.shape[:2]
    # Build character atlas (cell-sized colored characters on transparent bg)
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_scale = cell_size / 20.0
    thickness = max(1, int(cell_size / 10))
    # Helper to render a single character cell
    def render_char_cell(char: str, color: np.ndarray) -> np.ndarray:
        """Render a character onto a cell-sized RGB image."""
        cell = np.full((cell_size, cell_size, 3), bg_color, dtype=np.uint8)
        if not char or char == ' ':
            return cell
        try:
            (text_w, text_h), _ = cv2.getTextSize(char, font, font_scale, thickness)
            text_x = max(0, (cell_size - text_w) // 2)
            text_y = (cell_size + text_h) // 2
            # Render character in white on mask, then apply color
            mask = np.zeros((cell_size, cell_size), dtype=np.uint8)
            cv2.putText(mask, char, (text_x, text_y), font, font_scale, 255, thickness, cv2.LINE_AA)
            # Apply color where mask is set
            for ch in range(3):
                cell[:, :, ch] = np.where(mask > 0, color[ch], bg_color[ch])
        except:
            pass
        return cell
    # Helper to evaluate cell_effect (handles artdag Lambda objects)
    def eval_cell_effect(cell_img: np.ndarray, zone_dict: dict) -> np.ndarray:
        """Call cell_effect with (cell_image, zone_dict), handle Lambda objects."""
        if callable(cell_effect):
            return cell_effect(cell_img, zone_dict)
        # Check if it's an artdag Lambda object
        # (imported lazily so the module works without artdag installed)
        try:
            from artdag.sexp.parser import Lambda as ArtdagLambda
            from artdag.sexp.evaluator import evaluate as artdag_evaluate
            if isinstance(cell_effect, ArtdagLambda):
                # Build env with closure values
                eval_env = dict(cell_effect.closure) if cell_effect.closure else {}
                # Bind lambda parameters
                if len(cell_effect.params) >= 2:
                    eval_env[cell_effect.params[0]] = cell_img
                    eval_env[cell_effect.params[1]] = zone_dict
                elif len(cell_effect.params) == 1:
                    # Single param gets zone_dict with cell as 'cell' key
                    # (note: mutates the caller's zone_dict in place)
                    zone_dict['cell'] = cell_img
                    eval_env[cell_effect.params[0]] = zone_dict
                # Add primitives to eval env
                eval_env.update(PRIMITIVES)
                # Add effect runner - allows calling any loaded sexp effect on a cell
                # Usage: (apply-effect "effect_name" cell {"param" value ...})
                # Or: (apply-effect "effect_name" cell) for defaults
                def apply_effect_fn(effect_name, frame, params=None):
                    """Run a loaded sexp effect on a frame (cell)."""
                    if interp and hasattr(interp, 'run_effect'):
                        if params is None:
                            params = {}
                        result, _ = interp.run_effect(effect_name, frame, params, {})
                        return result
                    return frame
                eval_env['apply-effect'] = apply_effect_fn
                # Also inject loaded effects directly as callable functions
                # These wrappers take positional args in common order for each effect
                # Usage: (blur cell 5) or (rotate cell 45) etc.
                if interp and hasattr(interp, 'effects'):
                    for effect_name in interp.effects:
                        # Create a wrapper that calls run_effect with positional-to-named mapping
                        # (factory function avoids the late-binding closure pitfall)
                        def make_effect_fn(name):
                            def effect_fn(frame, *args):
                                # Map common positional args to named params
                                params = {}
                                if name == 'blur' and len(args) >= 1:
                                    params['radius'] = args[0]
                                elif name == 'rotate' and len(args) >= 1:
                                    params['angle'] = args[0]
                                elif name == 'brightness' and len(args) >= 1:
                                    params['factor'] = args[0]
                                elif name == 'contrast' and len(args) >= 1:
                                    params['factor'] = args[0]
                                elif name == 'saturation' and len(args) >= 1:
                                    params['factor'] = args[0]
                                elif name == 'hue_shift' and len(args) >= 1:
                                    params['degrees'] = args[0]
                                elif name == 'rgb_split' and len(args) >= 1:
                                    params['offset_x'] = args[0]
                                    if len(args) >= 2:
                                        params['offset_y'] = args[1]
                                elif name == 'pixelate' and len(args) >= 1:
                                    params['block_size'] = args[0]
                                elif name == 'wave' and len(args) >= 1:
                                    params['amplitude'] = args[0]
                                    if len(args) >= 2:
                                        params['frequency'] = args[1]
                                elif name == 'noise' and len(args) >= 1:
                                    params['amount'] = args[0]
                                elif name == 'posterize' and len(args) >= 1:
                                    params['levels'] = args[0]
                                elif name == 'threshold' and len(args) >= 1:
                                    params['level'] = args[0]
                                elif name == 'sharpen' and len(args) >= 1:
                                    params['amount'] = args[0]
                                elif len(args) == 1 and isinstance(args[0], dict):
                                    # Accept dict as single arg
                                    params = args[0]
                                result, _ = interp.run_effect(name, frame, params, {})
                                return result
                            return effect_fn
                        eval_env[effect_name] = make_effect_fn(effect_name)
                result = artdag_evaluate(cell_effect.body, eval_env)
                if isinstance(result, np.ndarray):
                    return result
                return cell_img
        except ImportError:
            pass
        # Fallback: return cell unchanged
        return cell_img
    # Render each cell
    for r in range(grid_rows):
        for c in range(grid_cols):
            char = chars[r][c]
            zone = zone_contexts[r][c]
            # Determine character color
            if fg_color is not None:
                color = np.array(fg_color, dtype=np.uint8)
            elif color_mode == "mono":
                color = np.array([255, 255, 255], dtype=np.uint8)
            elif color_mode == "invert":
                color = np.array([0, 0, 0], dtype=np.uint8)
            else:
                color = colors[r, c].copy()
            # Render character to cell image
            cell_img = render_char_cell(char, color)
            # Build zone dict (sexp-side view of the ZoneContext plus extras)
            zone_dict = {
                'row': zone.row,
                'col': zone.col,
                'row-norm': zone.row_norm,
                'col-norm': zone.col_norm,
                'lum': zone.luminance,
                'sat': zone.saturation,
                'hue': zone.hue,
                'r': zone.r,
                'g': zone.g,
                'b': zone.b,
                'char': char,
                'color': color.tolist(),
                'cell_size': cell_size,
            }
            # Add extra params (energy, rotation_scale, etc.)
            if extra_params:
                zone_dict.update(extra_params)
            # Call cell_effect
            modified_cell = eval_cell_effect(cell_img, zone_dict)
            # Ensure result is valid (fall back to the unmodified cell)
            if modified_cell is None or not isinstance(modified_cell, np.ndarray):
                modified_cell = cell_img
            if modified_cell.shape[:2] != (cell_size, cell_size):
                # Resize if cell size changed
                modified_cell = cv2.resize(modified_cell, (cell_size, cell_size))
            if len(modified_cell.shape) == 2:
                # Convert grayscale to RGB
                modified_cell = cv2.cvtColor(modified_cell, cv2.COLOR_GRAY2RGB)
            # Composite into result (clipped at the canvas edges)
            y1 = r * cell_size
            x1 = c * cell_size
            y2 = min(y1 + cell_size, out_h)
            x2 = min(x1 + cell_size, out_w)
            ch = y2 - y1
            cw = x2 - x1
            result[y1:y2, x1:x2] = modified_cell[:ch, :cw]
    # Resize to match original frame if needed (pad/crop with the background)
    orig_h, orig_w = frame.shape[:2]
    if result.shape[0] != orig_h or result.shape[1] != orig_w:
        bg = list(bg_color)
        padded = np.full((orig_h, orig_w, 3), bg, dtype=np.uint8)
        copy_h = min(out_h, orig_h)
        copy_w = min(out_w, orig_w)
        padded[:copy_h, :copy_w] = result[:copy_h, :copy_w]
        result = padded
    return result
def prim_ascii_fx_zone(
frame: np.ndarray,
cols: int,
char_size_override: int, # If set, overrides cols-based calculation
alphabet: str,
color_mode: str,
background: str,
contrast: float,
char_hue_expr, # Expression, literal, or None
char_sat_expr, # Expression, literal, or None
char_bright_expr, # Expression, literal, or None
char_scale_expr, # Expression, literal, or None
char_rotation_expr, # Expression, literal, or None
char_jitter_expr, # Expression, literal, or None
interp, # Interpreter for expression evaluation
env, # Environment with bound values
extra_params=None, # Extra params to include in zone dict for lambdas
cell_effect=None, # Lambda (cell_image, zone_dict) -> cell_image for arbitrary cell effects
) -> np.ndarray:
"""
Render ASCII art with per-zone expression-driven transforms.
Args:
frame: Source image (H, W, 3) RGB uint8
cols: Number of character columns
char_size_override: If set, use this cell size instead of cols-based
alphabet: Character set name or literal string
color_mode: "color", "mono", "invert", or color name/hex
background: Background color name or hex
contrast: Contrast boost for character selection
char_hue_expr: Expression for hue shift (evaluated per zone)
char_sat_expr: Expression for saturation adjustment (evaluated per zone)
char_bright_expr: Expression for brightness adjustment (evaluated per zone)
char_scale_expr: Expression for scale factor (evaluated per zone)
char_rotation_expr: Expression for rotation degrees (evaluated per zone)
char_jitter_expr: Expression for position jitter (evaluated per zone)
interp: Interpreter instance for expression evaluation
env: Environment with bound variables
cell_effect: Optional lambda that receives (cell_image, zone_dict) and returns
a modified cell_image. When provided, each character is rendered
to a cell image, passed to this lambda, and the result composited.
This allows arbitrary effects to be applied per-character.
Zone variables available in expressions:
zone-row, zone-col: Grid position (integers)
zone-row-norm, zone-col-norm: Normalized position (0-1)
zone-lum: Cell luminance (0-1)
zone-sat: Cell saturation (0-1)
zone-hue: Cell hue (0-360)
zone-r, zone-g, zone-b: RGB components (0-1)
Returns: Rendered image
"""
h, w = frame.shape[:2]
# Use char_size if provided, otherwise calculate from cols
if char_size_override is not None:
cell_size = max(4, int(char_size_override))
else:
cell_size = max(4, w // cols)
# Get zone data using extended sampling
colors, luminances, zone_contexts = cell_sample_extended(frame, cell_size)
# Convert luminances to characters
chars = prim_luminance_to_chars(luminances, alphabet, contrast)
grid_rows = len(chars)
grid_cols = len(chars[0]) if chars else 0
# Parse colors
fg_color = parse_color(color_mode)
if isinstance(background, (list, tuple)):
bg_color = tuple(int(c) for c in background[:3])
else:
bg_color = parse_color(background)
if bg_color is None:
bg_color = (0, 0, 0)
# Arrays for per-zone transform values
hue_shifts = np.zeros((grid_rows, grid_cols), dtype=np.float32)
saturations = np.ones((grid_rows, grid_cols), dtype=np.float32)
brightness = np.ones((grid_rows, grid_cols), dtype=np.float32)
scales = np.ones((grid_rows, grid_cols), dtype=np.float32)
rotations = np.zeros((grid_rows, grid_cols), dtype=np.float32)
jitters = np.zeros((grid_rows, grid_cols), dtype=np.float32)
# Helper to evaluate expression or return literal value
def eval_expr(expr, zone, char):
if expr is None:
return None
if isinstance(expr, (int, float)):
return expr
# Build zone dict for lambda calls
zone_dict = {
'row': zone.row,
'col': zone.col,
'row-norm': zone.row_norm,
'col-norm': zone.col_norm,
'lum': zone.luminance,
'sat': zone.saturation,
'hue': zone.hue,
'r': zone.r,
'g': zone.g,
'b': zone.b,
'char': char,
}
# Add extra params (energy, rotation_scale, etc.) for lambdas to access
if extra_params:
zone_dict.update(extra_params)
# Check if it's a Python callable
if callable(expr):
return expr(zone_dict)
# Check if it's an artdag Lambda object
try:
from artdag.sexp.parser import Lambda as ArtdagLambda
from artdag.sexp.evaluator import evaluate as artdag_evaluate
if isinstance(expr, ArtdagLambda):
# Build env with zone dict and any closure values
eval_env = dict(expr.closure) if expr.closure else {}
# Bind the lambda parameter to zone_dict
if expr.params:
eval_env[expr.params[0]] = zone_dict
return artdag_evaluate(expr.body, eval_env)
except ImportError:
pass
# It's an expression - evaluate with zone context (sexp_effects style)
return interp.eval_with_zone(expr, env, zone)
# Evaluate expressions for each zone
for r in range(grid_rows):
for c in range(grid_cols):
zone = zone_contexts[r][c]
char = chars[r][c]
val = eval_expr(char_hue_expr, zone, char)
if val is not None:
hue_shifts[r, c] = float(val)
val = eval_expr(char_sat_expr, zone, char)
if val is not None:
saturations[r, c] = float(val)
val = eval_expr(char_bright_expr, zone, char)
if val is not None:
brightness[r, c] = float(val)
val = eval_expr(char_scale_expr, zone, char)
if val is not None:
scales[r, c] = float(val)
val = eval_expr(char_rotation_expr, zone, char)
if val is not None:
rotations[r, c] = float(val)
val = eval_expr(char_jitter_expr, zone, char)
if val is not None:
jitters[r, c] = float(val)
# Now render with computed transform arrays
out_h, out_w = grid_rows * cell_size, grid_cols * cell_size
bg = list(bg_color)
result = np.full((out_h, out_w, 3), bg, dtype=np.uint8)
# If cell_effect is provided, use the cell-mapper rendering path
if cell_effect is not None:
return _render_with_cell_effect(
frame, chars, colors, luminances, zone_contexts,
cell_size, bg_color, fg_color, color_mode,
cell_effect, extra_params, interp, env, result
)
# Build character atlas
font = cv2.FONT_HERSHEY_SIMPLEX
base_font_scale = cell_size / 20.0
thickness = max(1, int(cell_size / 10))
unique_chars = set()
for row in chars:
for ch in row:
unique_chars.add(ch)
# For rotation/scale, render characters larger then transform
max_scale = max(1.0, np.max(scales) * 1.5)
atlas_size = int(cell_size * max_scale * 1.5)
atlas = {}
for char in unique_chars:
if char and char != ' ':
try:
char_img = np.zeros((atlas_size, atlas_size), dtype=np.uint8)
scaled_font = base_font_scale * max_scale
(text_w, text_h), _ = cv2.getTextSize(char, font, scaled_font, thickness)
text_x = max(0, (atlas_size - text_w) // 2)
text_y = (atlas_size + text_h) // 2
cv2.putText(char_img, char, (text_x, text_y), font, scaled_font, 255, thickness, cv2.LINE_AA)
atlas[char] = char_img
except:
atlas[char] = None
else:
atlas[char] = None
# Render characters with per-zone effects
for r in range(grid_rows):
for c in range(grid_cols):
char = chars[r][c]
if not char or char == ' ':
continue
char_img = atlas.get(char)
if char_img is None:
continue
# Get per-cell values
eff_scale = scales[r, c]
eff_rotation = rotations[r, c]
eff_jitter = jitters[r, c]
eff_hue_shift = hue_shifts[r, c]
eff_brightness = brightness[r, c]
eff_saturation = saturations[r, c]
# Apply transformations to character
transformed = char_img.copy()
# Rotation
if abs(eff_rotation) > 0.5:
center = (atlas_size // 2, atlas_size // 2)
rot_matrix = cv2.getRotationMatrix2D(center, eff_rotation, 1.0)
transformed = cv2.warpAffine(transformed, rot_matrix, (atlas_size, atlas_size))
# Scale - resize to target size
target_size = max(1, int(cell_size * eff_scale))
if target_size != atlas_size:
transformed = cv2.resize(transformed, (target_size, target_size), interpolation=cv2.INTER_LINEAR)
# Compute position with jitter
base_y = r * cell_size
base_x = c * cell_size
if eff_jitter > 0:
# Deterministic jitter based on position
jx = ((r * 7 + c * 13) % 100) / 100.0 - 0.5
jy = ((r * 11 + c * 17) % 100) / 100.0 - 0.5
base_x += int(jx * eff_jitter * 2)
base_y += int(jy * eff_jitter * 2)
# Center the character in the cell
offset = (target_size - cell_size) // 2
y1 = base_y - offset
x1 = base_x - offset
# Determine color
if fg_color is not None:
color = np.array(fg_color, dtype=np.uint8)
elif color_mode == "mono":
color = np.array([255, 255, 255], dtype=np.uint8)
elif color_mode == "invert":
cy1 = max(0, r * cell_size)
cy2 = min(out_h, (r + 1) * cell_size)
cx1 = max(0, c * cell_size)
cx2 = min(out_w, (c + 1) * cell_size)
result[cy1:cy2, cx1:cx2] = colors[r, c]
color = np.array([0, 0, 0], dtype=np.uint8)
else: # color mode - use source colors
color = colors[r, c].copy()
# Apply hue shift
if abs(eff_hue_shift) > 0.5 and color_mode not in ("mono", "invert") and fg_color is None:
color_hsv = cv2.cvtColor(color.reshape(1, 1, 3), cv2.COLOR_RGB2HSV)
new_hue = (int(color_hsv[0, 0, 0]) + int(eff_hue_shift * 180 / 360)) % 180
color_hsv[0, 0, 0] = np.uint8(new_hue)
color = cv2.cvtColor(color_hsv, cv2.COLOR_HSV2RGB).flatten()
# Apply saturation adjustment
if abs(eff_saturation - 1.0) > 0.01 and color_mode not in ("mono", "invert") and fg_color is None:
color_hsv = cv2.cvtColor(color.reshape(1, 1, 3), cv2.COLOR_RGB2HSV)
new_sat = np.clip(int(color_hsv[0, 0, 1] * eff_saturation), 0, 255)
color_hsv[0, 0, 1] = np.uint8(new_sat)
color = cv2.cvtColor(color_hsv, cv2.COLOR_HSV2RGB).flatten()
# Apply brightness adjustment
if abs(eff_brightness - 1.0) > 0.01:
color = np.clip(color.astype(np.float32) * eff_brightness, 0, 255).astype(np.uint8)
# Blit character to result
mask = transformed > 0
th, tw = transformed.shape[:2]
for dy in range(th):
for dx in range(tw):
py = y1 + dy
px = x1 + dx
if 0 <= py < out_h and 0 <= px < out_w and mask[dy, dx]:
result[py, px] = color
# Resize to match original if needed
orig_h, orig_w = frame.shape[:2]
if result.shape[0] != orig_h or result.shape[1] != orig_w:
padded = np.full((orig_h, orig_w, 3), bg, dtype=np.uint8)
copy_h = min(out_h, orig_h)
copy_w = min(out_w, orig_w)
padded[:copy_h, :copy_w] = result[:copy_h, :copy_w]
result = padded
return result
def prim_make_char_grid(rows: int, cols: int, fill_char: str = " ") -> List[List[str]]:
    """Build a rows x cols character grid with every cell set to fill_char."""
    grid = []
    for _ in range(rows):
        grid.append([fill_char] * cols)
    return grid
def prim_set_char(chars: List[List[str]], row: int, col: int, char: str) -> List[List[str]]:
    """Return a copy of the grid with one cell replaced; out-of-range is a no-op."""
    updated = [list(source_row) for source_row in chars]
    # Bounds check short-circuits, so an empty grid never indexes updated[0].
    in_bounds = 0 <= row < len(updated) and 0 <= col < len(updated[0])
    if in_bounds:
        updated[row][col] = char
    return updated
def prim_get_char(chars: List[List[str]], row: int, col: int) -> str:
    """Read one grid cell; any position outside the grid reads as a space."""
    row_valid = 0 <= row < len(chars)
    if row_valid and 0 <= col < len(chars[0]):
        return chars[row][col]
    return " "
def prim_char_grid_dimensions(chars: List[List[str]]) -> Tuple[int, int]:
    """Return (rows, cols) for a character grid; an empty grid is (0, 0)."""
    if chars:
        first_row = chars[0]
        # Column count comes from the first row (grids are assumed rectangular).
        return (len(chars), len(first_row) if first_row else 0)
    return (0, 0)
def prim_alphabet_char(alphabet: str, index: int) -> str:
    """Look up a glyph by index in a named (or literal) alphabet, wrapping around."""
    # Unknown alphabet names are treated as a literal glyph string.
    glyphs = CHAR_ALPHABETS.get(alphabet, alphabet)
    if not glyphs:
        return " "
    wrapped = int(index) % len(glyphs)
    return glyphs[wrapped]
def prim_alphabet_length(alphabet: str) -> int:
    """Length of a named alphabet, or of the literal string when not registered."""
    glyphs = CHAR_ALPHABETS.get(alphabet, alphabet)
    return len(glyphs)
def prim_map_char_grid(chars: List[List[str]], luminances: np.ndarray, fn: Callable) -> List[List[str]]:
    """
    Apply fn to every cell of a character grid, producing a new grid.

    fn receives (row, col, char, luminance) and returns the replacement
    character; a falsy return becomes a space.  Luminance falls back to 0
    for cells outside the luminance array's bounds.

    Example:
        (map-char-grid chars luminances
          (lambda (r c ch lum)
            (if (> lum 128)
                (alphabet-char "blocks" (floor (/ lum 50)))
                ch)))
    """
    if not chars or not chars[0]:
        return chars
    n_rows = len(chars)
    n_cols = len(chars[0])
    # Hoist the luminance bounds out of the inner loop.
    lum_rows, lum_cols = luminances.shape[0], luminances.shape[1]
    out = []
    for r in range(n_rows):
        new_row = []
        for c in range(n_cols):
            lum = float(luminances[r, c]) if (r < lum_rows and c < lum_cols) else 0
            replacement = fn(r, c, chars[r][c], lum)
            new_row.append(str(replacement) if replacement else " ")
        out.append(new_row)
    return out
def prim_map_colors(colors: np.ndarray, fn: Callable) -> np.ndarray:
    """
    Apply fn to every cell of a color grid, producing a new array.

    fn receives (row, col, [r, g, b]) and returns a new [r, g, b]
    triple, or None to leave that cell untouched.  The input array is
    never modified.
    """
    if colors.size == 0:
        return colors
    n_rows, n_cols = colors.shape[:2]
    out = colors.copy()
    for r in range(n_rows):
        for c in range(n_cols):
            updated = fn(r, c, list(colors[r, c]))
            if updated is not None:
                # Only the first three components are written back.
                out[r, c] = updated[:3]
    return out
# =============================================================================
# Glitch Art Primitives
# =============================================================================
def prim_pixelsort(img: np.ndarray, sort_by: str = "lightness",
                   threshold_low: float = 50, threshold_high: float = 200,
                   angle: float = 0, reverse: bool = False) -> np.ndarray:
    """
    Pixel sorting glitch effect.

    Pixels whose sort key falls inside [threshold_low, threshold_high] are
    grouped into maximal contiguous runs along each row and sorted by key;
    pixels outside the band act as run boundaries and stay put.

    Args:
        img: source image
        sort_by: "lightness", "hue", "saturation", "red", "green", "blue"
        threshold_low: pixels below this aren't sorted
        threshold_high: pixels above this aren't sorted
        angle: 0 = horizontal, 90 = vertical
        reverse: reverse sort order
    """
    # Angles near vertical sort columns instead of rows: work on a
    # transposed view and transpose back at the end.
    vertical = 45 <= (angle % 180) <= 135
    work = np.transpose(img, (1, 0, 2)) if vertical else img
    h, w = work.shape[:2]
    result = work.copy()

    # Per-pixel key used both for thresholding and for ordering.
    if sort_by == "hue":
        keys = cv2.cvtColor(work, cv2.COLOR_RGB2HSV)[:, :, 0].astype(np.float32)
    elif sort_by == "saturation":
        keys = cv2.cvtColor(work, cv2.COLOR_RGB2HSV)[:, :, 1].astype(np.float32)
    elif sort_by == "red":
        keys = work[:, :, 0].astype(np.float32)
    elif sort_by == "green":
        keys = work[:, :, 1].astype(np.float32)
    elif sort_by == "blue":
        keys = work[:, :, 2].astype(np.float32)
    else:
        # "lightness" and any unrecognized mode use grayscale intensity.
        keys = cv2.cvtColor(work, cv2.COLOR_RGB2GRAY).astype(np.float32)

    sortable = (keys >= threshold_low) & (keys <= threshold_high)

    for y in range(h):
        row_pixels = result[y].copy()
        row_keys = keys[y]
        row_mask = sortable[y]
        # Scan for maximal runs of sortable pixels and sort each run.
        i = 0
        while i < w:
            if not row_mask[i]:
                i += 1
                continue
            j = i
            while j < w and row_mask[j]:
                j += 1
            if j - i > 1:
                order = np.argsort(row_keys[i:j])
                if reverse:
                    order = order[::-1]
                row_pixels[i:j] = row_pixels[i:j][order]
            i = j
        result[y] = row_pixels

    if vertical:
        result = np.transpose(result, (1, 0, 2))
    return np.ascontiguousarray(result)
def prim_datamosh(img: np.ndarray, prev_frame: np.ndarray,
                  block_size: int = 32, corruption: float = 0.3,
                  max_offset: int = 50, color_corrupt: bool = True) -> np.ndarray:
    """
    Datamosh/glitch block corruption effect.

    Walks the frame in block_size tiles; each tile is corrupted with
    probability `corruption` using one of four randomly selected
    strategies (offset copy, previous-frame duplication, channel roll,
    block swap).  Uses the module-level `_rng`, so output is reproducible
    for a fixed seed and call order.

    Args:
        img: current frame
        prev_frame: previous frame (or None)
        block_size: size of corruption blocks (clamped to 8-128)
        corruption: probability 0-1 of corrupting each block
        max_offset: maximum pixel shift
        color_corrupt: also apply color channel shifts

    Returns:
        A new array; `img` is never modified in place.
    """
    if corruption <= 0:
        return img.copy()
    # Clamp block size to a sane range regardless of caller input.
    block_size = max(8, min(int(block_size), 128))
    h, w = img.shape[:2]
    result = img.copy()
    for by in range(0, h, block_size):
        for bx in range(0, w, block_size):
            # Edge tiles may be smaller than block_size.
            bh = min(block_size, h - by)
            bw = min(block_size, w - bx)
            if _rng.random() < corruption:
                # DeterministicRNG.randint is inclusive of both bounds,
                # so this selects one of four strategies (0-3).  Strategies
                # disabled by their guard fall through to the swap branch.
                corruption_type = _rng.randint(0, 3)
                if corruption_type == 0 and max_offset > 0:
                    # Offset copy: fill this tile from a nearby source tile.
                    ox = _rng.randint(-max_offset, max_offset)
                    oy = _rng.randint(-max_offset, max_offset)
                    src_x = max(0, min(bx + ox, w - bw))
                    src_y = max(0, min(by + oy, h - bh))
                    result[by:by+bh, bx:bx+bw] = img[src_y:src_y+bh, src_x:src_x+bw]
                elif corruption_type == 1 and prev_frame is not None:
                    # Duplicate from the previous frame (classic datamosh
                    # smear); silently skipped when shapes don't match.
                    if prev_frame.shape == img.shape:
                        result[by:by+bh, bx:bx+bw] = prev_frame[by:by+bh, bx:bx+bw]
                elif corruption_type == 2 and color_corrupt:
                    # Channel roll: shift one color channel vertically
                    # within the tile.
                    block = result[by:by+bh, bx:bx+bw].copy()
                    shift = _rng.randint(1, 3)
                    channel = _rng.randint(0, 2)
                    block[:, :, channel] = np.roll(block[:, :, channel], shift, axis=0)
                    result[by:by+bh, bx:bx+bw] = block
                else:
                    # Swap this tile with a randomly chosen tile; the
                    # partner tile receives this tile's (possibly already
                    # corrupted) contents.
                    other_bx = _rng.randint(0, max(0, w - bw))
                    other_by = _rng.randint(0, max(0, h - bh))
                    temp = result[by:by+bh, bx:bx+bw].copy()
                    result[by:by+bh, bx:bx+bw] = img[other_by:other_by+bh, other_bx:other_bx+bw]
                    result[other_by:other_by+bh, other_bx:other_bx+bw] = temp
    return result
def prim_ripple_displace(w: int, h: int, freq: float, amp: float, cx: float = None, cy: float = None,
                         decay: float = 0, phase: float = 0) -> Tuple[np.ndarray, np.ndarray]:
    """
    Create radial ripple displacement maps for use with remap.

    Args:
        w, h: dimensions
        freq: ripple frequency
        amp: ripple amplitude in pixels
        cx, cy: ripple center (defaults to the image center)
        decay: how fast ripples decay with distance (0 = no decay)
        phase: phase offset

    Returns: (map_x, map_y) float32 arrays of shape (h, w)
    """
    w = int(w)
    h = int(h)
    center_x = w / 2 if cx is None else cx
    center_y = h / 2 if cy is None else cy
    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    off_x = xs - center_x
    off_y = ys - center_y
    radius = np.sqrt(off_x**2 + off_y**2)
    extent = max(w, h)
    # Radial sine wave scaled to the larger image dimension.
    displacement = np.sin(2 * np.pi * freq * radius / extent + phase) * amp
    if decay > 0:
        displacement = displacement * np.exp(-radius * decay / extent)
    # Push each pixel along its unit radial direction; the center pixel
    # (radius 0) gets a zero direction instead of dividing by zero.
    with np.errstate(divide='ignore', invalid='ignore'):
        unit_x = np.where(radius > 0, off_x / radius, 0)
        unit_y = np.where(radius > 0, off_y / radius, 0)
    map_x = (xs + displacement * unit_x).astype(np.float32)
    map_y = (ys + displacement * unit_y).astype(np.float32)
    return (map_x, map_y)
# Dispatch table mapping s-expression symbol names to their Python
# implementations.  The sexp interpreter resolves bare symbols against this
# dict, so every key below is part of the effect-authoring vocabulary.
# Most values are callables; 'pi' and 'tau' are plain float constants.
PRIMITIVES = {
    # Arithmetic
    '+': prim_add,
    '-': prim_sub,
    '*': prim_mul,
    '/': prim_div,
    # Comparison
    '<': prim_lt,
    '>': prim_gt,
    '<=': prim_le,
    '>=': prim_ge,
    '=': prim_eq,
    '!=': prim_ne,
    # Image
    'width': prim_width,
    'height': prim_height,
    'make-image': prim_make_image,
    'copy': prim_copy,
    'pixel': prim_pixel,
    'set-pixel': prim_set_pixel,
    'sample': prim_sample,
    'channel': prim_channel,
    'merge-channels': prim_merge_channels,
    'resize': prim_resize,
    'crop': prim_crop,
    'paste': prim_paste,
    # Color
    'rgb': prim_rgb,
    'red': prim_red,
    'green': prim_green,
    'blue': prim_blue,
    'luminance': prim_luminance,
    'rgb->hsv': prim_rgb_to_hsv,
    'hsv->rgb': prim_hsv_to_rgb,
    'blend-color': prim_blend_color,
    'average-color': prim_average_color,
    # Vectorized bulk operations
    'color-matrix': prim_color_matrix,
    'adjust': prim_adjust,
    'mix-gray': prim_mix_gray,
    'invert-img': prim_invert_img,
    'add-noise': prim_add_noise,
    'quantize': prim_quantize,
    'shift-hsv': prim_shift_hsv,
    # Bulk operations
    'map-pixels': prim_map_pixels,
    'map-rows': prim_map_rows,
    'for-grid': prim_for_grid,
    'fold-pixels': prim_fold_pixels,
    # Filters
    'convolve': prim_convolve,
    'blur': prim_blur,
    'box-blur': prim_box_blur,
    'edges': prim_edges,
    'sobel': prim_sobel,
    'dilate': prim_dilate,
    'erode': prim_erode,
    # Geometry
    'translate': prim_translate,
    'rotate-img': prim_rotate,
    'scale-img': prim_scale,
    'flip-h': prim_flip_h,
    'flip-v': prim_flip_v,
    'remap': prim_remap,
    'make-coords': prim_make_coords,
    # Blending
    'blend-images': prim_blend_images,
    'blend-mode': prim_blend_mode,
    'mask': prim_mask,
    # Drawing
    'draw-char': prim_draw_char,
    'draw-text': prim_draw_text,
    'fill-rect': prim_fill_rect,
    'draw-line': prim_draw_line,
    # Math
    'sin': prim_sin,
    'cos': prim_cos,
    'tan': prim_tan,
    'atan2': prim_atan2,
    'sqrt': prim_sqrt,
    'pow': prim_pow,
    'abs': prim_abs,
    'floor': prim_floor,
    'ceil': prim_ceil,
    'round': prim_round,
    'min': prim_min,
    'max': prim_max,
    'clamp': prim_clamp,
    'lerp': prim_lerp,
    'mod': prim_mod,
    'random': prim_random,
    'randint': prim_randint,
    'gaussian': prim_gaussian,
    'assert': prim_assert,
    # Math constants (values, not callables)
    'pi': math.pi,
    'tau': math.tau,
    # Array
    'length': prim_length,
    'len': prim_length,  # alias
    'nth': prim_nth,
    'first': prim_first,
    'rest': prim_rest,
    'take': prim_take,
    'drop': prim_drop,
    'cons': prim_cons,
    'append': prim_append,
    'reverse': prim_reverse,
    'range': prim_range,
    'roll': prim_roll,
    'list': prim_list,
    # Array math (vectorized operations on coordinate arrays)
    'arr+': prim_arr_add,
    'arr-': prim_arr_sub,
    'arr*': prim_arr_mul,
    'arr/': prim_arr_div,
    'arr-mod': prim_arr_mod,
    'arr-sin': prim_arr_sin,
    'arr-cos': prim_arr_cos,
    'arr-tan': prim_arr_tan,
    'arr-sqrt': prim_arr_sqrt,
    'arr-pow': prim_arr_pow,
    'arr-abs': prim_arr_abs,
    'arr-neg': prim_arr_neg,
    'arr-exp': prim_arr_exp,
    'arr-atan2': prim_arr_atan2,
    'arr-min': prim_arr_min,
    'arr-max': prim_arr_max,
    'arr-clip': prim_arr_clip,
    'arr-where': prim_arr_where,
    'arr-floor': prim_arr_floor,
    'arr-lerp': prim_arr_lerp,
    # Coordinate transformations
    'polar-from-center': prim_polar_from_center,
    'cart-from-polar': prim_cart_from_polar,
    'normalize-coords': prim_normalize_coords,
    'coords-x': prim_coords_x,
    'coords-y': prim_coords_y,
    'make-coords-centered': prim_make_coords_centered,
    # Specialized distortion maps
    'wave-displace': prim_wave_displace,
    'swirl-displace': prim_swirl_displace,
    'fisheye-displace': prim_fisheye_displace,
    'kaleidoscope-displace': prim_kaleidoscope_displace,
    'ripple-displace': prim_ripple_displace,
    # Character/ASCII art
    'cell-sample': prim_cell_sample,
    'cell-sample-extended': cell_sample_extended,
    'luminance-to-chars': prim_luminance_to_chars,
    'render-char-grid': prim_render_char_grid,
    'render-char-grid-fx': prim_render_char_grid_fx,
    'ascii-fx-zone': prim_ascii_fx_zone,
    'make-char-grid': prim_make_char_grid,
    'set-char': prim_set_char,
    'get-char': prim_get_char,
    'char-grid-dimensions': prim_char_grid_dimensions,
    'alphabet-char': prim_alphabet_char,
    'alphabet-length': prim_alphabet_length,
    'map-char-grid': prim_map_char_grid,
    'map-colors': prim_map_colors,
    # Glitch art
    'pixelsort': prim_pixelsort,
    'datamosh': prim_datamosh,
}