Add streaming video compositor with sexp interpreter
- New streaming/ module for real-time video processing: - compositor.py: Main streaming compositor with cycle-crossfade - sexp_executor.py: Executes compiled sexp recipes in real-time - sexp_interp.py: Full S-expression interpreter for SLICE_ON Lambda - recipe_adapter.py: Bridges recipes to streaming compositor - sources.py: Video source with ffmpeg streaming - audio.py: Real-time audio analysis (energy, beats) - output.py: Preview (mpv) and file output with audio muxing - New templates/: - cycle-crossfade.sexp: Smooth zoom-based video cycling - process-pair.sexp: Dual-clip processing with effects - Key features: - Videos cycle in input-videos order (not definition order) - Cumulative whole-spin rotation - Zero-weight sources skip processing - Live audio-reactive effects - New effects: blend_multi for weighted layer compositing - Updated primitives and interpreter for streaming compatibility Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -1,4 +1,5 @@
|
||||
;; ASCII Art effect - converts image to ASCII characters
|
||||
(require-primitives "ascii")
|
||||
|
||||
(define-effect ascii_art
|
||||
:params (
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
;; ASCII Art FX - converts image to ASCII characters with per-character effects
|
||||
(require-primitives "ascii")
|
||||
|
||||
(define-effect ascii_art_fx
|
||||
:params (
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
;; ASCII Zones effect - different character sets for different brightness zones
|
||||
;; Dark areas use simple chars, mid uses standard, bright uses blocks
|
||||
(require-primitives "ascii")
|
||||
|
||||
(define-effect ascii_zones
|
||||
:params (
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
(opacity :type float :default 0.5)
|
||||
(resize_mode :type string :default "fit")
|
||||
(priority :type string :default "width")
|
||||
(pad_color :type list :default [0 0 0])
|
||||
(pad_color :type list :default (quote [0 0 0]))
|
||||
)
|
||||
(let [a frame-a
|
||||
a-w (width a)
|
||||
|
||||
57
sexp_effects/effects/blend_multi.sexp
Normal file
57
sexp_effects/effects/blend_multi.sexp
Normal file
@@ -0,0 +1,57 @@
|
||||
;; N-way weighted blend effect
;;
;; Takes N input frames via `inputs` and N per-frame weights.
;; Produces a single frame: the normalised weighted composite.
;;
;; Parameters:
;;   weights     - list of N floats, one per input (resolved per-frame)
;;   mode        - blend mode applied when folding each frame in:
;;                 "alpha"       — pure weighted average (default)
;;                 "multiply"    — darken by multiplication
;;                 "screen"      — lighten (inverse multiply)
;;                 "overlay"     — contrast-boosting midtone blend
;;                 "soft-light"  — gentle dodge/burn
;;                 "hard-light"  — strong dodge/burn
;;                 "color-dodge" — brightens towards white
;;                 "color-burn"  — darkens towards black
;;                 "difference"  — absolute pixel difference
;;                 "exclusion"   — softer difference
;;                 "add"         — additive (clamped)
;;                 "subtract"    — subtractive (clamped)
;;                 "darken"      — per-pixel minimum
;;                 "lighten"     — per-pixel maximum
;;   resize_mode - how to match frame dimensions (fit, crop, stretch)
;;
;; Uses a left-fold over inputs[1..N-1]. At each step the running
;; opacity is: w[i] / (w[0] + w[1] + ... + w[i])
;; which produces the correct normalised weighted result.
;;
;; NOTE(review): assumes `inputs` is non-empty and weights has at least
;; as many entries as inputs — confirm the caller guarantees this.

(require-primitives "image" "blending")

(define-effect blend_multi
  :params (
    (weights :type list :default (quote []))
    (mode :type string :default "alpha")
    (resize_mode :type string :default "fit")
  )
  (let [n (len inputs)
        ;; Target dimensions from first frame
        target-w (width (nth inputs 0))
        target-h (height (nth inputs 0))
        ;; Fold over indices 1..n-1
        ;; Accumulator is (list blended-frame running-weight-sum)
        seed (list (nth inputs 0) (nth weights 0))
        result (reduce (range 1 n) seed
                 (lambda (pair i)
                   (let [acc (nth pair 0)
                         running (nth pair 1)
                         w (nth weights i)
                         new-running (+ running w)
                         ;; 0.001 floor guards the division when every
                         ;; weight so far is zero
                         opacity (/ w (max new-running 0.001))
                         f (resize (nth inputs i) target-w target-h "linear")
                         ;; Apply blend mode then mix with opacity
                         blended (if (= mode "alpha")
                                   (blend-images acc f opacity)
                                   (blend-images acc (blend-mode acc f mode) opacity))]
                     (list blended new-running))))]
    (nth result 0)))
|
||||
@@ -1,8 +1,9 @@
|
||||
;; Invert effect - inverts all colors
|
||||
;; Uses vectorized invert-img primitive for fast processing
|
||||
;; amount param: 0 = no invert, 1 = full invert (threshold at 0.5)
|
||||
|
||||
(require-primitives "color_ops")
|
||||
|
||||
(define-effect invert
|
||||
:params ()
|
||||
(invert-img frame))
|
||||
:params ((amount :type float :default 1 :range [0 1]))
|
||||
(if (> amount 0.5) (invert-img frame) frame))
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
;; Ripple effect - radial wave distortion from center
|
||||
(require-primitives "geometry" "image" "math")
|
||||
|
||||
(define-effect ripple
|
||||
:params (
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
;; Zoom effect - zooms in/out from center
|
||||
(require-primitives "geometry")
|
||||
|
||||
(define-effect zoom
|
||||
:params (
|
||||
|
||||
@@ -793,6 +793,35 @@ class Interpreter:
|
||||
return list(self.effects.values())[-1]
|
||||
return None
|
||||
|
||||
def load_effect_from_string(self, sexp_content: str, effect_name: str = None) -> EffectDefinition:
    """Load an effect definition from an S-expression string.

    Args:
        sexp_content: The S-expression content as a string. May contain
            one or several top-level forms; each is evaluated in turn.
        effect_name: Optional name hint (used if effect doesn't define
            its own name).

    Returns:
        The loaded EffectDefinition — or None when evaluation registered
        no effects at all (despite the return annotation; callers should
        handle None).
    """
    expr = parse(sexp_content)

    # Handle multiple top-level expressions.
    # NOTE(review): this heuristic assumes that when `parse` returns
    # several forms, the first form is itself a list; a leading atom
    # (bare symbol/string) would be mis-detected as a single expression
    # — confirm against `parse`'s contract.
    if isinstance(expr, list) and expr and isinstance(expr[0], list):
        for e in expr:
            self.eval(e)
    else:
        self.eval(expr)

    # Return the effect if we can find it by name
    if effect_name and effect_name in self.effects:
        return self.effects[effect_name]

    # Return the most recently loaded effect
    # (self.effects preserves insertion order, so [-1] is the newest).
    if self.effects:
        return list(self.effects.values())[-1]

    return None
|
||||
|
||||
def run_effect(self, name: str, frame, params: Dict[str, Any],
|
||||
state: Dict[str, Any]) -> tuple:
|
||||
"""
|
||||
|
||||
@@ -51,22 +51,22 @@ def _parse_color(color_str: str) -> tuple:
|
||||
|
||||
|
||||
def _cell_sample(frame: np.ndarray, cell_size: int):
|
||||
"""Sample frame into cells, returning colors and luminances."""
|
||||
"""Sample frame into cells, returning colors and luminances.
|
||||
|
||||
Uses cv2.resize with INTER_AREA (pixel-area averaging) which is
|
||||
~25x faster than numpy reshape+mean for block downsampling.
|
||||
"""
|
||||
h, w = frame.shape[:2]
|
||||
rows = h // cell_size
|
||||
cols = w // cell_size
|
||||
|
||||
colors = np.zeros((rows, cols, 3), dtype=np.uint8)
|
||||
luminances = np.zeros((rows, cols), dtype=np.float32)
|
||||
# Crop to exact grid then block-average via cv2 area interpolation.
|
||||
cropped = frame[:rows * cell_size, :cols * cell_size]
|
||||
colors = cv2.resize(cropped, (cols, rows), interpolation=cv2.INTER_AREA)
|
||||
|
||||
for r in range(rows):
|
||||
for c in range(cols):
|
||||
y1, y2 = r * cell_size, (r + 1) * cell_size
|
||||
x1, x2 = c * cell_size, (c + 1) * cell_size
|
||||
cell = frame[y1:y2, x1:x2]
|
||||
avg_color = np.mean(cell, axis=(0, 1))
|
||||
colors[r, c] = avg_color.astype(np.uint8)
|
||||
luminances[r, c] = (0.299 * avg_color[0] + 0.587 * avg_color[1] + 0.114 * avg_color[2]) / 255
|
||||
luminances = ((0.299 * colors[:, :, 0] +
|
||||
0.587 * colors[:, :, 1] +
|
||||
0.114 * colors[:, :, 2]) / 255.0).astype(np.float32)
|
||||
|
||||
return colors, luminances
|
||||
|
||||
@@ -303,9 +303,35 @@ def _apply_cell_effect(cell_img, zone, cell_effect, interp, env, extra_params):
|
||||
cell_env.set(cell_effect.params[1], zone)
|
||||
|
||||
result = interp.eval(cell_effect.body, cell_env)
|
||||
else:
|
||||
# Fallback: it might be a callable
|
||||
elif isinstance(cell_effect, list):
|
||||
# Raw S-expression lambda like (lambda [cell zone] body) or (fn [cell zone] body)
|
||||
# Check if it's a lambda expression
|
||||
head = cell_effect[0] if cell_effect else None
|
||||
head_name = head.name if head and hasattr(head, 'name') else str(head) if head else None
|
||||
is_lambda = head_name in ('lambda', 'fn')
|
||||
|
||||
if is_lambda:
|
||||
# (lambda [params...] body)
|
||||
params = cell_effect[1] if len(cell_effect) > 1 else []
|
||||
body = cell_effect[2] if len(cell_effect) > 2 else None
|
||||
|
||||
# Bind lambda parameters
|
||||
if isinstance(params, list) and len(params) >= 1:
|
||||
param_name = params[0].name if hasattr(params[0], 'name') else str(params[0])
|
||||
cell_env.set(param_name, cell_img)
|
||||
if isinstance(params, list) and len(params) >= 2:
|
||||
param_name = params[1].name if hasattr(params[1], 'name') else str(params[1])
|
||||
cell_env.set(param_name, zone)
|
||||
|
||||
result = interp.eval(body, cell_env) if body else cell_img
|
||||
else:
|
||||
# Some other expression - just evaluate it
|
||||
result = interp.eval(cell_effect, cell_env)
|
||||
elif callable(cell_effect):
|
||||
# It's a callable
|
||||
result = cell_effect(cell_img, zone)
|
||||
else:
|
||||
raise ValueError(f"cell_effect must be a Lambda, list, or callable, got {type(cell_effect)}")
|
||||
|
||||
if isinstance(result, np.ndarray) and result.shape == cell_img.shape:
|
||||
return result
|
||||
@@ -317,6 +343,46 @@ def _apply_cell_effect(cell_img, zone, cell_effect, interp, env, extra_params):
|
||||
raise ValueError(f"cell_effect must return an image array, got {type(result)}")
|
||||
|
||||
|
||||
def _get_legacy_ascii_primitives():
    """Build the name->callable map for ASCII primitives that still live
    in the legacy ``sexp_effects.primitives`` module.

    The import is deliberately deferred to call time to avoid circular
    imports during module loading: by the time a primitive library is
    loaded, ``sexp_effects.primitives`` is already in ``sys.modules``
    (imported by ``sexp_effects.__init__``).
    """
    from sexp_effects import primitives as legacy

    return {
        'cell-sample': legacy.prim_cell_sample,
        'cell-sample-extended': legacy.cell_sample_extended,
        'luminance-to-chars': legacy.prim_luminance_to_chars,
        'render-char-grid': legacy.prim_render_char_grid,
        'render-char-grid-fx': legacy.prim_render_char_grid_fx,
        'alphabet-char': legacy.prim_alphabet_char,
        'alphabet-length': legacy.prim_alphabet_length,
        'map-char-grid': legacy.prim_map_char_grid,
        'map-colors': legacy.prim_map_colors,
        'make-char-grid': legacy.prim_make_char_grid,
        'set-char': legacy.prim_set_char,
        'get-char': legacy.prim_get_char,
        'char-grid-dimensions': legacy.prim_char_grid_dimensions,
    }
|
||||
|
||||
|
||||
# Primitive registry exposed by this library: the zone-effect primitive
# defined here plus the legacy ASCII primitives re-exported from
# sexp_effects.primitives.
PRIMITIVES = {
    'ascii-fx-zone': prim_ascii_fx_zone,
    **_get_legacy_ascii_primitives(),
}
|
||||
|
||||
@@ -39,6 +39,32 @@ def prim_mod(a, b):
|
||||
return a % b
|
||||
|
||||
|
||||
def prim_abs(x):
    """(abs x) — absolute value of x."""
    return abs(x)


def prim_min(*args):
    """(min a b ...) — smallest of the given arguments."""
    return min(args)


def prim_max(*args):
    """(max a b ...) — largest of the given arguments."""
    return max(args)


def prim_round(x):
    """(round x) — x rounded to the nearest integer (banker's rounding)."""
    return round(x)


def prim_floor(x):
    """(floor x) — largest integer <= x."""
    from math import floor
    return floor(x)


def prim_ceil(x):
    """(ceil x) — smallest integer >= x."""
    from math import ceil
    return ceil(x)
|
||||
|
||||
|
||||
# Comparison
|
||||
def prim_lt(a, b):
|
||||
return a < b
|
||||
@@ -98,6 +124,17 @@ def prim_get(obj, key, default=None):
|
||||
return default
|
||||
|
||||
|
||||
def prim_nth(seq, i):
    """(nth seq i) — element at index i, or nil when i is out of range.

    Negative indices are treated as out of range (no Python wrap-around).
    """
    idx = int(i)
    return seq[idx] if 0 <= idx < len(seq) else None


def prim_first(seq):
    """(first seq) — first element of seq, or nil for an empty sequence."""
    if seq:
        return seq[0]
    return None


def prim_length(seq):
    """(length seq) — number of elements in seq."""
    return len(seq)
|
||||
|
||||
@@ -127,6 +164,31 @@ def prim_is_nil(x):
|
||||
return x is None
|
||||
|
||||
|
||||
# Higher-order / iteration
|
||||
def prim_reduce(seq, init, fn):
|
||||
"""(reduce seq init fn) — fold left: fn(fn(fn(init, s0), s1), s2) ..."""
|
||||
acc = init
|
||||
for item in seq:
|
||||
acc = fn(acc, item)
|
||||
return acc
|
||||
|
||||
|
||||
def prim_map(seq, fn):
|
||||
"""(map seq fn) — apply fn to each element, return new list."""
|
||||
return [fn(item) for item in seq]
|
||||
|
||||
|
||||
def prim_range(*args):
|
||||
"""(range end), (range start end), or (range start end step) — integer range."""
|
||||
if len(args) == 1:
|
||||
return list(range(int(args[0])))
|
||||
elif len(args) == 2:
|
||||
return list(range(int(args[0]), int(args[1])))
|
||||
elif len(args) >= 3:
|
||||
return list(range(int(args[0]), int(args[1]), int(args[2])))
|
||||
return []
|
||||
|
||||
|
||||
# Core primitives dict
|
||||
PRIMITIVES = {
|
||||
# Arithmetic
|
||||
@@ -135,6 +197,12 @@ PRIMITIVES = {
|
||||
'*': prim_mul,
|
||||
'/': prim_div,
|
||||
'mod': prim_mod,
|
||||
'abs': prim_abs,
|
||||
'min': prim_min,
|
||||
'max': prim_max,
|
||||
'round': prim_round,
|
||||
'floor': prim_floor,
|
||||
'ceil': prim_ceil,
|
||||
|
||||
# Comparison
|
||||
'<': prim_lt,
|
||||
@@ -151,6 +219,8 @@ PRIMITIVES = {
|
||||
|
||||
# Data access
|
||||
'get': prim_get,
|
||||
'nth': prim_nth,
|
||||
'first': prim_first,
|
||||
'length': prim_length,
|
||||
'len': prim_length,
|
||||
'list': prim_list,
|
||||
@@ -161,4 +231,10 @@ PRIMITIVES = {
|
||||
'list?': prim_is_list,
|
||||
'dict?': prim_is_dict,
|
||||
'nil?': prim_is_nil,
|
||||
|
||||
# Higher-order / iteration
|
||||
'reduce': prim_reduce,
|
||||
'fold': prim_reduce,
|
||||
'map': prim_map,
|
||||
'range': prim_range,
|
||||
}
|
||||
|
||||
@@ -100,6 +100,24 @@ def prim_affine(img, src_pts, dst_pts):
|
||||
return cv2.warpAffine(img, M, (w, h))
|
||||
|
||||
|
||||
def _get_legacy_geometry_primitives():
    """Build the name->callable map for geometry/displacement primitives
    that still live in the legacy ``sexp_effects.primitives`` module.

    Imported lazily at call time so this module can load before the
    legacy module's own imports have settled.
    """
    from sexp_effects import primitives as legacy

    return {
        'coords-x': legacy.prim_coords_x,
        'coords-y': legacy.prim_coords_y,
        'ripple-displace': legacy.prim_ripple_displace,
        'fisheye-displace': legacy.prim_fisheye_displace,
        'kaleidoscope-displace': legacy.prim_kaleidoscope_displace,
    }
|
||||
|
||||
|
||||
PRIMITIVES = {
|
||||
# Basic transforms
|
||||
'translate': prim_translate,
|
||||
@@ -119,4 +137,7 @@ PRIMITIVES = {
|
||||
# Advanced transforms
|
||||
'perspective': prim_perspective,
|
||||
'affine': prim_affine,
|
||||
|
||||
# Displace / coordinate ops (from legacy primitives)
|
||||
**_get_legacy_geometry_primitives(),
|
||||
}
|
||||
|
||||
@@ -1444,42 +1444,80 @@ CHAR_ALPHABETS = {
|
||||
"digits": " 0123456789",
|
||||
}
|
||||
|
||||
# Global atlas cache
|
||||
# Global atlas cache: keyed on (frozenset(chars), cell_size) ->
|
||||
# (atlas_array, char_to_idx) where atlas_array is (N, cell_size, cell_size) uint8.
|
||||
_char_atlas_cache = {}
|
||||
_CHAR_ATLAS_CACHE_MAX = 32
|
||||
|
||||
|
||||
def _get_char_atlas(alphabet: str, cell_size: int) -> dict:
|
||||
"""Get or create character atlas for alphabet."""
|
||||
cache_key = f"{alphabet}_{cell_size}"
|
||||
if cache_key in _char_atlas_cache:
|
||||
return _char_atlas_cache[cache_key]
|
||||
"""Get or create character atlas for alphabet (legacy dict version)."""
|
||||
atlas_arr, char_to_idx = _get_render_atlas(alphabet, cell_size)
|
||||
# Build legacy dict from array
|
||||
idx_to_char = {v: k for k, v in char_to_idx.items()}
|
||||
return {idx_to_char[i]: atlas_arr[i] for i in range(len(atlas_arr))}
|
||||
|
||||
|
||||
def _get_render_atlas(unique_chars_or_alphabet, cell_size: int):
|
||||
"""Get or build a stacked numpy atlas for vectorised rendering.
|
||||
|
||||
Args:
|
||||
unique_chars_or_alphabet: Either an alphabet name (str looked up in
|
||||
CHAR_ALPHABETS), a literal character string, or a set/frozenset
|
||||
of characters.
|
||||
cell_size: Pixel size of each cell.
|
||||
|
||||
Returns:
|
||||
(atlas_array, char_to_idx) where
|
||||
atlas_array: (num_chars, cell_size, cell_size) uint8 masks
|
||||
char_to_idx: dict mapping character -> index in atlas_array
|
||||
"""
|
||||
if isinstance(unique_chars_or_alphabet, (set, frozenset)):
|
||||
chars_tuple = tuple(sorted(unique_chars_or_alphabet))
|
||||
else:
|
||||
resolved = CHAR_ALPHABETS.get(unique_chars_or_alphabet, unique_chars_or_alphabet)
|
||||
chars_tuple = tuple(resolved)
|
||||
|
||||
cache_key = (chars_tuple, cell_size)
|
||||
cached = _char_atlas_cache.get(cache_key)
|
||||
if cached is not None:
|
||||
return cached
|
||||
|
||||
chars = CHAR_ALPHABETS.get(alphabet, alphabet) # Use as literal if not found
|
||||
font = cv2.FONT_HERSHEY_SIMPLEX
|
||||
font_scale = cell_size / 20.0
|
||||
thickness = max(1, int(cell_size / 10))
|
||||
|
||||
atlas = {}
|
||||
for char in chars:
|
||||
char_img = np.zeros((cell_size, cell_size), dtype=np.uint8)
|
||||
if char != ' ':
|
||||
n = len(chars_tuple)
|
||||
atlas = np.zeros((n, cell_size, cell_size), dtype=np.uint8)
|
||||
char_to_idx = {}
|
||||
|
||||
for i, char in enumerate(chars_tuple):
|
||||
char_to_idx[char] = i
|
||||
if char and char != ' ':
|
||||
try:
|
||||
(text_w, text_h), baseline = cv2.getTextSize(char, font, font_scale, thickness)
|
||||
(text_w, text_h), _ = cv2.getTextSize(char, font, font_scale, thickness)
|
||||
text_x = max(0, (cell_size - text_w) // 2)
|
||||
text_y = (cell_size + text_h) // 2
|
||||
cv2.putText(char_img, char, (text_x, text_y), font, font_scale, 255, thickness, cv2.LINE_AA)
|
||||
except:
|
||||
cv2.putText(atlas[i], char, (text_x, text_y),
|
||||
font, font_scale, 255, thickness, cv2.LINE_AA)
|
||||
except Exception:
|
||||
pass
|
||||
atlas[char] = char_img
|
||||
|
||||
_char_atlas_cache[cache_key] = atlas
|
||||
return atlas
|
||||
# Evict oldest entry if cache is full
|
||||
if len(_char_atlas_cache) >= _CHAR_ATLAS_CACHE_MAX:
|
||||
_char_atlas_cache.pop(next(iter(_char_atlas_cache)))
|
||||
|
||||
_char_atlas_cache[cache_key] = (atlas, char_to_idx)
|
||||
return atlas, char_to_idx
|
||||
|
||||
|
||||
def prim_cell_sample(img: np.ndarray, cell_size: int) -> Tuple[np.ndarray, np.ndarray]:
|
||||
"""
|
||||
Sample image into cell grid, returning average colors and luminances.
|
||||
|
||||
Uses cv2.resize with INTER_AREA (pixel-area averaging) which is
|
||||
~25x faster than numpy reshape+mean for block downsampling.
|
||||
|
||||
Args:
|
||||
img: source image
|
||||
cell_size: size of each cell in pixels
|
||||
@@ -1497,13 +1535,10 @@ def prim_cell_sample(img: np.ndarray, cell_size: int) -> Tuple[np.ndarray, np.nd
|
||||
return (np.zeros((1, 1, 3), dtype=np.uint8),
|
||||
np.zeros((1, 1), dtype=np.float32))
|
||||
|
||||
# Crop to grid
|
||||
# Crop to exact grid then block-average via cv2 area interpolation.
|
||||
grid_h, grid_w = rows * cell_size, cols * cell_size
|
||||
cropped = img[:grid_h, :grid_w]
|
||||
|
||||
# Reshape and average
|
||||
reshaped = cropped.reshape(rows, cell_size, cols, cell_size, 3)
|
||||
colors = reshaped.mean(axis=(1, 3)).astype(np.uint8)
|
||||
colors = cv2.resize(cropped, (cols, rows), interpolation=cv2.INTER_AREA)
|
||||
|
||||
# Compute luminance
|
||||
luminances = (0.299 * colors[:, :, 0] +
|
||||
@@ -1628,16 +1663,11 @@ def prim_luminance_to_chars(luminances: np.ndarray, alphabet: str, contrast: flo
|
||||
indices = ((lum / 255) * (num_chars - 1)).astype(np.int32)
|
||||
indices = np.clip(indices, 0, num_chars - 1)
|
||||
|
||||
# Convert to character array
|
||||
rows, cols = indices.shape
|
||||
result = []
|
||||
for r in range(rows):
|
||||
row = []
|
||||
for c in range(cols):
|
||||
row.append(chars[indices[r, c]])
|
||||
result.append(row)
|
||||
# Vectorised conversion via numpy char array lookup
|
||||
chars_arr = np.array(list(chars))
|
||||
char_grid = chars_arr[indices.ravel()].reshape(indices.shape)
|
||||
|
||||
return result
|
||||
return char_grid.tolist()
|
||||
|
||||
|
||||
def prim_render_char_grid(img: np.ndarray, chars: List[List[str]], colors: np.ndarray,
|
||||
@@ -1647,6 +1677,10 @@ def prim_render_char_grid(img: np.ndarray, chars: List[List[str]], colors: np.nd
|
||||
"""
|
||||
Render a grid of characters onto an image.
|
||||
|
||||
Uses vectorised numpy operations instead of per-cell Python loops:
|
||||
the character atlas is looked up via fancy indexing and the full
|
||||
mask + colour image are assembled in bulk.
|
||||
|
||||
Args:
|
||||
img: source image (for dimensions)
|
||||
chars: 2D list of single characters
|
||||
@@ -1664,12 +1698,11 @@ def prim_render_char_grid(img: np.ndarray, chars: List[List[str]], colors: np.nd
|
||||
|
||||
# Parse background_color
|
||||
if isinstance(background_color, (list, tuple)):
|
||||
# Legacy: accept RGB list
|
||||
bg_color = tuple(int(c) for c in background_color[:3])
|
||||
else:
|
||||
bg_color = parse_color(background_color)
|
||||
if bg_color is None:
|
||||
bg_color = (0, 0, 0) # Default to black
|
||||
bg_color = (0, 0, 0)
|
||||
|
||||
# Handle invert_colors - swap fg and bg
|
||||
if invert_colors and fg_color is not None:
|
||||
@@ -1686,58 +1719,66 @@ def prim_render_char_grid(img: np.ndarray, chars: List[List[str]], colors: np.nd
|
||||
|
||||
bg = list(bg_color)
|
||||
|
||||
result = np.full((h, w, 3), bg, dtype=np.uint8)
|
||||
|
||||
# Collect all unique characters to build minimal atlas
|
||||
# --- Build atlas & index grid ---
|
||||
unique_chars = set()
|
||||
for row in chars:
|
||||
for ch in row:
|
||||
unique_chars.add(ch)
|
||||
|
||||
# Build atlas for unique chars
|
||||
font = cv2.FONT_HERSHEY_SIMPLEX
|
||||
font_scale = cell_size / 20.0
|
||||
thickness = max(1, int(cell_size / 10))
|
||||
atlas, char_to_idx = _get_render_atlas(unique_chars, cell_size)
|
||||
|
||||
atlas = {}
|
||||
for char in unique_chars:
|
||||
char_img = np.zeros((cell_size, cell_size), dtype=np.uint8)
|
||||
if char and char != ' ':
|
||||
try:
|
||||
(text_w, text_h), _ = cv2.getTextSize(char, font, font_scale, thickness)
|
||||
text_x = max(0, (cell_size - text_w) // 2)
|
||||
text_y = (cell_size + text_h) // 2
|
||||
cv2.putText(char_img, char, (text_x, text_y), font, font_scale, 255, thickness, cv2.LINE_AA)
|
||||
except:
|
||||
pass
|
||||
atlas[char] = char_img
|
||||
# Convert 2D char list to index array using ordinal lookup table
|
||||
# (avoids per-cell Python dict lookup).
|
||||
space_idx = char_to_idx.get(' ', 0)
|
||||
max_ord = max(ord(ch) for ch in char_to_idx) + 1
|
||||
ord_lookup = np.full(max_ord, space_idx, dtype=np.int32)
|
||||
for ch, idx in char_to_idx.items():
|
||||
if ch:
|
||||
ord_lookup[ord(ch)] = idx
|
||||
|
||||
# Render characters
|
||||
for r in range(rows):
|
||||
for c in range(cols):
|
||||
char = chars[r][c]
|
||||
if not char or char == ' ':
|
||||
continue
|
||||
flat = [ch for row in chars for ch in row]
|
||||
ords = np.frombuffer(np.array(flat, dtype='U1'), dtype=np.uint32)
|
||||
char_indices = ord_lookup[ords].reshape(rows, cols)
|
||||
|
||||
y1, x1 = r * cell_size, c * cell_size
|
||||
char_mask = atlas.get(char)
|
||||
# --- Vectorised mask assembly ---
|
||||
# atlas[char_indices] -> (rows, cols, cell_size, cell_size)
|
||||
# Transpose to (rows, cell_size, cols, cell_size) then reshape to full image.
|
||||
all_masks = atlas[char_indices]
|
||||
full_mask = all_masks.transpose(0, 2, 1, 3).reshape(h, w)
|
||||
|
||||
if char_mask is None:
|
||||
continue
|
||||
# Expand per-cell colours to per-pixel (only when needed).
|
||||
need_color_full = (color_mode in ("color", "invert")
|
||||
or (fg_color is None and color_mode != "mono"))
|
||||
|
||||
if fg_color is not None:
|
||||
# Use fixed color (named color or hex value)
|
||||
color = np.array(fg_color, dtype=np.uint8)
|
||||
elif color_mode == "mono":
|
||||
color = np.array([255, 255, 255], dtype=np.uint8)
|
||||
elif color_mode == "invert":
|
||||
result[y1:y1+cell_size, x1:x1+cell_size] = colors[r, c]
|
||||
color = np.array([0, 0, 0], dtype=np.uint8)
|
||||
else: # color
|
||||
color = colors[r, c]
|
||||
if need_color_full:
|
||||
color_full = np.repeat(
|
||||
np.repeat(colors[:rows, :cols], cell_size, axis=0),
|
||||
cell_size, axis=1)
|
||||
|
||||
mask = char_mask > 0
|
||||
result[y1:y1+cell_size, x1:x1+cell_size][mask] = color
|
||||
# --- Vectorised colour composite ---
|
||||
# Use element-wise multiply/np.where instead of boolean-indexed scatter
|
||||
# for much better memory access patterns.
|
||||
mask_u8 = (full_mask > 0).astype(np.uint8)[:, :, np.newaxis]
|
||||
|
||||
if color_mode == "invert":
|
||||
# Background is source colour; characters are black.
|
||||
# result = color_full * (1 - mask)
|
||||
result = color_full * (1 - mask_u8)
|
||||
elif fg_color is not None:
|
||||
# Fixed foreground colour on background.
|
||||
fg = np.array(fg_color, dtype=np.uint8)
|
||||
bg_arr = np.array(bg, dtype=np.uint8)
|
||||
result = np.where(mask_u8, fg, bg_arr).astype(np.uint8)
|
||||
elif color_mode == "mono":
|
||||
bg_arr = np.array(bg, dtype=np.uint8)
|
||||
result = np.where(mask_u8, np.uint8(255), bg_arr).astype(np.uint8)
|
||||
else:
|
||||
# "color" mode – each cell uses its source colour on bg.
|
||||
if bg == [0, 0, 0]:
|
||||
result = color_full * mask_u8
|
||||
else:
|
||||
bg_arr = np.array(bg, dtype=np.uint8)
|
||||
result = np.where(mask_u8, color_full, bg_arr).astype(np.uint8)
|
||||
|
||||
# Resize to match original if needed
|
||||
orig_h, orig_w = img.shape[:2]
|
||||
|
||||
Reference in New Issue
Block a user