Fix completed runs not appearing in list + add purge-failed endpoint

- Update save_run_cache to also update actor_id, recipe, inputs on conflict
- Add logging for actor_id when saving runs to run_cache
- Add admin endpoint DELETE /runs/admin/purge-failed to delete all failed runs

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
Author: giles
Date: 2026-02-02 23:24:39 +00:00
Parent: 581da68b3b
Commit: d20eef76ad
24 changed files with 1671 additions and 453 deletions
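The first two bullets describe changes to save_run_cache that are not visible in the hunks below. A minimal sketch of the intended upsert, assuming an SQLite-backed run_cache table keyed by run_id (the real schema, driver, and surrounding module are not shown in this diff):

# Hypothetical sketch only: table layout, key, and driver (aiosqlite) are assumptions.
import json
import logging
import aiosqlite

logger = logging.getLogger(__name__)

async def save_run_cache(db: aiosqlite.Connection, run_id: str, actor_id: str,
                         recipe: str, inputs: list, result: dict) -> None:
    # Log the actor so rows missing an owner are easy to spot (second bullet above).
    logger.info(f"Saving run {run_id} to run_cache for actor {actor_id}")
    await db.execute(
        """
        INSERT INTO run_cache (run_id, actor_id, recipe, inputs, result)
        VALUES (?, ?, ?, ?, ?)
        ON CONFLICT(run_id) DO UPDATE SET
            actor_id = excluded.actor_id,
            recipe   = excluded.recipe,
            inputs   = excluded.inputs,
            result   = excluded.result
        """,
        (run_id, actor_id, recipe, json.dumps(inputs), json.dumps(result)),
    )
    await db.commit()

Updating actor_id, recipe, and inputs on conflict is presumably what lets rows that were cached without an owner reappear in the per-actor run list on the next save.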

View File

@@ -8,7 +8,7 @@ import logging
from pathlib import Path
from typing import Optional, Dict, Any
from fastapi import APIRouter, Request, Depends, HTTPException, UploadFile, File
from fastapi import APIRouter, Request, Depends, HTTPException, UploadFile, File, Form
from fastapi.responses import HTMLResponse, FileResponse
from pydantic import BaseModel
@@ -208,13 +208,95 @@ async def import_from_ipfs(
return {"cid": cid, "imported": True}
@router.post("/upload")
async def upload_content(
file: UploadFile = File(...),
@router.post("/upload/chunk")
async def upload_chunk(
request: Request,
chunk: UploadFile = File(...),
upload_id: str = Form(...),
chunk_index: int = Form(...),
total_chunks: int = Form(...),
filename: str = Form(...),
display_name: Optional[str] = Form(None),
ctx: UserContext = Depends(require_auth),
cache_service: CacheService = Depends(get_cache_service),
):
"""Upload content to cache and IPFS."""
"""Upload a file chunk. Assembles file when all chunks received."""
import tempfile
import os
# Create temp dir for this upload
chunk_dir = Path(tempfile.gettempdir()) / "uploads" / upload_id
chunk_dir.mkdir(parents=True, exist_ok=True)
# Save this chunk
chunk_path = chunk_dir / f"chunk_{chunk_index:05d}"
chunk_data = await chunk.read()
chunk_path.write_bytes(chunk_data)
# Check if all chunks received
received = len(list(chunk_dir.glob("chunk_*")))
if received < total_chunks:
return {"status": "partial", "received": received, "total": total_chunks}
# All chunks received - assemble file
final_path = chunk_dir / filename
with open(final_path, 'wb') as f:
for i in range(total_chunks):
cp = chunk_dir / f"chunk_{i:05d}"
f.write(cp.read_bytes())
cp.unlink() # Clean up chunk
# Read assembled file
content = final_path.read_bytes()
final_path.unlink()
chunk_dir.rmdir()
# Now do the normal upload flow
cid, ipfs_cid, error = await cache_service.upload_content(
content=content,
filename=filename,
actor_id=ctx.actor_id,
)
if error:
raise HTTPException(400, error)
# Assign friendly name
final_cid = ipfs_cid or cid
from ..services.naming_service import get_naming_service
naming = get_naming_service()
friendly_entry = await naming.assign_name(
cid=final_cid,
actor_id=ctx.actor_id,
item_type="media",
display_name=display_name,
filename=filename,
)
return {
"status": "complete",
"cid": final_cid,
"friendly_name": friendly_entry["friendly_name"],
"filename": filename,
"size": len(content),
"uploaded": True,
}
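To make the chunk protocol concrete, here is a hypothetical client-side sketch. The Form field names match the handler above; the URL prefix, chunk size, and use of the requests library are assumptions, and authentication is omitted:

# Hypothetical client for the chunked upload endpoint above.
import math
import uuid
from pathlib import Path
import requests

def upload_in_chunks(base_url: str, path: Path, chunk_size: int = 5 * 1024 * 1024):
    upload_id = str(uuid.uuid4())
    data = path.read_bytes()
    total_chunks = max(1, math.ceil(len(data) / chunk_size))
    for index in range(total_chunks):
        chunk = data[index * chunk_size:(index + 1) * chunk_size]
        resp = requests.post(
            f"{base_url}/upload/chunk",        # prefix assumed; auth cookie/header omitted
            files={"chunk": (path.name, chunk)},
            data={
                "upload_id": upload_id,
                "chunk_index": str(index),
                "total_chunks": str(total_chunks),
                "filename": path.name,
            },
        )
        resp.raise_for_status()
    # Intermediate chunks return {"status": "partial", ...};
    # the final chunk returns {"status": "complete", "cid": ..., "friendly_name": ...}.
    return resp.json()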
@router.post("/upload")
async def upload_content(
file: UploadFile = File(...),
display_name: Optional[str] = Form(None),
ctx: UserContext = Depends(require_auth),
cache_service: CacheService = Depends(get_cache_service),
):
"""Upload content to cache and IPFS.
Args:
file: The file to upload
display_name: Optional custom name for the media (used as friendly name)
"""
content = await file.read()
cid, ipfs_cid, error = await cache_service.upload_content(
content=content,
@@ -233,6 +315,7 @@ async def upload_content(
cid=final_cid,
actor_id=ctx.actor_id,
item_type="media",
display_name=display_name, # Use custom name if provided
filename=file.filename,
)
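For smaller files, the existing single-request endpoint now accepts the same optional friendly name. A hypothetical call (URL prefix and auth are assumptions):

# Hypothetical single-request upload using the new display_name form field.
import requests

with open("clip.mp4", "rb") as f:
    resp = requests.post(
        "http://localhost:8000/cache/upload",          # prefix assumed; auth omitted
        files={"file": ("clip.mp4", f)},
        data={"display_name": "my-background-video"},  # optional friendly name
    )
resp.raise_for_status()
print(resp.json())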
@@ -350,3 +433,83 @@ async def update_metadata_htmx(
<div class="text-green-400 mb-4">Metadata saved!</div>
<script>setTimeout(() => location.reload(), 1000);</script>
''')
# Friendly name editing
@router.get("/{cid}/name-form", response_class=HTMLResponse)
async def get_name_form(
cid: str,
request: Request,
cache_service: CacheService = Depends(get_cache_service),
):
"""Get friendly name editing form (HTMX)."""
ctx = await get_current_user(request)
if not ctx:
return HTMLResponse('<div class="text-red-400">Login required</div>')
# Get current friendly name
from ..services.naming_service import get_naming_service
naming = get_naming_service()
entry = await naming.get_by_cid(ctx.actor_id, cid)
current_name = entry.get("base_name", "") if entry else ""
return HTMLResponse(f'''
<form hx-post="/cache/{cid}/name"
hx-target="#friendly-name-section"
hx-swap="innerHTML"
class="space-y-3">
<div>
<label class="block text-gray-400 text-sm mb-1">Friendly Name</label>
<input type="text" name="display_name" value="{current_name}"
placeholder="e.g., my-background-video"
class="w-full bg-gray-900 border border-gray-600 rounded px-3 py-2 text-white">
<p class="text-gray-500 text-xs mt-1">A name to reference this media in recipes</p>
</div>
<div class="flex space-x-2">
<button type="submit"
class="bg-blue-600 hover:bg-blue-700 px-4 py-2 rounded font-medium">
Save
</button>
<button type="button"
onclick="location.reload()"
class="px-4 py-2 rounded border border-gray-600 hover:bg-gray-700">
Cancel
</button>
</div>
</form>
''')
@router.post("/{cid}/name", response_class=HTMLResponse)
async def update_friendly_name(
cid: str,
request: Request,
):
"""Update friendly name (HTMX form handler)."""
ctx = await get_current_user(request)
if not ctx:
return HTMLResponse('<div class="text-red-400">Login required</div>')
form_data = await request.form()
display_name = form_data.get("display_name", "").strip()
if not display_name:
return HTMLResponse('<div class="text-red-400">Name cannot be empty</div>')
from ..services.naming_service import get_naming_service
naming = get_naming_service()
try:
entry = await naming.assign_name(
cid=cid,
actor_id=ctx.actor_id,
item_type="media",
display_name=display_name,
)
return HTMLResponse(f'''
<div class="text-green-400 mb-2">Name updated!</div>
<script>setTimeout(() => location.reload(), 1000);</script>
''')
except Exception as e:
return HTMLResponse(f'<div class="text-red-400">Error: {e}</div>')

View File

@@ -2,17 +2,17 @@
Effects routes for L1 server.
Handles effect upload, listing, and metadata.
Effects are stored in IPFS like all other content-addressed data.
Effects are S-expression files stored in IPFS like all other content-addressed data.
"""
import hashlib
import json
import logging
import re
import time
from pathlib import Path
from typing import Optional
from fastapi import APIRouter, Request, Depends, HTTPException, UploadFile, File
from fastapi import APIRouter, Request, Depends, HTTPException, UploadFile, File, Form
from fastapi.responses import HTMLResponse, PlainTextResponse
from artdag_common import render
@@ -40,12 +40,11 @@ def get_effects_dir() -> Path:
def parse_effect_metadata(source: str) -> dict:
"""
Parse effect metadata from source code.
Parse effect metadata from S-expression source code.
Extracts PEP 723 dependencies and @-tag metadata from docstring.
Extracts metadata from comment headers (;; @key value format)
or from (defeffect name ...) form.
"""
import re
metadata = {
"name": "",
"version": "1.0.0",
@@ -53,97 +52,54 @@ def parse_effect_metadata(source: str) -> dict:
"temporal": False,
"description": "",
"params": [],
"dependencies": [],
"requires_python": ">=3.10",
}
# Parse PEP 723 dependencies
pep723_match = re.search(r"# /// script\n(.*?)# ///", source, re.DOTALL)
if pep723_match:
block = pep723_match.group(1)
deps_match = re.search(r'# dependencies = \[(.*?)\]', block, re.DOTALL)
if deps_match:
metadata["dependencies"] = re.findall(r'"([^"]+)"', deps_match.group(1))
python_match = re.search(r'# requires-python = "([^"]+)"', block)
if python_match:
metadata["requires_python"] = python_match.group(1)
# Parse comment-based metadata (;; @key value)
for line in source.split("\n"):
stripped = line.strip()
if not stripped.startswith(";"):
# Stop parsing metadata at first non-comment line
if stripped and not stripped.startswith("("):
continue
if stripped.startswith("("):
break
# Parse docstring @-tags
docstring_match = re.search(r'"""(.*?)"""', source, re.DOTALL)
if not docstring_match:
docstring_match = re.search(r"'''(.*?)'''", source, re.DOTALL)
# Remove comment prefix
comment = stripped.lstrip(";").strip()
if docstring_match:
docstring = docstring_match.group(1)
lines = docstring.split("\n")
if comment.startswith("@effect "):
metadata["name"] = comment[8:].strip()
elif comment.startswith("@name "):
metadata["name"] = comment[6:].strip()
elif comment.startswith("@version "):
metadata["version"] = comment[9:].strip()
elif comment.startswith("@author "):
metadata["author"] = comment[8:].strip()
elif comment.startswith("@temporal"):
val = comment[9:].strip().lower() if len(comment) > 9 else "true"
metadata["temporal"] = val in ("true", "yes", "1", "")
elif comment.startswith("@description "):
metadata["description"] = comment[13:].strip()
elif comment.startswith("@param "):
# Format: @param name type [description]
parts = comment[7:].split(None, 2)
if len(parts) >= 2:
param = {"name": parts[0], "type": parts[1]}
if len(parts) > 2:
param["description"] = parts[2]
metadata["params"].append(param)
current_param = None
desc_lines = []
in_description = False
# Also try to extract name from (defeffect "name" ...) or (effect "name" ...)
if not metadata["name"]:
name_match = re.search(r'\((defeffect|effect)\s+"([^"]+)"', source)
if name_match:
metadata["name"] = name_match.group(2)
for line in lines:
stripped = line.strip()
if stripped.startswith("@effect "):
metadata["name"] = stripped[8:].strip()
in_description = False
elif stripped.startswith("@version "):
metadata["version"] = stripped[9:].strip()
elif stripped.startswith("@author "):
metadata["author"] = stripped[8:].strip()
elif stripped.startswith("@temporal "):
val = stripped[10:].strip().lower()
metadata["temporal"] = val in ("true", "yes", "1")
elif stripped.startswith("@description"):
in_description = True
desc_lines = []
elif stripped.startswith("@param "):
in_description = False
if current_param:
metadata["params"].append(current_param)
parts = stripped[7:].split()
if len(parts) >= 2:
current_param = {
"name": parts[0],
"type": parts[1],
"description": "",
}
else:
current_param = None
elif stripped.startswith("@range ") and current_param:
range_parts = stripped[7:].split()
if len(range_parts) >= 2:
try:
current_param["range"] = [float(range_parts[0]), float(range_parts[1])]
except ValueError:
pass
elif stripped.startswith("@default ") and current_param:
current_param["default"] = stripped[9:].strip()
elif stripped.startswith("@example"):
in_description = False
if current_param:
metadata["params"].append(current_param)
current_param = None
elif in_description and stripped:
desc_lines.append(stripped)
elif current_param and stripped and not stripped.startswith("@"):
current_param["description"] = stripped
if in_description:
metadata["description"] = " ".join(desc_lines)
if current_param:
metadata["params"].append(current_param)
# Try to extract name from first (define ...) form
if not metadata["name"]:
define_match = re.search(r'\(define\s+(\w+)', source)
if define_match:
metadata["name"] = define_match.group(1)
return metadata
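An illustrative effect source in the new comment-header format (the effect itself and all field values are made up), together with what the parser above would extract from it:

# Hypothetical S-expression effect with ;; @key metadata headers.
sample_source = """\
;; @effect gaussian-blur
;; @version 1.2.0
;; @author example
;; @temporal false
;; @description Blur each frame with a gaussian kernel
;; @param radius float blur radius in pixels
(define gaussian-blur
  (lambda (frame radius) frame))
"""

meta = parse_effect_metadata(sample_source)
# Per the parser above:
#   meta["name"] == "gaussian-blur"
#   meta["version"] == "1.2.0"
#   meta["temporal"] is False
#   meta["params"] == [{"name": "radius", "type": "float",
#                       "description": "blur radius in pixels"}]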
@@ -151,13 +107,18 @@ def parse_effect_metadata(source: str) -> dict:
@router.post("/upload")
async def upload_effect(
file: UploadFile = File(...),
display_name: Optional[str] = Form(None),
ctx: UserContext = Depends(require_auth),
):
"""
Upload an effect to IPFS.
Upload an S-expression effect to IPFS.
Parses PEP 723 metadata and @-tag docstring.
Parses metadata from comment headers.
Returns IPFS CID for use in recipes.
Args:
file: The .sexp effect file
display_name: Optional custom friendly name for the effect
"""
content = await file.read()
@@ -166,7 +127,7 @@ async def upload_effect(
except UnicodeDecodeError:
raise HTTPException(400, "Effect must be valid UTF-8 text")
# Parse metadata
# Parse metadata from sexp source
try:
meta = parse_effect_metadata(source)
except Exception as e:
@@ -185,7 +146,7 @@ async def upload_effect(
effects_dir = get_effects_dir()
effect_dir = effects_dir / cid
effect_dir.mkdir(parents=True, exist_ok=True)
(effect_dir / "effect.py").write_text(source, encoding="utf-8")
(effect_dir / "effect.sexp").write_text(source, encoding="utf-8")
# Store metadata (locally and in IPFS)
full_meta = {
@@ -209,14 +170,14 @@ async def upload_effect(
filename=file.filename,
)
# Assign friendly name
# Assign friendly name (use custom display_name if provided, else from metadata)
from ..services.naming_service import get_naming_service
naming = get_naming_service()
friendly_entry = await naming.assign_name(
cid=cid,
actor_id=ctx.actor_id,
item_type="effect",
display_name=meta.get("name"),
display_name=display_name or meta.get("name"),
filename=file.filename,
)
@@ -230,7 +191,6 @@ async def upload_effect(
"version": meta.get("version"),
"temporal": meta.get("temporal", False),
"params": meta.get("params", []),
"dependencies": meta.get("dependencies", []),
"uploaded": True,
}
@@ -258,7 +218,7 @@ async def get_effect(
# Cache locally
effect_dir.mkdir(parents=True, exist_ok=True)
source = source_bytes.decode("utf-8")
(effect_dir / "effect.py").write_text(source)
(effect_dir / "effect.sexp").write_text(source)
# Parse metadata from source
parsed_meta = parse_effect_metadata(source)
@@ -297,12 +257,16 @@ async def get_effect_source(
):
"""Get effect source code."""
effects_dir = get_effects_dir()
source_path = effects_dir / cid / "effect.py"
source_path = effects_dir / cid / "effect.sexp"
# Try local cache first
# Try local cache first (check both .sexp and legacy .py)
if source_path.exists():
return PlainTextResponse(source_path.read_text())
legacy_path = effects_dir / cid / "effect.py"
if legacy_path.exists():
return PlainTextResponse(legacy_path.read_text())
# Fetch from IPFS
source_bytes = ipfs_client.get_bytes(cid)
if not source_bytes:

View File

@@ -156,7 +156,6 @@ async def create_run(
async def create_stream_run(
request: StreamRequest,
ctx: UserContext = Depends(require_auth),
redis = Depends(get_redis_client),
):
"""Start a streaming video render.
@@ -166,13 +165,57 @@ async def create_stream_run(
Assets can be referenced by CID or friendly name in the recipe.
"""
import uuid
import tempfile
from pathlib import Path
import database
from tasks.streaming import run_stream
# Generate run ID
run_id = str(uuid.uuid4())
created_at = datetime.now(timezone.utc).isoformat()
# Store recipe in cache so it appears on /recipes page
recipe_id = None
try:
cache_manager = get_cache_manager()
with tempfile.NamedTemporaryFile(delete=False, suffix=".sexp", mode="w") as tmp:
tmp.write(request.recipe_sexp)
tmp_path = Path(tmp.name)
cached, ipfs_cid = cache_manager.put(tmp_path, node_type="recipe", move=True)
recipe_id = cached.cid
# Extract recipe name from S-expression (look for (stream "name" ...) pattern)
import re
name_match = re.search(r'\(stream\s+"([^"]+)"', request.recipe_sexp)
recipe_name = name_match.group(1) if name_match else f"stream-{run_id[:8]}"
# Track ownership in item_types
await database.save_item_metadata(
cid=recipe_id,
actor_id=ctx.actor_id,
item_type="recipe",
description=f"Streaming recipe: {recipe_name}",
filename=f"{recipe_name}.sexp",
)
# Assign friendly name
from ..services.naming_service import get_naming_service
naming = get_naming_service()
await naming.assign_name(
cid=recipe_id,
actor_id=ctx.actor_id,
item_type="recipe",
display_name=recipe_name,
)
logger.info(f"Stored streaming recipe {recipe_id[:16]}... as '{recipe_name}'")
except Exception as e:
logger.warning(f"Failed to store recipe in cache: {e}")
# Continue anyway - run will still work, just won't appear in /recipes
# Submit Celery task
task = run_stream.delay(
run_id=run_id,
recipe_sexp=request.recipe_sexp,
output_name=request.output_name,
duration=request.duration,
@@ -182,21 +225,15 @@ async def create_stream_run(
audio_sexp=request.audio_sexp,
)
# Store run metadata in Redis
run_data = {
"run_id": run_id,
"status": "pending",
"recipe": "streaming",
"actor_id": ctx.actor_id,
"created_at": created_at,
"celery_task_id": task.id,
"output_name": request.output_name,
}
await redis.set(
f"{RUNS_KEY_PREFIX}{run_id}",
json.dumps(run_data),
ex=86400 * 7 # 7 days
# Store in database for durability
pending = await database.create_pending_run(
run_id=run_id,
celery_task_id=task.id,
recipe=recipe_id or "streaming", # Use recipe CID if available
inputs=[], # Streaming recipes don't have traditional inputs
actor_id=ctx.actor_id,
dag_json=request.recipe_sexp, # Store recipe content for viewing
output_name=request.output_name,
)
logger.info(f"Started stream run {run_id} with task {task.id}")
@@ -204,8 +241,8 @@ async def create_stream_run(
return RunStatus(
run_id=run_id,
status="pending",
recipe="streaming",
created_at=created_at,
recipe=recipe_id or "streaming",
created_at=pending.get("created_at"),
celery_task_id=task.id,
)
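A hypothetical request to start a streaming run. The recipe body, route path, and port are assumptions; recipe_sexp, output_name, and duration are taken from the handler above, which may accept further fields:

# Hypothetical streaming-run request (path under the runs router is assumed).
import requests

recipe_sexp = """\
;; a minimal streaming recipe
(stream "demo-stream"
  (source "my-background-video"))   ; asset by friendly name or CID
"""

resp = requests.post(
    "http://localhost:8000/runs/stream",   # path and host assumed; auth omitted
    json={
        "recipe_sexp": recipe_sexp,
        "output_name": "demo-stream",
        "duration": 60,
    },
)
resp.raise_for_status()
print(resp.json()["run_id"])
# The name regex above would store this recipe as "demo-stream", and the
# (stream ...) form after the leading comment marks the run as streaming.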
@@ -305,6 +342,32 @@ async def get_run(
except Exception as e:
logger.warning(f"Failed to load recipe for plan: {e}")
# Handle streaming runs - detect by recipe_sexp content or legacy "streaming" marker
recipe_sexp_content = run.get("recipe_sexp")
is_streaming = run.get("recipe") == "streaming" # Legacy marker
if not is_streaming and recipe_sexp_content:
# Check whether the content starts with "(stream" after skipping comment lines
for line in recipe_sexp_content.split('\n'):
stripped = line.strip()
if not stripped or stripped.startswith(';'):
continue
is_streaming = stripped.startswith('(stream')
break
if is_streaming and recipe_sexp_content and not plan:
plan_sexp = recipe_sexp_content
plan = {
"steps": [{
"id": "stream",
"type": "STREAM",
"name": "Streaming Recipe",
"inputs": [],
"config": {},
"status": "completed" if run.get("status") == "completed" else "pending",
}]
}
run["total_steps"] = 1
run["executed"] = 1 if run.get("status") == "completed" else 0
# Helper to convert simple type to MIME type prefix for template
def type_to_mime(simple_type: str) -> str:
if simple_type == "video":
@@ -564,10 +627,14 @@ async def run_detail(
"analysis": analysis,
}
# Extract plan_sexp for streaming runs
plan_sexp = plan.get("sexp") if plan else None
templates = get_templates(request)
return render(templates, "runs/detail.html", request,
run=run,
plan=plan,
plan_sexp=plan_sexp,
artifacts=artifacts,
analysis=analysis,
dag_elements=dag_elements,
@@ -824,3 +891,26 @@ async def publish_run(
return HTMLResponse(f'<span class="text-green-400">Shared: {ipfs_cid[:16]}...</span>')
return {"ipfs_cid": ipfs_cid, "output_cid": output_cid, "published": True}
@router.delete("/admin/purge-failed")
async def purge_failed_runs(
ctx: UserContext = Depends(require_auth),
):
"""Delete all failed runs from pending_runs table."""
import database
# Get all failed runs
failed_runs = await database.list_pending_runs(status="failed")
deleted = []
for run in failed_runs:
run_id = run.get("run_id")
try:
await database.delete_pending_run(run_id)
deleted.append(run_id)
except Exception as e:
logger.warning(f"Failed to delete run {run_id}: {e}")
logger.info(f"Purged {len(deleted)} failed runs")
return {"purged": len(deleted), "run_ids": deleted}