- Update save_run_cache to also update actor_id, recipe, inputs on conflict - Add logging for actor_id when saving runs to run_cache - Add admin endpoint DELETE /runs/admin/purge-failed to delete all failed runs Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
37 lines
1.0 KiB
Python
37 lines
1.0 KiB
Python
"""
Art DAG Celery Application

Streaming video rendering for the Art DAG system.
Uses S-expression recipes with frame-by-frame processing.
"""

import os

from celery import Celery

# Connection string shared by the broker and the result backend.
# Redis DB index 5 keeps this app's keys separate from other services.
REDIS_URL = os.getenv('REDIS_URL', 'redis://localhost:6379/5')
|
|
|
|
# Celery application instance. Redis serves as both the message broker
# and the result backend; the task modules are listed explicitly so
# workers discover and register them at startup.
app = Celery(
    'art_celery',
    backend=REDIS_URL,
    broker=REDIS_URL,
    include=['tasks', 'tasks.streaming', 'tasks.ipfs_upload'],
)
|
|
|
|
# Worker configuration, grouped by concern. Passed as a single mapping;
# Celery's conf.update accepts a dict exactly like keyword arguments.
app.conf.update({
    # --- Results ---
    'result_expires': 86400 * 7,  # 7 days - allow time for recovery after restarts

    # --- Serialization ---
    'task_serializer': 'json',
    'accept_content': ['json', 'pickle'],  # pickle needed for internal Celery messages
    'result_serializer': 'json',
    'event_serializer': 'json',

    # --- Time handling ---
    'timezone': 'UTC',
    'enable_utc': True,

    # --- Reliability / delivery semantics ---
    'task_track_started': True,
    'task_acks_late': True,  # Don't ack until task completes - survives worker restart
    'worker_prefetch_multiplier': 1,
    'task_reject_on_worker_lost': True,  # Re-queue if worker dies
    'task_acks_on_failure_or_timeout': True,  # Ack failed tasks so they don't retry forever
})
|
|
|
|
if __name__ == '__main__':
    # Allow running this module directly (e.g. `python <module> worker`)
    # to launch the Celery worker entry point.
    app.start()
|