# Changelog:
# - Store pending runs in PostgreSQL for durability across restarts
# - Add recovery method for orphaned runs
# - Increase Celery result_expires to 7 days
# - Add task_reject_on_worker_lost for automatic re-queuing
# - Add logging to recipe list to debug filter issues
"""
Art DAG Celery Application

Distributed rendering for the Art DAG system.
Uses the foundational artdag language from GitHub.
"""
import os

from celery import Celery

# Connection string for Redis, used as both broker and result backend.
# Defaults to database 5 on a local Redis instance when REDIS_URL is unset.
REDIS_URL = os.environ.get('REDIS_URL', 'redis://localhost:6379/5')
# Celery application instance. Redis serves as both the message broker and
# the result backend; task modules are registered explicitly via `include`.
app = Celery(
    'art_celery',
    broker=REDIS_URL,
    backend=REDIS_URL,
    include=[
        'legacy_tasks',
        'tasks',
        'tasks.analyze',
        'tasks.execute',
        'tasks.orchestrate',
    ],
)
# Runtime configuration, grouped by concern. Passed as a single mapping so
# the settings read as one cohesive table.
_CELERY_SETTINGS = {
    # --- Serialization ---
    'task_serializer': 'json',
    'result_serializer': 'json',
    'event_serializer': 'json',
    'accept_content': ['json', 'pickle'],  # pickle needed for internal Celery messages

    # --- Time handling ---
    'timezone': 'UTC',
    'enable_utc': True,

    # --- Results ---
    'result_expires': 86400 * 7,  # 7 days - allow time for recovery after restarts
    'task_track_started': True,

    # --- Reliability / delivery semantics ---
    'task_acks_late': True,  # Don't ack until task completes - survives worker restart
    'task_reject_on_worker_lost': True,  # Re-queue if worker dies
    'task_acks_on_failure_or_timeout': True,  # Ack failed tasks so they don't retry forever
    'worker_prefetch_multiplier': 1,
}

app.conf.update(_CELERY_SETTINGS)
# Entry point: running this module directly starts the Celery CLI
# (equivalent to invoking `celery -A art_celery`).
if __name__ == '__main__':
    app.start()