Add durable pending runs and recipe list debugging
- Store pending runs in PostgreSQL for durability across restarts
- Add recovery method for orphaned runs
- Increase Celery result_expires to 7 days
- Add task_reject_on_worker_lost for automatic re-queuing
- Add logging to the recipe list to debug filter issues

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -46,23 +46,30 @@ class RecipeService:
|
||||
|
||||
async def list_recipes(self, actor_id: str = None, offset: int = 0, limit: int = 20) -> list:
|
||||
"""
|
||||
List available recipes.
|
||||
List available recipes for a user.
|
||||
|
||||
L1 data is isolated per-user - only shows recipes owned by actor_id.
|
||||
|
||||
Note: This scans the cache for recipe files. For production,
|
||||
you might want a database index of recipes by owner.
|
||||
"""
|
||||
import logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Get all cached items and filter for recipes
|
||||
# This is a simplified implementation - production would use a proper index
|
||||
recipes = []
|
||||
|
||||
# Check if cache has a list method for recipes
|
||||
if hasattr(self.cache, 'list_by_type'):
|
||||
items = self.cache.list_by_type('recipe')
|
||||
logger.info(f"Found {len(items)} recipe items in cache")
|
||||
for content_hash in items:
|
||||
recipe = await self.get_recipe(content_hash)
|
||||
if recipe:
|
||||
# Filter by actor if specified
|
||||
if actor_id is None or recipe.get("uploader") == actor_id:
|
||||
uploader = recipe.get("uploader")
|
||||
logger.info(f"Recipe {content_hash[:12]}: uploader={uploader}, actor_id={actor_id}")
|
||||
# Filter by actor - L1 is per-user
|
||||
if actor_id is None or uploader == actor_id:
|
||||
recipes.append(recipe)
|
||||
|
||||
# Sort by name
|
||||
|
||||
Reference in New Issue
Block a user