Monorepo: consolidate 7 repos into one
All checks were successful
Build and Deploy / build-and-deploy (push) Successful in 1m5s
All checks were successful
Build and Deploy / build-and-deploy (push) Successful in 1m5s
Combines shared, blog, market, cart, events, federation, and account into a single repository. Eliminates submodule sync, sibling model copying at build time, and per-app CI orchestration. Changes: - Remove per-app .git, .gitmodules, .gitea, submodule shared/ dirs - Remove stale sibling model copies from each app - Update all 6 Dockerfiles for monorepo build context (root = .) - Add build directives to docker-compose.yml - Add single .gitea/workflows/ci.yml with change detection - Add .dockerignore for monorepo build context - Create __init__.py for federation and account (cross-app imports)
This commit is contained in:
9
shared/events/__init__.py
Normal file
9
shared/events/__init__.py
Normal file
@@ -0,0 +1,9 @@
|
||||
# Public surface of the shared.events package: re-export the activity-bus
# primitives and the background processor so apps can simply write
# `from shared.events import emit_activity`.
from .bus import emit_activity, register_activity_handler, get_activity_handlers
from .processor import EventProcessor

__all__ = [
    "emit_activity",
    "register_activity_handler",
    "get_activity_handlers",
    "EventProcessor",
]
|
||||
126
shared/events/bus.py
Normal file
126
shared/events/bus.py
Normal file
@@ -0,0 +1,126 @@
|
||||
"""
|
||||
Unified activity bus.
|
||||
|
||||
emit_activity() writes an APActivity row with process_state='pending' within
|
||||
the caller's existing DB transaction — atomic with the domain change.
|
||||
|
||||
register_activity_handler() registers async handler functions that the
|
||||
EventProcessor dispatches when processing pending activities.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import uuid
|
||||
from collections import defaultdict
|
||||
from typing import Awaitable, Callable, Dict, List, Tuple
|
||||
|
||||
from sqlalchemy import text
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from shared.models.federation import APActivity
|
||||
|
||||
log = logging.getLogger(__name__)

# ---------------------------------------------------------------------------
# Activity-handler registry
# ---------------------------------------------------------------------------
# Handler signature: async def handler(activity: APActivity, session: AsyncSession) -> None
ActivityHandlerFn = Callable[[APActivity, AsyncSession], Awaitable[None]]

# Keyed by (activity_type, object_type). object_type="*" is wildcard.
# defaultdict(list) lets register_activity_handler append without first
# checking whether the key exists.
_activity_handlers: Dict[Tuple[str, str], List[ActivityHandlerFn]] = defaultdict(list)
|
||||
|
||||
|
||||
def register_activity_handler(
    activity_type: str,
    fn: ActivityHandlerFn,
    *,
    object_type: str | None = None,
) -> None:
    """Register an async handler for an activity type + optional object type.

    A missing ``object_type`` registers under the ``"*"`` wildcard slot.
    Use ``activity_type="*"`` as a wildcard that fires for every activity
    (e.g. federation delivery handler).
    """
    obj_key = object_type or "*"
    registry_key = (activity_type, obj_key)
    _activity_handlers[registry_key].append(fn)
    log.info(
        "Registered activity handler %s.%s for key %s",
        fn.__module__,
        fn.__qualname__,
        registry_key,
    )
|
||||
|
||||
|
||||
def get_activity_handlers(
    activity_type: str,
    object_type: str | None = None,
) -> List[ActivityHandlerFn]:
    """Collect all handlers registered for an activity, most specific first.

    Lookup order:
      1. exact (activity_type, object_type)
      2. (activity_type, "*") — type-level wildcard
      3. ("*", "*") — global wildcard (e.g. delivery)
    """
    obj = object_type or "*"

    # Build the lookup keys in priority order, skipping duplicates that
    # would arise when the inputs are themselves wildcards.
    lookup_keys = []
    if obj != "*":
        lookup_keys.append((activity_type, obj))
    lookup_keys.append((activity_type, "*"))
    if activity_type != "*":
        lookup_keys.append(("*", "*"))

    matched: List[ActivityHandlerFn] = []
    for key in lookup_keys:
        matched.extend(_activity_handlers.get(key, []))
    return matched
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# emit_activity — the primary way to emit events
|
||||
# ---------------------------------------------------------------------------
|
||||
async def emit_activity(
    session: AsyncSession,
    *,
    activity_type: str,
    actor_uri: str,
    object_type: str,
    object_data: dict | None = None,
    source_type: str | None = None,
    source_id: int | None = None,
    visibility: str = "internal",
    actor_profile_id: int | None = None,
    origin_app: str | None = None,
) -> APActivity:
    """
    Write an AP-shaped activity to ap_activities with process_state='pending'.

    Called inside a service function using the same session that performs the
    domain change. The activity and the change commit together.

    Args:
        session: the caller's open session; the new row joins its transaction.
        activity_type: AP activity type, e.g. "Create" or "rose:Login".
        actor_uri: URI identifying the acting entity.
        object_type: AP object type of the payload.
        object_data: JSON-serializable payload; stored as {} when None.
        source_type: optional link back to the originating domain table.
        source_id: optional id in that domain table.
        visibility: "internal" keeps the activity local; other values may federate.
        actor_profile_id: local ActorProfile id, if any.
        origin_app: emitting app's name; auto-detected from Quart when omitted.

    Returns:
        The flushed (but not yet committed) APActivity row.
    """
    # Best-effort origin detection — only works inside a Quart app context;
    # outside one (or without quart installed) origin_app stays None.
    if not origin_app:
        try:
            from quart import current_app
            origin_app = current_app.name
        except (ImportError, RuntimeError):
            pass

    # Internal activities get a deliberately non-resolvable "internal:" URI;
    # everything else gets a urn:uuid.
    activity_uri = f"internal:{uuid.uuid4()}" if visibility == "internal" else f"urn:uuid:{uuid.uuid4()}"

    activity = APActivity(
        activity_id=activity_uri,
        activity_type=activity_type,
        actor_profile_id=actor_profile_id,
        actor_uri=actor_uri,
        object_type=object_type,
        object_data=object_data or {},
        is_local=True,
        source_type=source_type,
        source_id=source_id,
        visibility=visibility,
        process_state="pending",
        origin_app=origin_app,
    )
    session.add(activity)
    await session.flush()
    # Wake any listening EventProcessor as soon as this transaction commits.
    # NOTIFY is transactional — delivered only after commit.
    await session.execute(text("NOTIFY ap_activity_pending"))
    return activity
|
||||
10
shared/events/handlers/__init__.py
Normal file
10
shared/events/handlers/__init__.py
Normal file
@@ -0,0 +1,10 @@
|
||||
"""Shared event handlers."""
|
||||
|
||||
|
||||
def register_shared_handlers():
|
||||
"""Import handler modules to trigger registration. Call at app startup."""
|
||||
import shared.events.handlers.container_handlers # noqa: F401
|
||||
import shared.events.handlers.login_handlers # noqa: F401
|
||||
import shared.events.handlers.order_handlers # noqa: F401
|
||||
import shared.events.handlers.ap_delivery_handler # noqa: F401
|
||||
import shared.events.handlers.external_delivery_handler # noqa: F401
|
||||
250
shared/events/handlers/ap_delivery_handler.py
Normal file
250
shared/events/handlers/ap_delivery_handler.py
Normal file
@@ -0,0 +1,250 @@
|
||||
"""Deliver AP activities to remote followers.
|
||||
|
||||
Registered as a wildcard handler — fires for every activity. Skips
|
||||
non-public activities and those without an actor profile.
|
||||
|
||||
Per-app delivery: activities are delivered using the domain that matches
|
||||
the follower's subscription. A follower of ``@alice@blog.rose-ash.com``
|
||||
receives activities with ``actor: https://blog.rose-ash.com/users/alice``
|
||||
and signatures using that domain's key_id. Aggregate followers
|
||||
(``app_domain='federation'``) receive the federation domain identity.
|
||||
|
||||
Idempotent: successful deliveries are recorded in ap_delivery_log.
|
||||
On retry (at-least-once reaper), already-delivered inboxes are skipped.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
from collections import defaultdict
|
||||
|
||||
import httpx
|
||||
from sqlalchemy import select, or_
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from shared.events.bus import register_activity_handler
|
||||
from shared.models.federation import ActorProfile, APActivity, APFollower, APDeliveryLog
|
||||
from shared.services.registry import services
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
AP_CONTENT_TYPE = "application/activity+json"
|
||||
DELIVERY_TIMEOUT = 15 # seconds per request
|
||||
|
||||
|
||||
def _domain_for_app(app_name: str) -> str:
    """Resolve the public AP domain for an app name."""
    # Function-scope import — presumably to avoid a circular import at
    # module load time; TODO confirm against shared.infrastructure.
    from shared.infrastructure.activitypub import _ap_domain
    return _ap_domain(app_name)
|
||||
|
||||
|
||||
def _build_activity_json(activity: APActivity, actor: ActorProfile, domain: str) -> dict:
    """Build the full AP activity JSON-LD for delivery.

    Args:
        activity: the stored APActivity row being delivered.
        actor: the local actor profile (provides the username).
        domain: the delivery domain; all federation-domain URLs are
            rewritten onto it.

    Returns:
        A JSON-LD dict ready to POST to a remote inbox.
    """
    username = actor.preferred_username
    actor_url = f"https://{domain}/users/{username}"

    obj = dict(activity.object_data or {})

    # Rewrite all URLs from the federation domain to the delivery domain
    # so Mastodon's origin check passes (all IDs must match actor host).
    # (Previously imported `re` here but never used it — removed.)
    fed_domain = os.getenv("AP_DOMAIN", "federation.rose-ash.com")

    def _rewrite(url: str) -> str:
        # Only plain https URLs on the federation domain are rewritten;
        # non-string or foreign values pass through untouched.
        if isinstance(url, str) and fed_domain in url:
            return url.replace(f"https://{fed_domain}", f"https://{domain}")
        return url

    activity_id = _rewrite(activity.activity_id)
    object_id = activity_id + "/object"

    # Rewrite any federation-domain URLs in object_data
    if "id" in obj:
        obj["id"] = _rewrite(obj["id"])
    if "attributedTo" in obj:
        obj["attributedTo"] = _rewrite(obj["attributedTo"])

    if activity.activity_type == "Delete":
        # Deletes carry a Tombstone with no authorship/audience metadata.
        obj.setdefault("id", object_id)
        obj.setdefault("type", "Tombstone")
    else:
        obj.setdefault("id", object_id)
        obj.setdefault("type", activity.object_type)
        obj.setdefault("attributedTo", actor_url)
        obj.setdefault("published", activity.published.isoformat() if activity.published else None)
        obj.setdefault("to", ["https://www.w3.org/ns/activitystreams#Public"])
        obj.setdefault("cc", [f"{actor_url}/followers"])
        if activity.activity_type == "Update":
            from datetime import datetime, timezone
            obj["updated"] = datetime.now(timezone.utc).isoformat()

    return {
        "@context": [
            "https://www.w3.org/ns/activitystreams",
            "https://w3id.org/security/v1",
        ],
        "id": activity_id,
        "type": activity.activity_type,
        "actor": actor_url,
        "published": activity.published.isoformat() if activity.published else None,
        "to": ["https://www.w3.org/ns/activitystreams#Public"],
        "cc": [f"{actor_url}/followers"],
        "object": obj,
    }
|
||||
|
||||
|
||||
async def _deliver_to_inbox(
    client: httpx.AsyncClient,
    inbox_url: str,
    body: dict,
    actor: ActorProfile,
    domain: str,
) -> int | None:
    """POST signed activity to a single inbox. Returns status code or None on error.

    The key_id is built from the delivery *domain*, so the remote server can
    verify the signature against the actor document served on that domain.
    """
    from shared.utils.http_signatures import sign_request
    from urllib.parse import urlparse
    import json

    body_bytes = json.dumps(body).encode()
    key_id = f"https://{domain}/users/{actor.preferred_username}#main-key"

    parsed = urlparse(inbox_url)
    # sign_request sees the exact method, path, host and body bytes we send.
    headers = sign_request(
        private_key_pem=actor.private_key_pem,
        key_id=key_id,
        method="POST",
        path=parsed.path,
        host=parsed.netloc,
        body=body_bytes,
    )
    headers["Content-Type"] = AP_CONTENT_TYPE

    try:
        resp = await client.post(
            inbox_url,
            content=body_bytes,
            headers=headers,
            timeout=DELIVERY_TIMEOUT,
        )
        # Anything below 300 counts as delivered; others are logged with a
        # truncated response body for diagnosis.
        if resp.status_code < 300:
            log.info("Delivered to %s → %d", inbox_url, resp.status_code)
        else:
            log.warning("Delivery to %s → %d: %s", inbox_url, resp.status_code, resp.text[:200])
        return resp.status_code
    except Exception:
        # Transport-level failure (DNS, timeout, TLS, …). Caller treats
        # None as "not delivered" so the retry/reaper path picks it up.
        log.exception("Delivery failed for %s", inbox_url)
        return None
|
||||
|
||||
|
||||
async def on_any_activity(activity: APActivity, session: AsyncSession) -> None:
    """Deliver a public activity to all matching followers of its actor.

    Registered as a global wildcard handler; filters aggressively up front
    so non-federating activities return cheaply.
    """

    # Only deliver public activities that have an actor profile
    if activity.visibility != "public":
        return
    if activity.actor_profile_id is None:
        return
    if not services.has("federation"):
        return

    # Load actor with private key
    actor = (
        await session.execute(
            select(ActorProfile).where(ActorProfile.id == activity.actor_profile_id)
        )
    ).scalar_one_or_none()
    if not actor or not actor.private_key_pem:
        log.warning("Actor not found or missing key for activity %s", activity.activity_id)
        return

    # Load matching followers.
    # Aggregate followers (app_domain='federation') always get everything.
    # Per-app followers only get activities from their app.
    origin_app = activity.origin_app
    follower_filters = [APFollower.actor_profile_id == actor.id]

    if origin_app and origin_app != "federation":
        follower_filters.append(
            or_(
                APFollower.app_domain == "federation",
                APFollower.app_domain == origin_app,
            )
        )

    followers = (
        await session.execute(
            select(APFollower).where(*follower_filters)
        )
    ).scalars().all()

    if not followers:
        log.debug("No followers to deliver to for %s", activity.activity_id)
        return

    # Check delivery log — skip (inbox, domain) pairs already delivered (idempotency)
    existing = (
        await session.execute(
            select(APDeliveryLog.inbox_url, APDeliveryLog.app_domain).where(
                APDeliveryLog.activity_id == activity.id,
                APDeliveryLog.status_code < 300,
            )
        )
    ).all()
    already_delivered: set[tuple[str, str]] = {(r[0], r[1]) for r in existing}

    # Collect all (inbox, app_domain) pairs to deliver to.
    # Each follower subscription gets its own delivery with the correct
    # actor identity, so followers of @user@blog and @user@federation
    # both see posts on their respective actor profiles.
    delivery_pairs: set[tuple[str, str]] = set()
    for f in followers:
        if not f.follower_inbox:
            continue
        app_dom = f.app_domain or "federation"
        pair = (f.follower_inbox, app_dom)
        if pair not in already_delivered:
            delivery_pairs.add(pair)

    if not delivery_pairs:
        if already_delivered:
            log.info("All deliveries already done for %s", activity.activity_id)
        return

    if already_delivered:
        log.info(
            "Skipping %d already-delivered, delivering to %d remaining",
            len(already_delivered), len(delivery_pairs),
        )

    # Group by domain to reuse activity JSON per domain
    domain_inboxes: dict[str, list[str]] = defaultdict(list)
    for inbox_url, app_dom in delivery_pairs:
        domain_inboxes[app_dom].append(inbox_url)

    log.info(
        "Delivering %s to %d target(s) for @%s across %d domain(s)",
        activity.activity_type, len(delivery_pairs),
        actor.preferred_username, len(domain_inboxes),
    )

    async with httpx.AsyncClient() as client:
        for app_dom, inboxes in domain_inboxes.items():
            domain = _domain_for_app(app_dom)
            # One JSON build per domain — the payload differs only by the
            # domain its URLs are rewritten onto.
            activity_json = _build_activity_json(activity, actor, domain)

            for inbox_url in inboxes:
                status_code = await _deliver_to_inbox(
                    client, inbox_url, activity_json, actor, domain
                )
                if status_code is not None and status_code < 300:
                    # Record and flush each success immediately so a crash
                    # mid-batch does not cause duplicate deliveries on retry.
                    session.add(APDeliveryLog(
                        activity_id=activity.id,
                        inbox_url=inbox_url,
                        app_domain=app_dom,
                        status_code=status_code,
                    ))
                    await session.flush()


# Wildcard: fires for every activity
register_activity_handler("*", on_any_activity)
|
||||
19
shared/events/handlers/container_handlers.py
Normal file
19
shared/events/handlers/container_handlers.py
Normal file
@@ -0,0 +1,19 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from shared.events import register_activity_handler
|
||||
from shared.models.federation import APActivity
|
||||
from shared.services.navigation import rebuild_navigation
|
||||
|
||||
|
||||
async def on_child_attached(activity: APActivity, session: AsyncSession) -> None:
    """Rebuild the cached navigation tree after a child container is attached."""
    await rebuild_navigation(session)


async def on_child_detached(activity: APActivity, session: AsyncSession) -> None:
    """Rebuild the cached navigation tree after a child container is detached."""
    await rebuild_navigation(session)


# Container relations change the site hierarchy, so both Add and Remove
# of a rose:ContainerRelation trigger a navigation rebuild.
register_activity_handler("Add", on_child_attached, object_type="rose:ContainerRelation")
register_activity_handler("Remove", on_child_detached, object_type="rose:ContainerRelation")
|
||||
101
shared/events/handlers/external_delivery_handler.py
Normal file
101
shared/events/handlers/external_delivery_handler.py
Normal file
@@ -0,0 +1,101 @@
|
||||
"""Deliver activities to external service inboxes via signed HTTP POST.
|
||||
|
||||
External services (like artdag) that don't share the coop database receive
|
||||
activities via HTTP, authenticated with the same HTTP Signatures used for
|
||||
ActivityPub federation.
|
||||
|
||||
Config via env: EXTERNAL_INBOXES=name|url,name2|url2,...
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from urllib.parse import urlparse
|
||||
|
||||
import httpx
|
||||
from sqlalchemy import select
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from shared.events.bus import register_activity_handler
|
||||
from shared.models.federation import ActorProfile, APActivity
|
||||
from shared.utils.http_signatures import sign_request
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# Activity types to deliver externally
|
||||
_DELIVERABLE_TYPES = {"rose:DeviceAuth"}
|
||||
|
||||
|
||||
def _get_external_inboxes() -> list[tuple[str, str]]:
    """Parse EXTERNAL_INBOXES env var into [(name, url), ...].

    The variable holds comma-separated ``name|url`` pairs; entries without
    a ``|`` separator are silently skipped.
    """
    configured = os.environ.get("EXTERNAL_INBOXES", "")
    if not configured:
        return []

    inboxes: list[tuple[str, str]] = []
    for raw_entry in configured.split(","):
        name, sep, url = raw_entry.strip().partition("|")
        if sep:
            inboxes.append((name.strip(), url.strip()))
    return inboxes
|
||||
|
||||
|
||||
def _get_ap_domain() -> str:
    """Return the public ActivityPub domain (AP_DOMAIN env var, with default)."""
    return os.getenv("AP_DOMAIN", "federation.rose-ash.com")
|
||||
|
||||
|
||||
async def on_external_activity(activity: APActivity, session: AsyncSession) -> None:
    """Deliver matching activities to configured external inboxes.

    Fires only for activity types in _DELIVERABLE_TYPES. Delivery is
    best-effort: failures are logged and never fail activity processing.
    """
    if activity.activity_type not in _DELIVERABLE_TYPES:
        return

    inboxes = _get_external_inboxes()
    if not inboxes:
        return

    # Get the first actor profile for signing
    # NOTE(review): assumes at least one ActorProfile exists and that any
    # profile's key is acceptable to external services — confirm.
    actor = await session.scalar(select(ActorProfile).limit(1))
    if not actor:
        log.warning("No ActorProfile available for signing external deliveries")
        return

    domain = _get_ap_domain()
    key_id = f"https://{domain}/users/{actor.preferred_username}#main-key"

    # Minimal AP-shaped payload; external services are not full AP servers.
    payload = {
        "@context": "https://www.w3.org/ns/activitystreams",
        "type": activity.activity_type,
        "actor": activity.actor_uri,
        "object": activity.object_data,
    }
    if activity.published:
        payload["published"] = activity.published.isoformat()

    body_bytes = json.dumps(payload).encode()

    for name, inbox_url in inboxes:
        parsed = urlparse(inbox_url)
        # Same HTTP Signature scheme as ActivityPub federation.
        headers = sign_request(
            private_key_pem=actor.private_key_pem,
            key_id=key_id,
            method="POST",
            path=parsed.path,
            host=parsed.netloc,
            body=body_bytes,
        )
        headers["Content-Type"] = "application/activity+json"
        try:
            # Short timeout: external inboxes must not stall the processor.
            async with httpx.AsyncClient(timeout=3) as client:
                resp = await client.post(inbox_url, content=body_bytes, headers=headers)
                log.info(
                    "External delivery to %s: %d",
                    name, resp.status_code,
                )
        except Exception:
            log.warning("External delivery to %s failed", name, exc_info=True)


# Register for all deliverable types
for _t in _DELIVERABLE_TYPES:
    register_activity_handler(_t, on_external_activity)
|
||||
23
shared/events/handlers/login_handlers.py
Normal file
23
shared/events/handlers/login_handlers.py
Normal file
@@ -0,0 +1,23 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from shared.events import register_activity_handler
|
||||
from shared.models.federation import APActivity
|
||||
from shared.services.registry import services
|
||||
|
||||
|
||||
async def on_user_logged_in(activity: APActivity, session: AsyncSession) -> None:
    """Adopt anonymous-session data for the user who just logged in.

    Expects object_data to carry "user_id" and "session_id"; a KeyError
    here fails the handler and leaves the activity for retry.
    """
    data = activity.object_data
    user_id = data["user_id"]
    session_id = data["session_id"]

    # Each mounted app claims its own anonymous-session rows.
    if services.has("cart"):
        await services.cart.adopt_cart_for_user(session, user_id, session_id)

    if services.has("calendar"):
        await services.calendar.adopt_entries_for_user(session, user_id, session_id)
        await services.calendar.adopt_tickets_for_user(session, user_id, session_id)


register_activity_handler("rose:Login", on_user_logged_in)
|
||||
22
shared/events/handlers/order_handlers.py
Normal file
22
shared/events/handlers/order_handlers.py
Normal file
@@ -0,0 +1,22 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from shared.events import register_activity_handler
|
||||
from shared.models.federation import APActivity
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def on_order_created(activity: APActivity, session: AsyncSession) -> None:
    """Log order creation; currently observability-only."""
    log.info("order.created: order_id=%s", activity.object_data.get("order_id"))


async def on_order_paid(activity: APActivity, session: AsyncSession) -> None:
    """Log order payment; currently observability-only."""
    log.info("order.paid: order_id=%s", activity.object_data.get("order_id"))


register_activity_handler("Create", on_order_created, object_type="rose:Order")
register_activity_handler("rose:OrderPaid", on_order_paid)
|
||||
243
shared/events/processor.py
Normal file
243
shared/events/processor.py
Normal file
@@ -0,0 +1,243 @@
|
||||
"""
|
||||
Event processor — polls the ap_activities table and dispatches to registered
|
||||
activity handlers.
|
||||
|
||||
Runs as an asyncio background task within each app process.
|
||||
Uses SELECT ... FOR UPDATE SKIP LOCKED for safe concurrent processing.
|
||||
|
||||
A dedicated asyncpg LISTEN connection wakes the poll loop immediately when
|
||||
emit_activity() fires NOTIFY ap_activity_pending, so latency drops from
|
||||
~2 seconds (poll interval) to sub-100 ms. The fixed-interval poll remains
|
||||
as a safety-net fallback.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import traceback
|
||||
from datetime import datetime, timedelta, timezone
|
||||
|
||||
import asyncpg
|
||||
from sqlalchemy import select, update
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from shared.db.session import get_session, DATABASE_URL
|
||||
from shared.models.federation import APActivity
|
||||
from .bus import get_activity_handlers
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class EventProcessor:
    """Background event processor that polls the ap_activities table.

    Dispatches pending activities to handlers registered on the activity
    bus. Safe to run one instance per app process: batches are claimed with
    SELECT ... FOR UPDATE SKIP LOCKED, and a dedicated LISTEN connection
    wakes the loop as soon as new work commits.
    """

    def __init__(
        self,
        *,
        app_name: str | None = None,
        poll_interval: float = 2.0,
        batch_size: int = 10,
        stuck_timeout: float = 300.0,
    ):
        """
        Args:
            app_name: when set, only activities with this origin_app are
                processed (and recovered) by this instance.
            poll_interval: fallback poll period in seconds when idle.
            batch_size: maximum activities claimed per batch.
            stuck_timeout: seconds before a 'processing' row is considered
                abandoned and reset to 'pending'.
        """
        self._app_name = app_name
        self._poll_interval = poll_interval
        self._batch_size = batch_size
        self._stuck_timeout = stuck_timeout  # seconds before "processing" → "pending"
        self._task: asyncio.Task | None = None
        self._listen_task: asyncio.Task | None = None
        self._listen_conn: asyncpg.Connection | None = None
        self._wake = asyncio.Event()
        self._running = False
        self._reap_counter = 0

    # ------------------------------------------------------------------
    # Lifecycle
    # ------------------------------------------------------------------

    async def start(self) -> None:
        """Start the background polling loop (idempotent)."""
        if self._task is not None:
            return
        self._running = True
        self._listen_task = asyncio.create_task(self._listen_for_notify())
        self._task = asyncio.create_task(self._poll_loop())

    async def stop(self) -> None:
        """Stop the background polling loop gracefully."""
        self._running = False
        if self._listen_task is not None:
            self._listen_task.cancel()
            try:
                await self._listen_task
            except asyncio.CancelledError:
                pass
            self._listen_task = None
        if self._listen_conn is not None and not self._listen_conn.is_closed():
            await self._listen_conn.close()
        self._listen_conn = None
        if self._task is not None:
            self._task.cancel()
            try:
                await self._task
            except asyncio.CancelledError:
                pass
            self._task = None

    # ------------------------------------------------------------------
    # LISTEN — wake poll loop on NOTIFY
    # ------------------------------------------------------------------

    async def _listen_for_notify(self) -> None:
        """Maintain a LISTEN connection and wake the poll loop on NOTIFY."""
        # asyncpg wants a plain postgres:// DSN, not SQLAlchemy's +asyncpg form.
        dsn = DATABASE_URL.replace("+asyncpg", "")
        while self._running:
            try:
                self._listen_conn = await asyncpg.connect(dsn)
                await self._listen_conn.add_listener(
                    "ap_activity_pending", self._on_notify
                )
                log.info("LISTEN ap_activity_pending active")
                # Keep alive with periodic health check; a dead connection
                # raises here and we fall through to reconnect.
                while self._running:
                    await asyncio.sleep(30)
                    await self._listen_conn.execute("SELECT 1")
            except asyncio.CancelledError:
                break
            except Exception:
                log.warning("LISTEN connection lost, reconnecting…", exc_info=True)
                await asyncio.sleep(2)
            finally:
                if self._listen_conn is not None and not self._listen_conn.is_closed():
                    await self._listen_conn.close()
                self._listen_conn = None

    def _on_notify(self, conn, pid, channel, payload) -> None:
        """Called by asyncpg when a NOTIFY arrives."""
        self._wake.set()

    # ------------------------------------------------------------------
    # Poll loop
    # ------------------------------------------------------------------

    async def _poll_loop(self) -> None:
        """Main loop: drain batches; sleep only when the queue is empty."""
        while self._running:
            try:
                # Periodically recover stuck activities (~every 30 cycles)
                self._reap_counter += 1
                if self._reap_counter >= 30:
                    self._reap_counter = 0
                    await self._recover_stuck()

                # Clear before processing so any NOTIFY that arrives during
                # _process_batch sets the event and we loop immediately.
                self._wake.clear()
                processed = await self._process_batch()
                if processed == 0:
                    try:
                        await asyncio.wait_for(
                            self._wake.wait(), timeout=self._poll_interval
                        )
                    except asyncio.TimeoutError:
                        pass
                # processed > 0 → loop immediately to drain the queue
            except asyncio.CancelledError:
                break
            except Exception:
                # Fix: route poll-loop failures through the module logger
                # (was traceback.print_exc(), which bypassed log handlers).
                log.exception("Poll loop iteration failed")
                await asyncio.sleep(self._poll_interval)

    async def _recover_stuck(self) -> None:
        """Reset activities stuck in 'processing' back to 'pending'.

        This handles the case where a process crashed mid-handler.
        Combined with idempotent handlers, this gives at-least-once delivery.
        """
        cutoff = datetime.now(timezone.utc) - timedelta(seconds=self._stuck_timeout)
        try:
            async with get_session() as session:
                filters = [
                    APActivity.process_state == "processing",
                    APActivity.created_at < cutoff,
                ]
                if self._app_name:
                    filters.append(APActivity.origin_app == self._app_name)
                result = await session.execute(
                    update(APActivity)
                    .where(*filters)
                    .values(process_state="pending")
                    .returning(APActivity.id)
                )
                recovered = result.scalars().all()
                await session.commit()
                if recovered:
                    log.warning(
                        "Recovered %d stuck activities: %s",
                        len(recovered), recovered,
                    )
        except Exception:
            log.exception("Failed to recover stuck activities")

    async def _process_batch(self) -> int:
        """Fetch and process a batch of pending activities. Returns count processed."""
        processed = 0
        async with get_session() as session:
            filters = [
                APActivity.process_state == "pending",
                APActivity.process_attempts < APActivity.process_max_attempts,
            ]
            if self._app_name:
                filters.append(APActivity.origin_app == self._app_name)
            # SKIP LOCKED lets concurrent processors claim disjoint batches.
            stmt = (
                select(APActivity)
                .where(*filters)
                .order_by(APActivity.created_at)
                .limit(self._batch_size)
                .with_for_update(skip_locked=True)
            )
            result = await session.execute(stmt)
            activities = result.scalars().all()

            for activity in activities:
                await self._process_one(session, activity)
                processed += 1

            # One commit releases the row locks and persists all state changes.
            await session.commit()
        return processed

    async def _process_one(self, session: AsyncSession, activity: APActivity) -> None:
        """Run all handlers for a single activity and record the outcome."""
        handlers = get_activity_handlers(activity.activity_type, activity.object_type)
        now = datetime.now(timezone.utc)

        log.info(
            "Processing activity %s: type=%s object_type=%s visibility=%s actor_profile_id=%s — %d handler(s) found",
            activity.id, activity.activity_type, activity.object_type,
            activity.visibility, activity.actor_profile_id, len(handlers),
        )
        for h in handlers:
            log.info("  handler: %s.%s", h.__module__, h.__qualname__)

        # Mark in-flight and count the attempt before running handlers so a
        # crash mid-handler is visible to the stuck-activity reaper.
        activity.process_state = "processing"
        activity.process_attempts += 1
        await session.flush()

        if not handlers:
            activity.process_state = "completed"
            activity.processed_at = now
            return

        try:
            for handler in handlers:
                log.info("  calling %s.%s …", handler.__module__, handler.__qualname__)
                await handler(activity, session)
                log.info("  done %s.%s", handler.__module__, handler.__qualname__)
            activity.process_state = "completed"
            activity.processed_at = now
        except Exception as exc:
            log.exception("Handler failed for activity %s", activity.id)
            activity.process_error = f"{exc.__class__.__name__}: {exc}"
            if activity.process_attempts >= activity.process_max_attempts:
                activity.process_state = "failed"
                activity.processed_at = now
            else:
                activity.process_state = "pending"  # retry
||||
Reference in New Issue
Block a user