```python
from ai_infra.callbacks import CallbackManager
```

Manages multiple callback handlers. Dispatches events to all registered callbacks. Errors in callbacks are caught and logged, not propagated.
```python
manager = CallbackManager([
    LoggingCallbacks(),
    MetricsCallbacks(),
])

# Fire an event on all registered callbacks
manager.on_llm_start(LLMStartEvent(...))

# Or use the context manager for timing
with manager.llm_call("openai", "gpt-4o", messages) as ctx:
    response = await do_llm_call()
    ctx.set_response(response, tokens=150)
```
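The error-isolation guarantee means one misbehaving handler cannot break an LLM call or prevent the other handlers from receiving events. As a rough illustration only (not the actual `ai_infra` implementation), a dispatch loop with this property looks like the sketch below; `_SketchManager` and `dispatch` are hypothetical names.

```python
import logging

logger = logging.getLogger(__name__)


class _SketchManager:
    """Illustrative sketch of per-handler error isolation.

    Not the ai_infra implementation; it only shows the pattern the
    documentation describes: catch and log, never propagate.
    """

    def __init__(self, handlers):
        self.handlers = list(handlers)

    def dispatch(self, method_name, event):
        for handler in self.handlers:
            try:
                # Look up the event hook (e.g. "on_llm_start") and call it.
                getattr(handler, method_name)(event)
            except Exception:
                # A failing handler is logged and skipped; the remaining
                # handlers still receive the event, and the caller never
                # sees the exception.
                logger.exception(
                    "callback %r failed during %s", handler, method_name
                )
```

Each handler runs inside its own try/except, so the worst case for a buggy callback is a logged traceback rather than a failed request.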