Why: In serverless, the first request (a "cold start") initializes everything from scratch, while subsequent requests ("warm starts") reuse the same container. Lazy initialization takes advantage of this: initialize the tracer once on the cold start, then reuse it across warm requests.
from honeyhive import HoneyHiveTracer, trace
import os
from typing import Optional

# Module-level cache: lives for the lifetime of the Lambda container, so
# warm invocations reuse the tracer created on the cold start.
_tracer: Optional[HoneyHiveTracer] = None  # Survives warm starts


def get_tracer() -> HoneyHiveTracer:
    """Return the process-wide tracer, initializing it on first use."""
    global _tracer
    if _tracer is not None:
        return _tracer
    _tracer = HoneyHiveTracer.init(
        api_key=os.getenv("HH_API_KEY"),
        project=os.getenv("HH_PROJECT"),
        source="lambda",
        disable_batch=True,  # Recommended for serverless
    )
    return _tracer


def lambda_handler(event, context):
    """Lambda entry point: process the event and enrich the trace session."""
    hh = get_tracer()
    result = process_event(event)
    hh.enrich_session(
        outputs={"result": result},
        metadata={"request_id": context.aws_request_id},
    )
    hh.flush()  # No-op with disable_batch=True, but harmless safety net
    return result


@trace()
def process_event(event):
    """Handle the incoming event inside a traced span."""
    get_tracer().enrich_span(metadata={"event_type": event.get("type")})
    return {"status": "success"}
Alternative: an `lru_cache`-decorated function achieves the same lazy initialization with less boilerplate:
from functools import lru_cache


# maxsize=1 suffices: the function takes no arguments, so the cache only
# ever holds one entry — the first call initializes, later calls reuse it.
@lru_cache(maxsize=1)
def get_tracer():
    """Return the cached tracer, initializing it on the first call.

    NOTE(review): assumes ``HoneyHiveTracer`` and ``os`` are imported by the
    surrounding example — confirm when copying this snippet standalone.
    """
    return HoneyHiveTracer.init(
        api_key=os.getenv("HH_API_KEY"),
        project=os.getenv("HH_PROJECT"),
        disable_batch=True,  # Recommended for serverless
    )
Why: Web servers handle many concurrent requests. Initialize the tracer once when the app starts, then create a new session per request using create_session() (sync) or acreate_session() (async) so each request gets isolated traces.
For multi-turn conversations, custom session IDs, and scoped sessions, see Tracer Initialization.