Overview
Composo’s tracing module provides automatic instrumentation for LLM calls and manual tracking for multi-agent systems. Capture detailed interaction data to evaluate agent performance and debug complex workflows.
ComposoTracer
Initialize automatic instrumentation for LLM provider APIs.
init()
Configure tracing for one or more LLM providers.
from composo import ComposoTracer, Instruments
ComposoTracer.init(instruments=Instruments.OPENAI)
Parameters
instruments
Instruments | list[Instruments]
Single instrument or list of instruments to enable tracing for. Available Instruments:
Instruments.OPENAI: Trace OpenAI API calls
Instruments.ANTHROPIC: Trace Anthropic API calls
Instruments.GOOGLE_GENAI: Trace Google Gemini API calls
If None, initializes tracing without provider-specific instrumentation.
Examples
Single Provider
from composo import ComposoTracer, Instruments
from openai import OpenAI
# Initialize tracing for OpenAI
ComposoTracer.init(instruments=Instruments.OPENAI)
# All OpenAI calls are now automatically traced
client = OpenAI()
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "Hello"}]
)
Multiple Providers
from composo import ComposoTracer, Instruments
from openai import OpenAI
from anthropic import Anthropic
# Initialize tracing for multiple providers
ComposoTracer.init(instruments=[
Instruments.OPENAI,
Instruments.ANTHROPIC,
Instruments.GOOGLE_GENAI
])
# All providers are now traced
openai_client = OpenAI()
anthropic_client = Anthropic()
AgentTracer
Context manager for tracking agent interactions and organizing traces by agent.
Constructor
from composo import AgentTracer
with AgentTracer(name="my_agent", agent_id="agent-123") as tracer:
# Agent code here
pass
Parameters
name: Human-readable agent name. If not provided, generates a name like agent_abc123.
agent_id: Unique identifier for the agent. If not provided, generates a UUID.
Usage as Context Manager
from composo import AgentTracer, ComposoTracer, Instruments
from openai import OpenAI
# Initialize tracing
ComposoTracer.init(instruments=Instruments.OPENAI)
client = OpenAI()
# Track agent interactions
with AgentTracer(name="research_agent") as tracer:
# All LLM calls within this context are associated with this agent
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "Research quantum computing"}]
)
# Agent ID is available
print(f"Agent ID: {tracer.agent_id}")
Nested Agents
Track hierarchical agent systems with parent-child relationships:
from composo import AgentTracer
from openai import OpenAI
client = OpenAI()
with AgentTracer(name="orchestrator") as orchestrator:
# Parent agent
with AgentTracer(name="researcher") as researcher:
# Child agent
research = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "Research topic"}]
)
with AgentTracer(name="summarizer") as summarizer:
# Another child agent
summary = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "Summarize findings"}]
)
# Trace captures parent-child relationships
@agent_tracer Decorator
Decorator for automatically tracing agent functions.
from composo import agent_tracer
@agent_tracer(name="my_agent")
def my_agent_function(input_data):
# Function implementation
return result
Parameters
name: Agent name. If not provided, uses the function name.
Examples
Basic Usage
from composo import agent_tracer, ComposoTracer, Instruments
from openai import OpenAI
ComposoTracer.init(instruments=Instruments.OPENAI)
client = OpenAI()
@agent_tracer(name="helper_agent")
def process_query(query):
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": query}]
)
return response.choices[0].message.content
# Automatically traced
result = process_query("What is Python?")
Multi-Agent Workflow
from composo import agent_tracer, ComposoTracer, Instruments
from openai import OpenAI
ComposoTracer.init(instruments=Instruments.OPENAI)
client = OpenAI()
@agent_tracer(name="analyzer")
def analyze_data(data):
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": f"Analyze: {data}"}]
)
return response.choices[0].message.content
@agent_tracer(name="validator")
def validate_analysis(analysis):
response = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": f"Validate: {analysis}"}]
)
return response.choices[0].message.content
@agent_tracer(name="orchestrator")
def process_workflow(data):
# Nested agent calls are automatically tracked
analysis = analyze_data(data)
validation = validate_analysis(analysis)
return validation
# Entire workflow traced with agent hierarchy
result = process_workflow("my data")
Async Functions
import asyncio
from composo import agent_tracer, ComposoTracer, Instruments
from openai import AsyncOpenAI
ComposoTracer.init(instruments=Instruments.OPENAI)
client = AsyncOpenAI()
@agent_tracer(name="async_agent")
async def async_process(query):
response = await client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": query}]
)
return response.choices[0].message.content
# Async agent automatically traced
result = asyncio.run(async_process("What is async?"))
Complete Example: Multi-Agent System
from composo import (
Composo,
ComposoTracer,
Instruments,
agent_tracer
)
from openai import OpenAI
# Step 1: Initialize tracing
ComposoTracer.init(instruments=Instruments.OPENAI)
# Step 2: Create clients
openai_client = OpenAI()
composo_client = Composo()
# Step 3: Define agents
@agent_tracer(name="research_agent")
def research_agent(topic):
"""Research a given topic"""
response = openai_client.chat.completions.create(
model="gpt-4",
messages=[
{"role": "system", "content": "You are a research assistant."},
{"role": "user", "content": f"Research: {topic}"}
]
)
return response.choices[0].message.content
@agent_tracer(name="fact_checker")
def fact_check_agent(content):
"""Verify facts in content"""
response = openai_client.chat.completions.create(
model="gpt-4",
messages=[
{"role": "system", "content": "You are a fact checker."},
{"role": "user", "content": f"Verify these facts: {content}"}
]
)
return response.choices[0].message.content
@agent_tracer(name="summarizer")
def summarize_agent(content):
"""Summarize content"""
response = openai_client.chat.completions.create(
model="gpt-4",
messages=[
{"role": "system", "content": "You are a summarizer."},
{"role": "user", "content": f"Summarize: {content}"}
]
)
return response.choices[0].message.content
@agent_tracer(name="orchestrator")
def orchestrator(topic):
"""Orchestrate the multi-agent workflow"""
# Step 1: Research
research = research_agent(topic)
# Step 2: Fact check
verified = fact_check_agent(research)
# Step 3: Summarize
summary = summarize_agent(verified)
return summary
# Step 4: Run the workflow
result = orchestrator("Climate change impacts")
# Step 5: Evaluate the trace
# (Note: Trace evaluation requires exporting the trace data,
# which depends on your OpenTelemetry backend configuration)
print(f"Final result: {result}")
Instruments Enum
Available instrumentation providers:
Instruments.OPENAI: Automatically trace OpenAI API calls (chat, completions, embeddings, etc.)
Instruments.ANTHROPIC: Automatically trace Anthropic API calls (Claude models)
Instruments.GOOGLE_GENAI: Automatically trace Google Gemini API calls