# Agno

Agno is an open-source framework designed for building, deploying, and orchestrating multi-agent systems with high performance. This guide covers tracing Agno applications through InteractiveAI for complete visibility into agent execution, tool usage, and model interactions.

### Prerequisites

* InteractiveAI account with API credentials
* LLM provider credentials (OpenAI, Ollama, or other supported provider)

***

### Installation

Install the required packages:

```bash
pip install agno openai interactiveai openlit yfinance ddgs
```

***

### Configuration

Set your API credentials as environment variables and initialize the InteractiveAI client to verify connectivity:

```bash
INTERACTIVEAI_PUBLIC_KEY=pk-...
INTERACTIVEAI_SECRET_KEY=sk-...
```

```python
from interactiveai import Interactive
import os
from dotenv import load_dotenv

load_dotenv()

interactiveai = Interactive(
    secret_key=os.getenv("INTERACTIVEAI_SECRET_KEY"),
    public_key=os.getenv("INTERACTIVEAI_PUBLIC_KEY")
)

interactiveai.auth_check()
print("Connection established")
```

If authentication fails, an exception is raised.

***

### Instrumenting Agno with OpenLIT

OpenLIT provides automatic instrumentation for Agno agents. Initialize OpenLIT with the InteractiveAI tracer to capture all agent activity:

```python
import os
import openlit
from interactiveai import Interactive
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.yfinance import YFinanceTools
from dotenv import load_dotenv

load_dotenv()

interactiveai = Interactive(
    secret_key=os.getenv("INTERACTIVEAI_SECRET_KEY"),
    public_key=os.getenv("INTERACTIVEAI_PUBLIC_KEY")
)

openlit.init(tracer=interactiveai._otel_tracer, disable_batch=True)

query = "Summarize analyst recommendations for NVIDIA."

with interactiveai.start_as_current_span(
    name="agno-yfinance-agent",
    input={"query": query},
) as span:
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[YFinanceTools()],
        markdown=True,
        debug_mode=True,
    )

    result = agent.run(query)
    answer = getattr(result, "content", None) or str(result)

    # Optional: put a clean final output on the root span
    span.update(output={"answer": answer, "status": "completed"})

interactiveai.flush()

```

After execution, traces appear in the InteractiveAI dashboard showing the complete agent workflow: model calls, tool invocations, and response generation.

{% hint style="info" %}
Optional: `span.update(output=...)` copies the final answer onto the root span so the trace shows a clear “Input → Final Output” summary at the top level. If you remove it, the output will still exist in child spans, but the root span may only show metadata.
{% endhint %}

***

### Adding Context to Traces

Enrich traces with user identifiers, session data, and custom metadata using the SDK alongside OpenLIT instrumentation:

<pre class="language-python"><code class="lang-python">import os
import openlit
from interactiveai import Interactive
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.yfinance import YFinanceTools
from dotenv import load_dotenv

load_dotenv()

interactiveai = Interactive(
    secret_key=os.getenv("INTERACTIVEAI_SECRET_KEY"),
    public_key=os.getenv("INTERACTIVEAI_PUBLIC_KEY")
)

openlit.init(tracer=interactiveai._otel_tracer, disable_batch=True)

query = "Summarize analyst recommendations for NVIDIA"

with interactiveai.start_as_current_span(
    name="financial-analysis-agent",
    input={"query": query},
) as span:
<strong>    interactiveai.update_current_trace(
</strong><strong>        user_id="user_agno",
</strong><strong>        session_id="session_agno",
</strong><strong>        tags=["agno", "financial-analysis"],
</strong><strong>        metadata={"department": "research", "priority": "high"},
</strong><strong>    )
</strong>
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[YFinanceTools()],
        instructions=["Present key points."],
        markdown=True,
    )

    result = agent.run(query)

    answer = getattr(result, "content", None) or str(result)
    span.update(output={"answer": answer, "status": "completed"})

interactiveai.flush()
</code></pre>

This approach combines automatic OpenLIT instrumentation with manual trace enrichment, providing both detailed agent telemetry and business context.

***

### Multi-Agent Workflows

For systems with multiple coordinating agents, each agent's activity is captured within the trace hierarchy:

```python
import os
import openlit
from interactiveai import Interactive
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.duckduckgo import DuckDuckGoTools
from agno.tools.yfinance import YFinanceTools
from dotenv import load_dotenv

load_dotenv()

interactiveai = Interactive(
    secret_key=os.getenv("INTERACTIVEAI_SECRET_KEY"),
    public_key=os.getenv("INTERACTIVEAI_PUBLIC_KEY")
)

openlit.init(tracer=interactiveai._otel_tracer, disable_batch=True)

research_query = "Recent news about Tesla"
analysis_query = "Tesla stock performance this quarter"

with interactiveai.start_as_current_span(
    name="multi-agent-research",
    input={"research_query": research_query, "analysis_query": analysis_query},
) as span:
    interactiveai.update_current_trace(
        metadata={
            "agents_used": ["Research Agent", "Analysis Agent"],
            "workflow": "parallel-research",
        }
    )

    research_agent = Agent(
        name="Research Agent",
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[DuckDuckGoTools()],
        instructions=["Gather current information on the topic."],
        markdown=True,
    )

    analysis_agent = Agent(
        name="Analysis Agent",
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[YFinanceTools()],
        instructions=["Provide quantitative analysis."],
        markdown=True,
    )

    research_result = research_agent.run(research_query)
    analysis_result = analysis_agent.run(analysis_query)

    research_answer = getattr(research_result, "content", None) or str(research_result)
    analysis_answer = getattr(analysis_result, "content", None) or str(analysis_result)

    # Put the final summaries on the root span
    span.update(
        output={
            "research_summary": research_answer,
            "analysis_summary": analysis_answer,
        }
    )

interactiveai.flush()

```

The resulting trace displays both agents' operations nested under the parent span, enabling analysis of coordination patterns and individual agent performance.
