LlamaIndex
The LlamaIndex provider transforms Devcaster tools into LlamaIndex's FunctionTool format with built-in execution.
Install
Python: pip install devcaster devcaster_llamaindex llama-index llama-index-llms-openai
TypeScript: npm install @devcaster/core @devcaster/llamaindex @llamaindex/openai @llamaindex/workflow
Configure API Keys
Set DEVCASTER_API_KEY with your API key from Settings and OPENAI_API_KEY with your OpenAI API key.
DEVCASTER_API_KEY=xxxxxxxxx
OPENAI_API_KEY=xxxxxxxxx
Create session and run
import asyncio

from devcaster import Devcaster
from devcaster_llamaindex import LlamaIndexProvider
from llama_index.core.agent.workflow import FunctionAgent
from llama_index.llms.openai import OpenAI

# Wire Devcaster up with the LlamaIndex provider so session.tools()
# returns tools in LlamaIndex's FunctionTool format.
client = Devcaster(provider=LlamaIndexProvider())
model = OpenAI(model="gpt-5.2")

# Each end user gets their own Devcaster session.
user_session = client.create(user_id="user_123")
email_agent = FunctionAgent(tools=user_session.tools(), llm=model)


async def main():
    """Ask the agent to send a demo email through Devcaster's tools."""
    answer = await email_agent.run(
        user_msg="Send an email to john@example.com with the subject 'Hello' and body 'Hello from Devcaster!'"
    )
    print(answer)
asyncio.run(main())
import { Devcaster } from '@devcaster/core';
import { LlamaindexProvider } from '@devcaster/llamaindex';
import { openai } from '@llamaindex/openai';
import { agent } from '@llamaindex/workflow';

// Devcaster client configured with the LlamaIndex provider, so session
// tools come back ready to hand to a LlamaIndex agent.
const client = new Devcaster({ provider: new LlamaindexProvider() });

// Each end user gets their own Devcaster session.
const userSession = await client.create("user_123");

const emailAgent = agent({
  llm: openai({ model: 'gpt-5.2' }),
  tools: await userSession.tools(),
});

const runResult = await emailAgent.run(
  "Send an email to john@example.com with the subject 'Hello' and body 'Hello from Devcaster!'"
);
console.log(runResult.data.result);