OpenAI
The OpenAI Provider is the default provider for the Devcaster SDK. It transforms Devcaster tools into a format compatible with OpenAI's function calling capabilities through both the Responses and Chat Completion APIs.
Looking for the OpenAI Agents SDK? See the OpenAI Agents SDK provider page.
Install
pip install devcaster devcaster_openai openai
npm install @devcaster/core @devcaster/openai openai
Configure API Keys
Set DEVCASTER_API_KEY with your API key from Settings and OPENAI_API_KEY with your OpenAI API key.
DEVCASTER_API_KEY=xxxxxxxxx
OPENAI_API_KEY=xxxxxxxxx
Create session and run
The Responses API is the recommended way to build agentic flows with OpenAI.
import json
from openai import OpenAI
from devcaster import Devcaster
from devcaster_openai import OpenAIResponsesProvider

# Wire the Devcaster SDK to the OpenAI Responses provider.
devcaster = Devcaster(provider=OpenAIResponsesProvider())
client = OpenAI()

# Create a session scoped to the end user; the session generates the tool
# definitions passed to the model.
session = devcaster.create(user_id="user_123")
tools = session.tools()

# Initial request: the model may answer directly or emit function calls.
response = client.responses.create(
    model="gpt-5.2",
    tools=tools,
    input=[
        {
            "role": "user",
            "content": "Send an email to john@example.com with the subject 'Hello' and body 'Hello from Devcaster!'"
        }
    ]
)

# Agentic loop — keep executing tool calls until the model responds with text
while True:
    tool_calls = [o for o in response.output if o.type == "function_call"]
    if not tool_calls:
        break
    # Devcaster executes every function call found in the model output.
    results = devcaster.provider.handle_tool_calls(response=response, user_id="user_123")
    # Pair each call with its result via zip: unlike enumerate + tool_calls[i],
    # this cannot raise IndexError if the provider ever returns a different
    # number of results than calls (extras are simply not paired).
    response = client.responses.create(
        model="gpt-5.2",
        tools=tools,
        previous_response_id=response.id,
        input=[
            {"type": "function_call_output", "call_id": call.call_id, "output": json.dumps(result)}
            for call, result in zip(tool_calls, results)
        ]
    )
# Print final response — emit the text of each "message" item in the output.
# Assumes the first content block of a message is the text block — TODO confirm
# against the Responses API output schema.
# NOTE(review): the last line below is fused with the start of the TypeScript
# example ("import OpenAI from 'openai';") — a docs-extraction artifact; the
# Python snippet should end after print(...).
for item in response.output:
if item.type == "message":
print(item.content[0].text)import OpenAI from 'openai';
import { Devcaster } from '@devcaster/core';
import { OpenAIResponsesProvider } from '@devcaster/openai';

// Wire the Devcaster SDK to the OpenAI Responses provider.
const devcaster = new Devcaster({ provider: new OpenAIResponsesProvider() });
const client = new OpenAI();

// Create a session for your user; its tools are handed to the model.
const session = await devcaster.create("user_123");
const tools = await session.tools();

let response = await client.responses.create({
  model: "gpt-5.2",
  tools,
  input: [
    {
      role: "user",
      content: "Send an email to john@example.com with the subject 'Hello' and body 'Hello from Devcaster!'"
    },
  ],
});

// Agentic loop — keep executing tool calls until the model responds with text
for (;;) {
  const pendingCalls = response.output.filter((o) => o.type === "function_call");
  if (pendingCalls.length === 0) {
    break;
  }

  // Devcaster executes every function call found in the model output.
  const toolResults = await devcaster.provider.handleToolCalls("user_123", response.output);

  // Feed each result back to the model, correlated to its call via call_id.
  const input = toolResults.map((toolResult, idx) => ({
    type: "function_call_output" as const,
    call_id: pendingCalls[idx].call_id,
    output: JSON.stringify(toolResult),
  }));

  response = await client.responses.create({
    model: "gpt-5.2",
    tools,
    previous_response_id: response.id,
    input,
  });
}

// Print final response
for (const outputItem of response.output) {
  if (outputItem.type !== "message") continue;
  const contentBlock = outputItem.content[0];
  if (contentBlock.type === "output_text") {
    console.log(contentBlock.text);
  }
}