Practical implementation examples for common use cases with the Freddy API.
# Minimal one-shot request: send a single user message and read the reply.
from aitronos import Aitronos

freddy = Aitronos(api_key="your-api-key")
result = freddy.responses.create_response(
    organization_id="org_your_org_id",
    model="gpt-4o",
    inputs=[{"role": "user", "content": "Explain quantum computing in simple terms."}],
)
print(result.response[0]["text"])

Use thread IDs to maintain conversation history across requests:
# Multi-turn conversation: reuse the thread ID so the model keeps context.
from aitronos import Aitronos

api = Aitronos(api_key="your-api-key")

# Start a thread
opening = api.responses.create_response(
    organization_id="org_your_org_id",
    model="gpt-4o",
    inputs=[{"role": "user", "content": "I'm building a Python web app."}],
)

# Continue the conversation on the thread returned by the first call
followup = api.responses.create_response(
    organization_id="org_your_org_id",
    model="gpt-4o",
    thread_id=opening.thread_id,
    inputs=[{"role": "user", "content": "What framework should I use?"}],
)
print(followup.response[0]["text"])

Assistants carry persistent instructions and model configuration:
# Call a pre-configured assistant: no model needed, the assistant supplies it.
from aitronos import Aitronos

sdk = Aitronos(api_key="your-api-key")
result = sdk.responses.create_response(
    organization_id="org_your_org_id",
    assistant_id="asst_your_assistant_id",
    inputs=[{"role": "user", "content": "What is your return policy?"}],
)
print(result.response[0]["text"])

For real-time output, set stream=True and iterate the SSE events:
import httpx
import json
import os
api_key = os.environ["FREDDY_API_KEY"]
with httpx.stream(
"POST",
"https://api.aitronos.com/v1/model/response",
headers={"X-API-Key": api_key, "Content-Type": "application/json"},
json={
"organization_id": "org_your_org_id",
"model": "gpt-4o",
"inputs": [{"role": "user", "content": "Tell me a short story."}],
"stream": True,
},
timeout=60,
) as response:
for line in response.iter_lines():
if line.startswith("data: "):
data = json.loads(line[6:])
if data.get("event") == "response.delta":
print(data.get("delta", ""), end="", flush=True)
elif data.get("event") == "response.completed":
                break

Enable web search to ground responses in real-time information:
# Ground the answer in live web results and ask for the sources back.
from aitronos import Aitronos

searcher = Aitronos(api_key="your-api-key")
result = searcher.responses.create_response(
    organization_id="org_your_org_id",
    model="gpt-4o",
    inputs=[{"role": "user", "content": "What happened in tech news today?"}],
    system_tools={"web_search": {"mode": "on"}},
    include=["web_search.sources"],
)
print(result.response[0]["text"])

Define custom tools the model can call:
from aitronos import Aitronos
client = Aitronos(api_key="your-api-key")
tools = [
{
"type": "function",
"name": "get_weather",
"description": "Get current weather for a city",
"parameters": {
"type": "object",
"properties": {
"city": {"type": "string", "description": "City name"},
},
"required": ["city"],
},
}
]
result = client.responses.create_response(
organization_id="org_your_org_id",
model="gpt-4o",
inputs=[{"role": "user", "content": "What's the weather in Berlin?"}],
tools=tools,
)
# Check if the model wants to call a function
if result.stop_reason == "tool_use":
for block in result.response:
if block.get("type") == "tool_use":
            print(f"Calling: {block['name']} with {block['input']}")

Upload a document and use it as context in a response:
# Upload a PDF, then reference its file ID as context for a response.
from aitronos import Aitronos

sdk = Aitronos(api_key="your-api-key")

# Upload a file
with open("report.pdf", "rb") as handle:
    uploaded = sdk.files.upload_file(
        organization_id="org_your_org_id",
        file=("report.pdf", handle, "application/pdf"),
    )

# Reference it in a response
result = sdk.responses.create_response(
    organization_id="org_your_org_id",
    model="gpt-4o",
    inputs=[{
        "role": "user",
        "content": "Summarize the key findings from this report.",
        "files": [{"file_id": uploaded.file_id}],
    }],
)
print(result.response[0]["text"])

Store documents for semantic retrieval:
# Build a vector store, attach an already-uploaded file, and query it.
from aitronos import Aitronos

rag = Aitronos(api_key="your-api-key")

# Create a vector store
store = rag.vector_stores.create_vector_store(
    organization_id="org_your_org_id",
    name="Product Documentation",
)

# Add a file (must be uploaded first)
rag.vector_store_files.add_file_to_vector_store(
    vector_store_id=store.id,
    file_id="file_abc123",
)

# Query via responses
result = rag.responses.create_response(
    organization_id="org_your_org_id",
    model="gpt-4o",
    vector_store_ids=[store.id],
    inputs=[{"role": "user", "content": "How do I reset my password?"}],
)
print(result.response[0]["text"])

Force the model to return a JSON object:
# Constrain the model to JSON output, then parse it with the stdlib.
# Fix: `import json` previously sat mid-script (after the API call); imports
# belong at the top per PEP 8.
import json

from aitronos import Aitronos

client = Aitronos(api_key="your-api-key")
result = client.responses.create_response(
    organization_id="org_your_org_id",
    model="gpt-4o",
    inputs=[{
        "role": "user",
        "content": "Extract: name, email, and company from this text: 'Alice Smith, alice@acme.com, Acme Corp'"
    }],
    output_mode="json",  # constrains the response text to a valid JSON object
)
data = json.loads(result.response[0]["text"])
print(data)  # {"name": "Alice Smith", "email": "alice@acme.com", "company": "Acme Corp"}

Retrieve token usage for your organization:
# Fetch the organization's aggregate token (synapse) usage.
from aitronos import Aitronos

analytics = Aitronos(api_key="your-api-key")
summary = analytics.analytics_usage.get_usage_summary(org_id="org_your_org_id")
print(f"Total synapse usage this month: {summary.total_synapses}")

For complete, real-world application walkthroughs see:
- Customer Support Bot
- E-commerce Assistant
- Timesheet Extraction
- Weather Report Workflow
- Agentic Tool Calling
- Best Practices — Recommended patterns
- Authentication Guide — Secure API access
- Error Handling — Comprehensive error management
- API Reference — Full endpoint documentation