Complete, ready-to-run examples demonstrating agentic workflows for common use cases.
Get weather information for multiple cities with automatic multi-step execution.
import os

import requests

# Credentials: the API key comes from the environment — never hard-code it.
API_KEY = os.environ['FREDDY_API_KEY']
ORG_ID = "your_org_id"

# Tool (function) schemas the model may invoke during the agentic loop.
# JSON Schema "object" definitions describe each tool's arguments.
tools = [
    {
        "type": "function",
        "name": "get_cities",
        "description": "Get a list of 5 major cities for a specific geographic region",
        "parameters": {
            "type": "object",
            "properties": {
                "region": {
                    "type": "string",
                    "enum": ["asia", "europe", "americas", "africa", "oceania"],
                    "description": "Geographic region to get cities from"
                }
            },
            "required": ["region"]
        }
    },
    {
        "type": "function",
        "name": "get_temperature",
        "description": "Get current temperature in Celsius for a specific city",
        "parameters": {
            "type": "object",
            "properties": {
                "city": {
                    "type": "string",
                    "description": "City name (e.g., 'Paris', 'Tokyo')"
                }
            },
            "required": ["city"]
        }
    }
]

# Single API call - backend handles everything!
# The server runs the multi-step tool loop; maxToolCalls bounds iterations.
response = requests.post(
    "https://api.freddy.aitronos.com/v1/model/response",
    headers={
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json"
    },
    json={
        "organizationId": ORG_ID,
        "model": "gpt-5",
        "inputs": [
            {
                "role": "user",
                "texts": [{
                    "text": "Get a comprehensive weather report for major European cities"
                }]
            }
        ],
        "functions": tools,
        "parallelToolCalls": False,  # sequential: each call may depend on the last
        "maxToolCalls": 10
    }
)

result = response.json()
print("=" * 80)
print("WEATHER REPORT")
print("=" * 80)
print(result['response'])
print("\n" + "=" * 80)
print(f"Iterations: {result['metadata']['iterations']}")
print(f"Tool Calls: {result['metadata']['tool_calls']}")
print(f"Execution Time: {result['metadata']['execution_time_ms']}ms")

const fetch = require('node-fetch');
// Node.js version of the agentic weather-report example.
const API_KEY = process.env.FREDDY_API_KEY;
const ORG_ID = 'your_org_id';

// Define tools — JSON Schema descriptions of each callable function.
const tools = [
  {
    type: 'function',
    name: 'get_cities',
    description: 'Get a list of 5 major cities for a specific geographic region',
    parameters: {
      type: 'object',
      properties: {
        region: {
          type: 'string',
          enum: ['asia', 'europe', 'americas', 'africa', 'oceania'],
          description: 'Geographic region to get cities from'
        }
      },
      required: ['region']
    }
  },
  {
    type: 'function',
    name: 'get_temperature',
    description: 'Get current temperature in Celsius for a specific city',
    parameters: {
      type: 'object',
      properties: {
        city: {
          type: 'string',
          description: 'City name (e.g., "Paris", "Tokyo")'
        }
      },
      required: ['city']
    }
  }
];

// Single API call — the backend drives the whole multi-step tool loop.
async function getWeatherReport() {
  const response = await fetch('https://api.freddy.aitronos.com/v1/model/response', {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${API_KEY}`,
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({
      organizationId: ORG_ID,
      model: 'gpt-5',
      inputs: [
        {
          role: 'user',
          texts: [{
            text: 'Get a comprehensive weather report for major European cities'
          }]
        }
      ],
      functions: tools,
      parallelToolCalls: false,
      maxToolCalls: 10
    })
  });

  const result = await response.json();
  console.log('='.repeat(80));
  console.log('WEATHER REPORT');
  console.log('='.repeat(80));
  console.log(result.response);
  console.log('\n' + '='.repeat(80));
  console.log(`Iterations: ${result.metadata.iterations}`);
  console.log(`Tool Calls: ${result.metadata.tool_calls}`);
  console.log(`Execution Time: ${result.metadata.execution_time_ms}ms`);
}
getWeatherReport();

Compare weather across different regions using parallel execution (Claude).
import os

import requests

# Claude models can issue independent tool calls in parallel, so the
# per-region and per-city lookups are batched by the backend.
response = requests.post(
    "https://api.freddy.aitronos.com/v1/model/response",
    headers={"Authorization": f"Bearer {os.environ['FREDDY_API_KEY']}"},
    json={
        "organizationId": "your_org_id",
        "model": "claude-sonnet-4-20250514",  # Parallel execution
        "inputs": [
            {
                "role": "user",
                "texts": [{
                    "text": "Compare the weather in Asia, Europe, and Americas. Get the temperature for all major cities and create a regional comparison report."
                }]
            }
        ],
        "functions": [
            {
                "type": "function",
                "name": "get_cities",
                "description": "Get 5 cities for a region",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "region": {
                            "type": "string",
                            "enum": ["asia", "europe", "americas"]
                        }
                    },
                    "required": ["region"]
                }
            },
            {
                "type": "function",
                "name": "get_temperature",
                "description": "Get temperature for a city",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "city": {"type": "string"}
                    },
                    "required": ["city"]
                }
            }
        ],
        "parallelToolCalls": True,  # Claude can handle parallel calls!
        "maxToolCalls": 20
    }
)

result = response.json()
print(result['response'])

# Claude efficiently handles:
# 1. Call get_cities for all 3 regions (parallel)
# 2. Call get_temperature for all 15 cities (parallel batches)
# 3. Generate comprehensive comparison

Combine web search with data processing.
import os

import requests

# Built-in webSearch tool: executed server-side, so no local function
# implementations are required.
response = requests.post(
    "https://api.freddy.aitronos.com/v1/model/response",
    headers={"Authorization": f"Bearer {os.environ['FREDDY_API_KEY']}"},
    json={
        "organizationId": "your_org_id",
        "model": "gpt-4o",
        "inputs": [
            {
                "role": "user",
                "texts": [{
                    "text": "Research the latest AI developments in 2025 and create a summary with key trends"
                }]
            }
        ],
        "tools": [
            {
                "type": "webSearch",  # Built-in tool!
                "searchContextSize": "high"
            }
        ],
        "maxToolCalls": 5
    }
)

result = response.json()
print(result['response'])

# Backend automatically:
# 1. Performs web search for "AI developments 2025"
# 2. Analyzes search results
# 3. May perform additional targeted searches
# 4. Synthesizes findings into summary

Analyze data and create visualizations.
import os

import requests

# Code-interpreter example: the backend runs Python against the uploaded
# CSV and returns generated artifacts (charts) in the response outputs.
response = requests.post(
    "https://api.freddy.aitronos.com/v1/model/response",
    headers={"Authorization": f"Bearer {os.environ['FREDDY_API_KEY']}"},
    json={
        "organizationId": "your_org_id",
        "model": "gpt-4o",
        "inputs": [
            {
                "role": "user",
                "texts": [{
                    "text": "Analyze the sales data in sales.csv and create a chart showing monthly revenue trends"
                }],
                "files": [{"fileId": "file_sales_csv_123"}]
            }
        ],
        "tools": [
            {
                "type": "codeInterpreter",
                "container": {
                    "type": "auto",
                    "fileIds": ["file_sales_csv_123"]
                }
            }
        ],
        "include": ["code_interpreter.outputs"]  # Get generated files
    }
)

result = response.json()
print(result['response'])

# Access generated chart
for output in result.get('outputs', []):
    if output['type'] == 'image':
        print(f"Chart URL: {output['imageUrl']}")

# Backend automatically:
# 1. Loads sales.csv into Python environment
# 2. Analyzes data with pandas
# 3. Creates visualization with matplotlib
# 4. Generates insights report

Execute different actions based on conditions.
import os

import requests


def check_and_reorder():
    """Check inventory and reorder if stock is low."""
    response = requests.post(
        "https://api.freddy.aitronos.com/v1/model/response",
        headers={"Authorization": f"Bearer {os.environ['FREDDY_API_KEY']}"},
        json={
            "organizationId": "your_org_id",
            "model": "gpt-5",
            "inputs": [
                {
                    "role": "user",
                    "texts": [{
                        "text": "Check inventory for product WIDGET-100. If stock is below 10 units, automatically place a reorder for 50 units."
                    }]
                }
            ],
            "functions": [
                {
                    "type": "function",
                    "name": "check_inventory",
                    "description": "Check current stock level for a product",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "product_id": {"type": "string"}
                        },
                        "required": ["product_id"]
                    }
                },
                {
                    "type": "function",
                    "name": "place_reorder",
                    "description": "Place a reorder for a product",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "product_id": {"type": "string"},
                            "quantity": {"type": "integer"}
                        },
                        "required": ["product_id", "quantity"]
                    }
                }
            ],
            # Sequential: the reorder decision depends on the inventory result.
            "parallelToolCalls": False
        }
    )
    result = response.json()
    print(result['response'])
    # Model automatically:
    # 1. Calls check_inventory("WIDGET-100")
    # 2. Sees stock is 5 (below threshold)
    # 3. Calls place_reorder("WIDGET-100", 50)
    # 4. Confirms action taken
check_and_reorder()

Continue a workflow across multiple requests using threads.
import os

import requests

API_KEY = os.environ['FREDDY_API_KEY']
ORG_ID = "your_org_id"
THREAD_ID = "thread_persistent_123"

# NOTE: the {...} placeholders below elide the full JSON schemas; see the
# first example for the complete get_cities / get_temperature definitions.

# First request
response1 = requests.post(
    "https://api.freddy.aitronos.com/v1/model/response",
    headers={"Authorization": f"Bearer {API_KEY}"},
    json={
        "organizationId": ORG_ID,
        "model": "gpt-5",
        "threadId": THREAD_ID,  # Create thread
        "inputs": [
            {
                "role": "user",
                "texts": [{"text": "Get weather for European cities"}]
            }
        ],
        "functions": [
            {"type": "function", "name": "get_cities", "parameters": {...}},
            {"type": "function", "name": "get_temperature", "parameters": {...}}
        ]
    }
)
print("First Response:")
print(response1.json()['response'])

# Follow-up request - uses same thread!
response2 = requests.post(
    "https://api.freddy.aitronos.com/v1/model/response",
    headers={"Authorization": f"Bearer {API_KEY}"},
    json={
        "organizationId": ORG_ID,
        "model": "gpt-5",
        "threadId": THREAD_ID,  # Same thread
        "inputs": [
            {
                "role": "user",
                "texts": [{"text": "Now compare those with Asian cities"}]
            }
        ],
        "functions": [
            {"type": "function", "name": "get_cities", "parameters": {...}},
            {"type": "function", "name": "get_temperature", "parameters": {...}}
        ]
    }
)
print("\nFollow-up Response:")
print(response2.json()['response'])

# Model remembers:
# - Previous conversation
# - European cities and temperatures
# - Can make comparisons

Gracefully handle tool failures.
import os

import requests

# Fallback pattern: expose both a primary and a backup tool and let the
# model route around tool failures on its own.
response = requests.post(
    "https://api.freddy.aitronos.com/v1/model/response",
    headers={"Authorization": f"Bearer {os.environ['FREDDY_API_KEY']}"},
    json={
        "organizationId": "your_org_id",
        "model": "gpt-5",
        "inputs": [
            {
                "role": "user",
                "texts": [{"text": "Get weather data from our weather service"}]
            }
        ],
        "functions": [
            {
                "type": "function",
                "name": "get_weather_primary",
                "description": "Primary weather service (may be unavailable)",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "city": {"type": "string"}
                    },
                    "required": ["city"]
                }
            },
            {
                "type": "function",
                "name": "get_weather_backup",
                "description": "Backup weather service (always available)",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "city": {"type": "string"}
                    },
                    "required": ["city"]
                }
            }
        ]
    }
)

result = response.json()
print(result['response'])

# Model automatically:
# 1. Tries get_weather_primary()
# 2. Receives error response
# 3. Switches to get_weather_backup()
# 4. Returns results successfully
#
# No manual error handling needed!

See progress as the workflow executes (advanced).
import json
import os

import requests

# Stream server-sent events so tool-call progress is visible in real time.
# NOTE: the {...} placeholders elide the full parameter schemas.
response = requests.post(
    "https://api.freddy.aitronos.com/v1/model/response",
    headers={"Authorization": f"Bearer {os.environ['FREDDY_API_KEY']}"},
    json={
        "organizationId": "your_org_id",
        "model": "gpt-5",
        "stream": True,  # Enable streaming
        "inputs": [
            {
                "role": "user",
                "texts": [{"text": "Get weather for European cities"}]
            }
        ],
        "functions": [
            {"type": "function", "name": "get_cities", "parameters": {...}},
            {"type": "function", "name": "get_temperature", "parameters": {...}}
        ]
    },
    stream=True  # tell requests not to buffer the whole response body
)

print("Streaming workflow progress:\n")
for line in response.iter_lines():
    if line:
        line = line.decode('utf-8')
        if line.startswith('data: '):
            # Strip the "data: " SSE prefix before parsing the JSON event.
            data = json.loads(line[6:])
            # Tool call event
            if data.get('type') == 'tool_call.started':
                print(f"🔧 Calling {data['tool']['name']}...")
            # Tool result event
            elif data.get('type') == 'tool_call.completed':
                print(f"✅ {data['tool']['name']} completed")
            # Text delta
            elif data.get('type') == 'response.text.delta':
                print(data['delta'], end='', flush=True)
            # Final completion
            elif data.get('type') == 'response.completed':
                print("\n\n✅ Workflow completed!")
"maxToolCalls": 15, # Prevent runaway execution
"parallelToolCalls": False # Sequential for dependent tasks
}

{
"name": "get_customer_orders",
"description": "Retrieve all orders for a specific customer. Returns order ID, date, total, and status for each order. Use this when you need to check order history or find specific purchases."
}

{
"tools": [
{"type": "webSearch"}, # Built-in: Fast server-side
{
"type": "function", # Custom: Your business logic
"name": "save_to_crm",
"parameters": {...}
}
]
}

{
"include": [
"function_calls.logs",
"request.logs",
"usage.detailed"
]
}

{
"threadId": "thread_user_session_123", # Maintain context
"threadContextMode": "smart" # Intelligent history management
}

# Get basic data then enrich with additional calls
{
"inputs": [{
"role": "user",
"texts": [{"text": "Get customer profile and enrich with order history and preferences"}]
}],
"functions": [
"get_customer_profile", # Returns basic info
"get_order_history", # Called with customer_id from profile
"get_preferences" # Called with customer_id from profile
]
}

# Validate before taking action
{
"inputs": [{
"role": "user",
"texts": [{"text": "Process refund for order #12345 if it's eligible"}]
}],
"functions": [
"check_refund_eligibility", # Validation
"process_refund" # Action (only if eligible)
]
}

# Search multiple sources and synthesize
{
"tools": [
{"type": "webSearch"},
{"type": "fileSearch", "vectorStoreIds": ["vs_docs"]}
],
"inputs": [{
"role": "user",
"texts": [{"text": "Find information about X from web and our docs, then summarize"}]
}]
}

Symptoms: Model responds without calling tools
Solutions:
{
"toolChoice": "required", # Force tool usage
# Or be more specific in the prompt:
"text": "Use the get_weather tool to check the temperature in Paris"
}

Symptoms: Workflow takes too long, excessive iterations
Solutions:
{
"maxToolCalls": 5, # Lower limit
# Or improve tool descriptions to be more specific
}

Symptoms: Response doesn't include expected information
Solutions:
{
"parallelToolCalls": False, # Ensure sequential processing
# Or add more explicit instructions:
"text": "Make sure to check temperature for ALL cities before responding"
}

- Function Calling Guide - Comprehensive reference
- Agentic Workflows Guide - Deep dive
- API Reference - Complete API documentation
- System Tools - Built-in tool capabilities