🔨 In Development — This section is still being developed and may change.
Learn how to structure inputs and understand outputs when working with model responses.
Freddy accepts flexible input formats ranging from simple text strings to complex multi-modal arrays with roles, images, audio, and files. The API automatically handles conversation context, content type detection, and proper formatting.
Inputs are always provided as an array of structured items:
{
"model": "gpt-4.1",
"inputs": [
{
"role": "system",
"texts": [{ "text": "You are a helpful geography expert" }]
},
{
"role": "user",
"texts": [{ "text": "What is the capital of France?" }]
}
]
}

The system role carries high-priority instructions that guide the model's behavior throughout the conversation.
When to use:
- Set behavior guidelines
- Define expertise or persona
- Provide important constraints
- Give formatting instructions
Example:
{
"role": "system",
"texts": [{
"text": "You are a Python expert. Always include code examples. Be concise."
}]
}

The user role carries end-user messages, questions, and instructions.
When to use:
- User questions
- Task descriptions
- Follow-up messages
- Most conversation inputs
Example:
{
"role": "user",
"texts": [{
"text": "How do I sort a list in Python?"
}]
}

The assistant role carries previous model responses, used for conversation history or few-shot examples.
When to use:
- Conversation history
- Few-shot examples
- Guiding response style
Example:
{
"role": "assistant",
"texts": [{
"text": "You can use the sorted() function or .sort() method."
}]
}

A single message can contain multiple text blocks:
{
"role": "user",
"texts": [
{ "text": "First paragraph" },
{ "text": "Second paragraph" }
]
}

Images can be attached by file ID:
{
"role": "user",
"texts": [{ "text": "What's in this image?" }],
"images": [
{ "fileId": "file_abc123" }
]
}

By URL:
{
"role": "user",
"texts": [{ "text": "Describe this image" }],
"images": [
{ "url": "https://example.com/image.jpg" }
]
}

Audio attachments use the audio array:
{
"role": "user",
"texts": [{ "text": "Transcribe this audio" }],
"audio": [
{ "fileId": "file_audio123" }
]
}

Documents are attached via the files array:
{
"role": "user",
"texts": [{ "text": "Summarize this PDF" }],
"files": [
{ "fileId": "file_doc123" }
]
}

Combine multiple content types:
{
"role": "user",
"texts": [
{ "text": "Compare these two images" }
],
"images": [
{ "fileId": "file_img1" },
{ "fileId": "file_img2" }
]
}

A complete request in JavaScript:
const response = await fetch('/v1/model/response', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: 'gpt-4.1',
inputs: [
{
role: 'system',
texts: [{ text: 'You are a helpful coding assistant' }]
},
{
role: 'user',
texts: [{ text: 'Write a function to reverse a string' }]
}
]
})
});

Maintaining a multi-turn conversation in Python:
import requests
conversation = [
{'role': 'user', 'texts': [{'text': 'Hello'}]},
{'role': 'assistant', 'texts': [{'text': 'Hi! How can I help?'}]},
{'role': 'user', 'texts': [{'text': 'Tell me about Python'}]}
]
response = requests.post(
'https://api.freddy.aitronos.com/v1/model/response',
json={'model': 'gpt-4.1', 'inputs': conversation}
)

A few-shot classification example:
{
"model": "gpt-4.1",
"inputs": [
{
"role": "system",
"texts": [{ "text": "Classify sentiment as positive, negative, or neutral" }]
},
{
"role": "user",
"texts": [{ "text": "I love this product!" }]
},
{
"role": "assistant",
"texts": [{ "text": "Sentiment: positive" }]
},
{
"role": "user",
"texts": [{ "text": "This is terrible." }]
},
{
"role": "assistant",
"texts": [{ "text": "Sentiment: negative" }]
},
{
"role": "user",
"texts": [{ "text": "The package arrived today." }]
}
]
}
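
As an illustration, here is a short Python sketch that builds a few-shot input list like the one above from labeled examples; the build_sentiment_inputs helper and its variables are hypothetical, not part of the API:

# Hypothetical helper: labeled examples plus a new text become a few-shot input list.
def build_sentiment_inputs(examples, new_text):
    inputs = [{
        'role': 'system',
        'texts': [{'text': 'Classify sentiment as positive, negative, or neutral'}]
    }]
    for text, label in examples:
        inputs.append({'role': 'user', 'texts': [{'text': text}]})
        inputs.append({'role': 'assistant', 'texts': [{'text': f'Sentiment: {label}'}]})
    inputs.append({'role': 'user', 'texts': [{'text': new_text}]})
    return inputs

examples = [('I love this product!', 'positive'), ('This is terrible.', 'negative')]
payload = {'model': 'gpt-4.1', 'inputs': build_sentiment_inputs(examples, 'The package arrived today.')}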
"model": "gpt-4.1-vision",
"inputs": [
{
"role": "user",
"texts": [{ "text": "Describe this image in detail" }],
"images": [{ "fileId": "file_abc123" }]
}
]
}

Responses are returned as a structured object:
{
"id": "resp_67ccd2bed1ec8190",
"object": "response",
"created_at": 1741476542,
"status": "completed",
"model": "gpt-4.1",
"output": [
{
"type": "message",
"id": "msg_67ccd2bf17f0",
"role": "assistant",
"content": [
{
"type": "output_text",
"text": "The capital of France is Paris.",
"annotations": []
}
]
}
],
"usage": {
"input_tokens": 15,
"output_tokens": 8,
"total_tokens": 23
}
}
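
As a sketch in Python (reusing the response variable from the requests example above), the assistant's text and token usage can be read straight off this structure:

data = response.json()
# The assistant's reply is the output_text part of the first message item.
message = data['output'][0]
answer = message['content'][0]['text']
print(answer)                          # e.g. "The capital of France is Paris."
print(data['usage']['total_tokens'])   # e.g. 23 for the example above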
"output": [
{
"type": "tool_call",
"tool_name": "web_search",
"query": "latest AI news",
"result": "Recent developments include..."
},
{
"type": "message",
"role": "assistant",
"content": [
{
"type": "output_text",
"text": "Based on recent news, here's what's happening in AI..."
}
]
}
]
}
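
A sketch of walking a mixed output array like this one in Python, dispatching on each item's type; the field names follow the example above:

for item in data['output']:
    if item['type'] == 'tool_call':
        # Tool call items record which tool ran and what it returned.
        print(f"{item['tool_name']}: {item['result']}")
    elif item['type'] == 'message':
        for part in item['content']:
            if part['type'] == 'output_text':
                print(part['text'])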
Keep system instructions separate from the user message:
# ✅ Good - clear separation
inputs = [
{'role': 'system', 'texts': [{'text': 'Be concise'}]},
{'role': 'user', 'texts': [{'text': 'Explain AI'}]}
]
# ❌ Bad - mixing in user message
inputs = [{
'role': 'user',
'texts': [{'text': 'Be concise. Explain AI'}]
}]

Send the full conversation history with each request:
// ✅ Good - preserves context
const messages = [...previousMessages, newMessage];
const response = await sendMessage(messages);
// ❌ Bad - loses context
const response = await sendMessage(newMessage);
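
Putting it together, a minimal Python sketch of one full turn that preserves context; it reuses the endpoint and response shape shown above and assumes no extra headers are required (your deployment may need authentication):

import requests

conversation = [{'role': 'user', 'texts': [{'text': 'Tell me about Python'}]}]
resp = requests.post(
    'https://api.freddy.aitronos.com/v1/model/response',
    json={'model': 'gpt-4.1', 'inputs': conversation}
)
data = resp.json()
# Pull the assistant's text out of the first message item (see the response format above).
assistant_text = data['output'][0]['content'][0]['text']
# Append the reply, then the next user message, so context carries into the next request.
conversation.append({'role': 'assistant', 'texts': [{'text': assistant_text}]})
conversation.append({'role': 'user', 'texts': [{'text': 'Show me a short example'}]})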