Python SDK
Python client library for building AI agents with Mix. Supports both synchronous and asynchronous operations.
Installation
uv add mix-python-sdk
For additional recipes and examples, see the Mix Python cookbook.
Quickstart
Simple Streaming (Recommended)
import asyncio
import os
from dotenv import load_dotenv
from mix_python_sdk import Mix
from mix_python_sdk.helpers import StreamingSession
async def main():
load_dotenv()
async with Mix(server_url=os.getenv("MIX_SERVER_URL")) as mix:
# Store API key
mix.authentication.store_api_key(
api_key=os.getenv("OPENROUTER_API_KEY"),
provider="openrouter"
)
# Auto-managed session with streaming
async with StreamingSession(mix, title="Demo") as session:
await session.send(
"What's the capital of France?",
on_content=lambda text: print(text, end="", flush=True),
on_complete=lambda: print("\n")
)
if __name__ == "__main__":
asyncio.run(main())
Direct REST API
from mix_python_sdk import Mix
import os
with Mix(server_url=os.getenv("MIX_SERVER_URL")) as mix:
mix.authentication.store_api_key(
api_key=os.getenv("OPENROUTER_API_KEY"),
provider="openrouter"
)
session = mix.sessions.create(title="Demo")
response = mix.messages.send(id=session.id, text="Hello")
print(response.assistant_response)
Functions
query()
Async iterator for streaming message interactions.
async def query(
mix: Mix,
session_id: str,
message: str,
) -> AsyncIterator[StreamEvent]
Parameters:
Parameter | Type | Description |
---|---|---|
mix | Mix | Mix client instance |
session_id | str | Session identifier |
message | str | User message |
Returns: AsyncIterator[StreamEvent]
- Stream of events (thinking, content, tool, complete)
Example:
async for event in query(mix, session.id, "What's 2+2?"):
if event.type == "content":
print(event.content, end="", flush=True)
elif event.type == "complete":
print("\nDone!")
send_with_callbacks()
Callback-based streaming for ergonomic event handling.
async def send_with_callbacks(
mix: Mix,
session_id: str,
message: str,
*,
on_thinking: Optional[Callable[[str], None]] = None,
on_content: Optional[Callable[[str], None]] = None,
on_tool: Optional[Callable[[Any], None]] = None,
on_tool_execution_start: Optional[Callable[[Any], None]] = None,
on_tool_execution_complete: Optional[Callable[[Any], None]] = None,
on_error: Optional[Callable[[str], None]] = None,
on_permission: Optional[Callable[[Any], None]] = None,
on_complete: Optional[Callable[[], None]] = None,
) -> None
Parameters:
Parameter | Type | Default | Description |
---|---|---|---|
mix | Mix | Required | Mix client instance |
session_id | str | Required | Session identifier |
message | str | Required | User message |
on_thinking | Callable[[str], None] | None | Callback for reasoning content |
on_content | Callable[[str], None] | None | Callback for response content |
on_tool | Callable[[Any], None] | None | Callback for tool execution events |
on_tool_execution_start | Callable[[Any], None] | None | Callback when tool execution starts |
on_tool_execution_complete | Callable[[Any], None] | None | Callback when tool execution completes |
on_error | Callable[[str], None] | None | Callback for errors |
on_permission | Callable[[Any], None] | None | Callback for permission requests |
on_complete | Callable[[], None] | None | Callback when stream completes |
Example:
await send_with_callbacks(
mix,
session_id=session.id,
message="Hello!",
on_content=lambda text: print(text, end="", flush=True),
on_tool=lambda tool: print(f"\nTool: {tool.name}"),
on_complete=lambda: print("\nDone!")
)
Classes
Mix
Main SDK client for interacting with Mix REST API.
Constructor:
Mix(
server_idx: Optional[int] = None,
server_url: Optional[str] = None,
url_params: Optional[Dict[str, str]] = None,
client: Optional[HttpClient] = None,
async_client: Optional[AsyncHttpClient] = None,
retry_config: OptionalNullable[RetryConfig] = None,
timeout_ms: Optional[int] = None,
debug_logger: Optional[Logger] = None
)
Parameters:
Parameter | Type | Default | Description |
---|---|---|---|
server_idx | int | None | Server index for all methods |
server_url | str | None | Server URL override (e.g., "http://localhost:8088") |
url_params | Dict[str, str] | None | URL template parameters |
client | HttpClient | None | Custom HTTP client for sync methods |
async_client | AsyncHttpClient | None | Custom HTTP client for async methods |
retry_config | RetryConfig | None | Retry configuration |
timeout_ms | int | None | Request timeout in milliseconds |
debug_logger | Logger | None | Debug logger instance |
Context Manager Support:
# Synchronous
with Mix(server_url="http://localhost:8088") as mix:
# Operations here
pass
# Asynchronous
async with Mix(server_url="http://localhost:8088") as mix:
# Operations here
pass
Sub-SDKs:
Property | Description |
---|---|
authentication | Authentication operations (API keys, OAuth) |
sessions | Session management (create, delete, fork, rewind) |
messages | Message operations (send, history, list) |
files | File management (upload, download, list, delete) |
streaming | SSE streaming for real-time updates |
permissions | Permission management (grant, deny) |
preferences | User preferences (models, providers, tokens) |
system | System info and health checks |
tools | Tool status and authentication |
StreamingSession
Context manager for streaming sessions with automatic lifecycle management.
Constructor:
StreamingSession(
mix: Mix,
title: str,
custom_system_prompt: Optional[str] = None
)
Parameters:
Parameter | Type | Default | Description |
---|---|---|---|
mix | Mix | Required | Mix client instance |
title | str | Required | Session title |
custom_system_prompt | str | None | Custom system prompt |
Methods:
# Query pattern (iterator)
async for event in session.query(message="Hello"):
if event.type == "content":
print(event.content)
# Callback pattern
await session.send(
message="Hello",
on_content=lambda text: print(text),
on_complete=lambda: print("Done")
)
Properties:
Property | Type | Description |
---|---|---|
id | str | Session identifier |
session | SessionData | Session data object |
Example:
async with StreamingSession(mix, title="Demo") as session:
print(f"Session ID: {session.id}")
await session.send("Hello!", on_content=lambda t: print(t))
StreamEvent
Helper class for stream event handling.
Properties:
Property | Type | Description |
---|---|---|
type | str | Event type (thinking, content, tool, complete, error) |
data | Any | Event data |
content | str | Content text (for content events) |
thinking | str | Thinking text (for thinking events) |
tool_name | str | Tool name (for tool events) |
Types
Configuration Types
Provider
class Provider(str, Enum):
ANTHROPIC = "anthropic"
OPENAI = "openai"
OPENROUTER = "openrouter"
GEMINI = "gemini"
BRAVE = "brave"
PromptMode
class PromptMode(str, Enum):
DEFAULT = "default" # Use base system prompt only
APPEND = "append" # Append customSystemPrompt to base (50KB limit)
REPLACE = "replace" # Replace base prompt with custom (100KB limit)
RetryConfig
class RetryConfig(BaseModel):
strategy: str # Retry strategy ("backoff")
backoff: BackoffStrategy # Backoff configuration
retry_connection_errors: bool # Whether to retry connection errors
Message and Content Types
SessionData
class SessionData(BaseModel):
id: str # Unique session identifier
title: str # Session title
created_at: date # Creation timestamp
assistant_message_count: int # Number of assistant messages
user_message_count: int # Number of user messages
tool_call_count: int # Number of tool calls
prompt_tokens: int # Total prompt tokens
completion_tokens: int # Total completion tokens
cost: float # Total cost
first_user_message: Optional[str] # First message (optional)
BackendMessage
class BackendMessage(BaseModel):
id: str # Unique message ID
session_id: str # Session ID
role: str # Role: user/assistant/tool
user_input: str # User's input
assistant_response: Optional[str] # Assistant's response
reasoning: Optional[str] # Reasoning process
reasoning_duration: Optional[int] # Reasoning duration (ms)
tool_calls: List[ToolCallData] # Tool calls made
ToolCallData
class ToolCallData(BaseModel):
id: str # Unique tool call ID
name: str # Tool name
input: str # Input parameters
result: Optional[str] # Execution result
finished: bool # Whether complete
is_error: Optional[bool] # Whether errored
type: str # Tool type
FileInfo
class FileInfo(BaseModel):
name: str # File name
size: int # Size in bytes
modified: int # Unix timestamp
is_dir: bool # Is directory
url: str # Static access URL
Server-Sent Events (SSE) Types
SSEEventStream
Base structure for all SSE events:
{
"event": EventTypeEnum, # Event type identifier
"id": str, # Sequential event ID
"retry": Optional[int], # Client retry interval (ms)
"data": EventData # Event-specific data
}
Event Types:
Event | Description | Data Fields |
---|---|---|
SSEConnectedEvent | Initial connection established | - |
SSEContentEvent | Streaming content delta | content: str, type: str |
SSEThinkingEvent | Thinking/reasoning content | content: str |
SSEToolEvent | Tool usage notification | name: str, metadata |
SSEToolExecutionStartEvent | Tool execution started | Tool metadata |
SSEToolExecutionCompleteEvent | Tool execution completed | Tool result |
SSECompleteEvent | Message processing complete | done: bool, content: str, message_id: str, reasoning: str, reasoning_duration: int |
SSEErrorEvent | Error occurred | error: str |
SSEPermissionEvent | Permission request | Permission details |
SSEHeartbeatEvent | Connection keepalive | - |
Error Types
MixError
Base class for all HTTP error responses.
class MixError(Exception):
message: str # Error message
status_code: int # HTTP status code
headers: httpx.Headers # Response headers
body: str # Response body
raw_response: httpx.Response # Raw HTTP response
data: Any # Optional structured error data
ErrorResponse
class ErrorResponse(BaseModel):
error: RESTError
code: int # HTTP status code
message: str # Error message
type: Type # Error type
Network Errors
From httpx library:
- httpx.RequestError — Base request error
- httpx.ConnectError — Connection failed
- httpx.TimeoutException — Request timeout
ResponseValidationError
Type mismatch between response and Pydantic model.
Tool Input/Output Types
bash
Execute bash commands on the system.
Parameters:
Parameter | Type | Default | Description |
---|---|---|---|
command | str | Required | The bash command to execute |
timeout | int | 60000 | Timeout in milliseconds (max: 600000) |
Response Metadata:
class BashResponseMetadata(BaseModel):
start_time: int # Unix timestamp (ms) when command started
end_time: int # Unix timestamp (ms) when command completed
ReadText
Read text files from filesystem or URLs.
Parameters:
Parameter | Type | Default | Description |
---|---|---|---|
file_path | str | Required | Absolute path or URL (http/https) to read |
offset | int | None | Line number to start reading (0-based) |
limit | int | 2000 | Number of lines to read |
Response Metadata:
class ReadTextResponseMetadata(BaseModel):
file_path: str # The path/URL that was read
content: str # The raw content without line numbers
glob
Fast file pattern matching using glob patterns.
Parameters:
Parameter | Type | Default | Description |
---|---|---|---|
pattern | str | Required | The glob pattern to match files (e.g., "**/*.js") |
path | str | None | Directory to search in (defaults to session storage) |
Response Metadata:
class GlobResponseMetadata(BaseModel):
number_of_files: int # Count of files returned
truncated: bool # Whether results were limited to 100 files
ReadMedia
Analyze media files (images, audio, video, PDF) using AI.
Parameters:
Parameter | Type | Default | Description |
---|---|---|---|
file_path | str | Required | Path to file or URL for analysis |
media_type | MediaAnalysisType | Required | Type of media analysis (image, audio, video, pdf) |
prompt | str | Required | Analysis prompt for the media content |
pdf_pages | str | None | PDF page selection (e.g., '5' or '1-3,7,10-12') |
video_interval | str | None | Video time interval (e.g., '00:20:50-00:26:10') |
MediaAnalysisType:
class MediaAnalysisType(str, Enum):
IMAGE = "image"
AUDIO = "audio"
VIDEO = "video"
PDF = "pdf"
Response:
class ReadMediaResponse(BaseModel):
results: List[ReadMediaResult] # List of analysis results
summary: str # Overall summary
class ReadMediaResult(BaseModel):
file_path: str # Path to analyzed file
media_type: str # Type of media analyzed
analysis: str # Analysis results from AI
error: Optional[str] # Error message if failed
grep
Search for patterns in files using regex.
Parameters:
Parameter | Type | Default | Description |
---|---|---|---|
pattern | str | Required | The regex pattern to search for |
path | str | None | Directory to search (defaults to session storage) |
include | str | None | File pattern to include (e.g., "*.js", "*.py") |
literal_text | bool | False | Treat pattern as literal text |
Response Metadata:
class GrepResponseMetadata(BaseModel):
number_of_matches: int # Total number of matches found
truncated: bool # True if results limited to 100 matches
write
Write content to files.
Parameters:
Parameter | Type | Default | Description |
---|---|---|---|
file_path | str | Required | Path to the file to write |
content | str | Required | Content to write to the file |
Response Metadata:
class WriteResponseMetadata(BaseModel):
diff: str # Diff text showing changes
additions: int # Number of lines added
removals: int # Number of lines removed
edit
Perform string replacements in files.
Parameters:
Parameter | Type | Default | Description |
---|---|---|---|
file_path | str | Required | Absolute path to the file to modify |
old_string | str | Required | Text to replace |
new_string | str | Required | Text to replace it with |
Response Metadata:
class EditResponseMetadata(BaseModel):
diff: str # Diff text showing changes
additions: int # Number of lines added
removals: int # Number of lines removed
python_execution
Execute Python code in a sandboxed environment.
Parameters:
Parameter | Type | Default | Description |
---|---|---|---|
code | str | Required | The Python code to execute |
Response:
class PythonExecutionResult(BaseModel):
type: str # Always "code_execution_result"
stdout: str # Standard output
stderr: str # Standard error
return_code: int # Exit code (0 = success)
search
Perform web, image, or video searches.
Parameters:
Parameter | Type | Default | Description |
---|---|---|---|
query | str | Required | Search query (min length: 2) |
search_type | SearchType | "web" | Type of search (web, images, videos) |
allowed_domains | List[str] | None | Only include results from these domains |
blocked_domains | List[str] | None | Never include results from these domains |
safesearch | Safesearch | "strict" | Safe search level (strict, moderate, off) |
spellcheck | bool | True | Enable spellcheck |
SearchType:
class SearchType(str, Enum):
WEB = "web"
IMAGES = "images"
VIDEOS = "videos"
Safesearch:
class Safesearch(str, Enum):
STRICT = "strict"
MODERATE = "moderate"
OFF = "off"
Response (Web):
class BraveSearchResponse(BaseModel):
    type: str
    web: WebResults

class WebResults(BaseModel):
    results: List[SearchResult]

class SearchResult(BaseModel):
    title: str
    url: str
    description: str
Response (Images):
class ImageSearchResponse(BaseModel):
    type: str
    results: List[ImageResult]

class ImageResult(BaseModel):
    title: str
    url: str
    source: str
    thumbnail: ImageResultThumbnail
    properties: ImageResultProperties

class ImageResultThumbnail(BaseModel):
    src: str

class ImageResultProperties(BaseModel):
    url: str
Response (Videos):
class VideoSearchResponse(BaseModel):
    type: str
    results: List[VideoResult]

class VideoResult(BaseModel):
    title: str
    url: str
    source: str
    thumbnail: VideoResultThumbnail
    properties: VideoResultProperties

class VideoResultThumbnail(BaseModel):
    src: str

class VideoResultProperties(BaseModel):
    url: str
    duration: str
    views: str
todo_write
Manage todo items and track task progress.
Parameters:
Parameter | Type | Default | Description |
---|---|---|---|
todos | List[Todo] | Required | Array of todo items to manage |
Todo:
class Todo(BaseModel):
id: str # Unique identifier
content: str # Task description
status: TodoStatus # Current status (pending, in_progress, completed)
priority: TodoPriority # Priority level (low, medium, high)
exit_plan_mode
Exit planning mode and present a plan to the user.
Parameters:
Parameter | Type | Default | Description |
---|---|---|---|
plan | str | Required | The plan to present (supports markdown) |
show_media
Display media outputs to the user.
Parameters:
Parameter | Type | Default | Description |
---|---|---|---|
outputs | List[MediaOutput] | Required | Array of media outputs to showcase |
MediaOutput:
class MediaOutput(BaseModel):
path: Optional[str] # Absolute path or URL to media file
type: MediaType # Type of media (image, video, audio, gsap_animation, pdf, csv)
title: str # Title or name for the output
description: Optional[str] # Optional description
config: Optional[Dict] # Configuration data (gsap_animation only)
start_time: Optional[int] # Start time in seconds (video/audio)
duration: Optional[int] # Duration in seconds (video/audio)
task
Delegate complex tasks to specialized sub-agents.
Parameters:
Parameter | Type | Default | Description |
---|---|---|---|
description | str | Required | Short 3-5 word task description |
prompt | str | Required | The task for the agent to perform |
subagent_type | str | Required | Type of specialized agent ("general-purpose") |
Advanced Features
Continuous Conversations
Sessions maintain conversation context automatically:
async with StreamingSession(mix, title="Multi-turn") as session:
# First question
await session.send("What's the capital of France?")
# Follow-up maintains context
await session.send("What about Italy?")
# Context is preserved throughout the session
await session.send("Which has a larger population?")
Session Forking
Create conversation branches at any message index:
# Original session
session = mix.sessions.create(title="Original")
mix.messages.send(id=session.id, text="Hello")
mix.messages.send(id=session.id, text="Tell me about Python")
# Fork at message index 1
forked = mix.sessions.fork(
id=session.id,
message_index=1,
title="Forked - Alternative Path"
)
# Forked session continues from message 1
mix.messages.send(id=forked.id, text="Tell me about Rust instead")
Session Rewinding
Delete messages after a specific point:
# Rewind to specific message
mix.sessions.rewind_session(
id=session.id,
message_id="msg_123",
cleanup_media=True # Also delete associated media files
)
File Management with Thumbnails
# Upload image
with open("photo.jpg", "rb") as f:
mix.files.upload_session_file(
id=session_id,
file={
"file_name": "photo.jpg",
"content": f,
"content_type": "image/jpeg"
}
)
# Generate thumbnails
thumb_100 = mix.files.get_session_file(
id=session_id,
filename="photo.jpg",
thumb="100" # 100x100 box
)
thumb_w200 = mix.files.get_session_file(
id=session_id,
filename="photo.jpg",
thumb="w200" # Width-constrained to 200px
)
Dual-Agent Configuration
Configure separate models for main and sub-agents:
# Main agent: complex reasoning tasks
mix.preferences.update_preferences(
main_agent_model="openrouter.deepseek-v3.1",
main_agent_max_tokens=8000,
main_agent_reasoning_effort="high"
)
# Sub agent: quick supporting tasks
mix.preferences.update_preferences(
sub_agent_model="openrouter.zai-glm-4.5-air",
sub_agent_max_tokens=2000,
sub_agent_reasoning_effort="low"
)
Retry Configuration
from mix_python_sdk.utils import BackoffStrategy, RetryConfig
retry = RetryConfig(
"backoff",
BackoffStrategy(
initial_interval=1,
max_interval=50,
exponent=1.1,
max_elapsed_time=100
),
retry_connection_errors=False
)
mix = Mix(server_url="http://localhost:8088", retry_config=retry)
Debug Logging
import logging
logging.basicConfig(level=logging.DEBUG)
mix = Mix(
server_url="http://localhost:8088",
debug_logger=logging.getLogger("mix_python_sdk")
)
# Or use environment variable
# MIX_DEBUG=true
Low-Level SSE Streaming (CQRS Pattern)
For maximum control over streaming:
import threading
import time
def stream_message(mix, session_id: str, message: str):
# Read path: SSE connection
stream_response = mix.streaming.stream_events(session_id=session_id)
time.sleep(0.5) # Allow SSE connection to establish
# Write path: REST API (separate thread)
send_thread = threading.Thread(
target=lambda: mix.messages.send(id=session_id, text=message),
daemon=True
)
send_thread.start()
# Process events
with stream_response.result as event_stream:
for event in event_stream:
if isinstance(event, SSEContentEvent):
print(event.data.content, end="", flush=True)
elif isinstance(event, SSECompleteEvent):
break
send_thread.join(timeout=1.0)
Example Usage
Basic Synchronous Client
from mix_python_sdk import Mix
import os
with Mix(server_url=os.getenv("MIX_SERVER_URL")) as mix:
# Health check
health = mix.system.get_health()
print(f"Status: {health.status}")
# Store API key
mix.authentication.store_api_key(
api_key=os.getenv("OPENROUTER_API_KEY"),
provider="openrouter"
)
# Create and use session
session = mix.sessions.create(title="Demo")
response = mix.messages.send(id=session.id, text="Hello")
print(response.assistant_response)
# Clean up
mix.sessions.delete(id=session.id)
Async Iterator Pattern
import asyncio
from mix_python_sdk import Mix
from mix_python_sdk.helpers import query
async def main():
async with Mix(server_url="http://localhost:8088") as mix:
mix.authentication.store_api_key(api_key=api_key, provider="openrouter")
session = mix.sessions.create(title="Demo")
async for event in query(mix, session.id, "What's 2+2?"):
if event.type == "content":
print(event.content, end="", flush=True)
elif event.type == "tool":
print(f"\nTool: {event.tool_name}")
mix.sessions.delete(id=session.id)
asyncio.run(main())
Comprehensive Callback Handling
import asyncio
from mix_python_sdk import Mix
from mix_python_sdk.helpers import send_with_callbacks
async def main():
async with Mix(server_url="http://localhost:8088") as mix:
mix.authentication.store_api_key(api_key=api_key, provider="openrouter")
session = mix.sessions.create(title="Demo")
await send_with_callbacks(
mix,
session_id=session.id,
message="What's your working directory?",
on_thinking=lambda text: print(f"Thinking: {text}", end="", flush=True),
on_content=lambda text: print(f"\nResponse: {text}", end="", flush=True),
on_tool=lambda tool: print(f"\nTool: {tool.name}"),
on_tool_execution_complete=lambda data: print(f"Result: {data.progress}"),
on_error=lambda error: print(f"\nError: {error}"),
on_complete=lambda: print("\nComplete!")
)
mix.sessions.delete(id=session.id)
asyncio.run(main())
Session Analytics
# List sessions with metadata
sessions = mix.sessions.list()
for s in sessions:
print(f"Title: {s.title}")
print(f"Messages: {s.user_message_count} user, {s.assistant_message_count} assistant")
print(f"Tokens: {s.prompt_tokens} prompt, {s.completion_tokens} completion")
print(f"Cost: ${s.cost:.6f}")
print(f"Tool calls: {s.tool_call_count}")
Session Export
# Export complete session transcript
export = mix.sessions.export_session(id=session_id)
# Access full message history with tool calls
for msg in export.messages:
print(f"Role: {msg.role}")
print(f"Content: {msg.user_input or msg.assistant_response}")
if msg.reasoning:
print(f"Reasoning time: {msg.reasoning_duration}ms")
for tool_call in msg.tool_calls:
print(f"Tool: {tool_call.name}")
print(f"Input: {tool_call.input}")
print(f"Result: {tool_call.result}")
Tool Discovery
# Get tools status
tools_status = mix.tools.get_tools_status()
for category_name, category_info in tools_status.categories.items():
print(f"Category: {category_info.display_name}")
for tool in category_info.tools:
auth_status = "Ready" if tool.authenticated else "Needs auth"
print(f" - {tool.display_name}: {auth_status}")