Mix

Python SDK

Python client library for building AI agents with Mix. Supports both synchronous and asynchronous operations.

Installation

uv add mix-python-sdk
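
If you use pip instead of uv, the same package name should work (assuming it is published to PyPI under that name):

pip install mix-python-sdk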

For additional recipes and examples, see the Mix Python cookbook.

Quickstart

import asyncio
import os
from dotenv import load_dotenv
from mix_python_sdk import Mix
from mix_python_sdk.helpers import StreamingSession

async def main():
    load_dotenv()

    async with Mix(server_url=os.getenv("MIX_SERVER_URL")) as mix:
        # Store API key
        mix.authentication.store_api_key(
            api_key=os.getenv("OPENROUTER_API_KEY"),
            provider="openrouter"
        )

        # Auto-managed session with streaming
        async with StreamingSession(mix, title="Demo") as session:
            await session.send(
                "What's the capital of France?",
                on_content=lambda text: print(text, end="", flush=True),
                on_complete=lambda: print("\n")
            )

if __name__ == "__main__":
    asyncio.run(main())

Direct REST API

from mix_python_sdk import Mix
import os

with Mix(server_url=os.getenv("MIX_SERVER_URL")) as mix:
    mix.authentication.store_api_key(
        api_key=os.getenv("OPENROUTER_API_KEY"),
        provider="openrouter"
    )

    session = mix.sessions.create(title="Demo")
    response = mix.messages.send(id=session.id, text="Hello")
    print(response.assistant_response)

Functions

query()

Async iterator for streaming message interactions.

async def query(
    mix: Mix,
    session_id: str,
    message: str,
) -> AsyncIterator[StreamEvent]

Parameters:

Parameter   Type  Description
mix         Mix   Mix client instance
session_id  str   Session identifier
message     str   User message

Returns: AsyncIterator[StreamEvent] - Stream of events (thinking, content, tool, complete)

Example:

async for event in query(mix, session.id, "What's 2+2?"):
    if event.type == "content":
        print(event.content, end="", flush=True)
    elif event.type == "complete":
        print("\nDone!")

send_with_callbacks()

Callback-based streaming for ergonomic event handling.

async def send_with_callbacks(
    mix: Mix,
    session_id: str,
    message: str,
    *,
    on_thinking: Optional[Callable[[str], None]] = None,
    on_content: Optional[Callable[[str], None]] = None,
    on_tool: Optional[Callable[[Any], None]] = None,
    on_tool_execution_start: Optional[Callable[[Any], None]] = None,
    on_tool_execution_complete: Optional[Callable[[Any], None]] = None,
    on_error: Optional[Callable[[str], None]] = None,
    on_permission: Optional[Callable[[Any], None]] = None,
    on_complete: Optional[Callable[[], None]] = None,
) -> None

Parameters:

Parameter                   Type                   Default   Description
mix                         Mix                    Required  Mix client instance
session_id                  str                    Required  Session identifier
message                     str                    Required  User message
on_thinking                 Callable[[str], None]  None      Callback for reasoning content
on_content                  Callable[[str], None]  None      Callback for response content
on_tool                     Callable[[Any], None]  None      Callback for tool execution events
on_tool_execution_start     Callable[[Any], None]  None      Callback when tool execution starts
on_tool_execution_complete  Callable[[Any], None]  None      Callback when tool execution completes
on_error                    Callable[[str], None]  None      Callback for errors
on_permission               Callable[[Any], None]  None      Callback for permission requests
on_complete                 Callable[[], None]     None      Callback when stream completes

Example:

await send_with_callbacks(
    mix,
    session_id=session.id,
    message="Hello!",
    on_content=lambda text: print(text, end="", flush=True),
    on_tool=lambda tool: print(f"\nTool: {tool.name}"),
    on_complete=lambda: print("\nDone!")
)

Classes

Mix

Main SDK client for interacting with the Mix REST API.

Constructor:

Mix(
    server_idx: Optional[int] = None,
    server_url: Optional[str] = None,
    url_params: Optional[Dict[str, str]] = None,
    client: Optional[HttpClient] = None,
    async_client: Optional[AsyncHttpClient] = None,
    retry_config: OptionalNullable[RetryConfig] = None,
    timeout_ms: Optional[int] = None,
    debug_logger: Optional[Logger] = None
)

Parameters:

Parameter     Type             Default  Description
server_idx    int              None     Server index for all methods
server_url    str              None     Server URL override (e.g., "http://localhost:8088")
url_params    Dict[str, str]   None     URL template parameters
client        HttpClient       None     Custom HTTP client for sync methods
async_client  AsyncHttpClient  None     Custom HTTP client for async methods
retry_config  RetryConfig      None     Retry configuration
timeout_ms    int              None     Request timeout in milliseconds
debug_logger  Logger           None     Debug logger instance

Context Manager Support:

# Synchronous
with Mix(server_url="http://localhost:8088") as mix:
    # Operations here
    pass

# Asynchronous
async with Mix(server_url="http://localhost:8088") as mix:
    # Operations here
    pass

Sub-SDKs:

Property        Description
authentication  Authentication operations (API keys, OAuth)
sessions        Session management (create, delete, fork, rewind)
messages        Message operations (send, history, list)
files           File management (upload, download, list, delete)
streaming       SSE streaming for real-time updates
permissions     Permission management (grant, deny)
preferences     User preferences (models, providers, tokens)
system          System info and health checks
tools           Tool status and authentication
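
Each sub-SDK hangs off the client as a property. A short sketch combining calls that appear elsewhere in this reference:

from mix_python_sdk import Mix

with Mix(server_url="http://localhost:8088") as mix:
    # System info and health checks
    health = mix.system.get_health()
    print(f"Server status: {health.status}")

    # Session management
    for s in mix.sessions.list():
        print(f"{s.title}: {s.user_message_count} user messages")

    # Tool status and authentication
    tools_status = mix.tools.get_tools_status()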

StreamingSession

Context manager for streaming sessions with automatic lifecycle management.

Constructor:

StreamingSession(
    mix: Mix,
    title: str,
    custom_system_prompt: Optional[str] = None
)

Parameters:

Parameter             Type  Default   Description
mix                   Mix   Required  Mix client instance
title                 str   Required  Session title
custom_system_prompt  str   None      Custom system prompt

Methods:

# Query pattern (iterator)
async for event in session.query(message="Hello"):
    if event.type == "content":
        print(event.content)

# Callback pattern
await session.send(
    message="Hello",
    on_content=lambda text: print(text),
    on_complete=lambda: print("Done")
)

Properties:

Property  Type         Description
id        str          Session identifier
session   SessionData  Session data object

Example:

async with StreamingSession(mix, title="Demo") as session:
    print(f"Session ID: {session.id}")
    await session.send("Hello!", on_content=lambda t: print(t))

StreamEvent

Helper class for stream event handling.

Properties:

Property   Type  Description
type       str   Event type (thinking, content, tool, complete, error)
data       Any   Event data
content    str   Content text (for content events)
thinking   str   Thinking text (for thinking events)
tool_name  str   Tool name (for tool events)
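
A sketch that dispatches on these properties while consuming the query() helper documented above (mix and session as in the earlier examples):

async for event in query(mix, session.id, "Summarize this repository"):
    if event.type == "thinking":
        print(f"[thinking] {event.thinking}")
    elif event.type == "content":
        print(event.content, end="", flush=True)
    elif event.type == "tool":
        print(f"\n[tool] {event.tool_name}")
    elif event.type == "error":
        print(f"\n[error] {event.data}")
    elif event.type == "complete":
        print("\nDone")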

Types

Configuration Types

Provider

class Provider(str, Enum):
    ANTHROPIC = "anthropic"
    OPENAI = "openai"
    OPENROUTER = "openrouter"
    GEMINI = "gemini"
    BRAVE = "brave"

PromptMode

class PromptMode(str, Enum):
    DEFAULT = "default"    # Use base system prompt only
    APPEND = "append"      # Append customSystemPrompt to base (50KB limit)
    REPLACE = "replace"    # Replace base prompt with custom (100KB limit)
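As an illustration, a custom system prompt can be supplied through the StreamingSession helper documented above; how it combines with the base prompt is governed by the prompt mode:

async with StreamingSession(
    mix,
    title="Code reviewer",
    custom_system_prompt="You are a meticulous Python code reviewer.",
) as session:
    await session.send("Review main.py", on_content=lambda t: print(t, end=""))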

RetryConfig

class RetryConfig(BaseModel):
    strategy: str                    # Retry strategy ("backoff")
    backoff: BackoffStrategy         # Backoff configuration
    retry_connection_errors: bool    # Whether to retry connection errors

Message and Content Types

SessionData

class SessionData(BaseModel):
    id: str                              # Unique session identifier
    title: str                           # Session title
    created_at: date                     # Creation timestamp
    assistant_message_count: int         # Number of assistant messages
    user_message_count: int              # Number of user messages
    tool_call_count: int                 # Number of tool calls
    prompt_tokens: int                   # Total prompt tokens
    completion_tokens: int               # Total completion tokens
    cost: float                          # Total cost
    first_user_message: Optional[str]    # First message (optional)

BackendMessage

class BackendMessage(BaseModel):
    id: str                              # Unique message ID
    session_id: str                      # Session ID
    role: str                            # Role: user/assistant/tool
    user_input: str                      # User's input
    assistant_response: Optional[str]    # Assistant's response
    reasoning: Optional[str]             # Reasoning process
    reasoning_duration: Optional[int]    # Reasoning duration (ms)
    tool_calls: List[ToolCallData]       # Tool calls made

ToolCallData

class ToolCallData(BaseModel):
    id: str                      # Unique tool call ID
    name: str                    # Tool name
    input: str                   # Input parameters
    result: Optional[str]        # Execution result
    finished: bool               # Whether complete
    is_error: Optional[bool]     # Whether errored
    type: str                    # Tool type

FileInfo

class FileInfo(BaseModel):
    name: str       # File name
    size: int       # Size in bytes
    modified: int   # Unix timestamp
    is_dir: bool    # Is directory
    url: str        # Static access URL

Server-Sent Events (SSE) Types

SSEEventStream

Base structure for all SSE events:

{
    "event": EventTypeEnum,      # Event type identifier
    "id": str,                   # Sequential event ID
    "retry": Optional[int],      # Client retry interval (ms)
    "data": EventData            # Event-specific data
}

Event Types:

Event                          Description                     Data Fields
SSEConnectedEvent              Initial connection established  -
SSEContentEvent                Streaming content delta         content: str, type: str
SSEThinkingEvent               Thinking/reasoning content      content: str
SSEToolEvent                   Tool usage notification         name: str, metadata
SSEToolExecutionStartEvent     Tool execution started          Tool metadata
SSEToolExecutionCompleteEvent  Tool execution completed        Tool result
SSECompleteEvent               Message processing complete     done: bool, content: str, message_id: str, reasoning: str, reasoning_duration: int
SSEErrorEvent                  Error occurred                  error: str
SSEPermissionEvent             Permission request              Permission details
SSEHeartbeatEvent              Connection keepalive            -

Error Types

MixError

Base class for all HTTP error responses.

class MixError(Exception):
    message: str                  # Error message
    status_code: int              # HTTP status code
    headers: httpx.Headers        # Response headers
    body: str                     # Response body
    raw_response: httpx.Response  # Raw HTTP response
    data: Any                     # Optional structured error data

ErrorResponse

class ErrorResponse(BaseModel):
    error: RESTError
        code: int        # HTTP status code
        message: str     # Error message
        type: Type       # Error type

Network Errors

From the httpx library:

  • httpx.RequestError - Base request error
  • httpx.ConnectError - Connection failed
  • httpx.TimeoutException - Request timeout

ResponseValidationError

Type mismatch between response and Pydantic model.
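A minimal error-handling sketch; the module that exports MixError is an assumption here, so adjust the import to match your SDK version:

import httpx
from mix_python_sdk import Mix
from mix_python_sdk.models import MixError  # assumed import location

with Mix(server_url="http://localhost:8088", timeout_ms=10000) as mix:
    try:
        session = mix.sessions.create(title="Demo")
    except MixError as e:
        # Structured HTTP error returned by the server
        print(f"HTTP {e.status_code}: {e.message}")
    except httpx.ConnectError:
        print("Could not reach the Mix server")
    except httpx.TimeoutException:
        print("Request timed out")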

Tool Input/Output Types

bash

Execute bash commands on the system.

Parameters:

Parameter  Type  Default   Description
command    str   Required  The bash command to execute
timeout    int   60000     Timeout in milliseconds (max: 600000)

Response Metadata:

class BashResponseMetadata(BaseModel):
    start_time: int  # Unix timestamp (ms) when command started
    end_time: int    # Unix timestamp (ms) when command completed

ReadText

Read text files from filesystem or URLs.

Parameters:

Parameter  Type  Default   Description
file_path  str   Required  Absolute path or URL (http/https) to read
offset     int   None      Line number to start reading (0-based)
limit      int   2000      Number of lines to read

Response Metadata:

class ReadTextResponseMetadata(BaseModel):
    file_path: str  # The path/URL that was read
    content: str    # The raw content without line numbers

glob

Fast file pattern matching using glob patterns.

Parameters:

Parameter  Type  Default   Description
pattern    str   Required  The glob pattern to match files (e.g., "**/*.js")
path       str   None      Directory to search in (defaults to session storage)

Response Metadata:

class GlobResponseMetadata(BaseModel):
    number_of_files: int  # Count of files returned
    truncated: bool       # Whether results were limited to 100 files

ReadMedia

Analyze media files (images, audio, video, PDF) using AI.

Parameters:

Parameter       Type               Default   Description
file_path       str                Required  Path to file or URL for analysis
media_type      MediaAnalysisType  Required  Type of media analysis (image, audio, video, pdf)
prompt          str                Required  Analysis prompt for the media content
pdf_pages       str                None      PDF page selection (e.g., '5' or '1-3,7,10-12')
video_interval  str                None      Video time interval (e.g., '00:20:50-00:26:10')

MediaAnalysisType:

class MediaAnalysisType(str, Enum):
    IMAGE = "image"
    AUDIO = "audio"
    VIDEO = "video"
    PDF = "pdf"

Response:

class ReadMediaResponse(BaseModel):
    results: List[ReadMediaResult]  # List of analysis results
    summary: str                    # Overall summary

class ReadMediaResult(BaseModel):
    file_path: str            # Path to analyzed file
    media_type: str           # Type of media analyzed
    analysis: str             # Analysis results from AI
    error: Optional[str]      # Error message if failed
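
As an illustration only (hypothetical path and prompt), a ReadMedia invocation for a PDF might carry an input payload like:

{
    "file_path": "/workspace/report.pdf",
    "media_type": "pdf",
    "prompt": "Summarize the key findings",
    "pdf_pages": "1-3,7"
}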

grep

Search for patterns in files using regex.

Parameters:

Parameter     Type  Default   Description
pattern       str   Required  The regex pattern to search for
path          str   None      Directory to search (defaults to session storage)
include       str   None      File pattern to include (e.g., ".js", ".py")
literal_text  bool  False     Treat pattern as literal text

Response Metadata:

class GrepResponseMetadata(BaseModel):
    number_of_matches: int  # Total number of matches found
    truncated: bool         # True if results limited to 100 matches

write

Write content to files.

Parameters:

Parameter  Type  Default   Description
file_path  str   Required  Path to the file to write
content    str   Required  Content to write to the file

Response Metadata:

class WriteResponseMetadata(BaseModel):
    diff: str        # Diff text showing changes
    additions: int   # Number of lines added
    removals: int    # Number of lines removed

edit

Perform string replacements in files.

Parameters:

Parameter   Type  Default   Description
file_path   str   Required  Absolute path to the file to modify
old_string  str   Required  Text to replace
new_string  str   Required  Text to replace it with

Response Metadata:

class EditResponseMetadata(BaseModel):
    diff: str        # Diff text showing changes
    additions: int   # Number of lines added
    removals: int    # Number of lines removed

python_execution

Execute Python code in a sandboxed environment.

Parameters:

Parameter  Type  Default   Description
code       str   Required  The Python code to execute

Response:

class PythonExecutionResult(BaseModel):
    type: str          # Always "code_execution_result"
    stdout: str        # Standard output
    stderr: str        # Standard error
    return_code: int   # Exit code (0 = success)

Perform web, image, or video searches.

Parameters:

Parameter        Type        Default   Description
query            str         Required  Search query (min length: 2)
search_type      SearchType  "web"     Type of search (web, images, videos)
allowed_domains  List[str]   None      Only include results from these domains
blocked_domains  List[str]   None      Never include results from these domains
safesearch       Safesearch  "strict"  Safe search level (strict, moderate, off)
spellcheck       bool        True      Enable spellcheck

SearchType:

class SearchType(str, Enum):
    WEB = "web"
    IMAGES = "images"
    VIDEOS = "videos"

Safesearch:

class Safesearch(str, Enum):
    STRICT = "strict"
    MODERATE = "moderate"
    OFF = "off"
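
An illustrative search payload (hypothetical values) combining these parameters:

{
    "query": "python asyncio streaming",
    "search_type": "web",
    "allowed_domains": ["docs.python.org"],
    "safesearch": "moderate",
    "spellcheck": True
}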

Response (Web):

class BraveSearchResponse(BaseModel):
    type: str
    web: WebResults
        results: List[SearchResult]
            title: str
            url: str
            description: str

Response (Images):

class ImageSearchResponse(BaseModel):
    type: str
    results: List[ImageResult]
        title: str
        url: str
        source: str
        thumbnail: ImageResultThumbnail
            src: str
        properties: ImageResultProperties
            url: str

Response (Videos):

class VideoSearchResponse(BaseModel):
    type: str
    results: List[VideoResult]
        title: str
        url: str
        source: str
        thumbnail: VideoResultThumbnail
            src: str
        properties: VideoResultProperties
            url: str
            duration: str
            views: str

todo_write

Manage todo items and track task progress.

Parameters:

Parameter  Type        Default   Description
todos      List[Todo]  Required  Array of todo items to manage

Todo:

class Todo(BaseModel):
    id: str                # Unique identifier
    content: str           # Task description
    status: TodoStatus     # Current status (pending, in_progress, completed)
    priority: TodoPriority # Priority level (low, medium, high)
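
An illustrative todos payload (hypothetical tasks) using the statuses and priorities above:

[
    {"id": "1", "content": "Collect dataset", "status": "completed", "priority": "high"},
    {"id": "2", "content": "Train model", "status": "in_progress", "priority": "medium"},
    {"id": "3", "content": "Write evaluation report", "status": "pending", "priority": "low"}
]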

exit_plan_mode

Exit planning mode and present a plan to the user.

Parameters:

Parameter  Type  Default   Description
plan       str   Required  The plan to present (supports markdown)

show_media

Display media outputs to the user.

Parameters:

Parameter  Type               Default   Description
outputs    List[MediaOutput]  Required  Array of media outputs to showcase

MediaOutput:

class MediaOutput(BaseModel):
    path: Optional[str]           # Absolute path or URL to media file
    type: MediaType               # Type of media (image, video, audio, gsap_animation, pdf, csv)
    title: str                    # Title or name for the output
    description: Optional[str]    # Optional description
    config: Optional[Dict]        # Configuration data (gsap_animation only)
    start_time: Optional[int]     # Start time in seconds (video/audio)
    duration: Optional[int]       # Duration in seconds (video/audio)

task

Delegate complex tasks to specialized sub-agents.

Parameters:

Parameter      Type  Default   Description
description    str   Required  Short 3-5 word task description
prompt         str   Required  The task for the agent to perform
subagent_type  str   Required  Type of specialized agent ("general-purpose")

Advanced Features

Continuous Conversations

Sessions maintain conversation context automatically:

async with StreamingSession(mix, title="Multi-turn") as session:
    # First question
    await session.send("What's the capital of France?")

    # Follow-up maintains context
    await session.send("What about Italy?")

    # Context is preserved throughout the session
    await session.send("Which has a larger population?")

Session Forking

Create conversation branches at any message index:

# Original session
session = mix.sessions.create(title="Original")
mix.messages.send(id=session.id, text="Hello")
mix.messages.send(id=session.id, text="Tell me about Python")

# Fork at message index 1
forked = mix.sessions.fork(
    id=session.id,
    message_index=1,
    title="Forked - Alternative Path"
)

# Forked session continues from message 1
mix.messages.send(id=forked.id, text="Tell me about Rust instead")

Session Rewinding

Delete messages after a specific point:

# Rewind to specific message
mix.sessions.rewind_session(
    id=session.id,
    message_id="msg_123",
    cleanup_media=True  # Also delete associated media files
)

File Management with Thumbnails

# Upload image
with open("photo.jpg", "rb") as f:
    mix.files.upload_session_file(
        id=session_id,
        file={
            "file_name": "photo.jpg",
            "content": f,
            "content_type": "image/jpeg"
        }
    )

# Generate thumbnails
thumb_100 = mix.files.get_session_file(
    id=session_id,
    filename="photo.jpg",
    thumb="100"  # 100x100 box
)

thumb_w200 = mix.files.get_session_file(
    id=session_id,
    filename="photo.jpg",
    thumb="w200"  # Width-constrained to 200px
)

Dual-Agent Configuration

Configure separate models for main and sub-agents:

# Main agent: complex reasoning tasks
mix.preferences.update_preferences(
    main_agent_model="openrouter.deepseek-v3.1",
    main_agent_max_tokens=8000,
    main_agent_reasoning_effort="high"
)

# Sub agent: quick supporting tasks
mix.preferences.update_preferences(
    sub_agent_model="openrouter.zai-glm-4.5-air",
    sub_agent_max_tokens=2000,
    sub_agent_reasoning_effort="low"
)

Retry Configuration

from mix_python_sdk.utils import BackoffStrategy, RetryConfig

retry = RetryConfig(
    "backoff",
    BackoffStrategy(
        initial_interval=1,
        max_interval=50,
        exponent=1.1,
        max_elapsed_time=100
    ),
    retry_connection_errors=False
)

mix = Mix(server_url="http://localhost:8088", retry_config=retry)

Debug Logging

import logging

logging.basicConfig(level=logging.DEBUG)
mix = Mix(
    server_url="http://localhost:8088",
    debug_logger=logging.getLogger("mix_python_sdk")
)

# Or use environment variable
# MIX_DEBUG=true

Low-Level SSE Streaming (CQRS Pattern)

For maximum control over streaming:

import threading
import time
# SSE event model classes; assumed import path, adjust to your SDK version
from mix_python_sdk.models import SSECompleteEvent, SSEContentEvent

def stream_message(mix, session_id: str, message: str):
    # Read path: SSE connection
    stream_response = mix.streaming.stream_events(session_id=session_id)
    time.sleep(0.5)  # Allow SSE connection to establish

    # Write path: REST API (separate thread)
    send_thread = threading.Thread(
        target=lambda: mix.messages.send(id=session_id, text=message),
        daemon=True
    )
    send_thread.start()

    # Process events
    with stream_response.result as event_stream:
        for event in event_stream:
            if isinstance(event, SSEContentEvent):
                print(event.data.content, end="", flush=True)
            elif isinstance(event, SSECompleteEvent):
                break

    send_thread.join(timeout=1.0)

Example Usage

Basic Synchronous Client

from mix_python_sdk import Mix
import os

with Mix(server_url=os.getenv("MIX_SERVER_URL")) as mix:
    # Health check
    health = mix.system.get_health()
    print(f"Status: {health.status}")

    # Store API key
    mix.authentication.store_api_key(
        api_key=os.getenv("OPENROUTER_API_KEY"),
        provider="openrouter"
    )

    # Create and use session
    session = mix.sessions.create(title="Demo")
    response = mix.messages.send(id=session.id, text="Hello")
    print(response.assistant_response)

    # Clean up
    mix.sessions.delete(id=session.id)

Async Iterator Pattern

import asyncio
import os
from mix_python_sdk import Mix
from mix_python_sdk.helpers import query

async def main():
    async with Mix(server_url="http://localhost:8088") as mix:
        mix.authentication.store_api_key(api_key=os.getenv("OPENROUTER_API_KEY"), provider="openrouter")
        session = mix.sessions.create(title="Demo")

        async for event in query(mix, session.id, "What's 2+2?"):
            if event.type == "content":
                print(event.content, end="", flush=True)
            elif event.type == "tool":
                print(f"\nTool: {event.tool_name}")

        mix.sessions.delete(id=session.id)

asyncio.run(main())

Comprehensive Callback Handling

import asyncio
import os
from mix_python_sdk import Mix
from mix_python_sdk.helpers import send_with_callbacks

async def main():
    async with Mix(server_url="http://localhost:8088") as mix:
        mix.authentication.store_api_key(api_key=os.getenv("OPENROUTER_API_KEY"), provider="openrouter")
        session = mix.sessions.create(title="Demo")

        await send_with_callbacks(
            mix,
            session_id=session.id,
            message="What's your working directory?",
            on_thinking=lambda text: print(f"Thinking: {text}", end="", flush=True),
            on_content=lambda text: print(f"\nResponse: {text}", end="", flush=True),
            on_tool=lambda tool: print(f"\nTool: {tool.name}"),
            on_tool_execution_complete=lambda data: print(f"Result: {data.progress}"),
            on_error=lambda error: print(f"\nError: {error}"),
            on_complete=lambda: print("\nComplete!")
        )

        mix.sessions.delete(id=session.id)

asyncio.run(main())

Session Analytics

# List sessions with metadata
sessions = mix.sessions.list()
for s in sessions:
    print(f"Title: {s.title}")
    print(f"Messages: {s.user_message_count} user, {s.assistant_message_count} assistant")
    print(f"Tokens: {s.prompt_tokens} prompt, {s.completion_tokens} completion")
    print(f"Cost: ${s.cost:.6f}")
    print(f"Tool calls: {s.tool_call_count}")

Session Export

# Export complete session transcript
export = mix.sessions.export_session(id=session_id)

# Access full message history with tool calls
for msg in export.messages:
    print(f"Role: {msg.role}")
    print(f"Content: {msg.user_input or msg.assistant_response}")
    if msg.reasoning:
        print(f"Reasoning time: {msg.reasoning_duration}ms")
    for tool_call in msg.tool_calls:
        print(f"Tool: {tool_call.name}")
        print(f"Input: {tool_call.input}")
        print(f"Result: {tool_call.result}")

Tool Discovery

# Get tools status
tools_status = mix.tools.get_tools_status()

for category_name, category_info in tools_status.categories.items():
    print(f"Category: {category_info.display_name}")
    for tool in category_info.tools:
        auth_status = "Ready" if tool.authenticated else "Needs auth"
        print(f"  - {tool.display_name}: {auth_status}")