Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ Here are some specific use cases we enable:
- **Isolated workspaces** with central view on git divergence
- **Local**: git worktrees on your local machine ([docs](https://cmux.io/local.html))
- **SSH**: regular git clones on a remote server ([docs](https://cmux.io/ssh.html))
- **Multi-model** (`sonnet-4-*`, `gpt-5-*`, `opus-4-*`)
- **Multi-model** (`sonnet-4-*`, `grok-*`, `gpt-5-*`, `opus-4-*`)
- Ollama supported for local LLMs ([docs](https://cmux.io/models.html#ollama-local))
- OpenRouter supported for long-tail of LLMs ([docs](https://cmux.io/models.html#openrouter-cloud))
- **VS Code Extension**: Jump into mux workspaces directly from VS Code ([docs](https://cmux.io/vscode-extension.html))
Expand Down
6 changes: 6 additions & 0 deletions bun.lock
Original file line number Diff line number Diff line change
@@ -1,12 +1,14 @@
{
"lockfileVersion": 1,
"configVersion": 0,
"workspaces": {
"": {
"name": "@coder/cmux",
"dependencies": {
"@ai-sdk/anthropic": "^2.0.44",
"@ai-sdk/google": "^2.0.38",
"@ai-sdk/openai": "^2.0.66",
"@ai-sdk/xai": "^2.0.33",
"@openrouter/ai-sdk-provider": "^1.2.2",
"@radix-ui/react-checkbox": "^1.3.3",
"@radix-ui/react-dialog": "^1.1.15",
Expand Down Expand Up @@ -141,10 +143,14 @@

"@ai-sdk/openai": ["@ai-sdk/openai@2.0.68", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.17" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-qUSLFkqgUoFArzBwttu0KWVAZYjbsdZGOklSJXpfZ2nDC61yseHxtcnuG8u6tqKnGXDh4eakEgREDWU2sRht7A=="],

"@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.27", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.17" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-bpYruxVLhrTbVH6CCq48zMJNeHu6FmHtEedl9FXckEgcIEAi036idFhJlcRwC1jNCwlacbzb8dPD7OAH1EKJaQ=="],

"@ai-sdk/provider": ["@ai-sdk/provider@2.0.0", "", { "dependencies": { "json-schema": "^0.4.0" } }, "sha512-6o7Y2SeO9vFKB8lArHXehNuusnpddKPk7xqL7T2/b+OvXMRIXUO1rR4wcv1hAFUAT9avGZshty3Wlua/XA7TvA=="],

"@ai-sdk/provider-utils": ["@ai-sdk/provider-utils@3.0.17", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@standard-schema/spec": "^1.0.0", "eventsource-parser": "^3.0.6" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-TR3Gs4I3Tym4Ll+EPdzRdvo/rc8Js6c4nVhFLuvGLX/Y4V9ZcQMa/HTiYsHEgmYrf1zVi6Q145UEZUfleOwOjw=="],

"@ai-sdk/xai": ["@ai-sdk/xai@2.0.33", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.27", "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.17" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-0+S+hxbAj8dA8/3dYQsmgkVkPcs8yptO1ueLWtJpa6PYjrdyliDcPSCZREL8aE76vHGvFsYlRABFfH9Ps2M8tg=="],

"@antfu/install-pkg": ["@antfu/install-pkg@1.1.0", "", { "dependencies": { "package-manager-detector": "^1.3.0", "tinyexec": "^1.0.1" } }, "sha512-MGQsmw10ZyI+EJo45CdSER4zEb+p31LpDAFp2Z3gkSd1yqVZGi0Ebx++YTEMonJy4oChEMLsxZ64j8FH6sSqtQ=="],

"@antfu/utils": ["@antfu/utils@9.3.0", "", {}, "sha512-9hFT4RauhcUzqOE4f1+frMKLZrgNog5b06I7VmZQV1BkvwvqrbC8EBZf3L1eEL2AKb6rNKjER0sEvJiSP1FXEA=="],
Expand Down
2 changes: 1 addition & 1 deletion docs/intro.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
mux helps you work with multiple coding assistants more effectively via:

- Isolated workspaces with central view on git status updates
- Multi-model (`sonnet-4-*`, `gpt-5-*`, `opus-4-*`) support
- Multi-model (`sonnet-4-*`, `grok-*`, `gpt-5-*`, `opus-4-*`) support
- Supporting UI and keybinds for efficiently managing a suite of agents
- Rich markdown outputs (mermaid diagrams, LaTeX, etc.)

Expand Down
28 changes: 28 additions & 0 deletions docs/models.md
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,30 @@ Access Gemini models directly via Google's generative AI API:

TODO: add issue link here.

#### xAI (Grok)

Frontier reasoning models from xAI with built-in search orchestration:

- `xai:grok-4-1` — Fast unified model (switches between reasoning and non-reasoning modes based on the thinking toggle)
- `xai:grok-code` — Optimized for coding tasks

**Setup:**

1. Create an API key at [console.x.ai](https://console.x.ai/)
2. Add to `~/.mux/providers.jsonc`:

```jsonc
{
"xai": {
"apiKey": "sk-xai-...",
},
}
```

**Search orchestration:**

Mux enables Grok's live search by default using `mode: "auto"` with citations. Add [`searchParameters`](https://docs.x.ai/docs/resources/search) to `providers.jsonc` if you want to customize the defaults (e.g., regional focus, time filters, or disabling search entirely per workspace).

#### OpenRouter (Cloud)

Access 300+ models from multiple providers through a single API:
Expand Down Expand Up @@ -167,6 +191,10 @@ All providers are configured in `~/.mux/providers.jsonc`. Example configurations
"google": {
"apiKey": "AIza...",
},
// Required for Grok models
"xai": {
"apiKey": "sk-xai-...",
},
// Required for OpenRouter models
"openrouter": {
"apiKey": "sk-or-v1-...",
Expand Down
1 change: 1 addition & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@
"@ai-sdk/anthropic": "^2.0.44",
"@ai-sdk/google": "^2.0.38",
"@ai-sdk/openai": "^2.0.66",
"@ai-sdk/xai": "^2.0.33",
"@openrouter/ai-sdk-provider": "^1.2.2",
"@radix-ui/react-checkbox": "^1.3.3",
"@radix-ui/react-dialog": "^1.1.15",
Expand Down
2 changes: 1 addition & 1 deletion src/browser/App.stories.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -539,7 +539,7 @@ export const ActiveWorkspaceWithChat: Story = {
apiOverrides: {
providers: {
setProviderConfig: () => Promise.resolve({ success: true, data: undefined }),
list: () => Promise.resolve(["anthropic", "openai"]),
list: () => Promise.resolve(["anthropic", "openai", "xai"]),
},
workspace: {
create: (projectPath: string, branchName: string) =>
Expand Down
12 changes: 11 additions & 1 deletion src/common/constants/knownModels.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
* Centralized model metadata. Update model versions here and everywhere else will follow.
*/

type ModelProvider = "anthropic" | "openai" | "google";
type ModelProvider = "anthropic" | "openai" | "google" | "xai";

interface KnownModelDefinition {
/** Provider identifier used by SDK factories */
Expand Down Expand Up @@ -76,6 +76,16 @@ const MODEL_DEFINITIONS = {
aliases: ["gemini-3", "gemini-3-pro"],
tokenizerOverride: "google/gemini-2.5-pro",
},
GROK_4_1: {
provider: "xai",
providerModelId: "grok-4-1-fast-non-reasoning",
aliases: ["grok", "grok-4", "grok-4.1", "grok-4-1"],
},
GROK_CODE: {
provider: "xai",
providerModelId: "grok-code-fast-1",
aliases: ["grok-code"],
},
} as const satisfies Record<string, KnownModelDefinition>;

export type KnownModelKey = keyof typeof MODEL_DEFINITIONS;
Expand Down
8 changes: 8 additions & 0 deletions src/common/constants/providers.ts
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,13 @@ export async function importOpenRouter() {
return await import("@openrouter/ai-sdk-provider");
}

/**
 * Dynamically import the xAI provider package (`@ai-sdk/xai`).
 *
 * Kept as a dynamic import to match the other provider loaders in this
 * registry; the module is only resolved when actually requested.
 */
export async function importXAI() {
  const xaiModule = await import("@ai-sdk/xai");
  return xaiModule;
}

/**
* Centralized provider registry mapping provider names to their import functions
*
Expand All @@ -58,6 +65,7 @@ export const PROVIDER_REGISTRY = {
anthropic: importAnthropic,
openai: importOpenAI,
google: importGoogle,
xai: importXAI,
ollama: importOllama,
openrouter: importOpenRouter,
} as const;
Expand Down
11 changes: 11 additions & 0 deletions src/common/types/providerOptions.ts
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
import type { XaiProviderOptions } from "@ai-sdk/xai";

/**
* Mux provider-specific options that get passed through the stack.
* Used by both frontend and backend to configure provider-specific features
Expand Down Expand Up @@ -54,11 +56,20 @@ export interface OpenRouterProviderOptions {}
/**
 * xAI-specific options.
 */
export interface XaiProviderOverrides {
  /** Override Grok search parameters (defaults to auto search with citations) */
  searchParameters?: XaiProviderOptions["searchParameters"];
}

/**
 * Mux provider options - used by both frontend and backend
 */
export interface MuxProviderOptions {
  /** Provider-specific options */
  anthropic?: AnthropicProviderOptions;
  openai?: OpenAIProviderOptions;
  google?: GoogleProviderOptions;
  ollama?: OllamaProviderOptions;
  openrouter?: OpenRouterProviderOptions;
  xai?: XaiProviderOverrides;
}
25 changes: 24 additions & 1 deletion src/common/utils/ai/providerOptions.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,8 @@
import type { AnthropicProviderOptions } from "@ai-sdk/anthropic";
import type { OpenAIResponsesProviderOptions } from "@ai-sdk/openai";
import type { GoogleGenerativeAIProviderOptions } from "@ai-sdk/google";
import type { XaiProviderOptions } from "@ai-sdk/xai";
import type { MuxProviderOptions } from "@/common/types/providerOptions";
import type { ThinkingLevel } from "@/common/types/thinking";
import {
ANTHROPIC_THINKING_BUDGETS,
Expand Down Expand Up @@ -38,6 +40,7 @@ type ProviderOptions =
| { openai: OpenAIResponsesProviderOptions }
| { google: GoogleGenerativeAIProviderOptions }
| { openrouter: OpenRouterReasoningOptions }
| { xai: XaiProviderOptions }
| Record<string, never>; // Empty object for unsupported providers

/**
Expand All @@ -59,7 +62,8 @@ export function buildProviderOptions(
modelString: string,
thinkingLevel: ThinkingLevel,
messages?: MuxMessage[],
lostResponseIds?: (id: string) => boolean
lostResponseIds?: (id: string) => boolean,
muxProviderOptions?: MuxProviderOptions
): ProviderOptions {
// Always clamp to the model's supported thinking policy (e.g., gpt-5-pro = HIGH only)
const effectiveThinking = enforceThinkingPolicy(modelString, thinkingLevel);
Expand Down Expand Up @@ -250,6 +254,25 @@ export function buildProviderOptions(
return {};
}

// Build xAI-specific options
if (provider === "xai") {
const overrides = muxProviderOptions?.xai ?? {};

const defaultSearchParameters: XaiProviderOptions["searchParameters"] = {
mode: "auto",
returnCitations: true,
};

const options: ProviderOptions = {
xai: {
...overrides,
searchParameters: overrides.searchParameters ?? defaultSearchParameters,
},
};
log.debug("buildProviderOptions: Returning xAI options", options);
return options;
}

// No provider-specific options for unsupported providers
log.debug("buildProviderOptions: Unsupported provider", provider);
return {};
Expand Down
12 changes: 12 additions & 0 deletions src/common/utils/providers/ensureProvidersConfig.ts
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,18 @@ const buildProvidersFromEnv = (env: NodeJS.ProcessEnv): ProvidersConfig => {
providers.openrouter = { apiKey: openRouterKey };
}

const xaiKey = trim(env.XAI_API_KEY);
if (xaiKey.length > 0) {
const entry: ProviderConfig = { apiKey: xaiKey };

const baseUrl = trim(env.XAI_BASE_URL);
if (baseUrl.length > 0) {
entry.baseUrl = baseUrl;
}

providers.xai = entry;
}

if (!providers.openai) {
const azureKey = trim(env.AZURE_OPENAI_API_KEY);
const azureEndpoint = trim(env.AZURE_OPENAI_ENDPOINT);
Expand Down
33 changes: 33 additions & 0 deletions src/common/utils/tokens/models.json
Original file line number Diff line number Diff line change
Expand Up @@ -23323,6 +23323,39 @@
"supports_tool_choice": true,
"supports_web_search": true
},

"xai/grok-4-1-fast-reasoning": {
"litellm_provider": "xai",
"max_input_tokens": 2e6,
"max_output_tokens": 2e6,
"max_tokens": 2e6,
"mode": "chat",
"input_cost_per_token": 0.2e-6,
"input_cost_per_token_above_128k_tokens": 0.4e-6,
"output_cost_per_token": 0.5e-6,
"output_cost_per_token_above_128k_tokens": 1e-6,
"cache_read_input_token_cost": 0.05e-6,
"source": "https://docs.x.ai/docs/models/grok-4-1-fast-reasoning",
"supports_function_calling": true,
"supports_tool_choice": true,
"supports_web_search": true
},
"xai/grok-4-1-fast-non-reasoning": {
"litellm_provider": "xai",
"max_input_tokens": 2e6,
"max_output_tokens": 2e6,
"cache_read_input_token_cost": 0.05e-6,
"max_tokens": 2e6,
"mode": "chat",
"input_cost_per_token": 0.2e-6,
"input_cost_per_token_above_128k_tokens": 0.4e-6,
"output_cost_per_token": 0.5e-6,
"output_cost_per_token_above_128k_tokens": 1e-6,
"source": "https://docs.x.ai/docs/models/grok-4-1-fast-non-reasoning",
"supports_function_calling": true,
"supports_tool_choice": true,
"supports_web_search": true
},
"xai/grok-4-fast-non-reasoning": {
"litellm_provider": "xai",
"max_input_tokens": 2e6,
Expand Down
5 changes: 5 additions & 0 deletions src/node/config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,8 @@ export type { Workspace, ProjectConfig, ProjectsConfig };
export interface ProviderConfig {
  /** API key for the provider (e.g. read from env or providers.jsonc) */
  apiKey?: string;
  // NOTE(review): both `baseUrl` and `baseURL` spellings are declared —
  // presumably to accommodate SDKs that differ in option naming. TODO confirm
  // which consumers read which spelling, and whether one can be dropped.
  baseUrl?: string;
  baseURL?: string;
  /** Extra HTTP headers to send with provider requests */
  headers?: Record<string, string>;
  // Open index signature: provider entries may carry arbitrary extra settings
  [key: string]: unknown;
}

Expand Down Expand Up @@ -461,6 +463,9 @@ export class Config {
// "openai": {
// "apiKey": "sk-..."
// },
// "xai": {
// "apiKey": "sk-xai-..."
// },
// "ollama": {
// "baseUrl": "http://localhost:11434/api" // Optional - only needed for remote/custom URL
// }
Expand Down
Loading