1 change: 1 addition & 0 deletions backend/app/models/subtask.py
@@ -43,6 +43,7 @@ class Subtask(Base):
progress = Column(Integer, nullable=False, default=0)
result = Column(JSON)
error_message = Column(Text)
override_model_config = Column(JSON, nullable=True)
created_at = Column(DateTime, default=func.now())
updated_at = Column(DateTime, default=func.now(), onupdate=func.now())
completed_at = Column(DateTime)
1 change: 1 addition & 0 deletions backend/app/schemas/task.py
@@ -53,6 +53,7 @@ class TaskCreate(BaseModel):
task_type: Optional[str] = "chat" # chat、code
auto_delete_executor: Optional[str] = "false" # true、fasle
source: Optional[str] = "web"
override_model: Optional[str] = None


class TaskUpdate(BaseModel):
29 changes: 24 additions & 5 deletions backend/app/services/adapters/task_kinds.py
@@ -230,7 +230,7 @@ def create_task_or_append(
db.add(task)

# Create subtasks for the task
self._create_subtasks(db, task, team, user.id, obj_in.prompt)
self._create_subtasks(db, task, team, user.id, obj_in.prompt, obj_in.override_model)

db.commit()
db.refresh(task)
@@ -436,6 +436,17 @@ def get_task_detail(
# Convert subtasks to dict and replace bot_ids with bot objects
subtasks_dict = []
for subtask in subtasks:
# Apply override_model_config to bot configs if present
subtask_bots = []
for bot_id in subtask.bot_ids:
if bot_id in bots:
bot_dict = bots[bot_id].copy()
# If subtask has override_model_config, use it to override agent_config
if subtask.override_model_config:
bot_dict["agent_config"] = subtask.override_model_config
logger.info(f"Applied override_model_config to bot {bot_id} in subtask {subtask.id}: {subtask.override_model_config}")
subtask_bots.append(bot_dict)
Comment on lines +439 to +448
⚠️ Potential issue | 🟠 Major

Override replaces the entire agent_config instead of merging.

Line 446 replaces the entire agent_config with override_model_config:

bot_dict["agent_config"] = subtask.override_model_config

This means if the original agent_config had other important fields (e.g., temperature, max_tokens, top_p), they will be lost. Only private_model will remain.

Consider merging the override config with the existing config:

                 if bot_id in bots:
                     bot_dict = bots[bot_id].copy()
                     # If subtask has override_model_config, use it to override agent_config
                     if subtask.override_model_config:
-                        bot_dict["agent_config"] = subtask.override_model_config
+                        # Merge override config with existing config to preserve other settings
+                        if bot_dict.get("agent_config"):
+                            bot_dict["agent_config"] = {**bot_dict["agent_config"], **subtask.override_model_config}
+                        else:
+                            bot_dict["agent_config"] = subtask.override_model_config
                         logger.info(f"Applied override_model_config to bot {bot_id} in subtask {subtask.id}: {subtask.override_model_config}")
                     subtask_bots.append(bot_dict)

This preserves other configuration parameters while overriding only the private_model field.
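
As a minimal illustration of the merge semantics (plain Python dict spreading; the sample field values here are hypothetical, not taken from this PR):

# Shallow merge: keys from the override win, every other setting is preserved.
existing_config = {"temperature": 0.2, "max_tokens": 4096, "private_model": "model-a"}
override_config = {"private_model": "model-b"}

merged = {**existing_config, **override_config} if existing_config else dict(override_config)
print(merged)
# -> {'temperature': 0.2, 'max_tokens': 4096, 'private_model': 'model-b'}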

🤖 Prompt for AI Agents
In backend/app/services/adapters/task_kinds.py around lines 439-448, the code
currently replaces the entire bot_dict["agent_config"] with
subtask.override_model_config which discards other fields; instead, ensure
bot_dict has an agent_config dict (create empty dict if missing), make a shallow
copy of it, update that copy with subtask.override_model_config (so override
keys replace existing ones but other keys like temperature/max_tokens/top_p are
preserved), assign the merged dict back to bot_dict["agent_config"], and keep
the info log (optionally logging both original and merged configs or just the
applied override).


# Convert subtask to dict
subtask_dict = {
# Subtask base fields
@@ -458,8 +469,8 @@ def get_task_detail(
"created_at": subtask.created_at,
"updated_at": subtask.updated_at,
"completed_at": subtask.completed_at,
# Add bot objects as dict for each bot_id
"bots": [bots.get(bot_id) for bot_id in subtask.bot_ids if bot_id in bots]
# Add bot objects with potentially overridden config
"bots": subtask_bots
}
subtasks_dict.append(subtask_dict)

@@ -832,11 +843,11 @@ def _convert_team_to_dict(self, team: Kind, db: Session, user_id: int) -> Dict[s
"updated_at": team.updated_at,
}

def _create_subtasks(self, db: Session, task: Kind, team: Kind, user_id: int, user_prompt: str) -> None:
def _create_subtasks(self, db: Session, task: Kind, team: Kind, user_id: int, user_prompt: str, override_model: Optional[str] = None) -> None:
"""
Create subtasks based on team's workflow configuration
"""
logger.info(f"_create_subtasks called with task_id={task.id}, team_id={team.id}, user_id={user_id}")
logger.info(f"_create_subtasks called with task_id={task.id}, team_id={team.id}, user_id={user_id}, override_model={override_model}")
team_crd = Team.model_validate(team.json)
task_crd = Task.model_validate(task.json)

@@ -902,6 +913,12 @@ def _create_subtasks(self, db: Session, task: Kind, team: Kind, user_id: int, us
)
db.add(user_subtask)

# Prepare override_model_config if override_model is provided
override_model_config = None
if override_model:
override_model_config = {"private_model": override_model}
logger.info(f"Using override_model_config: {override_model_config}")
Comment on lines +916 to +920
⚠️ Potential issue | 🟡 Minor

🧩 Analysis chain

Add validation for override_model value.

The code creates override_model_config without validating that override_model is a valid model name. This could lead to runtime errors when the executor tries to use an invalid model.

Add validation before creating the config:

         # Prepare override_model_config if override_model is provided
         override_model_config = None
         if override_model:
+            # Validate override_model is not empty
+            if not override_model.strip():
+                raise HTTPException(
+                    status_code=400,
+                    detail="override_model cannot be empty"
+                )
+            # TODO: Consider validating against available models from the models API
             override_model_config = {"private_model": override_model}
             logger.info(f"Using override_model_config: {override_model_config}")

Additionally, verify if there's a models service that can validate model names:


🏁 Script executed:

#!/bin/bash
# Description: Check for model validation services or available models lists

# Search for models API endpoints or services
echo "=== Checking for models API or validation services ==="
rg -n "def.*models" --type=py -g "backend/**" -A3

# Search for model name validation
echo "=== Checking for model name validation ==="
rg -n "validate.*model|model.*valid" --type=py -g "backend/**" -C3

# Look for /models/names endpoint implementation
echo "=== Checking /models/names endpoint ==="
rg -n "/models/names|models/names" --type=py -g "backend/**" -C5

Length of output: 44837


🏁 Script executed:

#!/bin/bash
# Examine context around lines 916-920 in task_kinds.py

echo "=== Code context around lines 916-920 ==="
sed -n '900,930p' backend/app/services/adapters/task_kinds.py

echo -e "\n=== Searching for override_model usage ==="
rg -n "override_model" --type=py -C5 -g "backend/**"

echo -e "\n=== Checking function signature where override_model is used ==="
rg -n "def.*override_model" --type=py -C3 -g "backend/**"

Length of output: 10999



🏁 Script executed:

#!/bin/bash
# Check where override_model_config is used downstream and if validation exists

echo "=== Searching for where override_model_config is processed ==="
rg -n "override_model_config" --type=py -C3 -g "backend/**"

echo -e "\n=== Checking models endpoint for validation capabilities ==="
cat backend/app/api/endpoints/adapter/models.py | head -150

echo -e "\n=== Checking if there's a method to validate model names ==="
rg -n "def.*valid.*model|def.*check.*model" --type=py -C2 -g "backend/**"

Length of output: 8660



🏁 Script executed:

#!/bin/bash
# Check list_model_names implementation and related context

echo "=== Checking list_model_names implementation ==="
rg -n "def list_model_names" --type=py -C15 -g "backend/**"

echo -e "\n=== Checking if validation is done elsewhere for similar parameters ==="
rg -n "HTTPException.*400|raise.*ValueError" --type=py -C2 -g "backend/app/services/adapters/task_kinds.py" | head -50

echo -e "\n=== Check if override_model is ever validated at API level ==="
rg -n "override_model" --type=py backend/app/api/ -C3

Length of output: 5483


Add validation for override_model before creating config.

The code accepts override_model without validation and directly passes it to subtasks, which then override bot configurations. This could send invalid model names to the executor.

At minimum, validate that override_model is not empty/whitespace-only before creating the config. Additionally, consider validating against available models using the list_model_names() service (which exists at /models/names), though this would require accessing the agent_name from the team's bot references.

        # Prepare override_model_config if override_model is provided
        override_model_config = None
        if override_model:
+            if not override_model.strip():
+                raise HTTPException(
+                    status_code=400,
+                    detail="override_model cannot be empty"
+                )
+            # TODO: Consider validating against available models for the shells used in this team
             override_model_config = {"private_model": override_model}
             logger.info(f"Using override_model_config: {override_model_config}")

Committable suggestion skipped: line range outside the PR's diff.

🤖 Prompt for AI Agents
In backend/app/services/adapters/task_kinds.py around lines 916 to 920, the code
builds override_model_config whenever override_model is truthy but does not
validate it; change this to first trim and ensure override_model is not empty or
only whitespace before creating override_model_config, log a warning and skip
creating the config if invalid, and (if agent_name/team context is available)
call the models name service (/models/names or list_model_names()) to verify the
override_model exists before using it — if the model is not found, log an error
and do not pass the invalid override_model to subtasks. Ensure logging includes
the problematic value and that behavior is consistent (no config created) when
validation fails.
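
A rough, non-authoritative sketch of that warn-and-skip validation (list_model_names() is the /models/names service mentioned above; its exact signature and return shape are assumptions made for illustration):

# Sketch only: trim, reject empty values, and optionally check against known model names.
override_model_config = None
if override_model:
    candidate = override_model.strip()
    if not candidate:
        logger.warning("override_model is empty or whitespace-only; ignoring override")
    elif candidate not in {m["name"] for m in list_model_names(agent_name)}:  # assumed return shape
        logger.error(f"Unknown override_model {candidate!r}; ignoring override")
    else:
        override_model_config = {"private_model": candidate}
        logger.info(f"Using override_model_config: {override_model_config}")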


# Update id of next message and parent
if parent_id == 0:
parent_id = 1
@@ -944,6 +961,7 @@ def _create_subtasks(self, db: Session, task: Kind, team: Kind, user_id: int, us
error_message="",
completed_at=datetime.now(),
result=None,
override_model_config=override_model_config,
)

# Update id of next message and parent
@@ -977,6 +995,7 @@ def _create_subtasks(self, db: Session, task: Kind, team: Kind, user_id: int, us
error_message="",
completed_at=datetime.now(),
result=None,
override_model_config=override_model_config,
)
db.add(assistant_subtask)

1 change: 1 addition & 0 deletions frontend/src/apis/tasks.ts
@@ -19,6 +19,7 @@ export interface CreateTaskRequest {
batch: number;
user_id: number;
user_name: string;
override_model?: string;
}

export interface UpdateTaskRequest {
72 changes: 63 additions & 9 deletions frontend/src/features/tasks/components/ChatArea.tsx
@@ -9,6 +9,7 @@ import { ArrowTurnDownLeftIcon } from '@heroicons/react/24/outline';
import MessagesArea from './MessagesArea';
import ChatInput from './ChatInput';
import TeamSelector from './TeamSelector';
import ModelSelector from './ModelSelector';
import RepositorySelector from './RepositorySelector';
import BranchSelector from './BranchSelector';
import type { Team, GitRepoInfo, GitBranch } from '@/types/api';
@@ -18,7 +19,7 @@ import { useTaskContext } from '../contexts/taskContext';
import { App, Button } from 'antd';
import QuotaUsage from './QuotaUsage';
import { useMediaQuery } from '@/hooks/useMediaQuery';
import { saveLastTeam, getLastTeamId, saveLastRepo } from '@/utils/userPreferences';
import { saveLastTeam, getLastTeamId, saveLastRepo, saveLastModel, getLastModel } from '@/utils/userPreferences';

const SHOULD_HIDE_QUOTA_NAME_LIMIT = 18;

@@ -47,6 +48,7 @@ export default function ChatArea({
}

const [selectedTeam, setSelectedTeam] = useState<Team | null>(null);
const [selectedModel, setSelectedModel] = useState<string | null>(null);
const [selectedRepo, setSelectedRepo] = useState<GitRepoInfo | null>(null);
const [selectedBranch, setSelectedBranch] = useState<GitBranch | null>(null);
const [hasRestoredPreferences, setHasRestoredPreferences] = useState(false);
@@ -114,6 +116,38 @@ export default function ChatArea({
setHasRestoredPreferences(true);
}, [teams, hasRestoredPreferences]);

// Restore model preference from localStorage on mount (only for new tasks)
useEffect(() => {
if (!hasMessages) {
const lastModel = getLastModel();
if (lastModel) {
console.log('[ChatArea] Restoring model from localStorage:', lastModel);
setSelectedModel(lastModel);
}
}
}, [hasMessages]);

// Read current model from task detail when appending messages
useEffect(() => {
if (hasMessages && selectedTaskDetail?.subtasks) {
// Find the most recent ASSISTANT subtask
const assistantSubtasks = selectedTaskDetail.subtasks.filter(
(st: any) => st.role === 'ASSISTANT'
);
if (assistantSubtasks.length > 0) {
const latestAssistant = assistantSubtasks[assistantSubtasks.length - 1];
if (latestAssistant.bots && latestAssistant.bots.length > 0) {
const bot = latestAssistant.bots[0];
const privateModel = bot.agent_config?.private_model;
if (privateModel) {
console.log('[ChatArea] Setting model from current task:', privateModel);
setSelectedModel(privateModel);
}
}
}
}
}, [hasMessages, selectedTaskDetail]);

// Handle external team selection for new tasks (from team sharing)
useEffect(() => {
if (selectedTeamForNewTask && !hasMessages) {
@@ -142,6 +176,17 @@ export default function ChatArea({
}
};

const handleModelChange = (model: string | null) => {
console.log('[ChatArea] handleModelChange called:', model || 'null');
setSelectedModel(model);

// Save model preference to localStorage
if (model) {
console.log('[ChatArea] Saving model to localStorage:', model);
saveLastModel(model);
}
};

// Save repository preference when it changes
useEffect(() => {
if (selectedRepo) {
@@ -159,6 +204,7 @@ export default function ChatArea({
branch: showRepositorySelector ? selectedBranch : null,
task_id: selectedTaskDetail?.id,
taskType: taskType,
selectedModel: selectedModel,
});
if (error) {
message.error(error);
@@ -358,15 +404,23 @@ export default function ChatArea({
/>
{/* Team Selector and Send Button */}
<div className="flex items-end justify-between px-3 py-0">
<div>
<div className="flex items-center space-x-2">
{teams.length > 0 && (
<TeamSelector
selectedTeam={selectedTeam}
setSelectedTeam={handleTeamChange}
teams={teams}
disabled={hasMessages}
isLoading={isTeamsLoading}
/>
<>
<TeamSelector
selectedTeam={selectedTeam}
setSelectedTeam={handleTeamChange}
teams={teams}
disabled={hasMessages}
isLoading={isTeamsLoading}
/>
<ModelSelector
selectedModel={selectedModel}
setSelectedModel={handleModelChange}
disabled={false}
isLoading={isLoading}
/>
</>
)}
</div>
<div className="ml-auto flex items-center">
135 changes: 135 additions & 0 deletions frontend/src/features/tasks/components/ModelSelector.tsx
@@ -0,0 +1,135 @@
// SPDX-FileCopyrightText: 2025 Weibo, Inc.
//
// SPDX-License-Identifier: Apache-2.0

'use client';

import React, { useEffect, useState, useMemo } from 'react';
import { Select, theme } from 'antd';
import { CpuChipIcon } from '@heroicons/react/24/outline';
import { apiClient } from '@/apis/client';
import { useTranslation } from '@/hooks/useTranslation';
import { useMediaQuery } from '@/hooks/useMediaQuery';

interface ModelSelectorProps {
selectedModel: string | null;
setSelectedModel: (model: string | null) => void;
disabled?: boolean;
isLoading?: boolean;
}

interface ModelOption {
name: string;
}

export default function ModelSelector({
selectedModel,
setSelectedModel,
disabled = false,
isLoading = false,
}: ModelSelectorProps) {
const { t } = useTranslation('common');
const { token } = theme.useToken();
const isMobile = useMediaQuery('(max-width: 767px)');
const [models, setModels] = useState<ModelOption[]>([]);
const [modelsLoading, setModelsLoading] = useState(false);
const [error, setError] = useState<string | null>(null);

// Fetch models from API
useEffect(() => {
const fetchModels = async () => {
setModelsLoading(true);
setError(null);
try {
const response = await apiClient.get<{ data: ModelOption[] }>('/models/names', {
agent_name: 'ClaudeCode',
});
setModels(response.data || []);
} catch (err) {
console.error('Failed to fetch models:', err);
setError('Failed to load models');
setModels([]);
} finally {
setModelsLoading(false);
}
};

fetchModels();
}, []);

// Validate selectedModel exists in models list
useEffect(() => {
if (selectedModel && models.length > 0) {
const exists = models.some(m => m.name === selectedModel);
if (!exists) {
console.warn(
`Selected model "${selectedModel}" not found in models list, clearing selection`
);
setSelectedModel(null);
}
}
}, [selectedModel, models, setSelectedModel]);

const handleChange = (value: string | null) => {
setSelectedModel(value);
};

const modelOptions = useMemo(() => {
return models.map(model => ({
label: (
<span className="font-medium text-xs text-text-primary truncate" title={model.name}>
{model.name}
</span>
),
value: model.name,
}));
}, [models]);

const filterOption = (input: string, option?: { label: React.ReactNode; value: string }) => {
if (!option) return false;
return option.value.toLowerCase().includes(input.toLowerCase());
};

if (error) {
return null; // Hide selector if models failed to load
}

return (
<div className="flex items-baseline space-x-1 min-w-0">
<CpuChipIcon
className={`w-3 h-3 text-text-muted flex-shrink-0 ${modelsLoading || isLoading ? 'animate-pulse' : ''}`}
/>
<Select
showSearch
allowClear
value={selectedModel}
placeholder={
<span className="text-sx truncate h-2">
{modelsLoading ? t('chat.model_loading') || 'Loading...' : t('chat.select_model') || 'Select Model'}
</span>
}
className="repository-selector min-w-0 truncate"
style={{
width: 'auto',
maxWidth: isMobile ? 150 : 200,
display: 'inline-block',
paddingRight: 20,
}}
popupMatchSelectWidth={false}
styles={{ popup: { root: { maxWidth: 280 } } }}
classNames={{ popup: { root: 'repository-selector-dropdown custom-scrollbar' } }}
disabled={disabled || modelsLoading}
loading={modelsLoading}
size="small"
filterOption={filterOption}
onChange={handleChange}
notFoundContent={
<div className="px-3 py-2 text-sm text-text-muted">
{t('chat.no_model_found') || 'No model found'}
</div>
}
options={modelOptions}
/>
</div>
);
}
4 changes: 3 additions & 1 deletion frontend/src/features/tasks/service/messageService.ts
@@ -17,8 +17,9 @@ export async function sendMessage(params: {
branch: GitBranch | null;
task_id?: number;
taskType?: 'chat' | 'code';
selectedModel?: string | null;
}) {
const { message, team, repo, branch, task_id, taskType = 'chat' } = params;
const { message, team, repo, branch, task_id, taskType = 'chat', selectedModel } = params;
const trimmed = message?.trim() ?? '';

if (!trimmed) {
@@ -51,6 +52,7 @@
batch: 0,
user_id: 0,
user_name: '',
override_model: selectedModel ?? undefined,
};

try {