From 6d99fd72c6c2bb126332001aabd2a709b641b940 Mon Sep 17 00:00:00 2001 From: Man Yue Mo Date: Fri, 28 Nov 2025 09:36:51 +0000 Subject: [PATCH 1/3] add support for model parameters passing --- README.md | 20 +++++++++++++++ doc/GRAMMAR.md | 12 +++++++++ src/seclab_taskflow_agent/__main__.py | 36 +++++++++++++++++++++------ 3 files changed, 60 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 24ab58d..bab1844 100644 --- a/README.md +++ b/README.md @@ -457,6 +457,26 @@ taskflow: The model version can then be updated by changing `gpt_latest` in the `model_config` file and applied across all taskflows that use the config. +In addition, model specific parameters can be provided via `model_config`. To do so, define a `model_settings` section in the `model_config` file. This section has to be a dictionary with the model names as keys: + +```yaml +model_settings: + gpt_latest: + temperature: 1 + reasoning: + effort: high +``` + +You do not need to set parameters for all models defined in the `models` section. When parameters is not set for a model, they'll fall back to the default value. However, all the settings in this section must belong to one of the models specified in the `models` section, otherwise an error will raise: + +```yaml +model_settings: + new_model: + ... +``` + +The above will result in an error because `new_model` is not defined in `models` section. Model parameters can also be set per task, and any settings defined in a task will override the settings in the config. 
+ ## Passing environment variables Files of types `taskflow` and `toolbox` allow environment variables to be passed using the `env` field: diff --git a/doc/GRAMMAR.md b/doc/GRAMMAR.md index 17ca0ec..c0581f4 100644 --- a/doc/GRAMMAR.md +++ b/doc/GRAMMAR.md @@ -91,6 +91,18 @@ Tasks can optionally specify which Model to use on the configured inference endp Note that model identifiers may differ between OpenAI compatible endpoint providers, make sure you change your model identifier accordingly when switching providers. If not specified, a default LLM model (`gpt-4o`) is used. +Parameters to the model can also be specified in the task using the `model_settings` section: + +```yaml + model: gpt-5-mini + model_settings: + temperature: 1 + reasoning: + effort: high +``` + +If `model_settings` is absent, then the model parameters will fall back to either the default or the ones supplied in a `model_config`. However, any parameters supplied in the task will override those that are set in the `model_config`. + ### Completion Requirement Tasks can be marked as requiring completion, if a required task fails, the taskflow will abort. This defaults to false. 
diff --git a/src/seclab_taskflow_agent/__main__.py b/src/seclab_taskflow_agent/__main__.py index aadacf3..1f8aae6 100644 --- a/src/seclab_taskflow_agent/__main__.py +++ b/src/seclab_taskflow_agent/__main__.py @@ -107,7 +107,7 @@ async def deploy_task_agents(available_tools: AvailableTools, exclude_from_context: bool = False, max_turns: int = DEFAULT_MAX_TURNS, model: str = DEFAULT_MODEL, - model_settings: ModelSettings | None = None, + model_par: dict = {}, run_hooks: TaskRunHooks | None = None, agent_hooks: TaskAgentHooks | None = None): @@ -130,10 +130,11 @@ async def deploy_task_agents(available_tools: AvailableTools, # https://openai.github.io/openai-agents-python/ref/model_settings/ parallel_tool_calls = True if os.getenv('MODEL_PARALLEL_TOOL_CALLS') else False - model_settings = ModelSettings( - temperature=os.getenv('MODEL_TEMP', default=0.0), - tool_choice=('auto' if toolboxes else None), - parallel_tool_calls=(parallel_tool_calls if toolboxes else None)) + model_params = {'temperature' : os.getenv('MODEL_TEMP', default = 0.0), + 'tool_choice' : ('auto' if toolboxes else None), + 'parallel_tool_calls' : (parallel_tool_calls if toolboxes else None)} + model_params.update(model_par) + model_settings = ModelSettings(**model_params) # block tools if requested tool_filter = create_static_tool_filter(blocked_tool_names=blocked_tools) if blocked_tools else None @@ -438,13 +439,22 @@ async def on_handoff_hook( global_variables.update(cli_globals) model_config = taskflow.get('model_config', {}) model_keys = [] + models_params = {} if model_config: - model_dict = available_tools.get_model_config(model_config) - model_dict = model_dict.get('models', {}) + m_config = available_tools.get_model_config(model_config) + model_dict = m_config.get('models', {}) if model_dict: if not isinstance(model_dict, dict): raise ValueError(f"Models section of the model_config file {model_config} must be a dictionary") - model_keys = model_dict.keys() + model_keys = model_dict.keys() + 
models_params = m_config.get('model_settings', {}) + if models_params and not isinstance(models_params, dict): + raise ValueError(f"Settings section of model_config file {model_config} must be a dictionary") + if not set(models_params.keys()).difference(model_keys).issubset(set([])): + raise ValueError(f"Settings section of model_config file {model_config} contains models that are not in the model section") + for k,v in models_params.items(): + if not isinstance(v, dict): + raise ValueError(f"Settings for model {k} in model_config file {model_config} is not a dictionary") for task in taskflow['taskflow']: @@ -465,8 +475,17 @@ async def on_handoff_hook( if k not in task_body: task_body[k] = v model = task_body.get('model', DEFAULT_MODEL) + model_settings = {} if model in model_keys: + if model in models_params: + model_settings = models_params[model] model = model_dict[model] + task_model_settings = task_body.get('model_settings', {}) + if not isinstance(task_model_settings, dict): + name = task.get('name', '') + raise ValueError(f"model_settings in task {name} needs to be a dictionary") + model_settings.update(task_model_settings) + # parse our taskflow grammar name = task_body.get('name', 'taskflow') # placeholder, not used yet description = task_body.get('description', 'taskflow') # placeholder not used yet @@ -622,6 +641,7 @@ async def _deploy_task_agents(resolved_agents, prompt): on_tool_end=on_tool_end_hook, on_tool_start=on_tool_start_hook), model = model, + model_par = model_settings, agent_hooks=TaskAgentHooks( on_handoff=on_handoff_hook)) return result From 29a9a6b2b0cce606116ac0e9cf2216e419bb23ad Mon Sep 17 00:00:00 2001 From: Man Yue Mo Date: Fri, 28 Nov 2025 09:48:09 +0000 Subject: [PATCH 2/3] Update src/seclab_taskflow_agent/__main__.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- src/seclab_taskflow_agent/__main__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/seclab_taskflow_agent/__main__.py 
b/src/seclab_taskflow_agent/__main__.py index 1f8aae6..6ea873d 100644 --- a/src/seclab_taskflow_agent/__main__.py +++ b/src/seclab_taskflow_agent/__main__.py @@ -478,7 +478,7 @@ async def on_handoff_hook( model_settings = {} if model in model_keys: if model in models_params: - model_settings = models_params[model] + model_settings = models_params[model].copy() model = model_dict[model] task_model_settings = task_body.get('model_settings', {}) if not isinstance(task_model_settings, dict): From d0dff7fe8843eaa4b9d13f5437f716dc3cb07c86 Mon Sep 17 00:00:00 2001 From: Man Yue Mo Date: Fri, 28 Nov 2025 09:48:25 +0000 Subject: [PATCH 3/3] Update README.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index bab1844..39f9c76 100644 --- a/README.md +++ b/README.md @@ -467,7 +467,7 @@ model_settings: effort: high ``` -You do not need to set parameters for all models defined in the `models` section. When parameters is not set for a model, they'll fall back to the default value. However, all the settings in this section must belong to one of the models specified in the `models` section, otherwise an error will raise: +You do not need to set parameters for all models defined in the `models` section. When parameters are not set for a model, they'll fall back to the default value. However, all the settings in this section must belong to one of the models specified in the `models` section, otherwise an error will be raised: ```yaml model_settings: