From a2e41cc606e8c6f6bf0adac96c09737cb46c7e92 Mon Sep 17 00:00:00 2001 From: Kevin Backhouse Date: Tue, 2 Dec 2025 15:00:28 +0000 Subject: [PATCH 1/5] Add new environment variable named AI_API_TOKEN. --- src/seclab_taskflow_agent/__main__.py | 4 +- src/seclab_taskflow_agent/agent.py | 9 +++-- src/seclab_taskflow_agent/capi.py | 41 ++++++++++++++------ tests/test_api_endpoint_config.py | 54 +++++++++++---------------- 4 files changed, 57 insertions(+), 51 deletions(-) diff --git a/src/seclab_taskflow_agent/__main__.py b/src/seclab_taskflow_agent/__main__.py index 6ea873d..7f98b11 100644 --- a/src/seclab_taskflow_agent/__main__.py +++ b/src/seclab_taskflow_agent/__main__.py @@ -31,7 +31,7 @@ from .render_utils import render_model_output, flush_async_output from .env_utils import TmpEnv from .agent import TaskAgent -from .capi import list_tool_call_models +from .capi import list_tool_call_models, get_AI_token from .available_tools import AvailableTools load_dotenv(find_dotenv(usecwd=True)) @@ -686,7 +686,7 @@ async def _deploy_task_agents(resolved_agents, prompt): p, t, l, cli_globals, user_prompt, help_msg = parse_prompt_args(available_tools) if l: - tool_models = list_tool_call_models(os.getenv('COPILOT_TOKEN')) + tool_models = list_tool_call_models(get_AI_token()) for model in tool_models: print(model) sys.exit(0) diff --git a/src/seclab_taskflow_agent/agent.py b/src/seclab_taskflow_agent/agent.py index f05d764..07544f9 100644 --- a/src/seclab_taskflow_agent/agent.py +++ b/src/seclab_taskflow_agent/agent.py @@ -15,18 +15,19 @@ from agents.run import RunHooks from agents import Agent, Runner, AgentHooks, RunHooks, result, function_tool, Tool, RunContextWrapper, TContext, OpenAIChatCompletionsModel, set_default_openai_client, set_default_openai_api, set_tracing_disabled -from .capi import COPILOT_INTEGRATION_ID, AI_API_ENDPOINT, AI_API_ENDPOINT_ENUM +from .capi import COPILOT_INTEGRATION_ID, get_AI_endpoint, AI_API_ENDPOINT_ENUM # grab our secrets from .env, this must be in .gitignore load_dotenv(find_dotenv(usecwd=True)) -match urlparse(AI_API_ENDPOINT).netloc: +api_endpoint = get_AI_endpoint() +match urlparse(api_endpoint).netloc: case AI_API_ENDPOINT_ENUM.AI_API_GITHUBCOPILOT: default_model = 'gpt-4o' case AI_API_ENDPOINT_ENUM.AI_API_MODELS_GITHUB: default_model = 'openai/gpt-4o' case _: - raise ValueError(f"Unsupported Model Endpoint: {AI_API_ENDPOINT}") + raise ValueError(f"Unsupported Model Endpoint: {api_endpoint}") DEFAULT_MODEL = os.getenv('COPILOT_DEFAULT_MODEL', default=default_model) @@ -148,7 +149,7 @@ def __init__(self, model_settings: ModelSettings | None = None, run_hooks: TaskRunHooks | None = None, agent_hooks: TaskAgentHooks | None = None): - client = AsyncOpenAI(base_url=AI_API_ENDPOINT, + client = AsyncOpenAI(base_url=api_endpoint, api_key=os.getenv('COPILOT_TOKEN'), default_headers={'Copilot-Integration-Id': COPILOT_INTEGRATION_ID}) set_default_openai_client(client) diff --git a/src/seclab_taskflow_agent/capi.py b/src/seclab_taskflow_agent/capi.py index 8b52f00..c4a95ae 100644 --- a/src/seclab_taskflow_agent/capi.py +++ b/src/seclab_taskflow_agent/capi.py @@ -9,12 +9,6 @@ from strenum import StrEnum from urllib.parse import urlparse -# you can also set https://api.githubcopilot.com if you prefer -# but beware that your taskflows need to reference the correct model id -# since different APIs use their own id schema, use -l with your desired -# endpoint to retrieve the correct id names to use for your taskflow -AI_API_ENDPOINT = os.getenv('AI_API_ENDPOINT', 
default='https://models.github.ai/inference') - # Enumeration of currently supported API endpoints. class AI_API_ENDPOINT_ENUM(StrEnum): AI_API_MODELS_GITHUB = 'models.github.ai' @@ -22,20 +16,42 @@ class AI_API_ENDPOINT_ENUM(StrEnum): COPILOT_INTEGRATION_ID = 'vscode-chat' +# you can also set https://api.githubcopilot.com if you prefer +# but beware that your taskflows need to reference the correct model id +# since different APIs use their own id schema, use -l with your desired +# endpoint to retrieve the correct id names to use for your taskflow +def get_AI_endpoint(): + return os.getenv('AI_API_ENDPOINT', default='https://models.github.ai/inference') + +def get_AI_token(): + """ + Get the token for the AI API from the environment. + The environment variable can be named either AI_API_TOKEN + or COPILOT_TOKEN. + """ + token = os.getenv('AI_API_TOKEN') + if token: + return token + token = os.getenv('COPILOT_TOKEN') + if token: + return token + raise RuntimeError("AI_API_TOKEN environment variable is not set.") + # assume we are >= python 3.9 for our type hints def list_capi_models(token: str) -> dict[str, dict]: """Retrieve a dictionary of available CAPI models""" models = {} try: - netloc = urlparse(AI_API_ENDPOINT).netloc + api_endpoint = get_AI_endpoint() + netloc = urlparse(endpoint).netloc match netloc: case AI_API_ENDPOINT_ENUM.AI_API_GITHUBCOPILOT: models_catalog = 'models' case AI_API_ENDPOINT_ENUM.AI_API_MODELS_GITHUB: models_catalog = 'catalog/models' case _: - raise ValueError(f"Unsupported Model Endpoint: {AI_API_ENDPOINT}") - r = httpx.get(httpx.URL(AI_API_ENDPOINT).join(models_catalog), + raise ValueError(f"Unsupported Model Endpoint: {api_endpoint}") + r = httpx.get(httpx.URL(api_endpoint).join(models_catalog), headers={ 'Accept': 'application/json', 'Authorization': f'Bearer {token}', @@ -49,7 +65,7 @@ def list_capi_models(token: str) -> dict[str, dict]: case AI_API_ENDPOINT_ENUM.AI_API_MODELS_GITHUB: models_list = r.json() case _: - raise ValueError(f"Unsupported Model Endpoint: {AI_API_ENDPOINT}") + raise ValueError(f"Unsupported Model Endpoint: {api_endpoint}") for model in models_list: models[model.get('id')] = dict(model) except httpx.RequestError as e: @@ -61,7 +77,8 @@ def list_capi_models(token: str) -> dict[str, dict]: return models def supports_tool_calls(model: str, models: dict) -> bool: - match urlparse(AI_API_ENDPOINT).netloc: + api_endpoint = get_AI_endpoint() + match urlparse(api_endpoint).netloc: case AI_API_ENDPOINT_ENUM.AI_API_GITHUBCOPILOT: return models.get(model, {}).\ get('capabilities', {}).\ @@ -71,7 +88,7 @@ def supports_tool_calls(model: str, models: dict) -> bool: return 'tool-calling' in models.get(model, {}).\ get('capabilities', []) case _: - raise ValueError(f"Unsupported Model Endpoint: {AI_API_ENDPOINT}") + raise ValueError(f"Unsupported Model Endpoint: {api_endpoint}") def list_tool_call_models(token: str) -> dict[str, dict]: models = list_capi_models(token) diff --git a/tests/test_api_endpoint_config.py b/tests/test_api_endpoint_config.py index fc0d887..a2ae588 100644 --- a/tests/test_api_endpoint_config.py +++ b/tests/test_api_endpoint_config.py @@ -8,51 +8,39 @@ import pytest import os from urllib.parse import urlparse +from seclab_taskflow_agent.capi import get_AI_endpoint, AI_API_ENDPOINT_ENUM class TestAPIEndpoint: """Test API endpoint configuration.""" - - @staticmethod - def _reload_capi_module(): - """Helper method to reload the capi module.""" - import importlib - import seclab_taskflow_agent.capi - 
importlib.reload(seclab_taskflow_agent.capi) - + def test_default_api_endpoint(self): """Test that default API endpoint is set to models.github.ai/inference.""" - import seclab_taskflow_agent.capi # When no env var is set, it should default to models.github.ai/inference - # Note: We can't easily test this without manipulating the environment - # so we'll just import and verify the constant exists - endpoint = seclab_taskflow_agent.capi.AI_API_ENDPOINT - assert endpoint is not None - assert isinstance(endpoint, str) - assert urlparse(endpoint).netloc == seclab_taskflow_agent.capi.AI_API_ENDPOINT_ENUM.AI_API_MODELS_GITHUB - + try: + # Save original env + original_env = os.environ + os.environ.pop('AI_API_ENDPOINT', None) + endpoint = get_AI_endpoint() + assert endpoint is not None + assert isinstance(endpoint, str) + assert urlparse(endpoint).netloc == AI_API_ENDPOINT_ENUM.AI_API_MODELS_GITHUB + finally: + # Restore original env + os.environ = original_env + def test_api_endpoint_env_override(self): """Test that AI_API_ENDPOINT can be overridden by environment variable.""" - # Save original env - original_env = os.environ.get('AI_API_ENDPOINT') - try: - # Set custom endpoint - test_endpoint = 'https://test.example.com' + # Save original env + original_env = os.environ + # Set different endpoint + test_endpoint = 'https://api.githubcopilot.com' os.environ['AI_API_ENDPOINT'] = test_endpoint - - # Reload the module to pick up the new env var - self._reload_capi_module() - - import seclab_taskflow_agent.capi - assert seclab_taskflow_agent.capi.AI_API_ENDPOINT == test_endpoint + + assert get_AI_endpoint() == test_endpoint finally: # Restore original env - if original_env is None: - os.environ.pop('AI_API_ENDPOINT', None) - else: - os.environ['AI_API_ENDPOINT'] = original_env - # Reload again to restore original state - self._reload_capi_module() + os.environ = original_env if __name__ == '__main__': pytest.main([__file__, '-v']) From 68a46a606f75d39311af0b0646f486f98bf0444b Mon Sep 17 00:00:00 2001 From: Kevin Backhouse Date: Tue, 2 Dec 2025 15:19:59 +0000 Subject: [PATCH 2/5] Save/restore os.environ correctly --- tests/test_api_endpoint_config.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/test_api_endpoint_config.py b/tests/test_api_endpoint_config.py index a2ae588..654b44e 100644 --- a/tests/test_api_endpoint_config.py +++ b/tests/test_api_endpoint_config.py @@ -18,21 +18,21 @@ def test_default_api_endpoint(self): # When no env var is set, it should default to models.github.ai/inference try: # Save original env - original_env = os.environ - os.environ.pop('AI_API_ENDPOINT', None) + original_env = os.environ.pop('AI_API_ENDPOINT', None) endpoint = get_AI_endpoint() assert endpoint is not None assert isinstance(endpoint, str) assert urlparse(endpoint).netloc == AI_API_ENDPOINT_ENUM.AI_API_MODELS_GITHUB finally: # Restore original env - os.environ = original_env + if original_env: + os.environ['AI_API_ENDPOINT'] = original_env def test_api_endpoint_env_override(self): """Test that AI_API_ENDPOINT can be overridden by environment variable.""" try: # Save original env - original_env = os.environ + original_env = os.environ.pop('AI_API_ENDPOINT', None) # Set different endpoint test_endpoint = 'https://api.githubcopilot.com' os.environ['AI_API_ENDPOINT'] = test_endpoint @@ -40,7 +40,8 @@ def test_api_endpoint_env_override(self): assert get_AI_endpoint() == test_endpoint finally: # Restore original env - os.environ = original_env + if original_env: + 
os.environ['AI_API_ENDPOINT'] = original_env if __name__ == '__main__': pytest.main([__file__, '-v']) From e55d0ca88a817eec99d3111f647ba5ab21eda6b6 Mon Sep 17 00:00:00 2001 From: Kevin Backhouse Date: Tue, 2 Dec 2025 15:21:48 +0000 Subject: [PATCH 3/5] Update src/seclab_taskflow_agent/capi.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- src/seclab_taskflow_agent/capi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/seclab_taskflow_agent/capi.py b/src/seclab_taskflow_agent/capi.py index c4a95ae..54744d4 100644 --- a/src/seclab_taskflow_agent/capi.py +++ b/src/seclab_taskflow_agent/capi.py @@ -43,7 +43,7 @@ def list_capi_models(token: str) -> dict[str, dict]: models = {} try: api_endpoint = get_AI_endpoint() - netloc = urlparse(endpoint).netloc + netloc = urlparse(api_endpoint).netloc match netloc: case AI_API_ENDPOINT_ENUM.AI_API_GITHUBCOPILOT: models_catalog = 'models' From 7f11f1e2edb549798697a7b688a7fbac36cddd61 Mon Sep 17 00:00:00 2001 From: Kevin Backhouse Date: Tue, 2 Dec 2025 15:25:43 +0000 Subject: [PATCH 4/5] Use get_AI_token() --- src/seclab_taskflow_agent/agent.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/seclab_taskflow_agent/agent.py b/src/seclab_taskflow_agent/agent.py index 07544f9..6c26b0b 100644 --- a/src/seclab_taskflow_agent/agent.py +++ b/src/seclab_taskflow_agent/agent.py @@ -15,7 +15,7 @@ from agents.run import RunHooks from agents import Agent, Runner, AgentHooks, RunHooks, result, function_tool, Tool, RunContextWrapper, TContext, OpenAIChatCompletionsModel, set_default_openai_client, set_default_openai_api, set_tracing_disabled -from .capi import COPILOT_INTEGRATION_ID, get_AI_endpoint, AI_API_ENDPOINT_ENUM +from .capi import COPILOT_INTEGRATION_ID, get_AI_endpoint, get_AI_token, AI_API_ENDPOINT_ENUM # grab our secrets from .env, this must be in .gitignore load_dotenv(find_dotenv(usecwd=True)) @@ -150,7 +150,7 @@ def __init__(self, run_hooks: TaskRunHooks | None = None, agent_hooks: TaskAgentHooks | None = None): client = AsyncOpenAI(base_url=api_endpoint, - api_key=os.getenv('COPILOT_TOKEN'), + api_key=get_AI_token(), default_headers={'Copilot-Integration-Id': COPILOT_INTEGRATION_ID}) set_default_openai_client(client) # CAPI does not yet support the Responses API: https://github.com/github/copilot-api/issues/11185 From 71ffdfe454405608a35cfe3945f1ff7311a3cf0d Mon Sep 17 00:00:00 2001 From: Kevin Backhouse Date: Tue, 2 Dec 2025 15:34:42 +0000 Subject: [PATCH 5/5] Update comments and documentation. --- .devcontainer/post-attach.sh | 4 ++-- .github/workflows/smoketest.yaml | 2 +- README.md | 4 ++-- docker/run.sh | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.devcontainer/post-attach.sh b/.devcontainer/post-attach.sh index 0fd307a..1fda8ec 100644 --- a/.devcontainer/post-attach.sh +++ b/.devcontainer/post-attach.sh @@ -4,8 +4,8 @@ set -e # If running in Codespaces, check for necessary secrets and print error if missing if [ -v CODESPACES ]; then echo "🔐 Running in Codespaces - injecting secrets from Codespaces settings..." - if [ ! -v COPILOT_TOKEN ]; then - echo "⚠️ Running in Codespaces - please add COPILOT_TOKEN to your Codespaces secrets" + if [ ! -v AI_API_TOKEN ]; then + echo "⚠️ Running in Codespaces - please add AI_API_TOKEN to your Codespaces secrets" fi if [ ! 
-v GITHUB_PERSONAL_ACCESS_TOKEN ]; then
     echo "⚠️ Running in Codespaces - please add GITHUB_PERSONAL_ACCESS_TOKEN to your Codespaces secrets"
diff --git a/.github/workflows/smoketest.yaml b/.github/workflows/smoketest.yaml
index 788952c..90d78b4 100644
--- a/.github/workflows/smoketest.yaml
+++ b/.github/workflows/smoketest.yaml
@@ -52,7 +52,7 @@ jobs:
 
       - name: Run tests
         env:
-          COPILOT_TOKEN: ${{ secrets.COPILOT_TOKEN }}
+          AI_API_TOKEN: ${{ secrets.AI_API_TOKEN }}
           GITHUB_AUTH_HEADER: "Bearer ${{ secrets.GITHUB_TOKEN }}"
 
         run: |
diff --git a/README.md b/README.md
index 39f9c76..725e15c 100644
--- a/README.md
+++ b/README.md
@@ -36,7 +36,7 @@ Python >= 3.9 or Docker
 
 ## Configuration
 
-Provide a GitHub token for an account that is entitled to use GitHub Copilot via the `COPILOT_TOKEN` environment variable. Further configuration is use case dependent, i.e. pending which MCP servers you'd like to use in your taskflows.
+Provide a GitHub token for an account that is entitled to use [GitHub Models](https://models.github.ai) via the `AI_API_TOKEN` environment variable. Further configuration is use case dependent, i.e. pending which MCP servers you'd like to use in your taskflows.
 
 You can set persisting environment variables via an `.env` file in the project root.
 
@@ -44,7 +44,7 @@ Example:
 
 ```sh
 # Tokens
-COPILOT_TOKEN=
+AI_API_TOKEN=
 # MCP configs
 GITHUB_PERSONAL_ACCESS_TOKEN=
 CODEQL_DBS_BASE_PATH="/app/my_data/codeql_databases"
diff --git a/docker/run.sh b/docker/run.sh
index 6903bfd..8905bb3 100755
--- a/docker/run.sh
+++ b/docker/run.sh
@@ -11,7 +11,7 @@
 #
 #   git clone https://github.com/GitHubSecurityLab/seclab-taskflow-agent.git
 #   cd seclab-taskflow-agent/src
-#   export COPILOT_TOKEN=
+#   export AI_API_TOKEN=
 #   export GITHUB_AUTH_HEADER=
 #   sudo -E ../docker/run.sh -p seclab_taskflow_agent.personalities.assistant 'explain modems to me please'
 
@@ -23,5 +23,5 @@ docker run -i \
   --mount type=bind,src="$PWD",dst=/app \
   -e DATA_DIR=/app/data \
   -e GITHUB_PERSONAL_ACCESS_TOKEN="$GITHUB_PERSONAL_ACCESS_TOKEN" \
-  -e COPILOT_TOKEN="$COPILOT_TOKEN" \
+  -e AI_API_TOKEN="$AI_API_TOKEN" \
  "ghcr.io/githubsecuritylab/seclab-taskflow-agent" "$@"
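
A minimal usage sketch of the helpers this series introduces, assuming the package is importable and the token value below is replaced with a real GitHub token (the placeholder is illustrative only):

    import os
    from seclab_taskflow_agent.capi import get_AI_endpoint, get_AI_token, list_tool_call_models

    # AI_API_TOKEN is preferred; COPILOT_TOKEN is still honoured as a fallback.
    os.environ.setdefault('AI_API_TOKEN', '<your GitHub token>')  # placeholder, not a real token

    # Defaults to https://models.github.ai/inference unless AI_API_ENDPOINT is set.
    print(get_AI_endpoint())

    # Roughly what the -l command-line option does: list models that support tool calls
    # on the configured endpoint, so taskflows can reference the correct model ids.
    for model in list_tool_call_models(get_AI_token()):
        print(model)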