
Commit 767c1cd

Add GithubAgent step (#1366)

* Add GithubAgent step; allow AgenticLLMV2 to select the model; log all tool executions
* Add a function to evict the no-op log handler

1 parent da240d1 commit 767c1cd

15 files changed: +292 -254 lines changed

patchwork/common/client/llm/utils.py

Lines changed: 4 additions & 2 deletions

@@ -1,6 +1,8 @@
 from __future__ import annotations
 
 import json
+import random
+import string
 
 from openai.lib._parsing._completions import type_to_response_format_param
 from openai.types.chat.completion_create_params import ResponseFormat
@@ -114,5 +116,5 @@ def example_dict_to_base_model(example_data: dict) -> Type[BaseModel]:
 
         field = Field(**field_kwargs)
         base_model_field_defs[example_data_key] = (value_typing, field)
-
-    return create_model("ResponseFormat", **base_model_field_defs)
+    random_suffix = "".join(random.choice(string.ascii_lowercase) for _ in range(4))
+    return create_model(f"ResponseFormat_{random_suffix}", **base_model_field_defs)

patchwork/common/multiturn_strategy/agentic_strategy_v2.py

Lines changed: 29 additions & 13 deletions

@@ -1,21 +1,23 @@
 from __future__ import annotations
 
 import asyncio
+import json
 import logging
 import sys
 
 from pydantic import BaseModel
 from pydantic_ai import Agent
-from pydantic_ai.models.anthropic import AnthropicModel
-from pydantic_ai.result import RunResult
+from pydantic_ai.agent import AgentRunResult
 from typing_extensions import Any, Dict, Optional, Union
 
+from patchwork.common.client.llm.protocol import LlmClient
 from patchwork.common.client.llm.utils import example_json_to_base_model
 from patchwork.common.tools import Tool
 from patchwork.common.utils.utils import mustache_render
 
 _COMPLETION_FLAG_ATTRIBUTE = "is_task_completed"
 _MESSAGE_ATTRIBUTE = "message"
+DEFAULT_AGENT_EXAMPLE_JSON = f'{{"{_MESSAGE_ATTRIBUTE}":"message", "{_COMPLETION_FLAG_ATTRIBUTE}": false}}'
 
 
 class AgentConfig(BaseModel):
@@ -25,15 +27,23 @@ class Config:
     name: str
     tool_set: Dict[str, Tool]
     system_prompt: str = ""
-    example_json: Union[
-        str, Dict[str, Any]
-    ] = f'{{"{_MESSAGE_ATTRIBUTE}":"message", "{_COMPLETION_FLAG_ATTRIBUTE}": false}}'
+    example_json: Union[str, Dict[str, Any]] = DEFAULT_AGENT_EXAMPLE_JSON
+
+    def model_post_init(self, __context: Any) -> None:
+        if self.example_json == DEFAULT_AGENT_EXAMPLE_JSON:
+            return
+
+        wanted = json.loads(self.example_json)
+        default_wanted = json.loads(DEFAULT_AGENT_EXAMPLE_JSON)
+        default_wanted.update(wanted)
+        self.example_json = json.dumps(default_wanted)
 
 
 class AgenticStrategyV2:
     def __init__(
         self,
-        api_key: str,
+        model: str,
+        llm_client: LlmClient,
         template_data: dict[str, str],
         system_prompt_template: str,
         user_prompt_template: str,
@@ -44,25 +54,30 @@ def __init__(
         self.__limit = limit
         self.__template_data = template_data
         self.__user_prompt_template = user_prompt_template
-        model = AnthropicModel("claude-3-5-sonnet-latest", api_key=api_key)
         self.__summariser = Agent(
-            model,
+            llm_client,
             system_prompt=mustache_render(system_prompt_template, self.__template_data),
             result_type=example_json_to_base_model(example_json),
-            model_settings=dict(parallel_tool_calls=False),
+            model_settings=dict(
+                parallel_tool_calls=False,
+                model=model,
+            ),
         )
         self.__agents = []
         for agent_config in agent_configs:
             tools = []
             for tool in agent_config.tool_set.values():
                 tools.append(tool.to_pydantic_ai_function_tool())
             agent = Agent(
-                model,
+                llm_client,
                 name=agent_config.name,
                 system_prompt=mustache_render(agent_config.system_prompt, self.__template_data),
                 tools=tools,
                 result_type=example_json_to_base_model(agent_config.example_json),
-                model_settings=dict(parallel_tool_calls=False),
+                model_settings=dict(
+                    parallel_tool_calls=False,
+                    model=model,
+                ),
             )
 
             self.__agents.append(agent)
@@ -89,7 +104,7 @@ def execute(self, limit: Optional[int] = None) -> dict:
         message_history = None
         agent_output = None
         for i in range(limit or self.__limit or sys.maxsize):
-            agent_output: RunResult[Any] = loop.run_until_complete(
+            agent_output: AgentRunResult[Any] = loop.run_until_complete(
                 agent.run(user_message, message_history=message_history)
             )
             message_history = agent_output.all_messages()
@@ -107,10 +122,11 @@ def execute(self, limit: Optional[int] = None) -> dict:
             return dict()
 
         if len(agents_result) == 1:
+            history = next(v for _, v in agents_result.items()).all_messages()
             final_result = loop.run_until_complete(
                 self.__summariser.run(
                     "From the actions taken by the assistant. Please give me the result.",
-                    message_history=next(v for _, v in agents_result.items()).all_messages(),
+                    message_history=history,
                 )
            )
         else:
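
The new DEFAULT_AGENT_EXAMPLE_JSON plus model_post_init means a caller-supplied example_json is overlaid on the default, so the message and is_task_completed keys the strategy relies on are always present. A minimal sketch of that merge, assuming a string-typed example_json:

    import json

    DEFAULT_AGENT_EXAMPLE_JSON = '{"message":"message", "is_task_completed": false}'

    def merge_example_json(example_json: str) -> str:
        # Overlay the caller's keys on the defaults; the default keys never disappear.
        if example_json == DEFAULT_AGENT_EXAMPLE_JSON:
            return example_json
        merged = json.loads(DEFAULT_AGENT_EXAMPLE_JSON)
        merged.update(json.loads(example_json))
        return json.dumps(merged)

    print(merge_example_json('{"pr_url": "https://example.com/pr/1"}'))
    # {"message": "message", "is_task_completed": false, "pr_url": "https://example.com/pr/1"}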

patchwork/common/tools/github_tool.py

Lines changed: 46 additions & 0 deletions

@@ -0,0 +1,46 @@
+from __future__ import annotations
+
+import os
+import subprocess
+
+from patchwork.common.tools.tool import Tool
+
+
+class GitHubTool(Tool, tool_name="github_tool"):
+    def __init__(self, path: str, gh_token: str):
+        super().__init__()
+        self.path = path
+        self.gh_token = gh_token
+
+    @property
+    def json_schema(self) -> dict:
+        return {
+            "name": "github_tool",
+            "description": """\
+Access to the GitHub CLI, the command is also `gh` all args provided are used as is
+""",
+            "input_schema": {
+                "type": "object",
+                "properties": {
+                    "args": {
+                        "type": "array",
+                        "items": {"type": "string"},
+                        "description": "The args to run `gh` command with.",
+                    }
+                },
+                "required": ["args"],
+            },
+        }
+
+    def execute(self, args: list[str]) -> str:
+        env = os.environ.copy()
+        env["GH_TOKEN"] = self.gh_token
+        p = subprocess.run(
+            ["gh", *args],
+            env=env,
+            cwd=self.path,
+            text=True,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+        )
+        return p.stdout
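
A minimal usage sketch of the new tool (the token and repository path are placeholders):

    from patchwork.common.tools.github_tool import GitHubTool

    # Any `gh` invocation is delegated as-is; GH_TOKEN is injected into the subprocess env.
    tool = GitHubTool(path=".", gh_token="<github-token>")
    output = tool.execute(["pr", "list", "--limit", "5"])
    print(output)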

patchwork/common/tools/grep_tool.py

Lines changed: 1 addition & 1 deletion

@@ -103,7 +103,7 @@ def execute(self, pattern: Optional[str] = None, depth: int = 1, is_case_sensiti
 
 
 class FindTextTool(Tool, tool_name="find_text"):
-    __CHAR_LIMIT = 200
+    __CHAR_LIMIT = 400
     __CHAR_LIMIT_TEXT = "<Too many characters>"
 
     def __init__(self, path: Path | str, **kwargs):

patchwork/common/tools/tool.py

Lines changed: 25 additions & 3 deletions

@@ -1,22 +1,29 @@
+import functools
 from abc import ABC, abstractmethod
 
 from pydantic_ai.tools import RunContext
 from pydantic_ai.tools import Tool as PydanticTool
 from pydantic_ai.tools import ToolDefinition
 from typing_extensions import Type
 
+from patchwork.logger import logger
+
 
 class Tool(ABC):
     __internal_map: dict[str, Type["Tool"]] = dict()
 
-    def __init_subclass__(cls, tool_name=None, abc_register=True, **kwargs):
+    def __init_subclass__(cls, tool_name=None, abc_register=True, tool_logging=True, **kwargs):
+        cls_name = tool_name or cls.__name__
+        cls.name = cls_name
+
+        if tool_logging:
+            setattr(cls, "execute", Tool.__execute_logging_wrapper(cls.__dict__["execute"]))
+
         if not abc_register:
             return
 
-        cls_name = tool_name or cls.__name__
         if cls_name in cls.__internal_map.keys():
             raise ValueError(f"Duplicate subclass name for class {cls.__name__}: {cls_name}")
-        cls.name = cls_name
         Tool.__internal_map[cls_name] = cls
 
     @property
@@ -55,3 +62,18 @@ async def _prep(ctx: RunContext[None], tool_def: ToolDefinition) -> ToolDefiniti
         return PydanticTool(
             self.execute, prepare=_prep, name=self.name, description=self.json_schema.get("description", "")
         )
+
+    @staticmethod
+    def __execute_logging_wrapper(func):
+        @functools.wraps(func)
+        def execute_logging_wrapper(self, *args, **kwargs):
+            arg_text = ""
+            if len(args) > 0:
+                arg_text += f"args: {args}"
+            if len(kwargs) > 0:
+                arg_text += f"kwargs: {kwargs}"
+
+            logger.info(f"Executing Tool: {self.name} with {arg_text}")
+            return func(self, *args, **kwargs)
+
+        return execute_logging_wrapper
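
The wrapper is applied at subclass-creation time, so every concrete tool's execute call is logged without each tool opting in. A simplified standalone sketch of the pattern (not the repo's Tool class):

    import functools
    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("tools")

    class Base:
        def __init_subclass__(cls, tool_logging=True, **kwargs):
            super().__init_subclass__(**kwargs)
            if tool_logging and "execute" in cls.__dict__:
                original = cls.__dict__["execute"]

                @functools.wraps(original)
                def wrapped(self, *args, **kw):
                    # Log the tool name and arguments before delegating to the real execute.
                    logger.info("Executing Tool: %s with args: %s kwargs: %s", type(self).__name__, args, kw)
                    return original(self, *args, **kw)

                cls.execute = wrapped

    class EchoTool(Base):
        def execute(self, text: str) -> str:
            return text

    EchoTool().execute("hello")  # logs the call, then returns "hello"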

patchwork/logger.py

Lines changed: 9 additions & 3 deletions

@@ -31,6 +31,13 @@
 logger.addHandler(__noop)
 
 
+def evict_null_handler():
+    global logger, __noop
+
+    warnings.simplefilter("ignore")
+    logger.removeHandler(__noop)
+
+
 class TerminalHandler(RichHandler):
     def __init__(self, log_level: str):
         super().__init__(
@@ -137,10 +144,9 @@ def inner(record: logging.LogRecord) -> bool:
 
 
 def init_cli_logger(log_level: str) -> logging.Logger:
-    global logger, __noop
+    global logger
 
-    warnings.simplefilter("ignore")
-    logger.removeHandler(__noop)
+    evict_null_handler()
 
     if not os.path.exists(HOME_FOLDER):  # Check if HOME_FOLDER exists at this point
         os.makedirs(HOME_FOLDER)
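
Exposing evict_null_handler lets callers drop the placeholder no-op handler without going through init_cli_logger. A hedged sketch of how that might be used (assumes patchwork is importable; attaching a StreamHandler is the embedder's choice, not part of this commit):

    import logging

    from patchwork.logger import evict_null_handler, logger

    evict_null_handler()                         # remove the no-op handler added at import time
    logger.addHandler(logging.StreamHandler())   # route patchwork logs to the host app's stderr
    logger.setLevel(logging.INFO)
    logger.info("patchwork logging configured without init_cli_logger")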

patchwork/steps/AgenticLLMV2/AgenticLLMV2.py

Lines changed: 3 additions & 1 deletion

@@ -1,5 +1,6 @@
 from pathlib import Path
 
+from patchwork.common.client.llm.aio import AioLlmClient
 from patchwork.common.multiturn_strategy.agentic_strategy_v2 import (
     AgentConfig,
     AgenticStrategyV2,
@@ -17,7 +18,8 @@ def __init__(self, inputs):
         base_path = str(Path.cwd())
         self.conversation_limit = int(inputs.get("max_agent_calls", 1))
         self.agentic_strategy = AgenticStrategyV2(
-            api_key=inputs.get("anthropic_api_key"),
+            model="claude-3-7-sonnet-latest",
+            llm_client=AioLlmClient.create_aio_client(inputs),
             template_data=inputs.get("prompt_value", {}),
             system_prompt_template=inputs.get("system_prompt", "Summarise from our previous conversation"),
             user_prompt_template=inputs.get("user_prompt"),

patchwork/steps/BrowserUse/BrowserUse.py

Lines changed: 3 additions & 6 deletions

@@ -40,9 +40,7 @@ def init_browser():
         os.makedirs(downloads_path)
 
     context_config = BrowserContextConfig(save_downloads_path=downloads_path)
-    config = BrowserConfig(
-        headless=True, disable_security=True, new_context_config=context_config
-    )
+    config = BrowserConfig(headless=True, disable_security=True, new_context_config=context_config)
     controller = Controller()
 
     # Register custom action to upload files to web elements
@@ -124,6 +122,7 @@ class BrowserUse(Step, input_class=BrowserUseInputs, output_class=BrowserUseOutp
     This class provides a high-level interface for executing browser-based tasks
     using various LLM providers (Google, OpenAI, Anthropic) to control the browser.
     """
+
     required_keys = {"task"}
 
     def __init__(self, inputs):
@@ -142,9 +141,7 @@ def __init__(self, inputs):
         if "google_api_key" in self.inputs:
             from langchain_google_genai import ChatGoogleGenerativeAI
 
-            self.llm = ChatGoogleGenerativeAI(
-                model="gemini-2.0-flash", google_api_key=self.inputs["google_api_key"]
-            )
+            self.llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", google_api_key=self.inputs["google_api_key"])
         elif "openai_api_key" in self.inputs:
             from langchain_openai import ChatOpenAI
 

patchwork/steps/BrowserUse/typed.py

Lines changed: 1 addition & 3 deletions

@@ -10,9 +10,7 @@ class BrowserUseInputs(TypedDict, total=False):
         str,
         StepTypeConfig(is_config=True, or_op=["google_api_key", "anthropic_api_key"]),
     ]
-    anthropic_api_key: Annotated[
-        str, StepTypeConfig(is_config=True, or_op=["google_api_key", "openai_api_key"])
-    ]
+    anthropic_api_key: Annotated[str, StepTypeConfig(is_config=True, or_op=["google_api_key", "openai_api_key"])]
     google_api_key: Annotated[
         str,
         StepTypeConfig(is_config=True, or_op=["openai_api_key", "anthropic_api_key"]),

patchwork/steps/GitHubAgent/GitHubAgent.py

Lines changed: 47 additions & 0 deletions

@@ -0,0 +1,47 @@
+from pathlib import Path
+
+from patchwork.common.client.llm.aio import AioLlmClient
+from patchwork.common.multiturn_strategy.agentic_strategy_v2 import (
+    AgentConfig,
+    AgenticStrategyV2,
+)
+from patchwork.common.tools.github_tool import GitHubTool
+from patchwork.step import Step
+from patchwork.steps.GitHubAgent.typed import GitHubAgentInputs, GitHubAgentOutputs
+
+
+class GitHubAgent(Step, input_class=GitHubAgentInputs, output_class=GitHubAgentOutputs):
+    def __init__(self, inputs):
+        super().__init__(inputs)
+        base_path = inputs.get("base_path", str(Path.cwd()))
+        task = inputs["task"]
+        self.agentic_strategy = AgenticStrategyV2(
+            model="claude-3-7-sonnet-latest",
+            llm_client=AioLlmClient.create_aio_client(inputs),
+            template_data=dict(),
+            system_prompt_template=f"""\
+Please summarise the conversation given and provide the result in the structure that is asked of you.
+""",
+            user_prompt_template=f"""\
+Please help me with the following task using the GitHub CLI. You should not do anything extra.
+Please take note of any requirements to the data required to fetch.
+
+{task}
+""",
+            agent_configs=[
+                AgentConfig(
+                    name="Assistant",
+                    tool_set=dict(github_tool=GitHubTool(base_path, inputs["github_api_token"])),
+                    system_prompt="""\
+You are a senior software developer helping the program manager to obtain some data from GitHub.
+You can access github through the `gh` CLI app.
+Your `gh` app has already been authenticated.
+""",
+                )
+            ],
+            example_json=inputs.get("example_json"),
+        )
+
+    def run(self) -> dict:
+        result = self.agentic_strategy.execute(limit=10)
+        return {**result, **self.agentic_strategy.usage()}
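
A minimal usage sketch of the new step; the input keys follow this diff, the token and LLM credential values are placeholders, and any extra keys GitHubAgentInputs may require are not shown:

    from patchwork.steps.GitHubAgent.GitHubAgent import GitHubAgent

    inputs = {
        "github_api_token": "<github-token>",            # forwarded to GitHubTool as GH_TOKEN
        "task": "List the five most recently updated open issues in this repository.",
        "example_json": '{"issues": ["issue title"]}',   # shapes the summarised output structure
        "anthropic_api_key": "<llm-key>",                # whichever credential AioLlmClient accepts
    }

    result = GitHubAgent(inputs).run()
    print(result)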
