Skip to content

Commit df96a19

Browse files
committed
Fix LLM instantiation across all workflows for LangGraph Studio compatibility
- Updated workflow/langgraph_workflow.py to use os.environ.get('GEMINI_API_KEY') directly
- Fixed workflow/langgraph_workflow_manager.py with proper LLM setup and Studio export
- Standardized utils/llm/gemini_client_factory.py with convert_system_message_to_human=True, max_retries=5, timeout=120
- Added development_workflow_manager to langgraph.json
- Reorganized documentation files to proper locations per file organization rules
- All 24 graphs now load successfully with consistent Gemini API pattern
- Development workflows now executable in LangGraph Studio
1 parent b551e72 commit df96a19

File tree

151 files changed

+4545
-7363
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

151 files changed

+4545
-7363
lines changed

agents/development/architecture_designer.py

Lines changed: 102 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,14 +5,25 @@
55
"""
66

77
import json
8-
from typing import Dict, Any, Optional
8+
from typing import Dict, Any, Optional, List
99
from models.state import AgentState
1010
from models.responses import ArchitectureDesignResponse
1111
from models.simplified_responses import SimplifiedComponent, SimplifiedArchitectureResponse, create_simplified_architecture_response
12-
from ..core.base_agent import BaseAgent
12+
from agents.core.base_agent import BaseAgent
1313
from prompts import get_agent_prompt_loader
1414
import google.generativeai as genai
1515

16+
17+
# LangGraph integration check.
# `logging` is imported here because the fallback branch below logs a warning;
# without it the "legacy mode" path itself raised NameError at import time.
import logging

try:
    from langgraph.graph import StateGraph, END
    from langgraph.checkpoint.memory import MemorySaver
    from pydantic import BaseModel, Field
    LANGGRAPH_AVAILABLE = True
except ImportError:
    LANGGRAPH_AVAILABLE = False
    logging.warning("LangGraph not available - agent will work in legacy mode only")
1627
try:
1728
from langchain_core.output_parsers import JsonOutputParser
1829
from langchain_core.prompts import PromptTemplate
@@ -22,6 +33,26 @@
2233
LANGCHAIN_AVAILABLE = False
2334

2435

36+
37+
38+
if LANGGRAPH_AVAILABLE:

    class ArchitectureDesignerState(BaseModel):
        """State for ArchitectureDesigner LangGraph workflow using Pydantic BaseModel."""

        # Input fields
        input_data: Dict[str, Any] = Field(default_factory=dict, description="Input data")

        # Output fields
        output_data: Dict[str, Any] = Field(default_factory=dict, description="Output data")

        # Control fields
        errors: List[str] = Field(default_factory=list, description="Error messages")
        status: str = Field(default="initialized", description="Current status")
        metrics: Dict[str, float] = Field(default_factory=dict, description="Execution metrics")

        class Config:
            """Pydantic configuration."""
            arbitrary_types_allowed = True

else:
    # BaseModel/Field are only bound when the guarded LangGraph import above
    # succeeds; defining the class unconditionally would raise NameError at
    # module import in legacy mode. A None placeholder keeps annotations that
    # reference this name resolvable.
    ArchitectureDesignerState = None
2556
class ArchitectureDesigner(BaseAgent):
2657
"""
2758
Agent responsible for designing system architecture.
@@ -38,6 +69,17 @@ def __init__(self, config, gemini_client):
3869
else:
3970
self.json_parser = None
4071

72+
73+
# Build LangGraph workflow if available
74+
if LANGGRAPH_AVAILABLE:
75+
self.workflow = self._build_langgraph_workflow()
76+
self.app = self.workflow.compile()
77+
self.logger.info("✅ LangGraph workflow compiled and ready")
78+
else:
79+
self.workflow = None
80+
self.app = None
81+
self.logger.info("⚠️ LangGraph not available - using legacy mode")
82+
4183
def validate_task(self, task: Any) -> bool:
4284
"""
4385
Validate that the task is appropriate for architecture design.
@@ -542,3 +584,61 @@ def validate_input(self, state: AgentState) -> bool:
542584
# Don't fail, just warn - we can still design basic architecture
543585

544586
return True
587+
588+
589+
def _build_langgraph_workflow(self) -> StateGraph:
    """Construct the single-node LangGraph workflow for this agent.

    Returns:
        An uncompiled StateGraph whose only node runs the agent itself.
    """
    builder = StateGraph(ArchitectureDesignerState)

    # One node does all the work; the graph exists so Studio can drive it.
    builder.add_node("execute", self._langgraph_execute_node)
    builder.set_entry_point("execute")
    builder.add_edge("execute", END)

    return builder
599+
600+
async def _langgraph_execute_node(self, state: ArchitectureDesignerState) -> ArchitectureDesignerState:
    """Execute the agent inside the LangGraph workflow.

    Runs ``self.execute`` on ``state.input_data`` and records the outcome
    (output, status, errors) on the state object, which is returned.

    Args:
        state: Workflow state carrying input data and accumulating results.

    Returns:
        The same state object, updated in place.
    """
    import time
    start = time.time()

    try:
        result = await self.execute(state.input_data)
        state.output_data = result
        state.status = "completed"
    except Exception as e:
        self.logger.error(f"LangGraph execution failed: {e}")
        state.errors.append(str(e))
        state.status = "failed"
    finally:
        # Record timing once on every path instead of duplicating the
        # assignment in both the success and failure branches.
        state.metrics["execution_time"] = time.time() - start

    return state
621+
622+
623+
# Export for LangGraph Studio
_default_instance = None


def get_graph():
    """Lazily build and return the compiled graph for LangGraph Studio.

    Creates a module-level singleton ArchitectureDesigner on first call;
    returns None when LangGraph is unavailable.
    """
    global _default_instance

    if _default_instance is None and LANGGRAPH_AVAILABLE:
        # Imported here to avoid import-time cycles and keep the module
        # loadable when only legacy mode is needed.
        from models.config import AgentConfig
        from utils.llm.gemini_client_factory import get_gemini_client

        agent_config = AgentConfig(
            agent_id='architecture_designer',
            name='ArchitectureDesigner',
            description='ArchitectureDesigner agent',
            model_name='gemini-2.5-flash',
        )
        gemini = get_gemini_client(agent_name='architecture_designer')
        _default_instance = ArchitectureDesigner(agent_config, gemini_client=gemini)

    return _default_instance.app if _default_instance else None


# Studio expects 'graph' variable
graph = get_graph()

0 commit comments

Comments
 (0)