Commit 25dca5c

feat: Implement Agile Factory, Deep Agents, and Prompt Management enhancements
1 parent 3f1d387 commit 25dca5c

95 files changed (+13754 / -724 lines changed)

Some content is hidden: large commits have some content hidden by default, so several of the diff sections below appear without their file paths.

.gitignore

Lines changed: 1 addition & 0 deletions
@@ -170,6 +170,7 @@ test_results/
 
 # Generated project directories
 generated_projects/
+agile_factory_workspace/
 generated/
 output/
 temp/

agents/agile_factory/__init__.py

Lines changed: 8 additions & 0 deletions
@@ -0,0 +1,8 @@
+"""
+Agile Software Factory Agent System
+
+A comprehensive multi-agent system for generating interactive websites and Streamlit apps.
+"""
+
+__version__ = "0.1.0"
+
Lines changed: 2 additions & 0 deletions
@@ -0,0 +1,2 @@
+"""Human-in-the-loop checkpoint implementations."""
+
Lines changed: 164 additions & 0 deletions
@@ -0,0 +1,164 @@
+"""
+Human-in-the-loop checkpoint implementations.
+
+Provides structured HITL checkpoints between workflow stages.
+"""
+
+import logging
+from typing import Dict, Any
+from agents.agile_factory.state.agile_state import AgileFactoryState
+
+logger = logging.getLogger(__name__)
+
+
+def create_checkpoint_summary(state: AgileFactoryState, checkpoint_name: str) -> Dict[str, Any]:
+    """
+    Create structured summary for human review at checkpoint.
+
+    Args:
+        state: Current workflow state
+        checkpoint_name: Name of checkpoint
+
+    Returns:
+        Structured summary dictionary
+    """
+    summary = {
+        "checkpoint": checkpoint_name,
+        "timestamp": None,  # Will be set by caller if needed
+        "status": state.get("status", "processing")
+    }
+
+    if checkpoint_name == "story_review":
+        summary.update({
+            "what": "User Story Input",
+            "content": state.get("user_story", ""),
+            "project_type": state.get("project_type", "website")
+        })
+
+    elif checkpoint_name == "requirements_review":
+        requirements = state.get("requirements", {})
+        summary.update({
+            "what": "Requirements Analysis",
+            "summary": requirements.get("summary", ""),
+            "functional_count": len(requirements.get("functional_requirements", [])),
+            "non_functional_count": len(requirements.get("non_functional_requirements", []))
+        })
+
+    elif checkpoint_name == "architecture_review":
+        architecture = state.get("architecture", {})
+        summary.update({
+            "what": "Architecture Design",
+            "system_overview": architecture.get("system_overview", "")[:500],
+            "architecture_pattern": architecture.get("architecture_pattern", ""),
+            "components_count": len(architecture.get("components", [])),
+            "tech_stack": architecture.get("technology_stack", {})
+        })
+
+    elif checkpoint_name == "code_generation_review":
+        code_files = state.get("code_files", {})
+        summary.update({
+            "what": "Code Generation",
+            "files_generated": len(code_files),
+            "file_list": list(code_files.keys())[:10],  # First 10 files
+            "total_size": sum(len(content) for content in code_files.values())
+        })
+
+    elif checkpoint_name == "final_review":
+        summary.update({
+            "what": "Final Project Review",
+            "requirements_complete": bool(state.get("requirements")),
+            "architecture_complete": bool(state.get("architecture")),
+            "code_complete": bool(state.get("code_files")),
+            "tests_complete": bool(state.get("test_results")),
+            "docs_complete": bool(state.get("documentation_files"))
+        })
+
+    return summary
+
+
+def print_summary(summary: Dict[str, Any]) -> None:
+    """
+    Print checkpoint summary to console.
+
+    Args:
+        summary: Summary dictionary
+    """
+    print(f"\n{summary.get('what', 'Checkpoint')}:")
+    print("-" * 60)
+
+    for key, value in summary.items():
+        if key not in ["checkpoint", "what", "timestamp"]:
+            if isinstance(value, dict):
+                print(f"{key}:")
+                for k, v in value.items():
+                    print(f" {k}: {v}")
+            elif isinstance(value, list):
+                print(f"{key}: {len(value)} items")
+                if value and len(value) <= 5:
+                    for item in value:
+                        print(f" - {item}")
+            else:
+                print(f"{key}: {value}")
+
+
+def hitl_checkpoint_node(state: AgileFactoryState, checkpoint_name: str) -> dict:
+    """
+    Generic HITL checkpoint node.
+
+    This node presents a checkpoint summary to the human reviewer and
+    collects their decision (approve/reject/edit).
+
+    Args:
+        state: Current workflow state
+        checkpoint_name: Name of checkpoint
+
+    Returns:
+        Updates dict with HITL feedback (LangGraph will merge with state)
+    """
+    # Create structured summary
+    summary = create_checkpoint_summary(state, checkpoint_name)
+
+    # Present to human (console for MVP, Streamlit UI later)
+    print("\n" + "="*60)
+    print(f"HITL CHECKPOINT: {checkpoint_name.upper().replace('_', ' ')}")
+    print("="*60)
+    print_summary(summary)
+    print("\nOptions:")
+    print(" [a]pprove - Continue to next step")
+    print(" [r]eject - Restart from beginning")
+    print(" [e]dit - Provide feedback for revision")
+    print(" [s]kip - Skip this checkpoint (not recommended)")
+
+    # Get human input
+    # For LangGraph Studio, this will be handled via interrupt
+    # For now, we'll set a default and let Studio handle the interrupt
+    feedback = "approve"  # Default for automated testing
+
+    # In LangGraph Studio, this will be an interrupt point
+    # The human will provide feedback through Studio UI
+
+    # Build updates dict (correct LangGraph pattern - return only updates)
+    # Merge with existing hitl_approvals and hitl_feedback from state
+    existing_approvals = state.get("hitl_approvals", {}).copy()
+    existing_feedback = state.get("hitl_feedback", {}).copy()
+
+    existing_approvals[checkpoint_name] = feedback == "approve"
+    existing_feedback[checkpoint_name] = feedback
+
+    updates = {
+        "current_checkpoint": checkpoint_name,
+        "hitl_approvals": existing_approvals,
+        "hitl_feedback": existing_feedback
+    }
+
+    if feedback == "approve":
+        updates["status"] = "approved"
+    elif feedback == "reject":
+        updates["status"] = "rejected"
+    elif feedback == "edit":
+        updates["status"] = "needs_revision"
+
+    logger.info("HITL checkpoint %s: %s", checkpoint_name, feedback)
+
+    return updates
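
For orientation, here is a minimal usage sketch of the checkpoint helpers above; it is not part of the commit. It assumes the three functions are in scope (the file path for this module is hidden in this view, so no import is shown) and treats AgileFactoryState as a plain dict-like mapping. The sample_state keys and values are illustrative, inferred from the state.get(...) calls in the file.

# Hedged usage sketch; sample_state is a plain dict standing in for AgileFactoryState.
sample_state = {
    "user_story": "As a visitor, I want a landing page with a contact form.",
    "project_type": "website",
    "requirements": {
        "summary": "Single-page marketing site with a contact form",
        "functional_requirements": ["landing page", "contact form"],
        "non_functional_requirements": ["responsive layout"],
    },
    "status": "processing",
    "hitl_approvals": {},
    "hitl_feedback": {},
}

# Build and print the structured summary for the requirements checkpoint.
summary = create_checkpoint_summary(sample_state, "requirements_review")
print_summary(summary)

# Run the checkpoint node; with the current hard-coded feedback ("approve") it
# returns an updates dict for LangGraph to merge back into the workflow state.
updates = hitl_checkpoint_node(sample_state, "requirements_review")
assert updates["status"] == "approved"
assert updates["hitl_approvals"]["requirements_review"] is True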
Lines changed: 2 additions & 0 deletions
@@ -0,0 +1,2 @@
+"""Agent nodes for Agile Factory workflow."""
+
Lines changed: 167 additions & 0 deletions
@@ -0,0 +1,167 @@
+"""
+Architecture Designer Node for Agile Factory workflow.
+
+Uses existing architecture_designer agent with prompt loader.
+"""
+
+import logging
+import os
+import re
+from typing import Dict, Any
+from agents.agile_factory.state.agile_state import AgileFactoryState
+from agents.agile_factory.utils.safe_prompt_formatting import safe_format_prompt, safe_format_prompt_with_validation
+from agents.agile_factory.utils.llm_config import get_llm_model
+from prompts import get_agent_prompt_loader
+from langchain_google_genai import ChatGoogleGenerativeAI
+from langchain_core.messages import SystemMessage, HumanMessage
+
+logger = logging.getLogger(__name__)
+
+
+def architecture_node(state: AgileFactoryState) -> dict:
+    """
+    Architecture Designer Node using existing agent with prompt loader.
+
+    This node:
+    1. Loads prompt using existing get_agent_prompt_loader system
+    2. Creates LLM with Gemini 2.5 Flash
+    3. Executes architecture design
+    4. Returns updates dict with architecture (LangGraph will merge with state)
+
+    Args:
+        state: Current workflow state
+
+    Returns:
+        Updates dict with architecture design (LangGraph will merge with state)
+    """
+    try:
+        # Get API key
+        api_key = os.environ.get('GEMINI_API_KEY') or os.environ.get('GOOGLE_API_KEY')
+        if not api_key:
+            raise ValueError("GEMINI_API_KEY or GOOGLE_API_KEY environment variable must be set")
+
+        # Prepare context for architecture design
+        user_story = state.get("user_story", "")
+        requirements = state.get("requirements", {})
+        project_type = state.get("project_type", "website")
+
+        # Load prompt using existing system and inject dynamic context
+        prompt_loader = get_agent_prompt_loader("architecture_designer_v1")
+        system_prompt_template = prompt_loader.get_system_prompt()
+
+        # Ensure prompt is a string (not PromptTemplate object)
+        if not isinstance(system_prompt_template, str):
+            logger.warning(f"Prompt is not a string (type: {type(system_prompt_template)}), converting...")
+            if hasattr(system_prompt_template, 'template'):
+                system_prompt_template = system_prompt_template.template
+            else:
+                system_prompt_template = str(system_prompt_template)
+
+        # Validate prompt was loaded
+        if not system_prompt_template or len(system_prompt_template.strip()) == 0:
+            error_msg = "Failed to load system prompt template - prompt is empty"
+            logger.error(error_msg)
+            return {
+                "errors": state.get("errors", []) + [error_msg],
+                "status": "error",
+                "current_node": "architecture_designer"
+            }
+
+        logger.info(f"Loaded prompt template: {len(system_prompt_template)} characters (type: {type(system_prompt_template).__name__})")
+
+        # Format system prompt with dynamic context using safe formatting
+        try:
+            # Convert requirements dict to string for formatting
+            requirements_str = str(requirements) if isinstance(requirements, dict) else requirements
+            system_prompt, missing_placeholders = safe_format_prompt_with_validation(
+                system_prompt_template,
+                user_story=user_story,
+                requirements=requirements_str,
+                project_type=project_type
+            )
+
+            if missing_placeholders:
+                logger.warning(f"Some placeholders not replaced: {missing_placeholders}")
+
+            logger.info(f"Formatted prompt: {len(system_prompt)} characters")
+        except Exception as e:
+            error_msg = f"Prompt formatting failed: {e}"
+            logger.error(error_msg, exc_info=True)
+            return {
+                "errors": state.get("errors", []) + [error_msg],
+                "status": "error",
+                "current_node": "architecture_designer"
+            }
+
+        # Create LLM with configurable model
+        model_name = get_llm_model(state)
+        logger.info(f"Using LLM model: {model_name}")
+        llm = ChatGoogleGenerativeAI(
+            model=model_name,
+            google_api_key=api_key,
+            temperature=0,
+            convert_system_message_to_human=True
+        )
+
+        # Task message is now embedded in the system prompt, so we use a simple instruction
+        task = "Design the architecture for the project based on the requirements provided."
+
+        # Execute architecture design
+        messages = [
+            SystemMessage(content=system_prompt),
+            HumanMessage(content=task)
+        ]
+
+        logger.info("Calling LLM for architecture design...")
+        result = llm.invoke(messages)
+        logger.info(f"LLM response received: {len(result.content) if hasattr(result, 'content') else 'N/A'} characters")
+        output = result.content.strip()
+
+        # Parse JSON response (architecture_designer outputs JSON)
+        import json
+        import re
+
+        # Extract JSON from markdown code block if present
+        json_match = re.search(r'```json\s*(\{.*\})\s*```', output, re.DOTALL)
+        if json_match:
+            json_str = json_match.group(1)
+        else:
+            # Try to find JSON object boundaries
+            first_brace = output.find('{')
+            if first_brace != -1:
+                brace_count = 0
+                json_start = first_brace
+                for i, char in enumerate(output[first_brace:], start=first_brace):
+                    if char == '{':
+                        brace_count += 1
+                    elif char == '}':
+                        brace_count -= 1
+                        if brace_count == 0:
+                            json_str = output[json_start:i+1]
+                            break
+                else:
+                    json_str = output[first_brace:]
+            else:
+                json_str = output
+
+        # Clean trailing commas
+        json_str = re.sub(r',(\s*[}\]])', r'\1', json_str)
+
+        architecture = json.loads(json_str)
+
+        logger.info(f"Architecture design complete: {len(architecture.get('components', []))} components")
+
+        # Return updates dict (correct LangGraph pattern)
+        return {
+            "architecture": architecture,
+            "current_node": "architecture_designer"
+        }
+
+    except Exception as e:
+        logger.error(f"Architecture design failed: {e}")
+        return {
+            "errors": state.get("errors", []) + [f"Architecture design error: {str(e)}"],
+            "status": "error",
+            "current_node": "architecture_designer"
+        }
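
The workflow graph that connects these nodes is not shown in this section, so the following is only a hedged sketch of how architecture_node and hitl_checkpoint_node might be wired into a LangGraph StateGraph. The node names, edge order, the functools.partial binding, and the assumption that AgileFactoryState is a TypedDict-style schema accepted by StateGraph are illustrative, not taken from the diff.

# Hedged wiring sketch, assuming LangGraph's StateGraph API and that
# architecture_node / hitl_checkpoint_node (defined in the files added above)
# are already imported; their module paths are hidden in this view.
from functools import partial

from langgraph.graph import StateGraph, START, END

from agents.agile_factory.state.agile_state import AgileFactoryState

builder = StateGraph(AgileFactoryState)

# Register the architecture designer and a review checkpoint bound to its name.
builder.add_node("architecture_designer", architecture_node)
builder.add_node(
    "architecture_review",
    partial(hitl_checkpoint_node, checkpoint_name="architecture_review"),
)

# Linear flow for this fragment: design, then human review, then finish.
builder.add_edge(START, "architecture_designer")
builder.add_edge("architecture_designer", "architecture_review")
builder.add_edge("architecture_review", END)

graph = builder.compile()

# graph.invoke({"user_story": "...", "project_type": "website"}) would run the
# fragment; each node returns an updates dict that LangGraph merges into the state.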
