Commit 772ca36

improve agentic forms (#124)
1 parent 8e025b6 commit 772ca36

2 files changed: +98 -25 lines changed

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "solana-agent"
-version = "31.1.2"
+version = "31.1.3"
 description = "AI Agents for Solana"
 authors = ["Bevan Hunt <bevan@bevanhunt.com>"]
 license = "MIT"

solana_agent/services/query.py

Lines changed: 97 additions & 24 deletions
@@ -80,7 +80,7 @@ async def process(
         router: Optional[RoutingServiceInterface] = None,
         output_model: Optional[Type[BaseModel]] = None,
         capture_schema: Optional[Dict[str, Any]] = None,
-        capture_name: Optional[str] = None,
+        capture_name: Optional[Dict[str, Any]] = None,
     ) -> AsyncGenerator[Union[str, bytes, BaseModel], None]: # pragma: no cover
         """Process the user request and generate a response."""
         try:
try:
@@ -241,31 +241,102 @@ def _extract_numbered_options(text: str) -> Dict[str, str]:
                 options[idx] = label
             return options
 
-        def _detect_field_from_prev_question(
+        # LLM-backed field detection (gpt-4.1-mini) with graceful fallbacks
+        class _FieldDetect(BaseModel):
+            field: Optional[str] = None
+
+        async def _detect_field_from_prev_question(
             prev_text: str, schema: Optional[Dict[str, Any]]
         ) -> Optional[str]:
             if not prev_text or not isinstance(schema, dict):
                 return None
-            t = prev_text.lower()
-            patterns = [
-                ("ideas", ["which ideas attract you", "ideas"]),
-                ("description", ["please describe yourself", "describe yourself"]),
-                ("myself", ["tell us about yourself", "about yourself"]),
-                ("questions", ["do you have any questions"]),
-                ("rating", ["rating", "1 to 5", "how satisfied", "how happy"]),
-                ("email", ["email"]),
-                ("phone", ["phone"]),
-                ("name", ["name"]),
-                ("city", ["city"]),
-                ("state", ["state"]),
-            ]
-            candidates = set((schema.get("properties") or {}).keys())
-            for field, keys in patterns:
-                if field in candidates and any(key in t for key in keys):
-                    return field
-            for field in candidates:
-                if field in t:
-                    return field
+            props = list((schema.get("properties") or {}).keys())
+            if not props:
+                return None
+
+            question = prev_text.strip()
+            instruction = (
+                "You are a strict classifier. Given the assistant's last question and a list of "
+                "permitted schema field keys, choose exactly one key that the question is asking the user to answer. "
+                "If none apply, return null."
+            )
+            user_prompt = (
+                f"Schema field keys (choose exactly one of these): {props}\n"
+                f"Assistant question:\n{question}\n\n"
+                'Return strictly JSON like: {"field": "<one_of_the_keys_or_null>"}'
+            )
+
+            # Try llm_provider.parse_structured_output with mini
+            try:
+                if hasattr(
+                    self.agent_service.llm_provider, "parse_structured_output"
+                ):
+                    try:
+                        result = await self.agent_service.llm_provider.parse_structured_output(
+                            prompt=user_prompt,
+                            system_prompt=instruction,
+                            model_class=_FieldDetect,
+                            model="gpt-4.1-nano",
+                        )
+                    except TypeError:
+                        # Provider may not accept 'model' kwarg
+                        result = await self.agent_service.llm_provider.parse_structured_output(
+                            prompt=user_prompt,
+                            system_prompt=instruction,
+                            model_class=_FieldDetect,
+                        )
+                    # Read result
+                    sel = None
+                    try:
+                        sel = getattr(result, "field", None)
+                    except Exception:
+                        sel = None
+                    if sel is None:
+                        try:
+                            d = result.model_dump()
+                            sel = d.get("field")
+                        except Exception:
+                            sel = None
+                    if sel in props:
+                        return sel
+            except Exception as e:
+                logger.debug(
+                    f"LLM parse_structured_output field detection failed: {e}"
+                )
+
+            # Fallback: use generate_response with output_model=_FieldDetect
+            try:
+                async for r in self.agent_service.generate_response(
+                    agent_name=agent_name,
+                    user_id=user_id,
+                    query=user_text,
+                    images=images,
+                    memory_context="",
+                    output_format="text",
+                    prompt=f"{instruction}\n\n{user_prompt}",
+                    output_model=_FieldDetect,
+                ):
+                    fd = r
+                    sel = None
+                    try:
+                        sel = fd.field  # type: ignore[attr-defined]
+                    except Exception:
+                        try:
+                            d = fd.model_dump()
+                            sel = d.get("field")
+                        except Exception:
+                            sel = None
+                    if sel in props:
+                        return sel
+                    break
+            except Exception as e:
+                logger.debug(f"LLM generate_response field detection failed: {e}")
+
+            # Final heuristic fallback (keeps system working if LLM unavailable)
+            t = question.lower()
+            for key in props:
+                if key in t:
+                    return key
             return None
 
         # Resolve active capture from args or agent config
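
In short, the new helper replaces the hard-coded keyword table with a three-tier fallback: a structured-output call to a small model, then a generic generate_response pass with output_model, then a substring heuristic so forms keep working when no LLM is reachable. A minimal, self-contained sketch of that pattern, condensed to the first and last tiers (FakeProvider, detect_field, and FieldDetect are illustrative stand-ins, not the real llm_provider interface, which the helper reaches through self.agent_service and its enclosing process() locals):

    import asyncio
    from typing import Any, Dict, List, Optional
    from pydantic import BaseModel

    class FieldDetect(BaseModel):
        field: Optional[str] = None

    class FakeProvider:
        """Stand-in provider; a real one would call the LLM here."""
        async def parse_structured_output(self, prompt, system_prompt, model_class):
            return model_class(field="email")  # pretend the model picked "email"

    async def detect_field(provider, question: str, schema: Dict[str, Any]) -> Optional[str]:
        props: List[str] = list((schema.get("properties") or {}).keys())
        if not question or not props:
            return None
        # Tier 1: ask the model to classify the question against the schema keys
        try:
            result = await provider.parse_structured_output(
                prompt=f"Keys: {props}\nQuestion: {question}",
                system_prompt="Pick exactly one key, or null.",
                model_class=FieldDetect,
            )
            if result.field in props:
                return result.field
        except Exception:
            pass  # fall through to the heuristic
        # Last tier: the old substring match on the lowercased question
        t = question.lower()
        return next((k for k in props if k in t), None)

    schema = {"properties": {"email": {}, "name": {}}}
    print(asyncio.run(detect_field(FakeProvider(), "What is your email?", schema)))  # email

The committed version differs mainly in validating the result twice (attribute access, then model_dump()) and in logging each failed tier at debug level rather than silently swallowing it.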
@@ -334,7 +405,9 @@ def _missing(fields: List[str]) -> List[str]:
         missing_required = _missing(required_fields)
         missing_optional = _missing(optional_fields)
 
-        target_field: Optional[str] = _detect_field_from_prev_question(
+        target_field: Optional[
+            str
+        ] = await _detect_field_from_prev_question(
             prev_assistant, active_capture_schema
         )
         if not target_field:
if not target_field:
@@ -758,5 +831,5 @@ def py_type(js: Dict[str, Any]):
             else:
                 fields[field_name] = (typ, default)
 
-        Model = create_model(name, **fields)
+        Model = create_model(name, **fields)  # type: ignore
         return Model
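
The trailing # type: ignore only quiets the type checker on the dynamic **fields expansion; the construct itself is standard Pydantic create_model with (type, default) tuples, where Ellipsis marks a required field. A minimal sketch with hypothetical field names ("email", "rating") and model name ("Capture"):

    from typing import Any, Dict, Optional, Tuple
    from pydantic import create_model

    # (type, default) pairs; ... (Ellipsis) marks a required field
    fields: Dict[str, Tuple[Any, Any]] = {
        "email": (str, ...),              # required
        "rating": (Optional[int], None),  # optional
    }
    Capture = create_model("Capture", **fields)  # type: ignore

    m = Capture(email="a@b.co")
    print(m.model_dump())  # {'email': 'a@b.co', 'rating': None}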
