Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions src/humanloop/agents/raw_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -578,7 +578,7 @@ def _iter():
if _sse.data == None:
return
try:
yield _sse.data()
yield _sse.data
except Exception:
pass
return
Expand Down Expand Up @@ -869,7 +869,7 @@ def _iter():
if _sse.data == None:
return
try:
yield _sse.data()
yield _sse.data
except Exception:
pass
return
Expand Down Expand Up @@ -2410,7 +2410,7 @@ async def _iter():
if _sse.data == None:
return
try:
yield _sse.data()
yield _sse.data
except Exception:
pass
return
Expand Down Expand Up @@ -2701,7 +2701,7 @@ async def _iter():
if _sse.data == None:
return
try:
yield _sse.data()
yield _sse.data
except Exception:
pass
return
Expand Down
4 changes: 2 additions & 2 deletions src/humanloop/prompts/raw_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -673,7 +673,7 @@ def _iter():
if _sse.data == None:
return
try:
yield _sse.data()
yield _sse.data
except Exception:
pass
return
Expand Down Expand Up @@ -2489,7 +2489,7 @@ async def _iter():
if _sse.data == None:
return
try:
yield _sse.data()
yield _sse.data
except Exception:
pass
return
Expand Down
16 changes: 16 additions & 0 deletions tests/integration/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -119,6 +119,22 @@ def eval_prompt(
pytest.fail(f"Failed to create prompt {prompt_path}: {e}")


@pytest.fixture(scope="function")
def prompt(
    humanloop_test_client: Humanloop, sdk_test_dir: str, openai_key: str, test_prompt_config: dict[str, Any]
) -> Generator[TestIdentifiers, None, None]:
    """Create a Prompt file for a single test and delete it on teardown.

    Yields:
        TestIdentifiers with the created prompt's file id and path.
    """
    prompt_path = f"{sdk_test_dir}/prompt"
    # Only the creation step is wrapped: a failure in the test body must not
    # be swallowed and re-reported as a fixture setup failure.
    try:
        response = humanloop_test_client.prompts.upsert(
            path=prompt_path,
            **test_prompt_config,
        )
    except Exception as e:
        pytest.fail(f"Failed to create prompt {prompt_path}: {e}")
    try:
        yield TestIdentifiers(file_id=response.id, file_path=response.path)
    finally:
        # Guarantee cleanup even when the consuming test raises; previously
        # delete() was skipped on test failure, leaking the prompt.
        humanloop_test_client.prompts.delete(id=response.id)


@pytest.fixture(scope="function")
def output_not_null_evaluator(
humanloop_test_client: Humanloop, sdk_test_dir: str
Expand Down
45 changes: 45 additions & 0 deletions tests/integration/test_prompts.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
from humanloop.client import Humanloop
from tests.integration.conftest import TestIdentifiers


def test_prompts_call(
    humanloop_test_client: Humanloop,
    prompt: TestIdentifiers,
    test_prompt_config: dict,  # config kwargs, not TestIdentifiers: it is **-unpacked below
) -> None:
    """Call a prompt synchronously and verify the logged output mentions Paris."""
    response = humanloop_test_client.prompts.call(
        path=prompt.file_path,
        prompt={**test_prompt_config},
        inputs={"question": "What is the capital of the France?"},
    )
    assert response is not None
    assert response.log_id is not None
    assert response.logs is not None
    for log in response.logs:
        assert log is not None
        # Fixed precedence bug: the old expression parsed as
        # `log.output or log.error or (log.output_message is not None)`.
        assert log.output is not None or log.error is not None or log.output_message is not None
        # Fail with the provider error instead of a TypeError on `in None`.
        assert log.error is None, f"Prompt call returned an error: {log.error}"
        assert log.output is not None
        assert "Paris" in log.output
    assert response.prompt.path == prompt.file_path


def test_prompts_call_stream(
    humanloop_test_client: Humanloop,
    prompt: TestIdentifiers,
    test_prompt_config: dict,  # config kwargs, not TestIdentifiers: it is **-unpacked below
) -> None:
    """Stream a prompt call and verify the concatenated output mentions Paris."""
    response = humanloop_test_client.prompts.call_stream(
        path=prompt.file_path,
        prompt={**test_prompt_config},
        inputs={"question": "What is the capital of the France?"},
    )

    output = ""
    for chunk in response:
        assert chunk is not None
        # Fixed precedence bug: the old expression parsed as
        # `chunk.output or chunk.error or (chunk.output_message is not None)`.
        assert chunk.output is not None or chunk.error is not None or chunk.output_message is not None
        assert chunk.id is not None
        assert chunk.prompt_id is not None
        assert chunk.version_id is not None
        # Some chunks may carry no output text (e.g. metadata-only chunks);
        # the old `output += chunk.output` raised TypeError on None.
        output += chunk.output or ""

    assert "Paris" in output
Loading