diff --git a/src/humanloop/agents/raw_client.py b/src/humanloop/agents/raw_client.py
index b13491a6..fbf5ed98 100644
--- a/src/humanloop/agents/raw_client.py
+++ b/src/humanloop/agents/raw_client.py
@@ -578,7 +578,7 @@ def _iter():
                 if _sse.data == None:
                     return
                 try:
-                    yield _sse.data()
+                    yield _sse.data
                 except Exception:
                     pass
                 return
@@ -869,7 +869,7 @@ def _iter():
                 if _sse.data == None:
                     return
                 try:
-                    yield _sse.data()
+                    yield _sse.data
                 except Exception:
                     pass
                 return
@@ -2410,7 +2410,7 @@ async def _iter():
                 if _sse.data == None:
                     return
                 try:
-                    yield _sse.data()
+                    yield _sse.data
                 except Exception:
                     pass
                 return
@@ -2701,7 +2701,7 @@ async def _iter():
                 if _sse.data == None:
                     return
                 try:
-                    yield _sse.data()
+                    yield _sse.data
                 except Exception:
                     pass
                 return
diff --git a/src/humanloop/prompts/raw_client.py b/src/humanloop/prompts/raw_client.py
index 2b907d91..a52c043e 100644
--- a/src/humanloop/prompts/raw_client.py
+++ b/src/humanloop/prompts/raw_client.py
@@ -673,7 +673,7 @@ def _iter():
                 if _sse.data == None:
                     return
                 try:
-                    yield _sse.data()
+                    yield _sse.data
                 except Exception:
                     pass
                 return
@@ -2489,7 +2489,7 @@ async def _iter():
                 if _sse.data == None:
                     return
                 try:
-                    yield _sse.data()
+                    yield _sse.data
                 except Exception:
                     pass
                 return
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index 72611b5d..955c035c 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -119,6 +119,22 @@ def eval_prompt(
         pytest.fail(f"Failed to create prompt {prompt_path}: {e}")
 
 
+@pytest.fixture(scope="function")
+def prompt(
+    humanloop_test_client: Humanloop, sdk_test_dir: str, openai_key: str, test_prompt_config: dict[str, Any]
+) -> Generator[TestIdentifiers, None, None]:
+    prompt_path = f"{sdk_test_dir}/prompt"
+    try:
+        response = humanloop_test_client.prompts.upsert(
+            path=prompt_path,
+            **test_prompt_config,
+        )
+        yield TestIdentifiers(file_id=response.id, file_path=response.path)
+        humanloop_test_client.prompts.delete(id=response.id)
+    except Exception as e:
+        pytest.fail(f"Failed to create prompt {prompt_path}: {e}")
+
+
 @pytest.fixture(scope="function")
 def output_not_null_evaluator(
     humanloop_test_client: Humanloop, sdk_test_dir: str
diff --git a/tests/integration/test_prompts.py b/tests/integration/test_prompts.py
new file mode 100644
index 00000000..8926a06c
--- /dev/null
+++ b/tests/integration/test_prompts.py
@@ -0,0 +1,47 @@
+from typing import Any
+
+from humanloop.client import Humanloop
+from tests.integration.conftest import TestIdentifiers
+
+
+def test_prompts_call(
+    humanloop_test_client: Humanloop,
+    prompt: TestIdentifiers,
+    test_prompt_config: dict[str, Any],
+) -> None:
+    response = humanloop_test_client.prompts.call(
+        path=prompt.file_path,
+        prompt={**test_prompt_config},
+        inputs={"question": "What is the capital of France?"},
+    )
+    assert response is not None
+    assert response.log_id is not None
+    assert response.logs is not None
+    for log in response.logs:
+        assert log is not None
+        assert log.output is not None or log.error is not None or log.output_message is not None
+        assert "Paris" in log.output
+    assert response.prompt.path == prompt.file_path
+
+
+def test_prompts_call_stream(
+    humanloop_test_client: Humanloop,
+    prompt: TestIdentifiers,
+    test_prompt_config: dict[str, Any],
+) -> None:
+    response = humanloop_test_client.prompts.call_stream(
+        path=prompt.file_path,
+        prompt={**test_prompt_config},
+        inputs={"question": "What is the capital of France?"},
+    )
+
+    output = ""
+    for chunk in response:
+        assert chunk is not None
+        assert chunk.output is not None or chunk.error is not None or chunk.output_message is not None
+        assert chunk.id is not None
+        assert chunk.prompt_id is not None
+        assert chunk.version_id is not None
+        output += chunk.output or ""  # intermediate chunks may carry no output
+
+    assert "Paris" in output