Skip to content

Commit 55eb707

Browse files
authored
Allow to use credentials from environment variables (#2)
1 parent a1b7bb4 commit 55eb707

File tree

5 files changed

+39
-13
lines changed

5 files changed

+39
-13
lines changed

Justfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ default: install lint test
22

33
install:
44
uv lock --upgrade
5-
uv sync --frozen
5+
uv sync --frozen --all-groups
66

77
lint:
88
uv run --group lint ruff check

README.md

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -102,7 +102,16 @@ config = any_llm_client.MockLLMConfig(
102102
client = any_llm_client.get_client(config, ...)
103103
```
104104

105-
#### Using dynamic LLM config from environment with [pydantic-settings](https://docs.pydantic.dev/latest/concepts/pydantic_settings/)
105+
#### Configuration with environment variables
106+
107+
##### Credentials
108+
109+
Instead of passing credentials directly, you can set the corresponding environment variables:
110+
111+
- OpenAI: `ANY_LLM_CLIENT_OPENAI_AUTH_TOKEN`,
112+
- YandexGPT: `ANY_LLM_CLIENT_YANDEXGPT_AUTH_HEADER`, `ANY_LLM_CLIENT_YANDEXGPT_FOLDER_ID`.
113+
114+
##### LLM model config (with [pydantic-settings](https://docs.pydantic.dev/latest/concepts/pydantic_settings/))
106115

107116
```python
108117
import os
@@ -125,6 +134,8 @@ settings = Settings()
125134
client = any_llm_client.get_client(settings.llm_model, ...)
126135
```
127136

137+
Combined with the environment variables from the previous section, this lets you keep the LLM model configuration and the secrets separate.
138+
128139
#### Using clients directly
129140

130141
The recommended way to get LLM client is to call `any_llm_client.get_client()`. This way you can easily swap LLM models. If you prefer, you can use `any_llm_client.OpenAIClient` or `any_llm_client.YandexGPTClient` directly:

any_llm_client/clients/openai.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import contextlib
22
import dataclasses
3+
import os
34
import types
45
import typing
56
from http import HTTPStatus
@@ -15,12 +16,15 @@
1516
from any_llm_client.retry import RequestRetryConfig
1617

1718

19+
OPENAI_AUTH_TOKEN_ENV_NAME: typing.Final = "ANY_LLM_CLIENT_OPENAI_AUTH_TOKEN"
20+
21+
1822
class OpenAIConfig(LLMConfig):
1923
if typing.TYPE_CHECKING:
2024
url: str # pragma: no cover
2125
else:
2226
url: pydantic.HttpUrl
23-
auth_token: str | None = None
27+
auth_token: str | None = pydantic.Field(default_factory=lambda: os.environ.get(OPENAI_AUTH_TOKEN_ENV_NAME))
2428
model_name: str
2529
force_user_assistant_message_alternation: bool = False
2630
"Gemma 2 doesn't support {role: system, text: ...} message, and requires alternated messages"

any_llm_client/clients/yandexgpt.py

Lines changed: 17 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import contextlib
22
import dataclasses
3+
import os
34
import types
45
import typing
56
from http import HTTPStatus
@@ -14,13 +15,21 @@
1415
from any_llm_client.retry import RequestRetryConfig
1516

1617

18+
YANDEXGPT_AUTH_HEADER_ENV_NAME: typing.Final = "ANY_LLM_CLIENT_YANDEXGPT_AUTH_HEADER"
19+
YANDEXGPT_FOLDER_ID_ENV_NAME: typing.Final = "ANY_LLM_CLIENT_YANDEXGPT_FOLDER_ID"
20+
21+
1722
class YandexGPTConfig(LLMConfig):
1823
if typing.TYPE_CHECKING:
1924
url: str = "https://llm.api.cloud.yandex.net/foundationModels/v1/completion" # pragma: no cover
2025
else:
2126
url: pydantic.HttpUrl = "https://llm.api.cloud.yandex.net/foundationModels/v1/completion"
22-
auth_header: str | None = None
23-
folder_id: str | None = None
27+
auth_header: str = pydantic.Field( # type: ignore[assignment]
28+
default_factory=lambda: os.environ.get(YANDEXGPT_AUTH_HEADER_ENV_NAME), validate_default=True
29+
)
30+
folder_id: str = pydantic.Field( # type: ignore[assignment]
31+
default_factory=lambda: os.environ.get(YANDEXGPT_FOLDER_ID_ENV_NAME), validate_default=True
32+
)
2433
model_name: str
2534
model_version: str = "latest"
2635
max_tokens: int = 7400
@@ -79,10 +88,12 @@ def __init__(
7988
self.httpx_client = get_http_client_from_kwargs(httpx_kwargs)
8089

8190
def _build_request(self, payload: dict[str, typing.Any]) -> httpx.Request:
82-
headers: typing.Final = {"x-data-logging-enabled": "false"}
83-
if self.config.auth_header:
84-
headers["Authorization"] = self.config.auth_header
85-
return self.httpx_client.build_request(method="POST", url=str(self.config.url), json=payload, headers=headers)
91+
return self.httpx_client.build_request(
92+
method="POST",
93+
url=str(self.config.url),
94+
json=payload,
95+
headers={"Authorization": self.config.auth_header, "x-data-logging-enabled": "false"},
96+
)
8697

8798
def _prepare_payload(
8899
self, *, messages: str | list[Message], temperature: float = 0.2, stream: bool

tests/test_http.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -11,17 +11,17 @@ def test_http_timeout_is_added(self) -> None:
1111
original_kwargs: typing.Final = {"mounts": {"http://": None}}
1212
passed_kwargs: typing.Final = copy.deepcopy(original_kwargs)
1313

14-
result: typing.Final = get_http_client_from_kwargs(passed_kwargs)
14+
client: typing.Final = get_http_client_from_kwargs(passed_kwargs)
1515

16-
assert result.timeout == DEFAULT_HTTP_TIMEOUT
16+
assert client.timeout == DEFAULT_HTTP_TIMEOUT
1717
assert original_kwargs == passed_kwargs
1818

1919
def test_http_timeout_is_not_modified_if_set(self) -> None:
2020
timeout: typing.Final = httpx.Timeout(7, connect=5, read=3)
2121
original_kwargs: typing.Final = {"mounts": {"http://": None}, "timeout": timeout}
2222
passed_kwargs: typing.Final = copy.deepcopy(original_kwargs)
2323

24-
result: typing.Final = get_http_client_from_kwargs(passed_kwargs)
24+
client: typing.Final = get_http_client_from_kwargs(passed_kwargs)
2525

26-
assert result.timeout == timeout
26+
assert client.timeout == timeout
2727
assert original_kwargs == passed_kwargs

0 commit comments

Comments
 (0)