
Commit 90af453

Merge branch 'develop' of https://github.com/unclecode/crawl4ai into develop

2 parents: 69961cf + 8bb0e68

File tree: 5 files changed (+17 additions, -29 deletions)

deploy/docker/README.md

Lines changed: 1 addition & 2 deletions

```diff
@@ -692,8 +692,7 @@ app:
 # Default LLM Configuration
 llm:
   provider: "openai/gpt-4o-mini" # Can be overridden by LLM_PROVIDER env var
-  api_key_env: "OPENAI_API_KEY"
-  # api_key: sk-... # If you pass the API key directly then api_key_env will be ignored
+  # api_key: sk-... # If you pass the API key directly (not recommended)

 # Redis Configuration (Used by internal Redis server managed by supervisord)
 redis:
```
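The net effect: with `api_key_env` gone, leaving `api_key` commented out defers credential lookup to litellm entirely. A minimal sketch of that behavior (assumes litellm is installed and `OPENAI_API_KEY` is exported; the model name mirrors the config default):

```python
import litellm

# With no api_key argument, litellm resolves the credential from the
# provider's standard environment variable (OPENAI_API_KEY for openai/* models).
response = litellm.completion(
    model="openai/gpt-4o-mini",
    messages=[{"role": "user", "content": "ping"}],
)
print(response.choices[0].message.content)
```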

deploy/docker/api.py

Lines changed: 3 additions & 3 deletions

```diff
@@ -96,7 +96,7 @@ async def handle_llm_qa(
     response = perform_completion_with_backoff(
         provider=config["llm"]["provider"],
         prompt_with_variables=prompt,
-        api_token=get_llm_api_key(config)
+        api_token=get_llm_api_key(config)  # Returns None to let litellm handle it
     )

     return response.choices[0].message.content
@@ -127,7 +127,7 @@ async def process_llm_extraction(
             "error": error_msg
         })
         return
-    api_key = get_llm_api_key(config, provider)
+    api_key = get_llm_api_key(config, provider)  # Returns None to let litellm handle it
     llm_strategy = LLMExtractionStrategy(
         llm_config=LLMConfig(
             provider=provider or config["llm"]["provider"],
@@ -203,7 +203,7 @@ async def handle_markdown_request(
         FilterType.LLM: LLMContentFilter(
             llm_config=LLMConfig(
                 provider=provider or config["llm"]["provider"],
-                api_token=get_llm_api_key(config, provider),
+                api_token=get_llm_api_key(config, provider),  # Returns None to let litellm handle it
             ),
             instruction=query or "Extract main content"
         )
```
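All three call sites follow the same pattern: pass the helper's result (now possibly `None`) as the token and let litellm resolve the key when the request is made. A hedged sketch of the extraction path using crawl4ai's `LLMConfig` and `LLMExtractionStrategy` (import paths per crawl4ai's docs; the instruction string is illustrative):

```python
from crawl4ai import LLMConfig
from crawl4ai.extraction_strategy import LLMExtractionStrategy

# api_token=None defers credential lookup to litellm, which reads the
# provider's standard environment variable at request time.
strategy = LLMExtractionStrategy(
    llm_config=LLMConfig(
        provider="openai/gpt-4o-mini",
        api_token=None,
    ),
    instruction="Extract the main article content",
)
```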

deploy/docker/config.yml

Lines changed: 1 addition & 2 deletions

```diff
@@ -11,8 +11,7 @@ app:
 # Default LLM Configuration
 llm:
   provider: "openai/gpt-4o-mini"
-  api_key_env: "OPENAI_API_KEY"
-  # api_key: sk-... # If you pass the API key directly then api_key_env will be ignored
+  # api_key: sk-... # If you pass the API key directly (not recommended)

 # Redis Configuration
 redis:
```

deploy/docker/utils.py

Lines changed: 10 additions & 19 deletions

```diff
@@ -71,27 +71,22 @@ def decode_redis_hash(hash_data: Dict[bytes, bytes]) -> Dict[str, str]:



-def get_llm_api_key(config: Dict, provider: Optional[str] = None) -> str:
+def get_llm_api_key(config: Dict, provider: Optional[str] = None) -> Optional[str]:
     """Get the appropriate API key based on the LLM provider.

     Args:
         config: The application configuration dictionary
         provider: Optional provider override (e.g., "openai/gpt-4")

     Returns:
-        The API key for the provider, or empty string if not found
+        The API key if directly configured, otherwise None to let litellm handle it
     """
-
-    # Use provided provider or fall back to config
-    if not provider:
-        provider = config["llm"]["provider"]
-
-    # Check if direct API key is configured
+    # Check if direct API key is configured (for backward compatibility)
     if "api_key" in config["llm"]:
         return config["llm"]["api_key"]

-    # Fall back to the configured api_key_env if no match
-    return os.environ.get(config["llm"].get("api_key_env", ""), "")
+    # Return None - litellm will automatically find the right environment variable
+    return None


 def validate_llm_provider(config: Dict, provider: Optional[str] = None) -> tuple[bool, str]:
@@ -104,16 +99,12 @@ def validate_llm_provider(config: Dict, provider: Optional[str] = None) -> tuple[bool, str]:
     Returns:
         Tuple of (is_valid, error_message)
     """
-    # Use provided provider or fall back to config
-    if not provider:
-        provider = config["llm"]["provider"]
-
-    # Get the API key for this provider
-    api_key = get_llm_api_key(config, provider)
-
-    if not api_key:
-        return False, f"No API key found for provider '{provider}'. Please set the appropriate environment variable."
+    # If a direct API key is configured, validation passes
+    if "api_key" in config["llm"]:
+        return True, ""

+    # Otherwise, trust that litellm will find the appropriate environment variable
+    # We can't easily validate this without reimplementing litellm's logic
     return True, ""
```
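A quick illustration of the simplified contract, as one might exercise it in a test (the import path and the non-OpenAI provider string are illustrative):

```python
# Import path is illustrative; these helpers live in deploy/docker/utils.py.
from utils import get_llm_api_key, validate_llm_provider

config = {"llm": {"provider": "openai/gpt-4o-mini"}}

# No direct key in config: the helper returns None and litellm resolves
# the credential from the environment at request time.
assert get_llm_api_key(config) is None

# Validation no longer probes the environment; it trusts litellm, so a
# genuinely missing key only surfaces as an error at call time.
ok, error = validate_llm_provider(config, provider="anthropic/claude-3-haiku")
assert ok and error == ""

# A directly configured key still short-circuits, for backward compatibility.
config["llm"]["api_key"] = "sk-test"
assert get_llm_api_key(config) == "sk-test"
```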

docs/md_v2/core/docker-deployment.md

Lines changed: 2 additions & 3 deletions

```diff
@@ -176,7 +176,7 @@ The Docker setup now supports flexible LLM provider configuration through three

 3. **Config File Default**: Falls back to `config.yml` (default: `openai/gpt-4o-mini`)

-The system automatically selects the appropriate API key based on the configured `api_key_env` in the config file.
+The system automatically selects the appropriate API key based on the provider. LiteLLM handles finding the correct environment variable for each provider (e.g., OPENAI_API_KEY for OpenAI, GEMINI_API_TOKEN for Google Gemini, etc.).

 #### 3. Build and Run with Compose

@@ -693,8 +693,7 @@ app:
 # Default LLM Configuration
 llm:
   provider: "openai/gpt-4o-mini" # Can be overridden by LLM_PROVIDER env var
-  api_key_env: "OPENAI_API_KEY"
-  # api_key: sk-... # If you pass the API key directly then api_key_env will be ignored
+  # api_key: sk-... # If you pass the API key directly (not recommended)

 # Redis Configuration (Used by internal Redis server managed by supervisord)
 redis:
```
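After bringing the stack up, one way to confirm the key handoff works end to end is to hit the server's LLM endpoint. A hedged sketch: port 11235 is crawl4ai's documented default, and the `/llm/{url}?q=...` shape is assumed from `handle_llm_qa` above; adjust both to your deployment.

```python
from urllib.parse import quote

import requests

# The container inherits OPENAI_API_KEY (or the provider's equivalent)
# from the host environment; no key is stored in config.yml.
url = quote("https://example.com", safe="")
resp = requests.get(
    f"http://localhost:11235/llm/{url}",
    params={"q": "What is this page about?"},
    timeout=120,
)
resp.raise_for_status()
print(resp.json())
```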
