Skip to content

Commit d0e6f4a

Browse files
committed
feat(config): set gpt-4.1-mini as default model and add 2025 OpenAI models/pricing
1 parent afb356f commit d0e6f4a

File tree

5 files changed

+57
-28
lines changed

5 files changed

+57
-28
lines changed

README.md

Lines changed: 16 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -176,18 +176,25 @@ Configuration files are searched in this order:
176176

177177
### 🤖 Model Configuration
178178

179-
CommitLoom supports various OpenAI models with different cost implications:
179+
CommitLoom supports any OpenAI model for commit message generation. You can specify any model name (e.g., `gpt-4.1`, `gpt-4.1-mini`, `gpt-4.1-nano`, etc.) using the `MODEL_NAME` or `COMMITLOOM_MODEL` environment variable, or with the `-m`/`--model` CLI option.
180180

181-
| Model | Description | Cost per 1M tokens (Input/Output) | Best for |
182-
|-------|-------------|----------------------------------|----------|
183-
| gpt-4o-mini | Default, optimized for commits | $0.15/$0.60 | Most use cases |
184-
| gpt-4o | Latest model, powerful | $2.50/$10.00 | Complex analysis |
185-
| gpt-4o-2024-05-13 | Previous version | $5.00/$15.00 | Legacy support |
186-
| gpt-3.5-turbo | Fine-tuned version | $3.00/$6.00 | Training data |
181+
| Model | Description | Input (per 1M tokens) | Output (per 1M tokens) | Best for |
182+
|-----------------|------------------------------------|-----------------------|------------------------|-------------------------|
183+
| gpt-4.1 | Highest quality, 1M ctx, multimodal| $2.00 | $8.00 | Final docs, critical |
184+
| gpt-4.1-mini | Default, best cost/quality | $0.40 | $1.60 | Most use cases |
185+
| gpt-4.1-nano | Fastest, cheapest | $0.10 | $0.40 | Drafts, previews |
186+
| gpt-4o-mini | Legacy, cost-efficient | $0.15 | $0.60 | Legacy/compatibility |
187+
| gpt-4o | Legacy, powerful | $2.50 | $10.00 | Legacy/compatibility |
188+
| gpt-3.5-turbo | Legacy, fine-tuned | $3.00 | $6.00 | Training data |
189+
| gpt-4o-2024-05-13| Legacy, previous version | $5.00 | $15.00 | Legacy support |
187190

188-
You can change the model by setting the `MODEL_NAME` environment variable. The default `gpt-4o-mini` model is recommended as it provides the best balance of cost and quality for commit message generation. It's OpenAI's most cost-efficient small model that's smarter and cheaper than GPT-3.5 Turbo.
191+
> **Default model:** `gpt-4.1-mini` (best balance for documentation and code)
189192
190-
> Note: Prices are based on OpenAI's official pricing (https://openai.com/api/pricing/). Batch API usage can provide a 50% discount but responses will be returned within 24 hours.
193+
> **Warning:** If you use a model that is not in the above list, CommitLoom will still work, but cost estimation and token pricing will not be available for that model. You will see a warning in the CLI, and cost will be reported as zero. To add cost support for a new model, update the `model_costs` dictionary in `commitloom/config/settings.py`.
194+
195+
You can change the model by setting the `MODEL_NAME` environment variable. The default `gpt-4.1-mini` model is recommended as it provides the best balance of cost and quality for commit message generation; use `gpt-4.1-nano` if minimizing cost is the priority.
196+
197+
> Note: Prices are based on OpenAI's official pricing (https://openai.com/api/pricing/). Batch API usage can provide a 50% discount, but responses will be returned within 24 hours.
191198
192199
## ❓ FAQ
193200

commitloom/__main__.py

Lines changed: 9 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -51,28 +51,26 @@ def cli(ctx, debug: bool, version: bool = False) -> None:
5151
@click.option(
5252
"-m",
5353
"--model",
54-
type=click.Choice(list(config.model_costs.keys())),
55-
help=f"Specify the AI model to use (default: {config.default_model})"
54+
type=str, # Permitir cualquier string
55+
help=f"Specify any OpenAI model to use (default: {config.default_model})"
5656
)
5757
@click.pass_context
5858
def commit(ctx, yes: bool, combine: bool, model: str | None) -> None:
5959
"""Generate commit message and commit changes."""
6060
debug = ctx.obj.get("DEBUG", False)
6161

6262
try:
63-
# Use test_mode=True when running tests (detected by pytest)
6463
test_mode = "pytest" in sys.modules
65-
# Only pass API key if not in test mode and it exists
6664
api_key = None if test_mode else os.getenv("OPENAI_API_KEY")
67-
68-
# Initialize with test_mode
6965
loom = CommitLoom(test_mode=test_mode, api_key=api_key if api_key else None)
70-
71-
# Set custom model if specified
66+
# Validación personalizada para modelos OpenAI
7267
if model:
68+
if not model.startswith("gpt-"):
69+
console.print_warning(f"Model '{model}' does not appear to be a valid OpenAI model (should start with 'gpt-').")
70+
if model not in config.model_costs:
71+
console.print_warning(f"Model '{model}' is not in the known cost list. Cost estimation will be unavailable or inaccurate.")
7372
os.environ["COMMITLOOM_MODEL"] = model
7473
console.print_info(f"Using model: {model}")
75-
7674
loom.run(auto_commit=yes, combine_commits=combine, debug=debug)
7775
except (KeyboardInterrupt, Exception) as e:
7876
handle_error(e)
@@ -108,14 +106,15 @@ def help() -> None:
108106
loom commit Generate commit message for staged changes
109107
loom commit -y Skip confirmation prompts
110108
loom commit -c Combine all changes into a single commit
111-
loom commit -m MODEL Specify AI model to use
109+
loom commit -m MODEL Specify any OpenAI model to use
112110
loom stats Show usage statistics
113111
loom --version Display version information
114112
loom help Show this help message
115113
116114
[bold]Available Models:[/bold]
117115
{', '.join(config.model_costs.keys())}
118116
Default: {config.default_model}
117+
(You can use any OpenAI model name, but cost estimation is only available for the above models.)
119118
120119
[bold]Environment Setup:[/bold]
121120
1. Set OPENAI_API_KEY in your environment or in a .env file

commitloom/config/settings.py

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,7 @@ def from_env(cls) -> "Config":
8888
))
8989
default_model = os.getenv(
9090
"COMMITLOOM_MODEL",
91-
os.getenv("MODEL_NAME", "gpt-4o-mini")
91+
os.getenv("MODEL_NAME", "gpt-4.1-mini")
9292
)
9393

9494
return cls(
@@ -116,6 +116,20 @@ def from_env(cls) -> "Config":
116116
"*.min.css",
117117
],
118118
model_costs={
119+
# Nuevos modelos recomendados 2025
120+
"gpt-4.1": ModelCosts(
121+
input=0.00200, # $2.00 por 1M tokens
122+
output=0.00800, # $8.00 por 1M tokens
123+
),
124+
"gpt-4.1-mini": ModelCosts(
125+
input=0.00040, # $0.40 por 1M tokens
126+
output=0.00160, # $1.60 por 1M tokens
127+
),
128+
"gpt-4.1-nano": ModelCosts(
129+
input=0.00010, # $0.10 por 1M tokens
130+
output=0.00040, # $0.40 por 1M tokens
131+
),
132+
# Modelos legacy
119133
"gpt-4o-mini": ModelCosts(
120134
input=0.00015,
121135
output=0.00060,

commitloom/core/analyzer.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -58,9 +58,12 @@ def estimate_tokens_and_cost(text: str, model: str = config.default_model) -> tu
5858
Tuple of (estimated_tokens, estimated_cost)
5959
"""
6060
estimated_tokens = len(text) // config.token_estimation_ratio
61-
cost_per_token = config.model_costs[model].input / 1_000_000
61+
if model in config.model_costs:
62+
cost_per_token = config.model_costs[model].input / 1_000_000
63+
else:
64+
print(f"[WARNING] Cost estimation is not available for model '{model}'.")
65+
cost_per_token = 0.0
6266
estimated_cost = estimated_tokens * cost_per_token
63-
6467
return estimated_tokens, estimated_cost
6568

6669
@staticmethod

commitloom/services/ai_service.py

Lines changed: 12 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22

33
import json
44
from dataclasses import dataclass
5+
import os
56

67
import requests
78

@@ -29,10 +30,14 @@ def from_api_usage(
2930
completion_tokens = usage["completion_tokens"]
3031
total_tokens = usage["total_tokens"]
3132

32-
# Calculate costs - convert from per million tokens to actual cost
33-
# These costs are in EUR per 1M tokens, so we divide by 1M to get cost per token
34-
input_cost = (prompt_tokens / 1_000) * config.model_costs[model].input
35-
output_cost = (completion_tokens / 1_000) * config.model_costs[model].output
33+
# Si el modelo no está en la lista, coste 0 y advertencia
34+
if model in config.model_costs:
35+
input_cost = (prompt_tokens / 1_000) * config.model_costs[model].input
36+
output_cost = (completion_tokens / 1_000) * config.model_costs[model].output
37+
else:
38+
input_cost = 0.0
39+
output_cost = 0.0
40+
print(f"[WARNING] Cost estimation is not available for model '{model}'.")
3641
total_cost = input_cost + output_cost
3742

3843
return cls(
@@ -81,7 +86,8 @@ def __init__(self, api_key: str | None = None, test_mode: bool = False):
8186
raise ValueError("API key is required")
8287
self.api_key = api_key or config.api_key
8388
self.test_mode = test_mode
84-
self.model_name = config.default_model
89+
# Permitir override por variable de entorno
90+
self.model_name = os.getenv("COMMITLOOM_MODEL", config.default_model)
8591

8692
@property
8793
def model(self) -> str:
@@ -200,7 +206,7 @@ def generate_commit_message(
200206
}
201207

202208
data = {
203-
"model": config.default_model,
209+
"model": self.model_name,
204210
"messages": [{"role": "user", "content": prompt}],
205211
"response_format": {"type": "json_object"},
206212
"max_tokens": 1000,

0 commit comments

Comments
 (0)