-package llm
+package ai
 
 import (
 	"context"
@@ -25,93 +25,17 @@ const (
 
 type Engine struct {
 	mode    EngineMode
-	config  *options.Config
-	channel chan StreamCompletionOutput
-	pipe    string
 	running bool
+	channel chan StreamCompletionOutput
 
 	convoStore convo.Store
+	model      *openai.Model
 
-	Model *openai.Model
+	Config *options.Config
 }
 
-func NewLLMEngine(mode EngineMode, cfg *options.Config) (*Engine, error) {
-	var api options.API
-	mod, ok := cfg.Models[cfg.Model]
-	if !ok {
-		if cfg.API == "" {
-			return nil, errbook.Wrap(
-				fmt.Sprintf(
-					"Model %s is not in the settings file.",
-					console.StderrStyles().InlineCode.Render(cfg.Model),
-				),
-				errbook.NewUserErrorf(
-					"Please specify an API endpoint with %s or configure the model in the settings: %s",
-					console.StderrStyles().InlineCode.Render("--api"),
-					console.StderrStyles().InlineCode.Render("ai -s"),
-				),
-			)
-		}
-		mod.Name = cfg.Model
-		mod.API = cfg.API
-		mod.MaxChars = cfg.MaxInputChars
-	}
-	if cfg.API != "" {
-		mod.API = cfg.API
-	}
-	for _, a := range cfg.APIs {
-		if mod.API == a.Name {
-			api = a
-			break
-		}
-	}
-	if api.Name == "" {
-		eps := make([]string, 0)
-		for _, a := range cfg.APIs {
-			eps = append(eps, console.StderrStyles().InlineCode.Render(a.Name))
-		}
-		return nil, errbook.Wrap(
-			fmt.Sprintf(
-				"The API endpoint %s is not configured.",
-				console.StderrStyles().InlineCode.Render(cfg.API),
-			),
-			errbook.NewUserErrorf(
-				"Your configured API endpoints are: %s",
-				eps,
-			),
-		)
-	}
-
-	key, err := ensureApiKey(api)
-	if err != nil {
-		return nil, err
-	}
-
-	var opts []openai.Option
-	opts = append(opts,
-		openai.WithModel(mod.Name),
-		openai.WithBaseURL(api.BaseURL),
-		openai.WithToken(key),
-	)
-	llm, err := openai.New(opts...)
-	if err != nil {
-		return nil, err
-	}
-
-	chatHistory, err := convo.GetConversationStore(cfg)
-	if err != nil {
-		return nil, errbook.Wrap("Failed to get chat convo store.", err)
-	}
-
-	return &Engine{
-		mode:       mode,
-		config:     cfg,
-		Model:      llm,
-		channel:    make(chan StreamCompletionOutput),
-		pipe:       "",
-		running:    false,
-		convoStore: chatHistory,
-	}, nil
+func NewLLMEngine(ops ...EngineOption) (*Engine, error) {
+	return applyEngineOptions(ops...)
 }
 
 func (e *Engine) SetMode(m EngineMode) {
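The monolithic constructor collapses into a functional-options call. `EngineOption` and `applyEngineOptions` are defined outside this diff, so the following is only a plausible sketch of their shape; the `WithMode`/`WithConfig` option names are hypothetical:

```go
// Hypothetical sketch only: EngineOption and applyEngineOptions are not
// part of this diff, so the real definitions may differ.
type EngineOption func(*Engine) error

// WithMode sets the engine mode (assumed option name).
func WithMode(m EngineMode) EngineOption {
	return func(e *Engine) error { e.mode = m; return nil }
}

// WithConfig attaches the options.Config (assumed option name).
func WithConfig(cfg *options.Config) EngineOption {
	return func(e *Engine) error { e.Config = cfg; return nil }
}

// applyEngineOptions builds an Engine and applies each option in order.
func applyEngineOptions(ops ...EngineOption) (*Engine, error) {
	e := &Engine{channel: make(chan StreamCompletionOutput)}
	for _, op := range ops {
		if err := op(e); err != nil {
			return nil, err
		}
	}
	return e, nil
}
```

A call site would then read `NewLLMEngine(WithMode(mode), WithConfig(cfg))`, moving the model and conversation-store wiring into dedicated options instead of one constructor.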
@@ -122,10 +46,6 @@ func (e *Engine) GetMode() EngineMode {
 	return e.mode
 }
 
-func (e *Engine) SetPipe(pipe string) {
-	e.pipe = pipe
-}
-
 func (e *Engine) GetChannel() chan StreamCompletionOutput {
 	return e.channel
 }
@@ -152,7 +72,7 @@ func (e *Engine) CreateCompletion(ctx context.Context, messages []llms.ChatMessa
 		return nil, err
 	}
 
-	rsp, err := e.Model.GenerateContent(ctx, slices.Map(messages, convert), e.callOptions()...)
+	rsp, err := e.model.GenerateContent(ctx, slices.Map(messages, convert), e.callOptions()...)
 	if err != nil {
 		return nil, errbook.Wrap("Failed to create completion.", err)
 	}
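`convert` is not shown in the diff; it maps each stored `llms.ChatMessage` into the `llms.MessageContent` that langchaingo's `GenerateContent` expects. A hedged guess at its shape, using only standard langchaingo types:

```go
// Plausible form of convert (not from this diff): wrap the message text
// in a single-part MessageContent that keeps the original role.
func convert(m llms.ChatMessage) llms.MessageContent {
	return llms.MessageContent{
		Role:  m.GetType(),
		Parts: []llms.ContentPart{llms.TextPart(m.GetContent())},
	}
}
```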
@@ -178,7 +98,7 @@ func (e *Engine) CreateStreamCompletion(ctx context.Context, messages []llms.Cha
 	e.running = true
 
 	streamingFunc := func(ctx context.Context, chunk []byte) error {
-		if !e.config.Quiet {
+		if !e.Config.Quiet {
 			e.channel <- StreamCompletionOutput{
 				Content: string(chunk),
 				Last:    false,
@@ -192,14 +112,14 @@ func (e *Engine) CreateStreamCompletion(ctx context.Context, messages []llms.Cha
 	}
 
 	for _, v := range messages {
-		err := e.convoStore.AddMessage(ctx, e.config.CacheWriteToID, v)
+		err := e.convoStore.AddMessage(ctx, e.Config.CacheWriteToID, v)
 		if err != nil {
 			errbook.HandleError(errbook.Wrap("Failed to add user chat input message to convo", err))
 		}
 	}
 
 	messageParts := slices.Map(messages, convert)
-	rsp, err := e.Model.GenerateContent(ctx, messageParts, e.callOptions(streamingFunc)...)
+	rsp, err := e.model.GenerateContent(ctx, messageParts, e.callOptions(streamingFunc)...)
 	if err != nil {
 		e.running = false
 		return nil, errbook.Wrap("Failed to create stream completion.", err)
@@ -214,7 +134,7 @@ func (e *Engine) CreateStreamCompletion(ctx context.Context, messages []llms.Cha
 		}
 	}
 
-	if !e.config.Quiet {
+	if !e.Config.Quiet {
 		e.channel <- StreamCompletionOutput{
 			Content: "",
 			Last:    true,
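Unless `Quiet` is set, each chunk is pushed onto the engine's channel, and a final `Last: true` message marks the end of the stream. A minimal consumer sketch, using only names visible in this diff (assumes `fmt` is imported and `engine` is an `*Engine`):

```go
// Drain the stream until the Last sentinel; typically run in the
// goroutine that renders output while GenerateContent is streaming.
for out := range engine.GetChannel() {
	fmt.Print(out.Content)
	if out.Last {
		break
	}
}
```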
@@ -234,17 +154,17 @@ func (e *Engine) CreateStreamCompletion(ctx context.Context, messages []llms.Cha
 
 func (e *Engine) callOptions(streamingFunc ...func(ctx context.Context, chunk []byte) error) []llms.CallOption {
 	var opts []llms.CallOption
-	if e.config.MaxTokens > 0 {
-		opts = append(opts, llms.WithMaxTokens(e.config.MaxTokens))
+	if e.Config.MaxTokens > 0 {
+		opts = append(opts, llms.WithMaxTokens(e.Config.MaxTokens))
 	}
 	if len(streamingFunc) > 0 && streamingFunc[0] != nil {
 		opts = append(opts, llms.WithStreamingFunc(streamingFunc[0]))
 	}
-	opts = append(opts, llms.WithModel(e.config.Model))
-	opts = append(opts, llms.WithMaxLength(e.config.MaxInputChars))
-	opts = append(opts, llms.WithTemperature(e.config.Temperature))
-	opts = append(opts, llms.WithTopP(e.config.TopP))
-	opts = append(opts, llms.WithTopK(e.config.TopK))
+	opts = append(opts, llms.WithModel(e.Config.Model))
+	opts = append(opts, llms.WithMaxLength(e.Config.MaxInputChars))
+	opts = append(opts, llms.WithTemperature(e.Config.Temperature))
+	opts = append(opts, llms.WithTopP(e.Config.TopP))
+	opts = append(opts, llms.WithTopK(e.Config.TopK))
 	opts = append(opts, llms.WithMultiContent(false))
 
 	return opts
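Because `streamingFunc` is variadic, one builder serves both entry points: `CreateCompletion` calls `e.callOptions()` bare, while `CreateStreamCompletion` passes its chunk handler, adding only `llms.WithStreamingFunc` to an otherwise identical option set. Roughly:

```go
// Illustrative only: the two ways callOptions is invoked in this file.
base := e.callOptions() // blocking completion, no callback
streaming := e.callOptions(func(ctx context.Context, chunk []byte) error {
	fmt.Print(string(chunk)) // handle each chunk as it arrives
	return nil
})
_, _ = base, streaming
```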
@@ -256,8 +176,8 @@ func (e *Engine) setupChatContext(ctx context.Context, messages *[]llms.ChatMess
 		return errbook.New("no chat convo store found")
 	}
 
-	if !e.config.NoCache && e.config.CacheReadFromID != "" {
-		history, err := store.Messages(ctx, e.config.CacheReadFromID)
+	if !e.Config.NoCache && e.Config.CacheReadFromID != "" {
+		history, err := store.Messages(ctx, e.Config.CacheReadFromID)
 		if err != nil {
 			return errbook.Wrap(fmt.Sprintf(
 				"There was a problem reading the cache. Use %s / %s to disable it.",
@@ -272,8 +192,8 @@ func (e *Engine) setupChatContext(ctx context.Context, messages *[]llms.ChatMess
 }
 
 func (e *Engine) appendAssistantMessage(content string) {
-	if e.convoStore != nil && e.config.CacheWriteToID != "" {
-		if err := e.convoStore.AddAIMessage(context.Background(), e.config.CacheWriteToID, content); err != nil {
+	if e.convoStore != nil && e.Config.CacheWriteToID != "" {
+		if err := e.convoStore.AddAIMessage(context.Background(), e.Config.CacheWriteToID, content); err != nil {
 			errbook.HandleError(errbook.Wrap("failed to add assistant chat output message to convo", err))
 		}
 	}
@@ -302,8 +222,8 @@ func ensureApiKey(api options.API) (string, error) {
 		fmt.Sprintf(
 			"%[1]s required; set the environment variable %[1]s or update %[2]s through %[3]s.",
 			console.StderrStyles().InlineCode.Render(api.APIKeyEnv),
-			console.StderrStyles().InlineCode.Render("config.yaml"),
-			console.StderrStyles().InlineCode.Render("ai config"),
+			console.StderrStyles().InlineCode.Render("Config.yaml"),
+			console.StderrStyles().InlineCode.Render("ai Config"),
 		),
 		errbook.NewUserErrorf(
 			"You can grab one at %s.",
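Going by the error text, `ensureApiKey` resolves the key from the environment variable that the endpoint declares (`api.APIKeyEnv`). A simplified stand-in for that flow; apart from `APIKeyEnv`, everything below is assumed (assumes `os` is imported):

```go
// Hypothetical simplification of ensureApiKey: prefer the declared
// environment variable, otherwise surface the user-facing error above.
func lookupAPIKey(api options.API) (string, error) {
	if key := os.Getenv(api.APIKeyEnv); key != "" {
		return key, nil
	}
	return "", fmt.Errorf("%s is not set", api.APIKeyEnv)
}
```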