Commit b03fdde
refactor(logging): replace sendToOutput with structured logger
Introduce a new Logger class using VS Code's LogOutputChannel for structured logging with trace, debug, info, warn, and error levels. Update all service files to use the logger instance instead of the deprecated sendToOutput function, improving debugging and respecting the user's log level settings. Keep deprecated sendToOutput/clearOutput exports for backward compatibility.
1 parent 9b2ef1d commit b03fdde

File tree: 7 files changed, +168 −61 lines
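
Note: the seventh changed file, src/utils/log.ts, is not shown in the diff view below. Based on the call sites in the other six files (logger.trace/debug/info/error, logger.clear()) and the deprecated exports mentioned in the commit message, here is a minimal sketch of what the new Logger might look like — the channel name "Diffy" and the exact method signatures are assumptions:

// src/utils/log.ts — sketch, not the actual committed file
import * as vscode from "vscode";

class Logger {
  private readonly channel: vscode.LogOutputChannel;

  constructor(name: string) {
    // Passing { log: true } returns a LogOutputChannel, which timestamps
    // entries and filters them by the user's configured log level.
    this.channel = vscode.window.createOutputChannel(name, { log: true });
  }

  trace(message: string, ...args: unknown[]): void {
    this.channel.trace(message, ...args);
  }

  debug(message: string, ...args: unknown[]): void {
    this.channel.debug(message, ...args);
  }

  info(message: string, ...args: unknown[]): void {
    this.channel.info(message, ...args);
  }

  warn(message: string, ...args: unknown[]): void {
    this.channel.warn(message, ...args);
  }

  error(message: string, error?: unknown): void {
    // LogOutputChannel.error accepts extra args; pass the cause through.
    this.channel.error(message, error);
  }

  clear(): void {
    this.channel.clear();
  }
}

// Channel name is a guess; the extension presumably uses its display name.
export const logger = new Logger("Diffy");

/** @deprecated Use logger.info() instead. Kept for backward compatibility. */
export const sendToOutput = (message: string): void => logger.info(message);

/** @deprecated Use logger.clear() instead. Kept for backward compatibility. */
export const clearOutput = (): void => logger.clear();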

src/Diffy.ts — 4 additions, 4 deletions

@@ -10,7 +10,7 @@ import OpenAiService from "./service/OpenAiService";
 import VsCodeLlmService from "./service/VsCodeLlmService";
 import WindowService from "./service/WindowService";
 import WorkspaceService from "./service/WorkspaceService";
-import { sendToOutput } from "./utils/log";
+import { logger } from "./utils/log";

 class Diffy extends BaseDiffy {
   static _instance: Diffy;
@@ -42,7 +42,7 @@ class Diffy extends BaseDiffy {
       this.onWorkSpaceChanged();
     });
     this.isEnabled = true;
-    sendToOutput("initiated");
+    logger.info("Diffy extension initialized");
   }

   /**
@@ -128,7 +128,7 @@ class Diffy extends BaseDiffy {
     const codebaseContext = await this.getCodebaseIndexService().getCodebaseContext();

     if (codebaseContext) {
-      sendToOutput(`Adding codebase context (${strategy} mode) to prompt`);
+      logger.info("Adding codebase context to prompt", { strategy });

       // If using structured mode, also analyze the diff
       if (strategy === "structured") {
@@ -143,7 +143,7 @@ class Diffy extends BaseDiffy {
       return `${codebaseContext}\n\nDIFF:\n${diff}`;
     }
   } catch (error) {
-    sendToOutput(`Error getting codebase context: ${error}`);
+    logger.error("Error getting codebase context", error);
   }

   return diff;

src/service/CodebaseIndexService.ts — 21 additions, 19 deletions

@@ -1,6 +1,6 @@
 import { countTokens } from "gpt-tokenizer";
 import * as vscode from "vscode";
-import { sendToOutput } from "../utils/log";
+import { logger } from "../utils/log";
 import WorkspaceService from "./WorkspaceService";

 interface IndexedFileContent {
@@ -286,7 +286,7 @@ export default class CodebaseIndexService {

     const workspaceFolder = workspaceService.getCurrentWorkspace();
     if (!workspaceFolder) {
-      sendToOutput("No workspace folder found for codebase indexing");
+      logger.info("No workspace folder found for codebase indexing");
       return null;
     }

@@ -298,19 +298,17 @@ export default class CodebaseIndexService {
     // Auto-detect project files if indexedFiles is empty or includes "auto"
     if (!indexedFiles || indexedFiles.length === 0 || indexedFiles.includes("auto")) {
       const autoDetected = await this.autoDetectProjectFiles(workspaceFolder);
-      sendToOutput(
-        `Auto-detected ${autoDetected.length} project files: ${autoDetected.join(", ")}`,
-      );
+      logger.info(`Auto-detected ${autoDetected.length} project files: ${autoDetected.join(", ")}`);
       indexedFiles = autoDetected;
     }

     if (indexedFiles.length === 0) {
-      sendToOutput("No files configured for indexing");
+      logger.info("No files configured for indexing");
       return null;
     }

-    sendToOutput(`Starting smart codebase indexing with max file size: ${maxFileSizeKB}KB`);
-    sendToOutput(`Files to index: ${indexedFiles.join(", ")}`);
+    logger.info(`Starting smart codebase indexing with max file size: ${maxFileSizeKB}KB`);
+    logger.info(`Files to index: ${indexedFiles.join(", ")}`);

     const indexedContent: IndexedFileContent[] = [];
     let totalTokens = 0;
@@ -341,7 +339,7 @@ export default class CodebaseIndexService {
       if (cached && totalTokens + cached.tokens <= maxTotalTokens) {
         indexedContent.push(cached);
         totalTokens += cached.tokens;
-        sendToOutput(`Using cached ${filePattern}: ${cached.tokens} tokens`);
+        logger.info(`Using cached ${filePattern}: ${cached.tokens} tokens`);
         continue;
       }
     }
@@ -352,7 +350,7 @@ export default class CodebaseIndexService {

       // Skip if file is too large
       if (fileStat.size > maxFileSizeBytes) {
-        sendToOutput(
+        logger.info(
           `Skipping ${filePattern}: file size ${(fileStat.size / this.KB_TO_BYTES).toFixed(
             1,
           )}KB exceeds limit of ${maxFileSizeKB}KB`,
@@ -372,7 +370,7 @@ export default class CodebaseIndexService {

       // Check if adding this file would exceed total token budget
       if (totalTokens + tokenCount > maxTotalTokens) {
-        sendToOutput(
+        logger.info(
           `Skipping ${filePattern}: would exceed total token budget (${
             totalTokens + tokenCount
           } > ${maxTotalTokens})`,
@@ -391,18 +389,18 @@ export default class CodebaseIndexService {
       this.cache.set(filePattern, indexedItem);

       totalTokens += tokenCount;
-      sendToOutput(`Indexed ${filePattern}: ${tokenCount} tokens (analyzed)`);
+      logger.info(`Indexed ${filePattern}: ${tokenCount} tokens (analyzed)`);
     } catch {
       // File doesn't exist, skip silently
-      sendToOutput(`File not found: ${filePattern}`);
+      logger.info(`File not found: ${filePattern}`);
     }
   } catch (error) {
-    sendToOutput(`Error reading ${filePattern}: ${error}`);
+    logger.info(`Error reading ${filePattern}: ${error}`);
   }
 }

 if (indexedContent.length === 0) {
-  sendToOutput("No files were successfully indexed");
+  logger.info("No files were successfully indexed");
   return null;
 }

@@ -435,7 +433,11 @@ export default class CodebaseIndexService {
         strategy: "structured",
       },
     };
-    formattedContext = `CODEBASE CONTEXT (Structured):\n${JSON.stringify(structuredData, null, 2)}`;
+    formattedContext = `CODEBASE CONTEXT (Structured):\n${JSON.stringify(
+      structuredData,
+      null,
+      2,
+    )}`;
     break;
   }

@@ -447,15 +449,15 @@ export default class CodebaseIndexService {
       return `${item.file}: ${item.content} [${item.tokens} tokens]`;
     });
     formattedContext = `CODEBASE (AST-Enhanced):\n${astLines.join("\n")}`;
-    sendToOutput("AST-based indexing: Full implementation pending. Using enhanced format.");
+    logger.info("AST-based indexing: Full implementation pending. Using enhanced format.");
     break;
   }

   default:
     formattedContext = `PROJECT: ${indexedContent.map((i) => i.content).join(" • ")}`;
 }

-sendToOutput(
+logger.info(
   `Indexed [${strategy}]: ${indexedContent.length} files, ${totalTokens} tokens (${this.cache.size} cached)`,
 );

@@ -467,6 +469,6 @@ export default class CodebaseIndexService {
  */
 public clearCache(): void {
   this.cache.clear();
-  sendToOutput("Codebase index cache cleared");
+  logger.info("Codebase index cache cleared");
 }
}

src/service/DiffAnalyzer.ts — 3 additions, 3 deletions

@@ -1,4 +1,4 @@
-import { sendToOutput } from "../utils/log";
+import { logger } from "../utils/log";

 /**
  * Structured representation of a file change
@@ -56,7 +56,7 @@ export default class DiffAnalyzer {
  * @returns Structured diff context
  */
 async analyzeGitDiff(diff: string): Promise<DiffContext> {
-  sendToOutput("Starting structured diff analysis");
+  logger.info("Starting structured diff analysis");

   const files: FileChange[] = [];
   const affectedModules = new Set<string>();
@@ -112,7 +112,7 @@ export default class DiffAnalyzer {
     },
   };

-  sendToOutput(`Analyzed ${files.length} files: +${totalAdditions} -${totalDeletions}`);
+  logger.info(`Analyzed ${files.length} files: +${totalAdditions} -${totalDeletions}`);

   return context;
 }

src/service/GeminiService.ts — 13 additions, 10 deletions

@@ -2,7 +2,7 @@ import { GoogleGenAI } from "@google/genai";
 import type * as vscode from "vscode";
 import { window } from "vscode";
 import { cleanAiResponse } from "../utils/aiResponse";
-import { clearOutput, sendToOutput } from "../utils/log";
+import { logger } from "../utils/log";
 import { CacheService } from "./CacheService";
 import WorkspaceService from "./WorkspaceService";

@@ -84,18 +84,20 @@ class GeminiService implements AIService {

   if (exist) {
     const result = this.cacheService.get(model, instructions + prompt) as string;
-    sendToOutput(`result: ${JSON.stringify(result)}`);
+    logger.debug("Gemini cache hit", { model });
     return result;
   }

   progress?.report({ increment: 50 });

-  clearOutput();
-  sendToOutput(`instructions: ${instructions}`);
-  sendToOutput(`git diff prompt: ${prompt}`);
-  sendToOutput(`model: ${model}`);
-  sendToOutput(`temperature: ${WorkspaceService.getInstance().getTemp()}`);
-  sendToOutput(`max_tokens: ${WorkspaceService.getInstance().getMaxTokens()}`);
+  logger.clear();
+  logger.info("Gemini request", {
+    model,
+    temperature: WorkspaceService.getInstance().getTemp(),
+    maxTokens: WorkspaceService.getInstance().getMaxTokens(),
+  });
+  logger.trace("System instructions", instructions);
+  logger.trace("User prompt", prompt);

   let response: string | undefined;
   try {
@@ -118,11 +120,12 @@ class GeminiService implements AIService {
     });

     response = result.text;
-    sendToOutput(`result success: ${JSON.stringify(response)}`);
+    logger.info("Gemini response received");
+    logger.debug("Gemini full response", response);
     progress?.report({ increment: 49 });
   } catch (reason: unknown) {
+    logger.error("Gemini request failed", reason);
     console.error(reason);
-    sendToOutput(`result failed: ${JSON.stringify(reason)}`);

     const hasResponse = (
       err: unknown,

src/service/OpenAiService.ts — 17 additions, 11 deletions

@@ -2,7 +2,7 @@ import OpenAI from "openai";
 import type * as vscode from "vscode";
 import { window } from "vscode";
 import { cleanAiResponse } from "../utils/aiResponse";
-import { clearOutput, sendToOutput } from "../utils/log";
+import { logger } from "../utils/log";
 import { CacheService } from "./CacheService";
 import WorkspaceService from "./WorkspaceService";

@@ -106,7 +106,7 @@ class OpenAiService implements AIService {
     model,
     instructions + prompt,
   ) as OpenAI.Chat.Completions.ChatCompletion;
-  sendToOutput(`result: ${JSON.stringify(result)}`);
+  logger.debug("OpenAI cache hit", { model });
   return result;
 }
 if (!openAIKey) {
@@ -136,22 +136,28 @@ class OpenAiService implements AIService {
   temperature: WorkspaceService.getInstance().getTemp(),
   max_tokens: WorkspaceService.getInstance().getMaxTokens(),
 };
-clearOutput();
-sendToOutput(`instructions: ${instructions}`);
-sendToOutput(`git diff prompt: ${prompt}`);
-sendToOutput(`base url: ${openAiClient.baseURL}`);
-sendToOutput(`model: ${params.model}`);
-sendToOutput(`max_tokens: ${params.max_tokens}`);
-sendToOutput(`temperature: ${params.temperature}`);
+logger.clear();
+logger.info("OpenAI request", {
+  baseUrl: openAiClient.baseURL,
+  model: params.model,
+  maxTokens: params.max_tokens,
+  temperature: params.temperature,
+});
+logger.trace("System instructions", instructions);
+logger.trace("User prompt", prompt);

 let response: OpenAI.Chat.Completions.ChatCompletion | undefined;
 try {
   response = await openAiClient.chat.completions.create(params);
-  sendToOutput(`result success: ${JSON.stringify(response)}`);
+  logger.info("OpenAI response received", {
+    usage: response.usage,
+    model: response.model,
+  });
+  logger.debug("OpenAI full response", response);
   progress?.report({ increment: 49 });
 } catch (reason: unknown) {
+  logger.error("OpenAI request failed", reason);
   console.error(reason);
-  sendToOutput(`result failed: ${JSON.stringify(reason)}`);

   // Type guard for error with response property
   const hasResponse = (

src/service/VsCodeLlmService.ts — 16 additions, 9 deletions

@@ -1,7 +1,7 @@
 import * as vscode from "vscode";
 import { window } from "vscode";
 import { cleanAiResponse } from "../utils/aiResponse";
-import { clearOutput, sendToOutput } from "../utils/log";
+import { logger } from "../utils/log";
 import { CacheService } from "./CacheService";
 import WorkspaceService from "./WorkspaceService";

@@ -89,15 +89,15 @@ class VsCodeLlmService implements AIService {
   const exist = this.cacheService.recordExists(vscodeLmModel, cacheKey);
   if (exist) {
     const result = this.cacheService.get(vscodeLmModel, cacheKey) as string;
-    sendToOutput(`result (cached): ${result}`);
+    logger.debug("VS Code LLM cache hit", { model: vscodeLmModel });
     return result;
   }

   try {
-    clearOutput();
-    sendToOutput(`instructions: ${instructions}`);
-    sendToOutput(`git diff prompt: ${prompt}`);
-    sendToOutput(`model: ${vscodeLmModel}`);
+    logger.clear();
+    logger.info("VS Code LLM request", { model: vscodeLmModel });
+    logger.trace("System instructions", instructions);
+    logger.trace("User prompt", prompt);

     // Select the appropriate model based on settings
     let models: vscode.LanguageModelChat[] = [];
@@ -277,7 +277,11 @@ class VsCodeLlmService implements AIService {
   }

   const [model] = models;
-  sendToOutput(`Selected model: ${model.id} (${model.vendor}/${model.family})`);
+  logger.info("Selected VS Code LLM model", {
+    id: model.id,
+    vendor: model.vendor,
+    family: model.family,
+  });

   progress?.report({ increment: 30 });

@@ -299,7 +303,10 @@ class VsCodeLlmService implements AIService {
     responseText += fragment;
   }

-  sendToOutput(`result success: ${responseText}`);
+  logger.info("VS Code LLM response received", {
+    responseLength: responseText.length,
+  });
+  logger.debug("VS Code LLM full response", responseText);

   // Cache the result
   if (responseText && responseText.length > 6) {
@@ -315,8 +322,8 @@ class VsCodeLlmService implements AIService {

   return responseText;
 } catch (error: unknown) {
+  logger.error("VS Code LLM request failed", error);
   console.error(error);
-  sendToOutput(`result failed: ${JSON.stringify(error)}`);

   if (error instanceof vscode.LanguageModelError) {
     // Handle specific LLM errors
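
One practical effect of the LogOutputChannel migration, assuming the default log level of Info: the full prompts and responses now logged at trace/debug level stay hidden unless the user raises the channel's verbosity (e.g. via the "Developer: Set Log Level..." command), so the output channel is no longer flooded on every request.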
