From 74a467bbd402d0b98274e735233eb59309ee0ef1 Mon Sep 17 00:00:00 2001
From: Falko Habel
Date: Tue, 13 Aug 2024 22:22:53 +0200
Subject: [PATCH] updated code

---
 package.json     |   2 +-
 src/extension.ts | 364 +++++++++++++++++++++++++----------------------
 2 files changed, 191 insertions(+), 175 deletions(-)

diff --git a/package.json b/package.json
index fa30404..4f64025 100644
--- a/package.json
+++ b/package.json
@@ -2,7 +2,7 @@
   "name": "fabelous-autocoder",
   "displayName": "Fabelous Autocoder",
   "description": "A simple to use Ollama autocompletion engine with options exposed and streaming functionality",
-  "version": "0.0.4",
+  "version": "0.1.0",
   "icon": "icon.png",
   "publisher": "fabel",
   "license": "CC BY-ND 4.0",
diff --git a/src/extension.ts b/src/extension.ts
index 1eb2b4f..6977072 100644
--- a/src/extension.ts
+++ b/src/extension.ts
@@ -1,5 +1,3 @@
-// Original script was GPT4 but it has been deeply Ship of Theseused.
-
 import * as vscode from "vscode";
 import axios from "axios";
 
@@ -19,53 +17,73 @@ let continueInline: boolean | undefined;
 let keepAlive: number | undefined;
 let topP: number | undefined;
-
 function updateVSConfig() {
-    VSConfig = vscode.workspace.getConfiguration("fabelous-autocoder");
-    apiEndpoint = VSConfig.get("endpoint") || "http://localhost:11434/api/generate";
-    apiAuthentication = VSConfig.get("authentication") || "";
-    apiModel = VSConfig.get("model") || "openhermes2.5-mistral:7b-q4_K_M"; // The model I tested with
-    apiMessageHeader = VSConfig.get("message header") || "";
-    numPredict = VSConfig.get("max tokens predicted") || 1000;
-    promptWindowSize = VSConfig.get("prompt window size") || 2000;
-    completionKeys = VSConfig.get("completion keys") || " ";
-    responsePreview = VSConfig.get("response preview");
-    responsePreviewMaxTokens = VSConfig.get("preview max tokens") || 50;
-    responsePreviewDelay = VSConfig.get("preview delay") || 0; // Must be || 0 instead of || [default] because of truthy
-    continueInline = VSConfig.get("continue inline");
-    apiTemperature = VSConfig.get("temperature") || 0.7;
-    keepAlive = VSConfig.get("keep alive") || 30;
-    topP = VSConfig.get("top p") || 1;
+    VSConfig = vscode.workspace.getConfiguration("fabelous-autocoder");
+    apiEndpoint = VSConfig.get("endpoint") || "http://localhost:11434/api/generate";
+    apiAuthentication = VSConfig.get("authentication") || "";
+    apiModel = VSConfig.get("model") || "fabelous-coder:latest"; // Updated to use FIM model
+    apiMessageHeader = VSConfig.get("message header") || "";
+    numPredict = VSConfig.get("max tokens predicted") || 1000;
+    promptWindowSize = VSConfig.get("prompt window size") || 2000;
+    completionKeys = VSConfig.get("completion keys") || " ";
+    responsePreview = VSConfig.get("response preview");
+    responsePreviewMaxTokens = VSConfig.get("preview max tokens") || 50;
+    responsePreviewDelay = VSConfig.get("preview delay") || 0;
+    continueInline = VSConfig.get("continue inline");
+    apiTemperature = VSConfig.get("temperature") || 0.7;
+    keepAlive = VSConfig.get("keep alive") || 30;
+    topP = VSConfig.get("top p") || 1;
 }
 
 updateVSConfig();
 
-// No need for restart for any of these settings
 vscode.workspace.onDidChangeConfiguration(updateVSConfig);
 
-// Give model additional information
+
 function messageHeaderSub(document: vscode.TextDocument) {
-    const sub = apiMessageHeader
-        .replace("{LANG}", document.languageId)
-        .replace("{FILE_NAME}", document.fileName)
-        .replace("{PROJECT_NAME}", vscode.workspace.name || "Untitled");
-    return sub;
+    const sub = apiMessageHeader
+        .replace("{LANG}", document.languageId)
+        .replace("{FILE_NAME}", document.fileName)
+        .replace("{PROJECT_NAME}", vscode.workspace.name || "Untitled");
+    return sub;
 }
 
-// internal function for autocomplete, not directly exposed
+
+function getContextLines(document: vscode.TextDocument, position: vscode.Position): string {
+    const lines = [];
+    const lineCount = document.lineCount;
+
+    // Get more context for FIM
+    const startLine = Math.max(0, position.line - 10);
+    const endLine = Math.min(lineCount - 1, position.line + 10);
+
+    for (let i = startLine; i <= endLine; i++) {
+        lines.push(document.lineAt(i).text);
+    }
+
+    return lines.join("\n");
+}
+
 async function autocompleteCommand(textEditor: vscode.TextEditor, cancellationToken?: vscode.CancellationToken) {
     const document = textEditor.document;
     const position = textEditor.selection.active;
 
-    // Get the current prompt
-    let prompt = document.getText(new vscode.Range(document.lineAt(0).range.start, position));
-    prompt = prompt.substring(Math.max(0, prompt.length - promptWindowSize), prompt.length);
+    // Get the current context
+    const context = getContextLines(document, position);
 
-    // Replace {Prompt} with the extracted text from the document
-    const sub = messageHeaderSub(document).replace("{Prompt}", prompt);
+    // Split the context into prefix and suffix for FIM
+    const lines = context.split("\n");
+    const currentLineIndex = position.line - Math.max(0, position.line - 10);
+    const prefix = lines.slice(0, currentLineIndex + 1).join("\n");
+    const suffix = lines.slice(currentLineIndex + 1).join("\n");
+
+    // Create FIM prompt
+    const fimPrompt = `${prefix}${suffix}`;
+
+    // Replace {Prompt} with the FIM prompt
+    const sub = messageHeaderSub(document).replace("{PROMPT}", fimPrompt);
 
-    // Show a progress message
     vscode.window.withProgress(
         {
             location: vscode.ProgressLocation.Notification,
-            title: "Ollama Autocoder",
+            title: "Fabelous Autocoder",
             cancellable: true,
         },
         async (progress, progressCancellationToken) => {
             try {
                 progress.report({ message: "Starting model..." });
                 let axiosCancelPost: () => void;
                 const axiosCancelToken = new axios.CancelToken((c) => {
                     const cancelPost = function () {
                         c("Autocompletion request terminated by user cancel");
                     };
                     axiosCancelPost = cancelPost;
                     if (cancellationToken) cancellationToken.onCancellationRequested(cancelPost);
                     progressCancellationToken.onCancellationRequested(cancelPost);
@@ -84,160 +102,158 @@ async function autocompleteCommand(textEditor: vscode.TextEditor, cancellationTo
                     vscode.workspace.onDidCloseTextDocument(cancelPost);
                 });
 
-                // Make a request to the ollama.ai REST API
                 const response = await axios.post(apiEndpoint, {
-                    model: apiModel, // Change this to the model you want to use
-                    prompt: sub, // Use the modified sub string with the replaced prompt
+                    model: apiModel,
+                    prompt: sub,
                     stream: true,
                     raw: true,
                     options: {
                         num_predict: numPredict,
                         temperature: apiTemperature,
-                        stop: ["```"]
+                        stop: ["", "```"]
                     }
-                }, {
-                    cancelToken: axiosCancelToken,
-                    responseType: 'stream',
-                    headers: {
-                        'Authorization': apiAuthentication
-                    }
-                }
-                );
-                //tracker
-                let currentPosition = position;
-                response.data.on('data', async (d: Uint8Array) => {
-                    progress.report({ message: "Generating..." });
-                    // Check for user input (cancel)
-                    if (currentPosition.line != textEditor.selection.end.line || currentPosition.character != textEditor.selection.end.character) {
-                        axiosCancelPost(); // cancel axios => cancel finished promise => close notification
-                        return;
-                    }
-                    // Get a completion from the response
-                    const completion: string = JSON.parse(d.toString()).response;
-                    // lastToken = completion;
-
-                    if (completion === "") {
-                        return;
-                    }
-                    //complete edit for token
-                    const edit = new vscode.WorkspaceEdit();
-                    edit.insert(document.uri, currentPosition, completion);
-                    await vscode.workspace.applyEdit(edit);
-                    // Move the cursor to the end of the completion
-                    const completionLines = completion.split("\n");
-                    const newPosition = new vscode.Position(
-                        currentPosition.line + completionLines.length - 1,
-                        (completionLines.length > 1 ? 0 : currentPosition.character) + completionLines[completionLines.length - 1].length
-                    );
-                    const newSelection = new vscode.Selection(
-                        position,
-                        newPosition
-                    );
-                    currentPosition = newPosition;
-                    // completion bar
-                    progress.report({ message: "Generating...", increment: 1 / (numPredict / 100) });
-                    // move cursor
-                    textEditor.selection = newSelection;
-                });
-                // Keep cancel window available
-                const finished = new Promise((resolve) => {
-                    response.data.on('end', () => {
-                        progress.report({ message: "Fabelous completion finished." });
-                        resolve(true);
-                    });
-                    axiosCancelToken.promise.finally(() => { // prevent notification from freezing on user input cancel
-                        resolve(false);
-                    });
-                });
-                await finished;
-            } catch (err: any) {
-                // Show an error message
-                vscode.window.showErrorMessage(
-                    "Fabelous Autocoder encountered an error: " + err.message
-                );
-                console.log(err);
-            }
-        }
-    );
+                }, {
+                    cancelToken: axiosCancelToken,
+                    responseType: 'stream',
+                    headers: {
+                        'Authorization': apiAuthentication
+                    }
+                });
+
+                let currentPosition = position;
+                let completionText = "";
+                response.data.on('data', async (d: Uint8Array) => {
+                    progress.report({ message: "Generating..." });
+                    if (currentPosition.line != textEditor.selection.end.line || currentPosition.character != textEditor.selection.end.character) {
+                        axiosCancelPost();
+                        return;
+                    }
+                    const completion: string = JSON.parse(d.toString()).response;
+
+                    if (completion === "") {
+                        return;
+                    }
+
+                    completionText += completion;
+
+                    const edit = new vscode.WorkspaceEdit();
+                    edit.insert(document.uri, currentPosition, completion);
+                    await vscode.workspace.applyEdit(edit);
+
+                    const completionLines = completion.split("\n");
+                    const newPosition = new vscode.Position(
+                        currentPosition.line + completionLines.length - 1,
+                        (completionLines.length > 1 ? 0 : currentPosition.character) + completionLines[completionLines.length - 1].length
+                    );
+                    const newSelection = new vscode.Selection(
+                        position,
+                        newPosition
+                    );
+                    currentPosition = newPosition;
+
+                    progress.report({ message: "Generating...", increment: 1 / (numPredict / 100) });
+                    textEditor.selection = newSelection;
+                });
+
+                const finished = new Promise((resolve) => {
+                    response.data.on('end', () => {
+                        progress.report({ message: "Fabelous completion finished." });
+                        resolve(true);
+                    });
+                    axiosCancelToken.promise.finally(() => {
+                        resolve(false);
+                    });
+                });
+                await finished;
+
+                // Remove any remaining FIM tokens from the completion
+                completionText = completionText.replace(/||/g, '');
+                const finalEdit = new vscode.WorkspaceEdit();
+                finalEdit.replace(document.uri, new vscode.Range(position, currentPosition), completionText);
+                await vscode.workspace.applyEdit(finalEdit);
+
+            } catch (err: any) {
+                vscode.window.showErrorMessage(
+                    "Fabelous Autocoder encountered an error: " + err.message
+                );
+                console.log(err);
+            }
+        }
+    );
 }
 
-// Completion item provider callback for activate
+
 async function provideCompletionItems(document: vscode.TextDocument, position: vscode.Position, cancellationToken: vscode.CancellationToken) {
-    // Create a completion item
-    const item = new vscode.CompletionItem("Fabelous autocompletion");
-    // Set the insert text to a placeholder
-    item.insertText = new vscode.SnippetString('${1:}');
-    // Wait before initializing Ollama to reduce compute usage
-    if (responsePreview) await new Promise(resolve => setTimeout(resolve, responsePreviewDelay * 1000));
-    if (cancellationToken.isCancellationRequested) {
-        return [ item ];
-    }
-    // Set the label & inset text to a shortened, non-stream response
-    if (responsePreview) {
-        let prompt = document.getText(new vscode.Range(document.lineAt(0).range.start, position));
-        prompt = prompt.substring(Math.max(0, prompt.length - promptWindowSize), prompt.length);
-        const response_preview = await axios.post(apiEndpoint, {
-            model: apiModel, // Change this to the model you want to use
-            prompt: messageHeaderSub(document) + prompt,
-            stream: false,
-            raw: true,
-            options: {
-                num_predict: responsePreviewMaxTokens, // reduced compute max
-                temperature: apiTemperature,
-                stop: ['\n', '```'],
-                ...keepAlive && { keep_alive: keepAlive },
-                ...topP && { top_p: topP },
-            }
-        }, {
-            cancelToken: new axios.CancelToken((c) => {
-                const cancelPost = function () {
-                    c("Autocompletion request terminated by completion cancel");
-                };
-                cancellationToken.onCancellationRequested(cancelPost);
-            })
-        });
-        if (response_preview.data.response.trim() != "") { // default if empty
-            item.label = response_preview.data.response.trimStart(); // tended to add whitespace at the beginning
-            item.insertText = response_preview.data.response.trimStart();
-        }
-    }
-    // Set the documentation to a message
-    item.documentation = new vscode.MarkdownString('Press `Enter` to get an autocompletion from Ollama');
-    // Set the command to trigger the completion
-    if (continueInline || !responsePreview) item.command = {
-        command: 'fabelous-autocoder.autocomplete',
-        title: 'Fabelous Autocomplete',
-        arguments: [cancellationToken]
-    };
-    // Return the completion item
-    return [item];
-}
-// This method is called when extension is activated
-function activate(context: vscode.ExtensionContext) {
-    // Register a completion provider for JavaScript files
-    const completionProvider = vscode.languages.registerCompletionItemProvider("*", {
-        provideCompletionItems
-    },
-        ...completionKeys.split("")
-    );
-    // Register a command for getting a completion from Ollama through command/keybind
-    const externalAutocompleteCommand = vscode.commands.registerTextEditorCommand(
-        "fabelous-autocoder.autocomplete",
-        (textEditor, _, cancellationToken?) => {
-            // no cancellation token from here, but there is one from completionProvider
-            autocompleteCommand(textEditor, cancellationToken);
-        }
-    );
-    // Add the commands & completion provider to the context
-    context.subscriptions.push(completionProvider);
-    context.subscriptions.push(externalAutocompleteCommand);
+    const item = new vscode.CompletionItem("Fabelous autocompletion");
+    item.insertText = new vscode.SnippetString('${1:}');
+
+    if (responsePreview) await new Promise(resolve => setTimeout(resolve, responsePreviewDelay * 1000));
+    if (cancellationToken.isCancellationRequested) {
+        return [ item ];
+    }
+
+    if (responsePreview) {
+        const context = getContextLines(document, position);
+        const lines = context.split("\n");
+        const currentLineIndex = position.line - Math.max(0, position.line - 10);
+        const prefix = lines.slice(0, currentLineIndex + 1).join("\n");
+        const suffix = lines.slice(currentLineIndex + 1).join("\n");
+        const fimPrompt = `${prefix}${suffix}`;
+
+        const response_preview = await axios.post(apiEndpoint, {
+            model: apiModel,
+            prompt: messageHeaderSub(document) + fimPrompt,
+            stream: false,
+            raw: true,
+            options: {
+                num_predict: responsePreviewMaxTokens,
+                temperature: apiTemperature,
+                stop: ['', '\n', '```'],
+                ...keepAlive && { keep_alive: keepAlive },
+                ...topP && { top_p: topP },
+            }
+        }, {
+            cancelToken: new axios.CancelToken((c) => {
+                const cancelPost = function () {
+                    c("Autocompletion request terminated by completion cancel");
+                };
+                cancellationToken.onCancellationRequested(cancelPost);
+            })
+        });
+        if (response_preview.data.response.trim() != "") {
+            const previewText = response_preview.data.response.replace(/||/g, '').trimStart();
+            item.label = previewText;
+            item.insertText = previewText;
+        }
+    }
+
+    item.documentation = new vscode.MarkdownString('Press `Enter` to get an autocompletion from Fabelous Autocoder');
+    if (continueInline || !responsePreview) item.command = {
+        command: 'fabelous-autocoder.autocomplete',
+        title: 'Fabelous Autocomplete',
+        arguments: [cancellationToken]
+    };
+    return [item];
+}
+
+function activate(context: vscode.ExtensionContext) {
+    const completionProvider = vscode.languages.registerCompletionItemProvider("*", {
+        provideCompletionItems
+    },
+        ...completionKeys.split("")
+    );
+    const externalAutocompleteCommand = vscode.commands.registerTextEditorCommand(
+        "fabelous-autocoder.autocomplete",
+        (textEditor, _, cancellationToken?) => {
+            autocompleteCommand(textEditor, cancellationToken);
+        }
+    );
+    context.subscriptions.push(completionProvider);
+    context.subscriptions.push(externalAutocompleteCommand);
 }
-// This method is called when extension is deactivated
-// eslint-disable-next-line @typescript-eslint/no-empty-function
 function deactivate() { }
 
 module.exports = {
-    activate,
-    deactivate,
+    activate,
+    deactivate,
 };
\ No newline at end of file
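
Note on the FIM prompt and sentinel tokens: the updated autocompleteCommand and provideCompletionItems build a fill-in-the-middle prompt from a prefix (the lines above the cursor) and a suffix (the lines below it), send it to the model, and afterwards strip the model's FIM sentinel tokens out of the returned completion. The literal sentinel strings do not appear in this copy of the patch (the stop lists and the cleanup regexes show empty strings where they would sit), so the sketch below is only an illustration of that shape: it assumes StarCoder-style sentinels (<fim_prefix>, <fim_suffix>, <fim_middle>) and hypothetical helper names, since the real tokens depend on the fabelous-coder model.

    // Sketch only: the sentinel strings are assumed (StarCoder-style), not taken from the patch.
    const FIM_PREFIX = "<fim_prefix>";   // assumption
    const FIM_SUFFIX = "<fim_suffix>";   // assumption
    const FIM_MIDDLE = "<fim_middle>";   // assumption

    // Assemble the fill-in-the-middle prompt from the text before and after the cursor.
    function buildFimPrompt(prefix: string, suffix: string): string {
        return `${FIM_PREFIX}${prefix}${FIM_SUFFIX}${suffix}${FIM_MIDDLE}`;
    }

    // Remove any sentinel tokens the model echoes back into its completion.
    function stripFimTokens(completion: string): string {
        return completion
            .split(FIM_PREFIX).join("")
            .split(FIM_SUFFIX).join("")
            .split(FIM_MIDDLE).join("");
    }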