updated code

This commit is contained in:
Falko Victor Habel 2024-08-13 22:22:53 +02:00
parent 68b7eda25c
commit 74a467bbd4
2 changed files with 191 additions and 175 deletions

View File

@@ -2,7 +2,7 @@
     "name": "fabelous-autocoder",
     "displayName": "Fabelous Autocoder",
     "description": "A simple to use Ollama autocompletion engine with options exposed and streaming functionality",
-    "version": "0.0.4",
+    "version": "0.1.0",
     "icon": "icon.png",
     "publisher": "fabel",
     "license": "CC BY-ND 4.0",

View File

@@ -1,5 +1,3 @@
-// Original script was GPT4 but it has been deeply Ship of Theseused.
 import * as vscode from "vscode";
 import axios from "axios";
@@ -19,53 +17,73 @@ let continueInline: boolean | undefined;
 let keepAlive: number | undefined;
 let topP: number | undefined;
 function updateVSConfig() {
     VSConfig = vscode.workspace.getConfiguration("fabelous-autocoder");
     apiEndpoint = VSConfig.get("endpoint") || "http://localhost:11434/api/generate";
     apiAuthentication = VSConfig.get("authentication") || "";
-    apiModel = VSConfig.get("model") || "openhermes2.5-mistral:7b-q4_K_M"; // The model I tested with
+    apiModel = VSConfig.get("model") || "fabelous-coder:latest"; // Updated to use FIM model
     apiMessageHeader = VSConfig.get("message header") || "";
     numPredict = VSConfig.get("max tokens predicted") || 1000;
     promptWindowSize = VSConfig.get("prompt window size") || 2000;
     completionKeys = VSConfig.get("completion keys") || " ";
     responsePreview = VSConfig.get("response preview");
     responsePreviewMaxTokens = VSConfig.get("preview max tokens") || 50;
-    responsePreviewDelay = VSConfig.get("preview delay") || 0; // Must be || 0 instead of || [default] because of truthy
+    responsePreviewDelay = VSConfig.get("preview delay") || 0;
     continueInline = VSConfig.get("continue inline");
     apiTemperature = VSConfig.get("temperature") || 0.7;
     keepAlive = VSConfig.get("keep alive") || 30;
     topP = VSConfig.get("top p") || 1;
 }
 updateVSConfig();
-// No need for restart for any of these settings
 vscode.workspace.onDidChangeConfiguration(updateVSConfig);
-// Give model additional information
 function messageHeaderSub(document: vscode.TextDocument) {
     const sub = apiMessageHeader
         .replace("{LANG}", document.languageId)
         .replace("{FILE_NAME}", document.fileName)
         .replace("{PROJECT_NAME}", vscode.workspace.name || "Untitled");
     return sub;
 }
-// internal function for autocomplete, not directly exposed
+function getContextLines(document: vscode.TextDocument, position: vscode.Position): string {
+    const lines = [];
+    const lineCount = document.lineCount;
+    // Get more context for FIM
+    const startLine = Math.max(0, position.line - 10);
+    const endLine = Math.min(lineCount - 1, position.line + 10);
+    for (let i = startLine; i <= endLine; i++) {
+        lines.push(document.lineAt(i).text);
+    }
+    return lines.join("\n");
+}
 async function autocompleteCommand(textEditor: vscode.TextEditor, cancellationToken?: vscode.CancellationToken) {
     const document = textEditor.document;
     const position = textEditor.selection.active;
-    // Get the current prompt
-    let prompt = document.getText(new vscode.Range(document.lineAt(0).range.start, position));
-    prompt = prompt.substring(Math.max(0, prompt.length - promptWindowSize), prompt.length);
-    // Replace {Prompt} with the extracted text from the document
-    const sub = messageHeaderSub(document).replace("{Prompt}", prompt);
-    // Show a progress message
+    // Get the current context
+    const context = getContextLines(document, position);
+    // Split the context into prefix and suffix for FIM
+    const lines = context.split("\n");
+    const currentLineIndex = position.line - Math.max(0, position.line - 10);
+    const prefix = lines.slice(0, currentLineIndex + 1).join("\n");
+    const suffix = lines.slice(currentLineIndex + 1).join("\n");
+    // Create FIM prompt
+    const fimPrompt = `<fim_prefix>${prefix}<fim_suffix>${suffix}<fim_middle>`;
+    // Replace {PROMPT} with the FIM prompt
+    const sub = messageHeaderSub(document).replace("{PROMPT}", fimPrompt);
     vscode.window.withProgress(
         {
             location: vscode.ProgressLocation.Notification,
-            title: "Ollama Autocoder",
+            title: "Fabelous Autocoder",
             cancellable: true,
         },
         async (progress, progressCancellationToken) => {
@@ -84,160 +102,158 @@ async function autocompleteCommand(textEditor: vscode.TextEditor, cancellationTo
             vscode.workspace.onDidCloseTextDocument(cancelPost);
         });
-            // Make a request to the ollama.ai REST API
             const response = await axios.post(apiEndpoint, {
-                model: apiModel, // Change this to the model you want to use
-                prompt: sub, // Use the modified sub string with the replaced prompt
+                model: apiModel,
+                prompt: sub,
                 stream: true,
                 raw: true,
                 options: {
                     num_predict: numPredict,
                     temperature: apiTemperature,
-                    stop: ["```"]
+                    stop: ["<fim_suffix>", "```"]
                 }
             }, {
                 cancelToken: axiosCancelToken,
                 responseType: 'stream',
                 headers: {
                     'Authorization': apiAuthentication
                 }
-            }
-            );
-            //tracker
-            let currentPosition = position;
-            response.data.on('data', async (d: Uint8Array) => {
-                progress.report({ message: "Generating..." });
-                // Check for user input (cancel)
-                if (currentPosition.line != textEditor.selection.end.line || currentPosition.character != textEditor.selection.end.character) {
-                    axiosCancelPost(); // cancel axios => cancel finished promise => close notification
-                    return;
-                }
-                // Get a completion from the response
-                const completion: string = JSON.parse(d.toString()).response;
-                // lastToken = completion;
-                if (completion === "") {
-                    return;
-                }
-                //complete edit for token
-                const edit = new vscode.WorkspaceEdit();
-                edit.insert(document.uri, currentPosition, completion);
-                await vscode.workspace.applyEdit(edit);
-                // Move the cursor to the end of the completion
-                const completionLines = completion.split("\n");
-                const newPosition = new vscode.Position(
-                    currentPosition.line + completionLines.length - 1,
-                    (completionLines.length > 1 ? 0 : currentPosition.character) + completionLines[completionLines.length - 1].length
-                );
-                const newSelection = new vscode.Selection(
-                    position,
-                    newPosition
-                );
-                currentPosition = newPosition;
-                // completion bar
-                progress.report({ message: "Generating...", increment: 1 / (numPredict / 100) });
-                // move cursor
-                textEditor.selection = newSelection;
-            });
-            // Keep cancel window available
-            const finished = new Promise((resolve) => {
-                response.data.on('end', () => {
-                    progress.report({ message: "Fabelous completion finished." });
-                    resolve(true);
-                });
-                axiosCancelToken.promise.finally(() => { // prevent notification from freezing on user input cancel
-                    resolve(false);
-                });
-            });
-            await finished;
-        } catch (err: any) {
-            // Show an error message
-            vscode.window.showErrorMessage(
-                "Fabelous Autocoder encountered an error: " + err.message
-            );
-            console.log(err);
-        }
-    }
-    );
+            });
+            let currentPosition = position;
+            let completionText = "";
+            response.data.on('data', async (d: Uint8Array) => {
+                progress.report({ message: "Generating..." });
+                if (currentPosition.line != textEditor.selection.end.line || currentPosition.character != textEditor.selection.end.character) {
+                    axiosCancelPost();
+                    return;
+                }
+                const completion: string = JSON.parse(d.toString()).response;
+                if (completion === "") {
+                    return;
+                }
+                completionText += completion;
+                const edit = new vscode.WorkspaceEdit();
+                edit.insert(document.uri, currentPosition, completion);
+                await vscode.workspace.applyEdit(edit);
+                const completionLines = completion.split("\n");
+                const newPosition = new vscode.Position(
+                    currentPosition.line + completionLines.length - 1,
+                    (completionLines.length > 1 ? 0 : currentPosition.character) + completionLines[completionLines.length - 1].length
+                );
+                const newSelection = new vscode.Selection(
+                    position,
+                    newPosition
+                );
+                currentPosition = newPosition;
+                progress.report({ message: "Generating...", increment: 1 / (numPredict / 100) });
+                textEditor.selection = newSelection;
+            });
+            const finished = new Promise((resolve) => {
+                response.data.on('end', () => {
+                    progress.report({ message: "Fabelous completion finished." });
+                    resolve(true);
+                });
+                axiosCancelToken.promise.finally(() => {
+                    resolve(false);
+                });
+            });
+            await finished;
+            // Remove any remaining FIM tokens from the completion
+            completionText = completionText.replace(/<fim_middle>|<fim_suffix>|<fim_prefix>/g, '');
+            const finalEdit = new vscode.WorkspaceEdit();
+            finalEdit.replace(document.uri, new vscode.Range(position, currentPosition), completionText);
+            await vscode.workspace.applyEdit(finalEdit);
+        } catch (err: any) {
+            vscode.window.showErrorMessage(
+                "Fabelous Autocoder encountered an error: " + err.message
+            );
+            console.log(err);
+        }
+    }
+    );
 }
-// Completion item provider callback for activate
 async function provideCompletionItems(document: vscode.TextDocument, position: vscode.Position, cancellationToken: vscode.CancellationToken) {
-    // Create a completion item
     const item = new vscode.CompletionItem("Fabelous autocompletion");
-    // Set the insert text to a placeholder
     item.insertText = new vscode.SnippetString('${1:}');
-    // Wait before initializing Ollama to reduce compute usage
     if (responsePreview) await new Promise(resolve => setTimeout(resolve, responsePreviewDelay * 1000));
     if (cancellationToken.isCancellationRequested) {
         return [ item ];
     }
-    // Set the label & inset text to a shortened, non-stream response
     if (responsePreview) {
-        let prompt = document.getText(new vscode.Range(document.lineAt(0).range.start, position));
-        prompt = prompt.substring(Math.max(0, prompt.length - promptWindowSize), prompt.length);
+        const context = getContextLines(document, position);
+        const lines = context.split("\n");
+        const currentLineIndex = position.line - Math.max(0, position.line - 10);
+        const prefix = lines.slice(0, currentLineIndex + 1).join("\n");
+        const suffix = lines.slice(currentLineIndex + 1).join("\n");
+        const fimPrompt = `<fim_prefix>${prefix}<fim_suffix>${suffix}<fim_middle>`;
         const response_preview = await axios.post(apiEndpoint, {
-            model: apiModel, // Change this to the model you want to use
-            prompt: messageHeaderSub(document) + prompt,
+            model: apiModel,
+            prompt: messageHeaderSub(document) + fimPrompt,
             stream: false,
             raw: true,
             options: {
-                num_predict: responsePreviewMaxTokens, // reduced compute max
+                num_predict: responsePreviewMaxTokens,
                 temperature: apiTemperature,
-                stop: ['\n', '```'],
+                stop: ['<fim_suffix>', '\n', '```'],
                 ...keepAlive && { keep_alive: keepAlive },
                 ...topP && { top_p: topP },
             }
         }, {
            cancelToken: new axios.CancelToken((c) => {
                const cancelPost = function () {
                    c("Autocompletion request terminated by completion cancel");
                };
                cancellationToken.onCancellationRequested(cancelPost);
            })
        });
-        if (response_preview.data.response.trim() != "") { // default if empty
-            item.label = response_preview.data.response.trimStart(); // tended to add whitespace at the beginning
-            item.insertText = response_preview.data.response.trimStart();
+        if (response_preview.data.response.trim() != "") {
+            const previewText = response_preview.data.response.replace(/<fim_middle>|<fim_suffix>|<fim_prefix>/g, '').trimStart();
+            item.label = previewText;
+            item.insertText = previewText;
         }
     }
-    // Set the documentation to a message
-    item.documentation = new vscode.MarkdownString('Press `Enter` to get an autocompletion from Ollama');
-    // Set the command to trigger the completion
+    item.documentation = new vscode.MarkdownString('Press `Enter` to get an autocompletion from Fabelous Autocoder');
     if (continueInline || !responsePreview) item.command = {
         command: 'fabelous-autocoder.autocomplete',
         title: 'Fabelous Autocomplete',
         arguments: [cancellationToken]
     };
-    // Return the completion item
     return [item];
 }
-// This method is called when extension is activated
 function activate(context: vscode.ExtensionContext) {
-    // Register a completion provider for JavaScript files
     const completionProvider = vscode.languages.registerCompletionItemProvider("*", {
         provideCompletionItems
     },
         ...completionKeys.split("")
     );
-    // Register a command for getting a completion from Ollama through command/keybind
     const externalAutocompleteCommand = vscode.commands.registerTextEditorCommand(
         "fabelous-autocoder.autocomplete",
         (textEditor, _, cancellationToken?) => {
-            // no cancellation token from here, but there is one from completionProvider
             autocompleteCommand(textEditor, cancellationToken);
         }
     );
-    // Add the commands & completion provider to the context
     context.subscriptions.push(completionProvider);
     context.subscriptions.push(externalAutocompleteCommand);
 }
-// This method is called when extension is deactivated
-// eslint-disable-next-line @typescript-eslint/no-empty-function
 function deactivate() { }
 module.exports = {
     activate,
     deactivate,
 };
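
For readers unfamiliar with fill-in-the-middle prompting, below is a minimal standalone sketch of the prompt assembly this commit introduces, following the same logic as getContextLines and autocompleteCommand: take up to 10 lines of context on each side of the cursor, treat everything up to and including the cursor line as the prefix and the rest as the suffix, and wrap both in the <fim_prefix>/<fim_suffix>/<fim_middle> tokens that the fabelous-coder model is expected to consume. The helper name buildFimPrompt and the sample input are illustrative only and are not part of the extension source.

// Standalone sketch mirroring the commit's FIM prompt assembly (hypothetical helper,
// not part of the extension).
function buildFimPrompt(documentLines: string[], cursorLine: number): string {
    // Same windowing as getContextLines: at most 10 lines before and after the cursor line.
    const startLine = Math.max(0, cursorLine - 10);
    const endLine = Math.min(documentLines.length - 1, cursorLine + 10);
    const window = documentLines.slice(startLine, endLine + 1);

    // Everything up to and including the cursor line is the prefix, the rest is the suffix.
    const currentLineIndex = cursorLine - startLine;
    const prefix = window.slice(0, currentLineIndex + 1).join("\n");
    const suffix = window.slice(currentLineIndex + 1).join("\n");

    return `<fim_prefix>${prefix}<fim_suffix>${suffix}<fim_middle>`;
}

// Hypothetical usage: cursor on line 1 (the empty body) of a three-line file.
const exampleLines = ["function add(a: number, b: number) {", "    ", "}"];
console.log(buildFimPrompt(exampleLines, 1));
// -> <fim_prefix>function add(a: number, b: number) {\n    <fim_suffix>}<fim_middle>

During generation the extension stops on the <fim_suffix> token, and once the stream ends it strips any leftover FIM tokens from the accumulated completionText before replacing the inserted range, as the diff above shows.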