2024-01-06 23:33:17 +00:00
|
|
|
// Original script was GPT4 but it has been deeply Ship of Theseused.
|
2023-12-20 09:36:55 +00:00
|
|
|
|
|
|
|
import * as vscode from "vscode";
|
2023-12-21 00:27:42 +00:00
|
|
|
import axios from "axios";
|
|
|
|
|
|
|
|
// Mutable module-level configuration state. Populated from the
// "ollama-autocoder" workspace settings by updateVSConfig() below and
// refreshed on every configuration change.
let VSConfig: vscode.WorkspaceConfiguration;
// URL of the Ollama generate REST endpoint.
let apiEndpoint: string;
// Name of the Ollama model requested for completions.
let apiModel: string;
// Optional system message; set to undefined when the setting is
// "DEFAULT" or when raw input is enabled (see updateVSConfig).
let apiSystemMessage: string | undefined;
// Maximum number of tokens the model may generate per completion.
let numPredict: number;
// Maximum number of characters of preceding text sent as the prompt.
let promptWindowSize: number;
// When truthy, the prompt is sent with raw: true (no prompt templating).
let rawInput: boolean | undefined;
// Characters that trigger the completion provider (split per character).
let completionKeys: string;
// Whether to fetch a short non-streamed preview for the completion label.
let responsePreview: boolean | undefined;
// Token cap for the preview request (kept small to reduce compute).
let responsePreviewMaxTokens: number;
// Seconds to wait before issuing the preview request.
let responsePreviewDelay: number;
// Whether accepting a previewed completion continues generating inline.
let continueInline: boolean | undefined;
|
2023-12-21 00:27:42 +00:00
|
|
|
|
|
|
|
function updateVSConfig() {
|
|
|
|
VSConfig = vscode.workspace.getConfiguration("ollama-autocoder");
|
2024-01-09 13:32:24 +00:00
|
|
|
apiEndpoint = VSConfig.get("endpoint") || "http://localhost:11434/api/generate";
|
2023-12-21 06:33:07 +00:00
|
|
|
apiModel = VSConfig.get("model") || "openhermes2.5-mistral:7b-q4_K_M"; // The model I tested with
|
2023-12-21 06:54:03 +00:00
|
|
|
apiSystemMessage = VSConfig.get("system message");
|
|
|
|
numPredict = VSConfig.get("max tokens predicted") || 500;
|
|
|
|
promptWindowSize = VSConfig.get("prompt window size") || 2000;
|
|
|
|
rawInput = VSConfig.get("raw input");
|
2024-01-06 21:49:41 +00:00
|
|
|
completionKeys = VSConfig.get("completion keys") || " ";
|
2024-01-06 23:33:17 +00:00
|
|
|
responsePreview = VSConfig.get("response preview");
|
2024-01-09 02:21:32 +00:00
|
|
|
responsePreviewMaxTokens = VSConfig.get("preview max tokens") || 10;
|
2024-01-28 02:22:47 +00:00
|
|
|
responsePreviewDelay = VSConfig.get("preview delay") || 0; // Must be || 0 instead of || [default] because of truthy
|
|
|
|
continueInline = VSConfig.get("continue inline");
|
2023-12-21 00:27:42 +00:00
|
|
|
|
|
|
|
if (apiSystemMessage == "DEFAULT" || rawInput) apiSystemMessage = undefined;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Load settings once at activation time.
updateVSConfig();

// No need for restart for any of these settings
vscode.workspace.onDidChangeConfiguration(updateVSConfig);
|
2023-12-20 09:36:55 +00:00
|
|
|
|
2023-12-21 06:33:07 +00:00
|
|
|
// internal function for autocomplete, not directly exposed
|
2023-12-27 01:04:50 +00:00
|
|
|
async function autocompleteCommand(textEditor: vscode.TextEditor, cancellationToken?: vscode.CancellationToken) {
|
|
|
|
const document = textEditor.document;
|
|
|
|
const position = textEditor.selection.active;
|
|
|
|
|
2023-12-21 06:33:07 +00:00
|
|
|
// Get the current prompt
|
|
|
|
let prompt = document.getText(new vscode.Range(document.lineAt(0).range.start, position));
|
|
|
|
prompt = prompt.substring(Math.max(0, prompt.length - promptWindowSize), prompt.length);
|
|
|
|
|
2023-12-20 22:06:47 +00:00
|
|
|
// Show a progress message
|
|
|
|
vscode.window.withProgress(
|
|
|
|
{
|
|
|
|
location: vscode.ProgressLocation.Notification,
|
2023-12-21 00:27:42 +00:00
|
|
|
title: "Ollama Autocoder",
|
2023-12-20 22:06:47 +00:00
|
|
|
cancellable: true,
|
|
|
|
},
|
|
|
|
async (progress, progressCancellationToken) => {
|
|
|
|
try {
|
2023-12-21 00:27:42 +00:00
|
|
|
progress.report({ message: "Starting model..." });
|
|
|
|
|
2023-12-27 01:04:50 +00:00
|
|
|
let axiosCancelPost: () => void;
|
|
|
|
const axiosCancelToken = new axios.CancelToken((c) => {
|
|
|
|
const cancelPost = function () {
|
|
|
|
c("Autocompletion request terminated by user cancel");
|
|
|
|
};
|
|
|
|
axiosCancelPost = cancelPost;
|
|
|
|
if (cancellationToken) cancellationToken.onCancellationRequested(cancelPost);
|
|
|
|
progressCancellationToken.onCancellationRequested(cancelPost);
|
|
|
|
vscode.workspace.onDidCloseTextDocument(cancelPost);
|
|
|
|
});
|
|
|
|
|
2023-12-20 22:06:47 +00:00
|
|
|
// Make a request to the ollama.ai REST API
|
|
|
|
const response = await axios.post(apiEndpoint, {
|
|
|
|
model: apiModel, // Change this to the model you want to use
|
|
|
|
prompt: prompt,
|
|
|
|
stream: true,
|
|
|
|
system: apiSystemMessage,
|
2023-12-21 00:27:42 +00:00
|
|
|
raw: rawInput,
|
2023-12-20 22:06:47 +00:00
|
|
|
options: {
|
|
|
|
num_predict: numPredict
|
2023-12-21 00:27:42 +00:00
|
|
|
}
|
2023-12-20 22:06:47 +00:00
|
|
|
}, {
|
2023-12-27 01:04:50 +00:00
|
|
|
cancelToken: axiosCancelToken,
|
2023-12-20 22:06:47 +00:00
|
|
|
responseType: 'stream'
|
|
|
|
}
|
|
|
|
);
|
|
|
|
|
|
|
|
//tracker
|
2023-12-27 01:04:50 +00:00
|
|
|
let oldPosition = position;
|
2023-12-20 22:06:47 +00:00
|
|
|
let currentPosition = position;
|
2023-12-27 01:04:50 +00:00
|
|
|
let lastToken = "";
|
|
|
|
|
2023-12-20 22:06:47 +00:00
|
|
|
|
|
|
|
response.data.on('data', async (d: Uint8Array) => {
|
2023-12-21 00:27:42 +00:00
|
|
|
progress.report({ message: "Generating..." });
|
|
|
|
|
2023-12-27 01:04:50 +00:00
|
|
|
// Check for user input (cancel)
|
|
|
|
if (lastToken != "") {
|
|
|
|
const lastInput = document.getText(new vscode.Range(oldPosition, textEditor.selection.active));
|
|
|
|
if (lastInput !== lastToken) {
|
|
|
|
axiosCancelPost(); // cancel axios => cancel finished promise => close notification
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-12-20 22:06:47 +00:00
|
|
|
// Get a completion from the response
|
|
|
|
const completion: string = JSON.parse(d.toString()).response;
|
2023-12-27 01:04:50 +00:00
|
|
|
lastToken = completion;
|
2023-12-20 22:06:47 +00:00
|
|
|
|
|
|
|
//complete edit for token
|
|
|
|
const edit = new vscode.WorkspaceEdit();
|
2023-12-21 02:22:39 +00:00
|
|
|
const range = new vscode.Position(
|
2023-12-20 22:06:47 +00:00
|
|
|
currentPosition.line,
|
|
|
|
currentPosition.character
|
|
|
|
);
|
2023-12-21 02:22:39 +00:00
|
|
|
edit.insert(document.uri, range, completion);
|
2023-12-20 22:06:47 +00:00
|
|
|
await vscode.workspace.applyEdit(edit);
|
|
|
|
|
|
|
|
// Move the cursor to the end of the completion
|
|
|
|
const completionLines = completion.split("\n");
|
2023-12-21 02:22:39 +00:00
|
|
|
const newPosition = new vscode.Position(
|
|
|
|
currentPosition.line + completionLines.length - 1,
|
|
|
|
(completionLines.length > 1 ? 0 : currentPosition.character) + completionLines[completionLines.length - 1].length
|
2023-12-20 22:06:47 +00:00
|
|
|
);
|
|
|
|
const newSelection = new vscode.Selection(
|
|
|
|
newPosition,
|
|
|
|
newPosition
|
|
|
|
);
|
2023-12-27 01:04:50 +00:00
|
|
|
oldPosition = currentPosition;
|
2023-12-20 22:06:47 +00:00
|
|
|
currentPosition = newPosition;
|
|
|
|
|
|
|
|
// completion bar
|
2023-12-21 02:22:39 +00:00
|
|
|
progress.report({ message: "Generating...", increment: 1 / (numPredict / 100) });
|
2023-12-20 22:06:47 +00:00
|
|
|
|
|
|
|
// move cursor
|
2023-12-21 06:33:07 +00:00
|
|
|
const editor = vscode.window.activeTextEditor;
|
|
|
|
if (editor) editor.selection = newSelection;
|
2023-12-20 22:06:47 +00:00
|
|
|
});
|
|
|
|
|
|
|
|
// Keep cancel window available
|
|
|
|
const finished = new Promise((resolve) => {
|
|
|
|
response.data.on('end', () => {
|
|
|
|
progress.report({ message: "Ollama completion finished." });
|
|
|
|
resolve(true);
|
|
|
|
});
|
2023-12-27 01:04:50 +00:00
|
|
|
axiosCancelToken.promise.finally(() => { // prevent notification from freezing on user input cancel
|
|
|
|
resolve(false);
|
|
|
|
});
|
2023-12-20 22:06:47 +00:00
|
|
|
});
|
|
|
|
|
|
|
|
await finished;
|
|
|
|
|
|
|
|
} catch (err: any) {
|
|
|
|
// Show an error message
|
|
|
|
vscode.window.showErrorMessage(
|
|
|
|
"Ollama encountered an error: " + err.message
|
|
|
|
);
|
2023-12-21 00:27:42 +00:00
|
|
|
console.log(err);
|
2023-12-20 22:06:47 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2023-12-21 00:27:42 +00:00
|
|
|
// This method is called when extension is activated
|
2023-12-20 09:36:55 +00:00
|
|
|
function activate(context: vscode.ExtensionContext) {
|
|
|
|
// Register a completion provider for JavaScript files
|
2023-12-21 06:33:07 +00:00
|
|
|
const completionProvider = vscode.languages.registerCompletionItemProvider("*", {
|
2024-01-06 23:33:17 +00:00
|
|
|
async provideCompletionItems(document, position, cancellationToken) {
|
|
|
|
|
2023-12-21 06:33:07 +00:00
|
|
|
// Create a completion item
|
|
|
|
const item = new vscode.CompletionItem("Autocomplete with Ollama");
|
2024-01-06 23:33:17 +00:00
|
|
|
|
2023-12-21 06:33:07 +00:00
|
|
|
// Set the insert text to a placeholder
|
|
|
|
item.insertText = new vscode.SnippetString('${1:}');
|
2024-01-06 23:33:17 +00:00
|
|
|
|
2024-01-28 02:22:47 +00:00
|
|
|
// Wait before initializing Ollama to reduce compute usage
|
|
|
|
if (responsePreview) await new Promise(resolve => setTimeout(resolve, responsePreviewDelay * 1000));
|
|
|
|
if (cancellationToken.isCancellationRequested) {
|
|
|
|
return [ item ];
|
|
|
|
}
|
|
|
|
|
2024-01-06 23:33:17 +00:00
|
|
|
// Set the label & inset text to a shortened, non-stream response
|
|
|
|
if (responsePreview) {
|
|
|
|
let prompt = document.getText(new vscode.Range(document.lineAt(0).range.start, position));
|
|
|
|
prompt = prompt.substring(Math.max(0, prompt.length - promptWindowSize), prompt.length);
|
|
|
|
const response_preview = await axios.post(apiEndpoint, {
|
|
|
|
model: apiModel, // Change this to the model you want to use
|
|
|
|
prompt: prompt,
|
|
|
|
stream: false,
|
|
|
|
system: apiSystemMessage,
|
2024-01-09 02:21:32 +00:00
|
|
|
raw: rawInput,
|
2024-01-06 23:33:17 +00:00
|
|
|
options: {
|
2024-01-09 02:21:32 +00:00
|
|
|
num_predict: responsePreviewMaxTokens, // reduced compute max
|
2024-01-06 23:33:17 +00:00
|
|
|
stop: ['\n']
|
|
|
|
}
|
|
|
|
}, {
|
|
|
|
cancelToken: new axios.CancelToken((c) => {
|
|
|
|
const cancelPost = function () {
|
|
|
|
c("Autocompletion request terminated by completion cancel");
|
|
|
|
};
|
|
|
|
cancellationToken.onCancellationRequested(cancelPost);
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
if (response_preview.data.response.trim() != "") { // default if empty
|
|
|
|
item.label = response_preview.data.response.trimStart(); // tended to add whitespace at the beginning
|
|
|
|
item.insertText = response_preview.data.response.trimStart();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-12-21 06:33:07 +00:00
|
|
|
// Set the documentation to a message
|
2024-01-06 23:33:17 +00:00
|
|
|
item.documentation = new vscode.MarkdownString('Press `Enter` to get an autocompletion from Ollama');
|
2023-12-21 06:33:07 +00:00
|
|
|
// Set the command to trigger the completion
|
2024-01-28 02:22:47 +00:00
|
|
|
if (continueInline || !responsePreview) item.command = {
|
2023-12-27 01:04:50 +00:00
|
|
|
command: 'ollama-autocoder.autocomplete',
|
|
|
|
title: 'Autocomplete with Ollama',
|
|
|
|
arguments: [cancellationToken]
|
2023-12-21 06:33:07 +00:00
|
|
|
};
|
|
|
|
// Return the completion item
|
|
|
|
return [item];
|
2023-12-20 09:36:55 +00:00
|
|
|
},
|
|
|
|
},
|
2024-01-06 21:49:41 +00:00
|
|
|
...completionKeys.split("")
|
2023-12-20 09:36:55 +00:00
|
|
|
);
|
|
|
|
|
2023-12-21 06:33:07 +00:00
|
|
|
// Register a command for getting a completion from Ollama through command/keybind
|
|
|
|
const externalAutocompleteCommand = vscode.commands.registerTextEditorCommand(
|
|
|
|
"ollama-autocoder.autocomplete",
|
2023-12-27 01:04:50 +00:00
|
|
|
(textEditor, _, cancellationToken?) => {
|
|
|
|
// no cancellation token from here, but there is one from completionProvider
|
|
|
|
autocompleteCommand(textEditor, cancellationToken);
|
2023-12-21 06:33:07 +00:00
|
|
|
}
|
|
|
|
);
|
|
|
|
|
2023-12-21 06:43:22 +00:00
|
|
|
// Add the commands & completion provider to the context
|
|
|
|
context.subscriptions.push(completionProvider);
|
2023-12-21 06:33:07 +00:00
|
|
|
context.subscriptions.push(externalAutocompleteCommand);
|
|
|
|
|
2023-12-20 09:36:55 +00:00
|
|
|
}
|
|
|
|
|
2023-12-21 00:27:42 +00:00
|
|
|
// This method is called when extension is deactivated
function deactivate() { }

// CommonJS export of the VS Code extension entry points.
module.exports = {
	activate,
	deactivate,
};
|