New update: replace the {Prompt} placeholder with text extracted from the document

Falko Victor Habel 2024-07-04 08:46:02 +02:00
parent 71319b43ad
commit 68b7eda25c
2 changed files with 47 additions and 39 deletions
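The core of the change: the prompt text extracted from the document (everything from the start of the file to the cursor, trimmed to the prompt window) now fills a {Prompt} placeholder in the header returned by messageHeaderSub, instead of being appended after the header. A minimal sketch of that substitution follows; the header template here is made up, and only the {Prompt} placeholder name is taken from the diff.

// Hypothetical header template -- the real one comes from messageHeaderSub(document).
const headerTemplate = "Complete the following code:\n{Prompt}";

// Stand-in for the text between the start of the document and the cursor,
// already trimmed to promptWindowSize characters.
const promptText = "function add(a: number, b: number) {";

// New behaviour: fill the placeholder rather than concatenating header + prompt.
const requestPrompt = headerTemplate.replace("{Prompt}", promptText);
// requestPrompt === "Complete the following code:\nfunction add(a: number, b: number) {"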

package.json

@@ -2,7 +2,7 @@
"name": "fabelous-autocoder",
"displayName": "Fabelous Autocoder",
"description": "A simple to use Ollama autocompletion engine with options exposed and streaming functionality",
"version": "0.0.35",
"version": "0.0.4",
"icon": "icon.png",
"publisher": "fabel",
"license": "CC BY-ND 4.0",


@@ -51,42 +51,50 @@ function messageHeaderSub(document: vscode.TextDocument) {
}
// internal function for autocomplete, not directly exposed
async function autocompleteCommand(textEditor: vscode.TextEditor, cancellationToken?: vscode.CancellationToken) {
const document = textEditor.document;
const position = textEditor.selection.active;
// Get the current prompt
let prompt = document.getText(new vscode.Range(document.lineAt(0).range.start, position));
prompt = prompt.substring(Math.max(0, prompt.length - promptWindowSize), prompt.length);
// Show a progress message
vscode.window.withProgress(
{
location: vscode.ProgressLocation.Notification,
title: "Ollama Autocoder",
cancellable: true,
},
async (progress, progressCancellationToken) => {
try {
progress.report({ message: "Starting model..." });
let axiosCancelPost: () => void;
const axiosCancelToken = new axios.CancelToken((c) => {
const cancelPost = function () {
c("Autocompletion request terminated by user cancel");
};
axiosCancelPost = cancelPost;
if (cancellationToken) cancellationToken.onCancellationRequested(cancelPost);
progressCancellationToken.onCancellationRequested(cancelPost);
vscode.workspace.onDidCloseTextDocument(cancelPost);
});
// Make a request to the ollama.ai REST API
const response = await axios.post(apiEndpoint, {
model: apiModel, // Change this to the model you want to use
prompt: messageHeaderSub(textEditor.document) + prompt,
stream: true,
raw: true,
options: {
num_predict: numPredict,
temperature: apiTemperature,
stop: ["```"]
}
const document = textEditor.document;
const position = textEditor.selection.active;
// Get the current prompt
let prompt = document.getText(new vscode.Range(document.lineAt(0).range.start, position));
prompt = prompt.substring(Math.max(0, prompt.length - promptWindowSize), prompt.length);
// Replace {Prompt} with the extracted text from the document
const sub = messageHeaderSub(document).replace("{Prompt}", prompt);
// Show a progress message
vscode.window.withProgress(
{
location: vscode.ProgressLocation.Notification,
title: "Ollama Autocoder",
cancellable: true,
},
async (progress, progressCancellationToken) => {
try {
progress.report({ message: "Starting model..." });
let axiosCancelPost: () => void;
const axiosCancelToken = new axios.CancelToken((c) => {
const cancelPost = function () {
c("Autocompletion request terminated by user cancel");
};
axiosCancelPost = cancelPost;
if (cancellationToken) cancellationToken.onCancellationRequested(cancelPost);
progressCancellationToken.onCancellationRequested(cancelPost);
vscode.workspace.onDidCloseTextDocument(cancelPost);
});
// Make a request to the ollama.ai REST API
const response = await axios.post(apiEndpoint, {
model: apiModel, // Change this to the model you want to use
prompt: sub, // Use the modified sub string with the replaced prompt
stream: true,
raw: true,
options: {
num_predict: numPredict,
temperature: apiTemperature,
stop: ["```"]
}
}, {
cancelToken: axiosCancelToken,
responseType: 'stream',
@@ -135,7 +143,7 @@ async function autocompleteCommand(textEditor: vscode.TextEditor, cancellationTo
// Keep cancel window available
const finished = new Promise((resolve) => {
response.data.on('end', () => {
progress.report({ message: "Ollama completion finished." });
progress.report({ message: "Fabelous completion finished." });
resolve(true);
});
axiosCancelToken.promise.finally(() => { // prevent notification from freezing on user input cancel
@@ -146,7 +154,7 @@ async function autocompleteCommand(textEditor: vscode.TextEditor, cancellationTo
} catch (err: any) {
// Show an error message
vscode.window.showErrorMessage(
"Ollama encountered an error: " + err.message
"Fabelous Autocoder encountered an error: " + err.message
);
console.log(err);
}
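For reference, messageHeaderSub itself lies outside the hunks shown above; only its signature appears in the first hunk header. Below is a hypothetical sketch of a header builder that returns a template containing the {Prompt} placeholder consumed by the new code; the template text and the placeholder names other than {Prompt} are assumptions, not taken from this repository.

import * as vscode from "vscode";

// Hypothetical header template; in the extension this would more likely come
// from a user setting than be hard-coded.
const messageHeader = "// Language: {LANG}\n// File: {FILE_NAME}\n{Prompt}";

// Sketch of messageHeaderSub: substitutes document metadata into the template
// and leaves {Prompt} in place for autocompleteCommand to fill in later.
function messageHeaderSub(document: vscode.TextDocument): string {
	return messageHeader
		.replace("{LANG}", document.languageId)
		.replace("{FILE_NAME}", document.fileName);
}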