settings only update on config change, better progress reporting, changed name, added README and icon, added categories & keywords, changed activation event

Nathan Hedge 2023-12-20 18:27:42 -06:00
parent fd0f553738
commit 4f26bf3af3
No known key found for this signature in database
GPG Key ID: 1ADBA36D6E304C5C
4 changed files with 110 additions and 53 deletions

README.md

@@ -1,3 +1,17 @@
-# Ollama Coder
-An ollama-based autocompletion engine.
+# Ollama Autocoder
+A simple-to-use Ollama autocompletion engine with options exposed.
+
+## Requirements
+- Ollama must be serving on the API endpoint applied in settings
+  - For installation of Ollama, visit [ollama.ai](https://ollama.ai)
+- Ollama must have the model applied in settings installed.
+- For fastest results, an Nvidia GPU or Apple Silicon is recommended. CPU still works on small models.
+
+## How to Use
+1. In a text document, press space or go to a new line. The option `Autocomplete with Ollama` will appear. Press enter to start generation.
+2. After startup, the tokens will be streamed to your cursor.
+3. To stop the generation early, press the "Cancel" button on the "Ollama Autocoder" notification.
+4. Once generation stops, the notification will disappear.
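
The requirements above assume a reachable Ollama server. A quick way to verify that outside the editor is to hit the generate endpoint directly; a minimal sketch using axios (the extension's own HTTP dependency), assuming the default endpoint and model from the settings:

```typescript
import axios from "axios";

// Sanity-check that Ollama is serving before relying on the extension.
// Endpoint and model are the settings defaults; substitute your own values.
async function checkOllama(): Promise<void> {
	const response = await axios.post("http://localhost:11434/api/generate", {
		model: "openhermes2.5-mistral:7b-q4_K_M", // must already be pulled in Ollama
		prompt: "// hello",
		stream: false, // one JSON object back instead of a token stream
		options: { num_predict: 8 },
	});
	console.log("Ollama responded:", response.data.response);
}

checkOllama().catch((err) => console.error("Ollama is not reachable:", err.message));
```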

icon.png (new binary file, 188 KiB)

package.json

@@ -1,55 +1,75 @@
 {
-  "name": "ollama-coder",
-  "displayName": "Ollama Coder",
+  "name": "ollama-autocoder",
+  "displayName": "Ollama Autocoder",
+  "description": "An Ollama autocompletion engine with options exposed",
   "version": "0.0.1",
+  "icon": "icon.png",
   "publisher": "10nates",
   "license": "MIT",
   "repository": {
     "type": "git",
-    "url": "https://github.com/10Nates/ollama-coder"
+    "url": "https://github.com/10Nates/ollama-autocoder"
   },
   "engines": {
     "vscode": "^1.73.0"
   },
   "categories": [
-    "Other"
+    "Machine Learning",
+    "Snippets",
+    "Programming Languages"
+  ],
+  "keywords": [
+    "llama",
+    "ollama",
+    "gpt",
+    "coding",
+    "autocomplete",
+    "open source",
+    "assistant",
+    "ai",
+    "llm"
   ],
   "activationEvents": [
-    "*"
+    "onStartupFinished"
   ],
   "main": "./out/extension.js",
   "contributes": {
     "configuration": {
-      "title": "Ollama Coder",
+      "title": "Ollama Autocoder",
       "properties": {
-        "ollama-coder.endpoint": {
+        "ollama-autocoder.endpoint": {
           "type": "string",
           "default": "http://localhost:11434/api/generate",
           "description": "The endpoint of the ollama REST API"
         },
-        "ollama-coder.model": {
+        "ollama-autocoder.model": {
           "type": "string",
-          "default": "deepseek-coder",
+          "default": "openhermes2.5-mistral:7b-q4_K_M",
           "description": "The model to use for generating completions"
         },
-        "ollama-coder.system-message": {
+        "ollama-autocoder.raw-input": {
+          "type": "boolean",
+          "default": false,
+          "description": "Prompt the model without formatting. Disables system message."
+        },
+        "ollama-autocoder.system-message": {
           "type": "string",
           "default": "You are a code autocompletion engine. Respond with a continuation of the code provided and nothing else. Code should not be in a code block. Anything that is not code should be written as a code comment.",
           "description": "The system message to use for code completions. Type DEFAULT for Makefile."
         },
-        "ollama-coder.max-tokens-predicted": {
+        "ollama-autocoder.max-tokens-predicted": {
           "type": "integer",
           "default": 500,
           "description": "The maximum number of tokens generated by the model."
         },
-        "ollama-coder.prompt-window-size": {
+        "ollama-autocoder.prompt-window-size": {
           "type": "integer",
           "default": 2000,
           "description": "The size of the prompt in characters. NOT tokens, so can be set about 1.5-2x the max tokens of the model (varies)."
         }
       }
     }
   },
   "scripts": {
     "vscode:prepublish": "npm run compile",
     "compile": "tsc -p ./",
@@ -67,4 +87,4 @@
   "dependencies": {
     "axios": "^1.6.2"
   }
 }
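
The rename moves every setting from the `ollama-coder.` namespace to `ollama-autocoder.`, and the schema above is what VS Code surfaces in the Settings UI. As an illustration of how contributed settings like these are read back through the VS Code API (variable names here are illustrative, not the extension's):

```typescript
import * as vscode from "vscode";

// getConfiguration("ollama-autocoder") scopes lookups to the renamed
// section, so get("model") resolves the key "ollama-autocoder.model".
const config = vscode.workspace.getConfiguration("ollama-autocoder");

// Fall back to the schema defaults if the user has not overridden them.
const endpoint = config.get<string>("endpoint") ?? "http://localhost:11434/api/generate";
const model = config.get<string>("model") ?? "openhermes2.5-mistral:7b-q4_K_M";
const rawInput = config.get<boolean>("raw-input") ?? false;

console.log(`Requests will go to ${endpoint} using ${model} (raw: ${rawInput})`);
```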

src/extension.ts

@@ -1,36 +1,56 @@
 // Significant help from GPT4
 import * as vscode from "vscode";
-import axios, { AxiosResponse } from "axios";
+import axios from "axios";
-const VSConfig = vscode.workspace.getConfiguration("ollama-coder");
-const apiEndpoint: string = VSConfig.get("apiEndpoint") || "http://localhost:11434/api/generate";
-const apiModel: string = VSConfig.get("model") || "deepseek-coder";
-let apiSystemMessage: string | undefined = VSConfig.get("system-message");
-if (apiSystemMessage == "DEFAULT") apiSystemMessage = undefined;
-const numPredict: number = VSConfig.get("max-tokens-predicted") || 500;
-const promptWindowSize: number = VSConfig.get("prompt-window-size") || 2000;
+let VSConfig: vscode.WorkspaceConfiguration;
+let apiEndpoint: string;
+let apiModel: string;
+let apiSystemMessage: string | undefined;
+let numPredict: number;
+let promptWindowSize: number;
+let rawInput: boolean;
+function updateVSConfig() {
+	VSConfig = vscode.workspace.getConfiguration("ollama-autocoder");
+	apiEndpoint = VSConfig.get("endpoint") || "http://localhost:11434/api/generate";
+	apiModel = VSConfig.get("model") || "openhermes2.5-mistral:7b-q4_K_M";
+	apiSystemMessage = VSConfig.get("system-message");
+	numPredict = VSConfig.get("max-tokens-predicted") || 500;
+	promptWindowSize = VSConfig.get("prompt-window-size") || 2000;
+	rawInput = VSConfig.get("raw-input") || false;
+	if (apiSystemMessage == "DEFAULT" || rawInput) apiSystemMessage = undefined;
+}
+updateVSConfig();
+// No need for a restart for any of these settings
+vscode.workspace.onDidChangeConfiguration(updateVSConfig);
-// Function called on ollama-coder.autocomplete
+// Function called on ollama-autocoder.autocomplete
 async function autocompleteCommand(document: vscode.TextDocument, position: vscode.Position, prompt: string, cancellationToken: vscode.CancellationToken) {
 	// Show a progress message
 	vscode.window.withProgress(
 		{
 			location: vscode.ProgressLocation.Notification,
-			title: "Getting a completion from Ollama...",
+			title: "Ollama Autocoder",
 			cancellable: true,
 		},
 		async (progress, progressCancellationToken) => {
 			try {
+				progress.report({ message: "Starting model..." });
 				// Make a request to the ollama.ai REST API
 				const response = await axios.post(apiEndpoint, {
 					model: apiModel, // Change this to the model you want to use
 					prompt: prompt,
 					stream: true,
 					system: apiSystemMessage,
+					raw: rawInput,
 					options: {
 						num_predict: numPredict
-					},
+					}
 				}, {
 					cancelToken: new axios.CancelToken((c) => {
 						const cancelPost = function () {
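
The hunk is cut off inside the CancelToken setup, but the pattern being wired is worth making explicit: both the command's CancellationToken and the progress notification's token should abort the HTTP request. A minimal sketch of that bridge (function name and shape are illustrative):

```typescript
import axios from "axios";
import * as vscode from "vscode";

// Bridge one or more VS Code CancellationTokens to an axios request, so
// cancelling the notification or the originating command aborts the
// streaming POST mid-flight.
function postWithCancellation(url: string, body: unknown, tokens: vscode.CancellationToken[]) {
	return axios.post(url, body, {
		responseType: "stream",
		cancelToken: new axios.CancelToken((cancel) => {
			for (const token of tokens) {
				// Fires when the user hits "Cancel" on the notification
				token.onCancellationRequested(() => cancel("Ollama generation cancelled"));
			}
		}),
	});
}
```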
@@ -48,6 +68,8 @@ async function autocompleteCommand(document: vscode.TextDocument, position: vsco
 	let currentPosition = position;
 	response.data.on('data', async (d: Uint8Array) => {
+		progress.report({ message: "Generating..." });
 		// Get a completion from the response
 		const completion: string = JSON.parse(d.toString()).response;
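
The parsing step above assumes each `data` event carries exactly one JSON object. Ollama streams newline-delimited JSON, so a chunk can in principle contain several lines or end mid-object; a more defensive sketch of the same step (an alternative, not what this commit ships):

```typescript
// Ollama streams NDJSON: one object per line, e.g.
// {"model":"...","response":"de","done":false}
// Buffer partial lines so a split chunk never breaks JSON.parse.
let buffered = "";

function parseChunk(d: Uint8Array): string {
	buffered += d.toString();
	const lines = buffered.split("\n");
	buffered = lines.pop() ?? ""; // keep any incomplete trailing line
	let completion = "";
	for (const line of lines) {
		if (line.trim().length === 0) continue;
		completion += JSON.parse(line).response;
	}
	return completion;
}
```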
@@ -75,7 +97,7 @@ async function autocompleteCommand(document: vscode.TextDocument, position: vsco
 	currentPosition = newPosition;
 	// completion bar
-	progress.report({ increment: 1 / (numPredict/100) });
+	progress.report({ message: "Generating...", increment: 1 / (numPredict/100) });
 	// move cursor
 	const editor = vscode.window.activeTextEditor;
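
The increment expression reads more easily inverted: `1 / (numPredict / 100)` is `100 / numPredict`, the percentage each token contributes, so the bar fills exactly at the token limit. A worked check with the default of 500:

```typescript
const numPredict = 500; // default from settings
const perToken = 1 / (numPredict / 100); // = 100 / 500 = 0.2% per token
// After numPredict tokens: 500 * 0.2 = 100, i.e. a full progress bar.
console.log(perToken * numPredict); // 100
```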
@@ -97,12 +119,13 @@ async function autocompleteCommand(document: vscode.TextDocument, position: vsco
 				vscode.window.showErrorMessage(
 					"Ollama encountered an error: " + err.message
 				);
+				console.log(err);
 			}
 		}
 	);
 }
-// This method is called when your extension is activated
+// This method is called when the extension is activated
 function activate(context: vscode.ExtensionContext) {
 	// Register a completion provider for all file types
 	const provider = vscode.languages.registerCompletionItemProvider("*", {
@@ -120,7 +143,7 @@ function activate(context: vscode.ExtensionContext) {
 			item.documentation = new vscode.MarkdownString('Press `Enter` to get a completion from Ollama');
 			// Set the command to trigger the completion
 			item.command = {
-				command: 'ollama-coder.autocomplete',
+				command: 'ollama-autocoder.autocomplete',
 				title: 'Ollama',
 				arguments: [document, position, prompt, cancellationToken]
 			};
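
The fragments above show the trigger mechanism: the provider returns one completion item whose attached command starts generation only when the item is accepted. A condensed sketch of that shape, using the command id from this commit (the empty prompt argument is a placeholder; the extension builds the real prompt from the document):

```typescript
import * as vscode from "vscode";

// A completion item that does nothing on its own; accepting it runs the
// registered command, which then streams the generated tokens to the cursor.
const provider = vscode.languages.registerCompletionItemProvider("*", {
	provideCompletionItems(document, position, cancellationToken) {
		const item = new vscode.CompletionItem("Autocomplete with Ollama");
		item.insertText = ""; // the command inserts the real text as it streams
		item.command = {
			command: "ollama-autocoder.autocomplete",
			title: "Ollama",
			arguments: [document, position, "", cancellationToken],
		};
		return [item];
	},
});
```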
@@ -137,7 +160,7 @@ function activate(context: vscode.ExtensionContext) {
 	// Register a command for getting a completion from Ollama
 	const disposable = vscode.commands.registerCommand(
-		"ollama-coder.autocomplete",
+		"ollama-autocoder.autocomplete",
 		autocompleteCommand
 	);
@@ -145,7 +168,7 @@ function activate(context: vscode.ExtensionContext) {
 	context.subscriptions.push(disposable);
 }
-// This method is called when your extension is deactivated
+// This method is called when the extension is deactivated
 function deactivate() { }
 module.exports = {