diff --git a/README.md b/README.md
index dc16469..318ea8a 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,6 @@ A simple to use Ollama autocompletion engine with options exposed and streaming
 - Ollama must be serving on the API endpoint applied in settings
 - For installation of Ollama, visit [ollama.ai](https://ollama.ai)
 - Ollama must have the model applied in settings installed.
-- For fastest results, an Nvidia GPU or Apple Silicon is recommended. CPU still works on small models.
 
 ## How to Use
 
@@ -15,3 +14,9 @@ A simple to use Ollama autocompletion engine with options exposed and streaming
 2. After startup, the tokens will be streamed to your cursor.
 3. To stop the generation early, press the "Cancel" button on the "Ollama Autocoder" notification
 4. Once generation stops, the notification will disappear.
+
+## Notes
+
+- For fastest results, an Nvidia GPU or Apple Silicon is recommended. CPU still works on small models.
+- The prompt only sees behind the cursor. The model is unaware of text in front of its position.
+
\ No newline at end of file
diff --git a/package.json b/package.json
index 5fe1215..50ebec4 100644
--- a/package.json
+++ b/package.json
@@ -1,8 +1,8 @@
 {
 	"name": "ollama-autocoder",
-	"displayName": "Ollama Autocoder",
+	"displayName": "Ollama Autocode",
 	"description": "A simple to use Ollama autocompletion engine with options exposed and streaming functionality",
-	"version": "0.0.1",
+	"version": "0.0.2",
 	"icon": "icon.png",
 	"publisher": "10nates",
 	"license": "MIT",
@@ -50,7 +50,7 @@
 				"ollama-autocoder.raw-input": {
 					"type": "boolean",
 					"default": false,
-					"description": "Prompt the model without formatting. Disables system message."
+					"description": "Prompt the model without formatting. Disables system message. Turn this on if you are having trouble with a model falling out of coding mode."
 				},
 				"ollama-autocoder.system-message": {
 					"type": "string",
@@ -66,6 +66,11 @@
 					"type": "integer",
 					"default": 2000,
 					"description": "The size of the prompt in characters. NOT tokens, so can be set about 1.5-2x the max tokens of the model (varies)."
+				},
+				"ollama-autocoder.cursor-follows": {
+					"type": "boolean",
+					"default": true,
+					"description": "The user's cursor will follow along with the generation. Disabling this can cause unintended effects when typing above/in front of the generation point, but could boost user productivity."
 				}
 			}
 		}
diff --git a/src/extension.ts b/src/extension.ts
index e1a58ac..0b66404 100644
--- a/src/extension.ts
+++ b/src/extension.ts
@@ -10,6 +10,7 @@ let apiSystemMessage: string | undefined;
 let numPredict: number;
 let promptWindowSize: number;
 let rawInput: boolean;
+let cursorFollows: boolean | undefined;
 
 function updateVSConfig() {
 	VSConfig = vscode.workspace.getConfiguration("ollama-autocoder");
@@ -19,6 +20,7 @@ function updateVSConfig() {
 	numPredict = VSConfig.get("max-tokens-predicted") || 500;
 	promptWindowSize = VSConfig.get("prompt-window-size") || 2000;
 	rawInput = VSConfig.get("raw-input") || false;
+	cursorFollows = VSConfig.get("cursor-follows");
 
 	if (apiSystemMessage == "DEFAULT" || rawInput) apiSystemMessage = undefined;
 }
@@ -75,20 +77,18 @@ async function autocompleteCommand(document: vscode.TextDocument, position: vsco
 
 			//complete edit for token
 			const edit = new vscode.WorkspaceEdit();
-			const range = new vscode.Range(
-				currentPosition.line,
-				currentPosition.character,
+			const range = new vscode.Position(
 				currentPosition.line,
 				currentPosition.character
 			);
-			edit.replace(document.uri, range, completion);
+			edit.insert(document.uri, range, completion);
 			await vscode.workspace.applyEdit(edit);
 
 			// Move the cursor to the end of the completion
 			const completionLines = completion.split("\n");
-			const newPosition = position.with(
-				currentPosition.line + completionLines.length,
-				(completionLines.length > 0 ? 0 : currentPosition.character) + completionLines[completionLines.length - 1].length
+			const newPosition = new vscode.Position(
+				currentPosition.line + completionLines.length - 1,
+				(completionLines.length > 1 ? 0 : currentPosition.character) + completionLines[completionLines.length - 1].length
 			);
 			const newSelection = new vscode.Selection(
 				newPosition,
@@ -97,11 +97,13 @@ async function autocompleteCommand(document: vscode.TextDocument, position: vsco
 			currentPosition = newPosition;
 
 			// completion bar
-			progress.report({ message: "Generating...", increment: 1 / (numPredict/100) });
+			progress.report({ message: "Generating...", increment: 1 / (numPredict / 100) });
 
 			// move cursor
-			const editor = vscode.window.activeTextEditor;
-			if (editor) editor.selection = newSelection;
+			if (cursorFollows) {
+				const editor = vscode.window.activeTextEditor;
+				if (editor) editor.selection = newSelection;
+			}
 		});
 
 		// Keep cancel window available