Fixed improper indentation and overwriting when inserting in the middle of preexisting code; clarified some information; added a cursor-follows option.

This commit is contained in:
Nathan Hedge 2023-12-20 20:22:39 -06:00
parent 773a5a52cd
commit c68676e794
No known key found for this signature in database
GPG Key ID: 1ADBA36D6E304C5C
3 changed files with 26 additions and 14 deletions

View File

@ -7,7 +7,6 @@ A simple to use Ollama autocompletion engine with options exposed and streaming
- Ollama must be serving on the API endpoint applied in settings - Ollama must be serving on the API endpoint applied in settings
- For installation of Ollama, visit [ollama.ai](https://ollama.ai) - For installation of Ollama, visit [ollama.ai](https://ollama.ai)
- Ollama must have the model applied in settings installed. - Ollama must have the model applied in settings installed.
- For fastest results, an Nvidia GPU or Apple Silicon is recommended. CPU still works on small models.
## How to Use ## How to Use
@ -15,3 +14,9 @@ A simple to use Ollama autocompletion engine with options exposed and streaming
2. After startup, the tokens will be streamed to your cursor. 2. After startup, the tokens will be streamed to your cursor.
3. To stop the generation early, press the "Cancel" button on the "Ollama Autocoder" notification 3. To stop the generation early, press the "Cancel" button on the "Ollama Autocoder" notification
4. Once generation stops, the notification will disappear. 4. Once generation stops, the notification will disappear.
## Notes
- For fastest results, an Nvidia GPU or Apple Silicon is recommended. CPU still works on small models.
- The prompt only sees behind the cursor. The model is unaware of text in front of its position.

View File

@ -1,8 +1,8 @@
{ {
"name": "ollama-autocoder", "name": "ollama-autocoder",
    "displayName": "Ollama Autocoder", "displayName": "Ollama Autocoder",
"description": "A simple to use Ollama autocompletion engine with options exposed and streaming functionality", "description": "A simple to use Ollama autocompletion engine with options exposed and streaming functionality",
"version": "0.0.1", "version": "0.0.2",
"icon": "icon.png", "icon": "icon.png",
"publisher": "10nates", "publisher": "10nates",
"license": "MIT", "license": "MIT",
@ -50,7 +50,7 @@
"ollama-autocoder.raw-input": { "ollama-autocoder.raw-input": {
"type": "boolean", "type": "boolean",
"default": false, "default": false,
"description": "Prompt the model without formatting. Disables system message." "description": "Prompt the model without formatting. Disables system message. Turn this on if you are having trouble with a model falling out of coding mode."
}, },
"ollama-autocoder.system-message": { "ollama-autocoder.system-message": {
"type": "string", "type": "string",
@ -66,6 +66,11 @@
"type": "integer", "type": "integer",
"default": 2000, "default": 2000,
"description": "The size of the prompt in characters. NOT tokens, so can be set about 1.5-2x the max tokens of the model (varies)." "description": "The size of the prompt in characters. NOT tokens, so can be set about 1.5-2x the max tokens of the model (varies)."
},
"ollama-autocoder.cursor-follows": {
"type": "boolean",
"default": true,
"description": "The user's cursor will follow along with the generation. Disabling this can cause unintended effects when typing above/in front of the generation point, but could boost user productivity."
} }
} }
} }

View File

@ -10,6 +10,7 @@ let apiSystemMessage: string | undefined;
let numPredict: number; let numPredict: number;
let promptWindowSize: number; let promptWindowSize: number;
let rawInput: boolean; let rawInput: boolean;
let cursorFollows: boolean | undefined;
function updateVSConfig() { function updateVSConfig() {
VSConfig = vscode.workspace.getConfiguration("ollama-autocoder"); VSConfig = vscode.workspace.getConfiguration("ollama-autocoder");
@ -19,6 +20,7 @@ function updateVSConfig() {
numPredict = VSConfig.get("max-tokens-predicted") || 500; numPredict = VSConfig.get("max-tokens-predicted") || 500;
promptWindowSize = VSConfig.get("prompt-window-size") || 2000; promptWindowSize = VSConfig.get("prompt-window-size") || 2000;
rawInput = VSConfig.get("raw-input") || false; rawInput = VSConfig.get("raw-input") || false;
cursorFollows = VSConfig.get("cursor-follows");
if (apiSystemMessage == "DEFAULT" || rawInput) apiSystemMessage = undefined; if (apiSystemMessage == "DEFAULT" || rawInput) apiSystemMessage = undefined;
} }
@ -75,20 +77,18 @@ async function autocompleteCommand(document: vscode.TextDocument, position: vsco
//complete edit for token //complete edit for token
const edit = new vscode.WorkspaceEdit(); const edit = new vscode.WorkspaceEdit();
const range = new vscode.Range( const range = new vscode.Position(
currentPosition.line,
currentPosition.character,
currentPosition.line, currentPosition.line,
currentPosition.character currentPosition.character
); );
edit.replace(document.uri, range, completion); edit.insert(document.uri, range, completion);
await vscode.workspace.applyEdit(edit); await vscode.workspace.applyEdit(edit);
// Move the cursor to the end of the completion // Move the cursor to the end of the completion
const completionLines = completion.split("\n"); const completionLines = completion.split("\n");
const newPosition = position.with( const newPosition = new vscode.Position(
currentPosition.line + completionLines.length, currentPosition.line + completionLines.length - 1,
(completionLines.length > 0 ? 0 : currentPosition.character) + completionLines[completionLines.length - 1].length (completionLines.length > 1 ? 0 : currentPosition.character) + completionLines[completionLines.length - 1].length
); );
const newSelection = new vscode.Selection( const newSelection = new vscode.Selection(
newPosition, newPosition,
@ -97,11 +97,13 @@ async function autocompleteCommand(document: vscode.TextDocument, position: vsco
currentPosition = newPosition; currentPosition = newPosition;
// completion bar // completion bar
progress.report({ message: "Generating...", increment: 1 / (numPredict/100) }); progress.report({ message: "Generating...", increment: 1 / (numPredict / 100) });
// move cursor // move cursor
if (cursorFollows) {
const editor = vscode.window.activeTextEditor; const editor = vscode.window.activeTextEditor;
if (editor) editor.selection = newSelection; if (editor) editor.selection = newSelection;
}
}); });
// Keep cancel window available // Keep cancel window available