From e4ee5d6dd3f14c182312bd6b6d3134ea6e09dd46 Mon Sep 17 00:00:00 2001
From: Nathan Hedge <23344786+10Nates@users.noreply.github.com>
Date: Thu, 21 Dec 2023 00:54:03 -0600
Subject: [PATCH] made settings prettier and also broke whatever settings were
 currently set. Had to do it at some point

---
 package.json     | 8 ++++----
 src/extension.ts | 8 ++++----
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/package.json b/package.json
index 631718b..8618efc 100644
--- a/package.json
+++ b/package.json
@@ -47,22 +47,22 @@
         "default": "openhermes2.5-mistral:7b-q4_K_M",
         "description": "The model to use for generating completions"
       },
-      "ollama-autocoder.raw-input": {
+      "ollama-autocoder.raw input": {
        "type": "boolean",
        "default": false,
        "description": "Prompt the model without formatting. Disables system message. Turn this on if you are having trouble with a model falling out of coding mode."
      },
-      "ollama-autocoder.system-message": {
+      "ollama-autocoder.system message": {
        "type": "string",
        "default": "You are a code autocompletion engine. Respond with a continuation of the code provided and nothing else. Code should not be in a code block. Anything that is not code should be written as a code comment.",
        "description": "The system message to use for code completions. Type DEFAULT for Makefile."
      },
-      "ollama-autocoder.max-tokens-predicted": {
+      "ollama-autocoder.max tokens predicted": {
        "type": "integer",
        "default": 500,
        "description": "The maximum number of tokens generated by the model."
      },
-      "ollama-autocoder.prompt-window-size": {
+      "ollama-autocoder.prompt window size": {
        "type": "integer",
        "default": 2000,
        "description": "The size of the prompt in characters. NOT tokens, so can be set about 1.5-2x the max tokens of the model (varies)."
diff --git a/src/extension.ts b/src/extension.ts
index 7ee15ce..76caf97 100644
--- a/src/extension.ts
+++ b/src/extension.ts
@@ -15,10 +15,10 @@ function updateVSConfig() {
 	VSConfig = vscode.workspace.getConfiguration("ollama-autocoder");
 	apiEndpoint = VSConfig.get("apiEndpoint") || "http://localhost:11434/api/generate";
 	apiModel = VSConfig.get("model") || "openhermes2.5-mistral:7b-q4_K_M"; // The model I tested with
-	apiSystemMessage = VSConfig.get("system-message");
-	numPredict = VSConfig.get("max-tokens-predicted") || 500;
-	promptWindowSize = VSConfig.get("prompt-window-size") || 2000;
-	rawInput = VSConfig.get("raw-input");
+	apiSystemMessage = VSConfig.get("system message");
+	numPredict = VSConfig.get("max tokens predicted") || 500;
+	promptWindowSize = VSConfig.get("prompt window size") || 2000;
+	rawInput = VSConfig.get("raw input");
 
 	if (apiSystemMessage == "DEFAULT" || rawInput) apiSystemMessage = undefined;
 }
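
Note: as the subject line admits, renaming the keys silently discards any values users already saved under the old hyphenated names. A minimal TypeScript sketch of a backwards-compatible read is below; the helpers getWithFallback/userSet are hypothetical (not part of this patch), and only vscode.workspace.getConfiguration, WorkspaceConfiguration.get, and WorkspaceConfiguration.inspect are real VS Code APIs. Whether inspect() surfaces values for keys no longer declared in package.json is an assumption worth verifying.

import * as vscode from "vscode";

// Hypothetical helper: true if the user explicitly set `key` at any scope,
// as opposed to merely inheriting the default declared in package.json.
function userSet(config: vscode.WorkspaceConfiguration, key: string): boolean {
	const info = config.inspect(key);
	return info?.globalValue !== undefined
		|| info?.workspaceValue !== undefined
		|| info?.workspaceFolderValue !== undefined;
}

// Hypothetical helper: prefer the renamed key, but fall back to a value the
// user saved under the old hyphenated key before this patch.
function getWithFallback<T>(config: vscode.WorkspaceConfiguration, newKey: string, oldKey: string): T | undefined {
	if (userSet(config, newKey)) return config.get<T>(newKey);
	if (userSet(config, oldKey)) return config.get<T>(oldKey);
	return config.get<T>(newKey); // falls through to the declared default
}

// Usage, mirroring updateVSConfig():
const VSConfig = vscode.workspace.getConfiguration("ollama-autocoder");
const numPredict = getWithFallback<number>(VSConfig, "max tokens predicted", "max-tokens-predicted") || 500;
const rawInput = getWithFallback<boolean>(VSConfig, "raw input", "raw-input");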