made settings prettier and also broke whatever settings were currently set. Had to do it at some point

Nathan Hedge 2023-12-21 00:54:03 -06:00
parent 259730806f
commit e4ee5d6dd3
No known key found for this signature in database
GPG Key ID: 1ADBA36D6E304C5C
2 changed files with 8 additions and 8 deletions


@@ -47,22 +47,22 @@
       "default": "openhermes2.5-mistral:7b-q4_K_M",
       "description": "The model to use for generating completions"
     },
-    "ollama-autocoder.raw-input": {
+    "ollama-autocoder.raw input": {
       "type": "boolean",
       "default": false,
       "description": "Prompt the model without formatting. Disables system message. Turn this on if you are having trouble with a model falling out of coding mode."
     },
-    "ollama-autocoder.system-message": {
+    "ollama-autocoder.system message": {
       "type": "string",
       "default": "You are a code autocompletion engine. Respond with a continuation of the code provided and nothing else. Code should not be in a code block. Anything that is not code should be written as a code comment.",
       "description": "The system message to use for code completions. Type DEFAULT for Makefile."
     },
-    "ollama-autocoder.max-tokens-predicted": {
+    "ollama-autocoder.max tokens predicted": {
       "type": "integer",
       "default": 500,
       "description": "The maximum number of tokens generated by the model."
     },
-    "ollama-autocoder.prompt-window-size": {
+    "ollama-autocoder.prompt window size": {
       "type": "integer",
       "default": 2000,
       "description": "The size of the prompt in characters. NOT tokens, so can be set about 1.5-2x the max tokens of the model (varies)."


@@ -15,10 +15,10 @@ function updateVSConfig() {
 	VSConfig = vscode.workspace.getConfiguration("ollama-autocoder");
 	apiEndpoint = VSConfig.get("apiEndpoint") || "http://localhost:11434/api/generate";
 	apiModel = VSConfig.get("model") || "openhermes2.5-mistral:7b-q4_K_M"; // The model I tested with
-	apiSystemMessage = VSConfig.get("system-message");
-	numPredict = VSConfig.get("max-tokens-predicted") || 500;
-	promptWindowSize = VSConfig.get("prompt-window-size") || 2000;
-	rawInput = VSConfig.get("raw-input");
+	apiSystemMessage = VSConfig.get("system message");
+	numPredict = VSConfig.get("max tokens predicted") || 500;
+	promptWindowSize = VSConfig.get("prompt window size") || 2000;
+	rawInput = VSConfig.get("raw input");
 	if (apiSystemMessage == "DEFAULT" || rawInput) apiSystemMessage = undefined;
 }
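User settings are keyed by the exact string, so values saved under the old hyphenated names stop resolving after this rename, which is what the commit message means by breaking whatever settings were currently set. A hedged sketch of the effect, plus a hypothetical migration shim that is not part of this commit:

```typescript
import * as vscode from "vscode";

const VSConfig = vscode.workspace.getConfiguration("ollama-autocoder");

// A settings.json entry like "ollama-autocoder.max-tokens-predicted": 256
// no longer matches the renamed key, so get() returns undefined and the
// || fallback takes over, exactly as in updateVSConfig() above.
const numPredict = VSConfig.get<number>("max tokens predicted") || 500;

// Hypothetical migration shim (not in this commit): prefer the new key,
// fall back to any value still stored under the old hyphenated one.
const rawInput =
	VSConfig.get<boolean>("raw input") ??
	VSConfig.get<boolean>("raw-input") ??
	false;
```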