removed all unnecessary input fields
parent dc33d4bb85
commit 077f18c2f9

package.json (70 lines changed)
package.json
@@ -2,7 +2,7 @@
 	"name": "fabelous-autocoder",
 	"displayName": "Fabelous Autocoder",
 	"description": "A simple to use Ollama autocompletion engine with options exposed and streaming functionality",
-	"version": "0.0.1",
+	"version": "0.0.3",
 	"icon": "icon.png",
 	"publisher": "fabel",
 	"license": "CC BY-ND 4.0",
@@ -43,114 +43,78 @@
 	"main": "./out/extension.js",
 	"contributes": {
 		"configuration": {
-			"title": "Ollama Autocoder",
+			"title": "Fabelous Autocoder",
 			"properties": {
-				"ollama-autocoder.endpoint": {
+				"fabelous-autocoder.endpoint": {
 					"type": "string",
 					"default": "http://localhost:11434/api/generate",
 					"description": "The endpoint of the ollama REST API"
 				},
-				"ollama-autocoder.authentication": {
+				"fabelous-autocoder.authentication": {
 					"type": "string",
 					"default": "",
 					"description": "Authorization Token for Ollama"
 				},
-				"ollama-autocoder.model": {
+				"fabelous-autocoder.model": {
 					"type": "string",
-					"default": "openhermes2.5-mistral:7b-q4_K_M",
+					"default": "",
 					"description": "The model to use for generating completions"
 				},
-				"ollama-autocoder.message header": {
+				"fabelous-autocoder.message header": {
 					"type": "string",
 					"editPresentation": "multilineText",
 					"default": "The following is a complete {LANG} file named {FILE_NAME} in the project {PROJECT_NAME}. Anything NOT code is written as a CODE COMMENT. \n\n```\n",
 					"description": "Pseudo-system prompt, optimized for code completion. It is recommended to keep the format the same if modified. Leave blank for no formatting (raw)."
 				},
-				"ollama-autocoder.max tokens predicted": {
+				"fabelous-autocoder.max tokens predicted": {
 					"type": "integer",
 					"default": 1000,
 					"description": "The maximum number of tokens generated by the model."
 				},
-				"ollama-autocoder.prompt window size": {
+				"fabelous-autocoder.prompt window size": {
 					"type": "integer",
 					"default": 2000,
 					"description": "The size of the prompt in characters. NOT tokens, so can be set about 1.5-2x the max tokens of the model (varies)."
 				},
-				"ollama-autocoder.completion keys": {
+				"fabelous-autocoder.completion keys": {
 					"type": "string",
 					"default": " ",
 					"description": "Character that the autocompletion item provider appear on. Multiple characters will be treated as different entries. REQUIRES RELOAD"
 				},
-				"ollama-autocoder.response preview": {
+				"fabelous-autocoder.response preview": {
 					"type": "boolean",
 					"default": true,
 					"description": "Inline completion label will be the first line of response. Max is 10 tokens, but this is unlikely to be reached. If the first line is empty, the default label will be used. Not streamable, disable on slow devices."
 				},
-				"ollama-autocoder.preview max tokens": {
+				"fabelous-autocoder.preview max tokens": {
 					"type": "integer",
 					"default": 50,
 					"description": "The maximum number of tokens generated by the model for the response preview. Typically not reached as the preview stops on newline. Recommended to keep very low due to computational cost."
 				},
-				"ollama-autocoder.preview delay": {
+				"fabelous-autocoder.preview delay": {
 					"type": "number",
 					"default": 1,
 					"description": "Time to wait in seconds before starting inline preview generation. Prevents Ollama server from running briefly every time the completion key is pressed, which causes unnecessary compute usage. If you are not on a battery powered device, set this to 0 for a more responsive experience."
 				},
-				"ollama-autocoder.continue inline": {
+				"fabelous-autocoder.continue inline": {
 					"type": "boolean",
 					"default": true,
 					"description": "Ollama continues autocompletion after what is previewed inline. Disabling disables that feature as some may find it irritating. Multiline completion is still accessible through the shortcut even after disabling."
 				},
-				"ollama-autocoder.temperature": {
+				"fabelous-autocoder.temperature": {
 					"type": "number",
 					"default": 0.5,
 					"description": "Temperature of the model. It is recommended to set it lower than you would for dialogue."
 				},
-				"ollama-autocoder.keep alive": {
+				"fabelous-autocoder.keep alive": {
 					"type": "number",
 					"default": 10,
 					"description": "Time in minutes before Ollama unloads the model."
 				},
-				"ollama-autocoder.top k": {
-					"type": "integer",
-					"description": "Top k sampling for the model."
-				},
-				"ollama-autocoder.top p": {
+				"fabelous-autocoder.top p": {
 					"type": "number",
 					"description": "Top p sampling for the model."
-				},
-				"ollama-autocoder.tfs z": {
-					"type": "number",
-					"description": "TFS z sampling for the model."
-				},
-				"ollama-autocoder.typical p": {
-					"type": "number",
-					"description": "Typical p sampling for the model."
-				},
-				"ollama-autocoder.repeat last n": {
-					"type": "number",
-					"description": "Repeat the last n tokens of the prompt. This can be useful for code completion as it allows the model to have more context."
-				},
-				"ollama-autocoder.repeat penalty": {
-					"type": "number",
-					"description": "Repetition penalty for the model."
-				},
-				"ollama-autocoder.presence penalty": {
-					"type": "number",
-					"description": "Presence penalty for the model."
-				},
-				"ollama-autocoder.frequency penalty": {
-					"type": "number",
-					"description": "Frequency penalty for the model."
-				},
-				"ollama-autocoder.num batch": {
-					"type": "number",
-					"description": "Batch size."
-				},
-				"ollama-autocoder.num keep": {
-					"type": "number",
-					"description": "Number of keep tokens for the model."
-				}
+				}
 			}
 		},
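Since every key under `properties` was renamed from the `ollama-autocoder.` prefix to `fabelous-autocoder.`, consumers have to read the settings under the new section name. A minimal sketch of how these keys are read through the standard VS Code API (variable names here are illustrative; the typed `get` overload with an explicit default is an alternative to the `|| fallback` style the extension source below uses):

```typescript
import * as vscode from "vscode";

// The section name must match the new "fabelous-autocoder" prefix
// contributed in package.json.
const config = vscode.workspace.getConfiguration("fabelous-autocoder");

// Keys containing spaces are ordinary strings to this API.
const endpoint = config.get<string>("endpoint", "http://localhost:11434/api/generate");
const maxTokens = config.get<number>("max tokens predicted", 1000);
const previewDelay = config.get<number>("preview delay", 1);
```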
src/extension.ts
@@ -17,19 +17,11 @@ let responsePreviewMaxTokens: number;
 let responsePreviewDelay: number;
 let continueInline: boolean | undefined;
 let keepAlive: number | undefined;
-let topK: number | undefined;
 let topP: number | undefined;
-let tfsZ: number | undefined;
-let typicalP: number | undefined;
-let repeatLastN: number | undefined;
-let repeatPenalty: number | undefined;
-let presencePenalty: number | undefined;
-let frequencyPenalty: number | undefined;
-let numBatch: number | undefined;
-let numKeep: number | undefined;
 
 function updateVSConfig() {
-	VSConfig = vscode.workspace.getConfiguration("ollama-autocoder");
+	VSConfig = vscode.workspace.getConfiguration("fabelous-autocoder");
 	apiEndpoint = VSConfig.get("endpoint") || "http://localhost:11434/api/generate";
 	apiAuthentication = VSConfig.get("authentication") || "";
 	apiModel = VSConfig.get("model") || "openhermes2.5-mistral:7b-q4_K_M"; // The model I tested with
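The module-level variables above are refreshed by `updateVSConfig()`. A hedged sketch of the usual wiring (hypothetical, not part of this diff) that keeps them in sync when the user edits settings:

```typescript
// Hypothetical wiring, not shown in this diff: re-run updateVSConfig()
// whenever a setting under the renamed section changes, so the cached
// module-level values stay current without reloading the window.
vscode.workspace.onDidChangeConfiguration((event) => {
	if (event.affectsConfiguration("fabelous-autocoder")) {
		updateVSConfig();
	}
});
```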
@@ -41,18 +33,9 @@ function updateVSConfig() {
 	responsePreviewMaxTokens = VSConfig.get("preview max tokens") || 50;
 	responsePreviewDelay = VSConfig.get("preview delay") || 0; // Must be || 0 instead of || [default] because of truthy
 	continueInline = VSConfig.get("continue inline");
-	apiTemperature = VSConfig.get("temperature") || 0.5;
-	keepAlive = VSConfig.get("keep alive") || undefined;
-	topK = VSConfig.get("top k") || undefined;
-	topP = VSConfig.get("top p") || undefined;
-	tfsZ = VSConfig.get("tfs z") || undefined;
-	typicalP = VSConfig.get("typical p") || undefined;
-	repeatLastN = VSConfig.get("repeat last n") || undefined;
-	repeatPenalty = VSConfig.get("repeat penalty") || undefined;
-	presencePenalty = VSConfig.get("presence penalty") || undefined;
-	frequencyPenalty = VSConfig.get("frequency penalty") || undefined;
-	numBatch = VSConfig.get("num batch") || undefined;
-	numKeep = VSConfig.get("num keep") || undefined;
+	apiTemperature = VSConfig.get("temperature") || 0.7;
+	keepAlive = VSConfig.get("keep alive") || 30;
+	topP = VSConfig.get("top p") || 1;
 }
 
 updateVSConfig();
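The `// Must be || 0` comment above is about truthiness: `||` falls back on any falsy value, so an explicit user setting of `0` would be clobbered by a non-zero default. A standalone sketch of the pitfall (values are illustrative, not from the repo); `??` only falls back on `null`/`undefined` and would be the safer choice for numeric settings:

```typescript
// A user who explicitly sets "preview delay": 0 produces the value 0 here.
const fromSettings: number | undefined = 0;

const withOr = fromSettings || 1;      // 1 — the explicit 0 is silently lost
const withNullish = fromSettings ?? 1; // 0 — only null/undefined fall back

console.log(withOr, withNullish); // 1 0
```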
@@ -195,16 +178,7 @@ async function provideCompletionItems(document: vscode.TextDocument, position: v
 				temperature: apiTemperature,
 				stop: ['\n', '```'],
 				...keepAlive && { keep_alive: keepAlive },
-				...topK && { top_k: topK },
 				...topP && { top_p: topP },
-				...tfsZ && { tfs_z: tfsZ },
-				...typicalP && { typical_p: typicalP },
-				...repeatLastN && { repeat_last_n: repeatLastN },
-				...repeatPenalty && { repeat_penalty: repeatPenalty },
-				...presencePenalty && { presence_penalty: presencePenalty },
-				...frequencyPenalty && { frequency_penalty: frequencyPenalty },
-				...numBatch && { num_batch: numBatch },
-				...numKeep && { num_keep: numKeep },
 			}
 		}, {
 			cancelToken: new axios.CancelToken((c) => {
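The request body uses a conditional-spread idiom for the optional sampling parameters: when the variable is `undefined` (or any falsy value), `...x && { key: x }` spreads `undefined`/`false`, which contributes no properties, so unset options are simply omitted from the JSON sent to Ollama. A standalone sketch (names mirror the diff, values are illustrative):

```typescript
const keepAlive: number | undefined = 30;
const topP: number | undefined = undefined;

const options = {
	temperature: 0.7,
	// Included: keepAlive is truthy, so the object literal spreads in.
	...(keepAlive && { keep_alive: keepAlive }),
	// Omitted: topP is undefined, so nothing is spread.
	...(topP && { top_p: topP }),
};

console.log(JSON.stringify(options)); // {"temperature":0.7,"keep_alive":30}
```

Note the same truthiness caveat as above: a legitimate value of `0` would also be dropped by this pattern.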