diff --git a/src/extension.ts b/src/extension.ts
index 08da8e4..52600ab 100644
--- a/src/extension.ts
+++ b/src/extension.ts
@@ -16,7 +16,7 @@ let responsePreviewMaxTokens: number;
 
 function updateVSConfig() {
 	VSConfig = vscode.workspace.getConfiguration("ollama-autocoder");
-	apiEndpoint = VSConfig.get("apiEndpoint") || "http://localhost:11434/api/generate";
+	apiEndpoint = VSConfig.get("endpoint") || "http://localhost:11434/api/generate";
 	apiModel = VSConfig.get("model") || "openhermes2.5-mistral:7b-q4_K_M"; // The model I tested with
 	apiSystemMessage = VSConfig.get("system message");
 	numPredict = VSConfig.get("max tokens predicted") || 500;
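
Note: the hunk above changes the configuration key read at runtime from "apiEndpoint" to "endpoint". A minimal sketch of the corresponding read pattern, assuming the keys are declared under the "ollama-autocoder" section of the extension's package.json (that declaration is not shown in this diff); the typed get() overload with an explicit default is an alternative to the `||` fallback used here, not what the patch itself does:

import * as vscode from "vscode";

// Hypothetical helper for illustration only; the extension keeps module-level variables instead.
function readOllamaConfig() {
	const config = vscode.workspace.getConfiguration("ollama-autocoder");
	// get<T>(key, default) returns the default when the setting is unset,
	// so an empty string would still be respected (unlike `|| fallback`).
	const endpoint = config.get<string>("endpoint", "http://localhost:11434/api/generate");
	const model = config.get<string>("model", "openhermes2.5-mistral:7b-q4_K_M");
	return { endpoint, model };
}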