Fabelous-Autocoder/package.json

{
"name": "ollama-autocoder",
"displayName": "Ollama Autocoder",
"description": "A simple to use Ollama autocompletion engine with options exposed and streaming functionality",
"version": "0.0.2",
"icon": "icon.png",
"publisher": "10nates",
"license": "MIT",
"repository": {
"type": "git",
"url": "https://github.com/10Nates/ollama-autocoder"
},
"engines": {
"vscode": "^1.73.0"
},
"categories": [
"Machine Learning",
"Snippets",
"Programming Languages"
],
"keywords": [
"llama",
"ollama",
"gpt",
"coding",
"autocomplete",
"open source",
"assistant",
"ai",
"llm"
],
"activationEvents": [
"onStartupFinished"
],
"main": "./out/extension.js",
"contributes": {
"configuration": {
"title": "Ollama Autocoder",
"properties": {
"ollama-autocoder.endpoint": {
"type": "string",
"default": "http://localhost:11434/api/generate",
"description": "The endpoint of the ollama REST API"
},
"ollama-autocoder.model": {
"type": "string",
"default": "openhermes2.5-mistral:7b-q4_K_M",
"description": "The model to use for generating completions"
},
"ollama-autocoder.raw-input": {
"type": "boolean",
"default": false,
"description": "Prompt the model without formatting. Disables system message. Turn this on if you are having trouble with a model falling out of coding mode."
},
"ollama-autocoder.system-message": {
"type": "string",
"default": "You are a code autocompletion engine. Respond with a continuation of the code provided and nothing else. Code should not be in a code block. Anything that is not code should be written as a code comment.",
"description": "The system message to use for code completions. Type DEFAULT for Makefile."
},
"ollama-autocoder.max-tokens-predicted": {
"type": "integer",
"default": 500,
"description": "The maximum number of tokens generated by the model."
},
"ollama-autocoder.prompt-window-size": {
"type": "integer",
"default": 2000,
"description": "The size of the prompt in characters. NOT tokens, so can be set about 1.5-2x the max tokens of the model (varies)."
},
"ollama-autocoder.cursor-follows": {
"type": "boolean",
"default": true,
"description": "The user's cursor will follow along with the generation. Disabling this can cause unintended effects when typing above/in front of the generation point, but could boost user productivity."
}
}
}
},
"scripts": {
"vscode:prepublish": "npm run compile",
"compile": "tsc -p ./",
"lint": "eslint \"src/**/*.ts\"",
"watch": "tsc -watch -p ./"
},
"devDependencies": {
"@types/node": "^16.18.34",
"@types/vscode": "^1.73.0",
"@typescript-eslint/eslint-plugin": "^6.7.0",
"@typescript-eslint/parser": "^6.7.0",
"eslint": "^8.26.0",
"typescript": "^5.3.2"
},
"dependencies": {
"axios": "^1.6.2"
}
}
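
For context, here is a minimal sketch of how the settings declared above could drive a streaming completion request. It assumes Ollama's real `/api/generate` newline-delimited JSON streaming protocol and the VS Code configuration API; the `generateCompletion` helper itself is hypothetical and is not the extension's actual source.

```typescript
// Hypothetical sketch, not the extension's real implementation: reads the
// "ollama-autocoder" settings and streams a completion from Ollama.
import axios from "axios";
import * as vscode from "vscode";

async function generateCompletion(prompt: string): Promise<string> {
	const config = vscode.workspace.getConfiguration("ollama-autocoder");
	const endpoint = config.get<string>("endpoint", "http://localhost:11434/api/generate");
	const model = config.get<string>("model", "openhermes2.5-mistral:7b-q4_K_M");
	const numPredict = config.get<number>("max-tokens-predicted", 500);
	const windowSize = config.get<number>("prompt-window-size", 2000);
	const raw = config.get<boolean>("raw-input", false);
	const system = config.get<string>("system-message", "");

	// The prompt window is measured in characters, not tokens, so trimming
	// is a simple slice of the tail of the document text.
	const windowedPrompt = prompt.slice(-windowSize);

	// With "stream": true, Ollama responds with newline-delimited JSON
	// objects, each carrying a "response" fragment and a "done" flag.
	// "raw": true bypasses the model's prompt template, which is why the
	// system message is omitted in that mode.
	const response = await axios.post(
		endpoint,
		{
			model,
			prompt: windowedPrompt,
			stream: true,
			raw,
			system: raw ? undefined : system,
			options: { num_predict: numPredict },
		},
		{ responseType: "stream" }
	);

	// Simplification for this sketch: assumes each chunk contains whole
	// lines; a robust client would buffer partial lines across chunks.
	let completion = "";
	for await (const chunk of response.data) {
		for (const line of chunk.toString().split("\n")) {
			if (!line.trim()) continue;
			const part = JSON.parse(line);
			if (part.response) completion += part.response;
			if (part.done) return completion;
		}
	}
	return completion;
}
```

Measuring the prompt window in characters rather than tokens is a pragmatic choice: it avoids shipping a tokenizer with the extension, at the cost of the 1.5-2x rule of thumb noted in the setting's description.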