Fabelous-Autocoder/package.json

{
  "name": "ollama-autocoder",
  "displayName": "Ollama Autocoder",
  "description": "A simple-to-use Ollama autocompletion engine with options exposed and streaming functionality",
  "version": "0.0.5",
  "icon": "icon.png",
  "publisher": "10nates",
  "license": "MIT",
  "bugs": {
    "url": "https://github.com/10Nates/ollama-autocoder/issues"
  },
  "sponsor": {
    "url": "https://ko-fi.com/natehedge"
  },
  "repository": {
    "type": "git",
    "url": "https://github.com/10Nates/ollama-autocoder"
  },
  "engines": {
    "vscode": "^1.73.0"
  },
  "categories": [
    "Machine Learning",
    "Snippets",
    "Programming Languages"
  ],
  "keywords": [
    "llama",
    "ollama",
    "gpt",
    "coding",
    "autocomplete",
    "open source",
    "assistant",
    "ai",
    "llm"
  ],
  "galleryBanner": {
    "color": "#f5ead1"
  },
  "activationEvents": [
    "onStartupFinished"
  ],
  "main": "./out/extension.js",
"contributes": {
"configuration": {
"title": "Ollama Autocoder",
"properties": {
"ollama-autocoder.endpoint": {
"type": "string",
"default": "http://localhost:11434/api/generate",
"description": "The endpoint of the ollama REST API"
},
"ollama-autocoder.model": {
"type": "string",
"default": "openhermes2.5-mistral:7b-q4_K_M",
"description": "The model to use for generating completions"
},
"ollama-autocoder.raw input": {
"type": "boolean",
"default": false,
"description": "Prompt the model without formatting. Disables system message. Turn this on if you are having trouble with a model falling out of coding mode."
},
"ollama-autocoder.system message": {
"type": "string",
"default": "You are a code autocompletion engine. Respond with a continuation of the code provided and nothing else. Code should not be in a code block. Anything that is not code should be written as a code comment.",
"description": "The system message to use for code completions. Type DEFAULT for Makefile."
},
"ollama-autocoder.max tokens predicted": {
"type": "integer",
"default": 500,
"description": "The maximum number of tokens generated by the model."
},
"ollama-autocoder.prompt window size": {
"type": "integer",
"default": 2000,
"description": "The size of the prompt in characters. NOT tokens, so can be set about 1.5-2x the max tokens of the model (varies)."
},
"ollama-autocoder.completion keys": {
"type": "string",
"default": " ",
"description": "Character that the autocompletion item provider appear on. Multiple characters will be treated as different entries. REQUIRES RELOAD"
},
"ollama-autocoder.response preview": {
"type": "boolean",
"default": true,
"description": "Inline completion label will be the first line of response. Max is 10 tokens, but this is unlikely to be reached. If the first line is empty, the default label will be used. Not streamable, disable on slow devices."
}
}
},
"commands": [
{
"command": "ollama-autocoder.autocomplete",
"title": "Autocomplete with Ollama"
}
]
},
"scripts": {
"vscode:prepublish": "npm run compile",
"compile": "tsc -p ./",
"lint": "eslint \"src/**/*.ts\"",
"watch": "tsc -watch -p ./"
},
"devDependencies": {
"@types/node": "^16.18.34",
"@types/vscode": "^1.73.0",
"@typescript-eslint/eslint-plugin": "^6.7.0",
"@typescript-eslint/parser": "^6.7.0",
"eslint": "^8.26.0",
"typescript": "^5.3.2"
},
"dependencies": {
"axios": "^1.6.2"
}
}
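
For context, the manifest above only declares the extension's surface (one command and eight settings); the behavior lives in the compiled ./out/extension.js, which is not shown here. Below is a minimal TypeScript sketch of how these contribution points could be consumed. Only the command ID, the configuration keys, and the axios dependency come from the manifest; the handler logic, including the mapping of "max tokens predicted" onto Ollama's num_predict option, is an assumption made for illustration, not the project's actual source.

sketch: src/extension.ts (hypothetical, not part of the repository)

import * as vscode from "vscode";
import axios from "axios";

export function activate(context: vscode.ExtensionContext) {
  // Command ID matches the "contributes.commands" entry in package.json.
  context.subscriptions.push(
    vscode.commands.registerCommand("ollama-autocoder.autocomplete", async () => {
      const editor = vscode.window.activeTextEditor;
      if (!editor) {
        return;
      }

      // Settings declared under "contributes.configuration". The keys
      // contain spaces, so they are read exactly as written.
      const config = vscode.workspace.getConfiguration("ollama-autocoder");
      const endpoint = config.get<string>("endpoint")!;
      const model = config.get<string>("model")!;
      const rawInput = config.get<boolean>("raw input")!;
      const systemMessage = config.get<string>("system message")!;
      const maxTokens = config.get<number>("max tokens predicted")!;
      const windowSize = config.get<number>("prompt window size")!;

      // Keep at most "prompt window size" characters before the cursor.
      const textToCursor = editor.document.getText(
        new vscode.Range(new vscode.Position(0, 0), editor.selection.active)
      );
      const prompt = textToCursor.slice(-windowSize);

      // Stream newline-delimited JSON chunks from Ollama's /api/generate
      // endpoint. Mapping "max tokens predicted" to num_predict is an
      // assumption, not confirmed by the manifest.
      const response = await axios.post(
        endpoint,
        {
          model: model,
          prompt: prompt,
          raw: rawInput,
          system: rawInput ? undefined : systemMessage,
          stream: true,
          options: { num_predict: maxTokens }
        },
        { responseType: "stream" }
      );

      // Collect the streamed tokens, then insert once at the cursor. (The
      // manifest advertises streaming, so the real implementation likely
      // inserts tokens progressively as they arrive.)
      let completion = "";
      response.data.on("data", (chunk: Buffer) => {
        for (const line of chunk.toString().split("\n")) {
          if (!line.trim()) { continue; }
          const part = JSON.parse(line);
          if (part.response) { completion += part.response; }
        }
      });
      response.data.on("end", () => {
        editor.edit((edit) => edit.insert(editor.selection.active, completion));
      });
    })
  );
}

export function deactivate() {}

Note that VS Code allows spaces in configuration property names: the segment after the dot is what the Settings UI displays (title-cased), and in code the key is read exactly as written, e.g. config.get("raw input").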