Compare commits
41 Commits
Author | SHA1 | Date |
---|---|---|
Falko Victor Habel | d1ccb89544 | |
Falko Victor Habel | c0af28ee1e | |
Falko Victor Habel | 89b179bbe3 | |
Falko Victor Habel | a7292cdcea | |
Falko Victor Habel | 2233f4ec63 | |
Falko Victor Habel | 12f411fbac | |
Falko Victor Habel | a3bf2f93bb | |
Falko Victor Habel | c1d0a53720 | |
Falko Victor Habel | e02296c5a7 | |
Falko Victor Habel | 621db41722 | |
Falko Victor Habel | e234969638 | |
Falko Victor Habel | 439a538c7e | |
Falko Victor Habel | a56d47747d | |
Falko Victor Habel | 96a3971b71 | |
Falko Victor Habel | 424cc124b2 | |
Falko Victor Habel | dcb5a3bbdf | |
Falko Victor Habel | f6ea8494a7 | |
Falko Victor Habel | f122d99ba1 | |
Falko Victor Habel | 804a3113e4 | |
Falko Victor Habel | 4c5eb334df | |
Falko Victor Habel | d8e97c2b4e | |
Falko Victor Habel | b6241855fc | |
Falko Victor Habel | ccd3528325 | |
Falko Victor Habel | 77e328fcee | |
Falko Victor Habel | 6a953b7a12 | |
Falko Victor Habel | ffd22f7cc6 | |
Falko Victor Habel | 0416896254 | |
Falko Victor Habel | 9109c199e6 | |
Falko Victor Habel | 8e72d08d53 | |
Falko Victor Habel | 2a46e061d3 | |
Falko Victor Habel | 27571fcad8 | |
Falko Victor Habel | 66c88a053b | |
Falko Victor Habel | ac7afb4b4e | |
Falko Victor Habel | fb6e9a5d1f | |
Falko Victor Habel | 678c4823b3 | |
Falko Victor Habel | 77e0dbc048 | |
Falko Victor Habel | 8ac3879ee0 | |
Falko Victor Habel | 0450a222e2 | |
Falko Victor Habel | 916ea8ca4a | |
Falko Victor Habel | 58c9af0256 | |
Falko Victor Habel | 91d13d36df | |

@@ -0,0 +1 @@
vsce package --baseContentUrl https://gitea.fabelous.app/fabel/Fabelous-Autocoder/src/branch/main --baseImagesUrl https://gitea.fabelous.app/fabel/Fabelous-Autocoder/src/branch/main

README.md (102 lines changed)
@@ -1,76 +1,58 @@
# Fabelous Autocoder

Fabelous Autocoder is a Visual Studio Code extension that provides an easy-to-use interface for Ollama autocompletion. This extension allows developers to use Ollama's powerful language models to generate code completions as they type. It is highly customizable, allowing users to configure various settings to fit their needs.

![Fabelous Autocoder in Action](demo.gif)

Fabelous Autocoder is a powerful VS Code extension that provides intelligent code completion using advanced language models. It offers seamless integration with your development workflow, allowing you to generate and preview code suggestions with ease.

## Features

- Autocompletion using Ollama language models
- Customizable completion keys
- Inline preview of generated completions
- Configurable maximum tokens predicted
- Configurable prompt window size
- Configurable response preview delay
- Configurable temperature for the model
- **Intelligent Code Completion**: Leverages advanced language models to provide context-aware code suggestions.
- **Preview Functionality**: View generated code completions before accepting them.
- **Easy Accept/Decline**: Use simple keyboard shortcuts to accept or decline suggestions.
- **Customizable**: Configure various parameters like API endpoint, model, and response behavior.
- **Language Agnostic**: Works with multiple programming languages.

## Installation

You can also download the extension from the release tab of the following Git repository:

## How It Works

[Fabelous-Autocoder Git Repository](https://gitea.fabelous.app/fabel/Fabelous-Autocoder.git)

To do so, follow these steps:

1. Visit the repository link.
2. Click on the "Releases" tab.
3. Look for the latest release and click on it.
4. Download the extension file compatible with your operating system.
5. Install the extension manually in Visual Studio Code.

After installation, you'll be able to use the Fabelous Autocoder extension in your Visual Studio Code environment.

## Configuration

Fabelous Autocoder is highly customizable, allowing users to configure various settings to fit their needs. To access the configuration settings, follow these steps:

1. Open Visual Studio Code
2. Click on the Settings icon on the sidebar (or press `Ctrl+,`)
3. Search for "Fabelous Autocoder" in the search bar
4. Configure the desired settings

Here are some of the available configuration options:

- `fabelous-autocoder.endpoint`: The endpoint of the Ollama REST API
- `fabelous-autocoder.authentication`: The authentication token for Ollama
- `fabelous-autocoder.model`: The model to use for generating completions
- `fabelous-autocoder.max tokens predicted`: The maximum number of tokens generated by the model
- `fabelous-autocoder.prompt window size`: The size of the prompt in characters
- `fabelous-autocoder.completion keys`: The characters that trigger the autocompletion item provider
- `fabelous-autocoder.response preview`: Whether to show a preview of the generated completion inline
- `fabelous-autocoder.preview max tokens`: The maximum number of tokens generated for the response preview
- `fabelous-autocoder.preview delay`: The time to wait before starting inline preview generation
- `fabelous-autocoder.continue inline`: Whether to continue autocompletion after the inline preview
- `fabelous-autocoder.temperature`: The temperature of the model
- `fabelous-autocoder.keep alive`: The time in minutes before Ollama unloads the model

Note that changing the `completion keys` setting requires a reload of Visual Studio Code.
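
At runtime these keys are read through the VS Code configuration API with the defaults listed above; a minimal sketch (variable names are illustrative, not the extension's actual identifiers):

```typescript
import * as vscode from "vscode";

// Minimal sketch: reading the settings listed above with their documented defaults.
const cfg = vscode.workspace.getConfiguration("fabelous-autocoder");
const endpoint = cfg.get<string>("endpoint") ?? "http://localhost:11434/api/generate";
const model = cfg.get<string>("model") ?? "";
const maxTokens = cfg.get<number>("max tokens predicted") ?? 1000;
const promptWindowSize = cfg.get<number>("prompt window size") ?? 2000;
const completionKeys = cfg.get<string>("completion keys") ?? " ";
const temperature = cfg.get<number>("temperature") ?? 0.5;
const keepAliveMinutes = cfg.get<number>("keep alive") ?? 10;
```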
1. Trigger the autocompletion by typing a completion key (configurable, default is space).
2. The extension sends your current code context to the configured API.
3. A code completion is generated and displayed as a preview.
4. Accept the completion with `Tab` or decline it with `Backspace`.

## Usage

To use Fabelous Autocoder, simply start typing in the editor. When the configured completion keys are pressed, the extension will generate a completion using the configured Ollama model. The completion will be displayed inline with a preview of the generated code. If the `continue inline` setting is enabled, the extension will continue generating completions after the inline preview.

![Fabelous Autocoder Showcase](demo.gif)

To generate a multi-line completion, press `Enter` after the inline preview. This will open a new editor with the generated completion.

1. **Trigger Completion**: Type normally and hit the completion key (space by default).
2. **Preview**: The suggested completion appears in light gray text.
3. **Accept**: Press `Tab` to accept the entire suggestion.
4. **Decline**: Press `Backspace` to remove the preview and decline the suggestion.
5. **Partial Accept**: You can continue typing to partially accept the suggestion.

To customize the behavior of the extension, see the Configuration section above.
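
Concretely, each suggestion is produced by one non-streaming call to the configured Ollama generate endpoint. A simplified sketch of that request (option values are the documented defaults; the function name is illustrative):

```typescript
import axios from "axios";

// Simplified sketch of the single, non-streaming request behind each completion.
// endpoint, model and the option values correspond to the settings described above.
async function requestCompletion(endpoint: string, model: string, prompt: string): Promise<string> {
  const response = await axios.post(endpoint, {
    model,
    prompt,                       // FIM-style prompt built from the code around the cursor
    stream: false,
    raw: true,
    options: {
      num_predict: 1000,          // "max tokens predicted"
      temperature: 0.5,           // "temperature"
      stop: ["<fim_suffix>"],     // the extension also stops on a Markdown code fence
    },
  });
  return response.data.response;  // FIM tags are stripped from this text before it is shown
}
```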
## Configuration

## License

Customize Fabelous Autocoder through VS Code settings:

Fabelous Autocoder is licensed under the CC BY-ND 4.0 license. See the [LICENSE](https://gitea.fabelous.app/fabel/Fabelous-Autocoder/src/branch/main/LICENSE) file for more information.

- `fabelous-autocoder.endpoint`: API endpoint for the language model.
- `fabelous-autocoder.model`: Specify the model to use.
- `fabelous-autocoder.temperature`: Control the randomness of completions.
- `fabelous-autocoder.max tokens predicted`: Set the maximum length of completions.
- `fabelous-autocoder.prompt window size`: Adjust the context window size.
- `fabelous-autocoder.completion keys`: Set custom completion trigger keys.
- `fabelous-autocoder.response preview`: Toggle preview functionality.
- `fabelous-autocoder.preview max tokens`: Limit preview length.
- `fabelous-autocoder.preview delay`: Add delay before showing preview.
- `fabelous-autocoder.continue inline`: Control inline continuation behavior.

## Acknowledgments

## Installation

Fabelous Autocoder was created by [Falko Habel](https://gitea.fabelous.app/fabel). It was inspired by the [Ollama](https://ollama.ai) project.

1. [Click here to download the latest version](https://gitea.fabelous.app/Fabel/Fabelous-Autocoder/releases/download/latest/fabelous-autocoder-0.2.0.vsix)
2. Open Visual Studio Code
3. Go to Extensions (Ctrl+Shift+X)
4. Click on the three dots in the upper-right corner and select "Install from VSIX..."
5. Navigate to the location where you extracted Fabelous Autocoder and select the .vsix file
6. Click "Install" to install the extension

## Requirements

- VS Code version 1.89.0 or higher
- Internet connection for API calls
demo.gif (BIN)
Binary file not shown. Before: 213 KiB | After: 1.3 MiB

package.json (289 lines changed)

@@ -1,136 +1,157 @@
{
"name": "fabelous-autocoder",
"version": "0.1.7",
"displayName": "Fabelous Autocoder",
"description": "A simple to use Ollama autocompletion Plugin",
"icon": "icon.png",
"publisher": "fabel",
"license": "CC BY-ND 4.0",
"bugs": {
"url": "https://gitea.fabelous.app/fabel/Fabelous-Autocoder/issues"
},
"repository": {
"type": "git",
"url": "https://gitea.fabelous.app/fabel/Fabelous-Autocoder.git"
},
"engines": {
"vscode": "^1.89.0"
},
"categories": [
"Machine Learning",
"Snippets",
"Programming Languages"
],
"keywords": [
"ollama",
"coding",
"autocomplete",
"open source",
"assistant",
"ai",
"llm"
],
"galleryBanner": {
"color": "#133773"
},
"activationEvents": [
"onStartupFinished"
],
"main": "./out/extension.js",
"contributes": {
"configuration": {
"title": "Fabelous Autocoder",
"properties": {
"fabelous-autocoder.endpoint": {
"type": "string",
"default": "http://localhost:11434/api/generate",
"description": "The endpoint of the ollama REST API"
},
"fabelous-autocoder.authentication": {
"type": "string",
"default": "",
"description": "Authorization Token for Ollama"
},
"fabelous-autocoder.model": {
"type": "string",
"default": "",
"description": "The model to use for generating completions"
},
"fabelous-autocoder.max tokens predicted": {
"type": "integer",
"default": 1000,
"description": "The maximum number of tokens generated by the model."
},
"fabelous-autocoder.prompt window size": {
"type": "integer",
"default": 2000,
"description": "The size of the prompt in characters. NOT tokens, so can be set about 1.5-2x the max tokens of the model (varies)."
},
"fabelous-autocoder.completion keys": {
"type": "string",
"default": " ",
"description": "Character that the autocompletion item provider appear on. Multiple characters will be treated as different entries. REQUIRES RELOAD"
},
"fabelous-autocoder.response preview": {
"type": "boolean",
"default": true,
"description": "Inline completion label will be the first line of response. Max is 10 tokens, but this is unlikely to be reached. If the first line is empty, the default label will be used. Not streamable, disable on slow devices."
},
"fabelous-autocoder.preview max tokens": {
"type": "integer",
"default": 50,
"description": "The maximum number of tokens generated by the model for the response preview. Typically not reached as the preview stops on newline. Recommended to keep very low due to computational cost."
},
"fabelous-autocoder.preview delay": {
"type": "number",
"default": 1,
"description": "Time to wait in seconds before starting inline preview generation. Prevents Ollama server from running briefly every time the completion key is pressed, which causes unnecessary compute usage. If you are not on a battery powered device, set this to 0 for a more responsive experience."
},
"fabelous-autocoder.continue inline": {
"type": "boolean",
"default": true,
"description": "Ollama continues autocompletion after what is previewed inline. Disabling disables that feature as some may find it irritating. Multiline completion is still accessible through the shortcut even after disabling."
},
"fabelous-autocoder.temperature": {
"type": "number",
"default": 0.5,
"description": "Temperature of the model. It is recommended to set it lower than you would for dialogue."
},
"fabelous-autocoder.keep alive": {
"type": "number",
"default": 10,
"description": "Time in minutes before Ollama unloads the model."
},
"fabelous-autocoder.top p": {
"type": "number",
"description": "Top p sampling for the model."
}
}
},
"commands": [
{
"command": "fabelous-autocoder.autocomplete",
"title": "Fabelous autocompletion"
}
]
},
"scripts": {
"vscode:prepublish": "npm run compile",
"compile": "tsc --skipLibCheck -p ./",
"package": "npm run compile && vsce package",
"lint": "eslint \"src/**/*.ts\"",
"watch": "tsc --skipLibCheck -watch -p ./"
},
"devDependencies": {
"@types/node": "^20.12.8",
"@types/vscode": "^1.89.0",
"@typescript-eslint/eslint-plugin": "^7.8.0",
"@typescript-eslint/parser": "^7.8.0",
"eslint": "^8.57.0",
"typescript": "^5.4.5"
},
"dependencies": {
"axios": "^1.6.8"
}

"name": "fabelous-autocoder",
"version": "0.2.0",
"displayName": "Fabelous Autocoder",
"description": "A simple to use Ollama autocompletion Plugin",
"icon": "icon.png",
"publisher": "Falko Habel",
"license": "CC BY-ND 4.0",
"bugs": {
"url": "https://gitea.fabelous.app/fabel/Fabelous-Autocoder/issues"
},
"repository": {
"type": "git",
"url": "https://gitea.fabelous.app/fabel/Fabelous-Autocoder.git"
},
"engines": {
"vscode": "^1.89.0"
},
"categories": [
"Machine Learning",
"Snippets",
"Programming Languages"
],
"keywords": [
"ollama",
"coding",
"autocomplete",
"open source",
"assistant",
"ai",
"llm"
],
"galleryBanner": {
"color": "#133773"
},
"activationEvents": [
"onStartupFinished"
],
"main": "./out/extension.js",
"contributes": {
"configuration": {
"title": "Fabelous Autocoder",
"properties": {
"fabelous-autocoder.endpoint": {
"type": "string",
"default": "http://localhost:11434/api/generate",
"description": "The endpoint of the ollama REST API"
},
"fabelous-autocoder.authentication": {
"type": "string",
"default": "",
"description": "Authorization Token for Ollama"
},
"fabelous-autocoder.model": {
"type": "string",
"default": "",
"description": "The model to use for generating completions"
},
"fabelous-autocoder.max tokens predicted": {
"type": "integer",
"default": 1000,
"description": "The maximum number of tokens generated by the model."
},
"fabelous-autocoder.prompt window size": {
"type": "integer",
"default": 2000,
"description": "The size of the prompt in characters. NOT tokens, so can be set about 1.5-2x the max tokens of the model (varies)."
},
"fabelous-autocoder.completion keys": {
"type": "string",
"default": " ",
"description": "Character that the autocompletion item provider appear on. Multiple characters will be treated as different entries. REQUIRES RELOAD"
},
"fabelous-autocoder.response preview": {
"type": "boolean",
"default": true,
"description": "Inline completion label will be the first line of response. Max is 10 tokens, but this is unlikely to be reached. If the first line is empty, the default label will be used. Not streamable, disable on slow devices."
},
"fabelous-autocoder.preview max tokens": {
"type": "integer",
"default": 50,
"description": "The maximum number of tokens generated by the model for the response preview. Typically not reached as the preview stops on newline. Recommended to keep very low due to computational cost."
},
"fabelous-autocoder.preview delay": {
"type": "number",
"default": 1,
"description": "Time to wait in seconds before starting inline preview generation. Prevents Ollama server from running briefly every time the completion key is pressed, which causes unnecessary compute usage. If you are not on a battery powered device, set this to 0 for a more responsive experience."
},
"fabelous-autocoder.continue inline": {
"type": "boolean",
"default": true,
"description": "Ollama continues autocompletion after what is previewed inline. Disabling disables that feature as some may find it irritating. Multiline completion is still accessible through the shortcut even after disabling."
},
"fabelous-autocoder.temperature": {
"type": "number",
"default": 0.5,
"description": "Temperature of the model. It is recommended to set it lower than you would for dialogue."
},
"fabelous-autocoder.keep alive": {
"type": "number",
"default": 10,
"description": "Time in minutes before Ollama unloads the model."
},
"fabelous-autocoder.top p": {
"type": "number",
"default": 1,
"description": "Top p sampling for the model."
},
"fabelous-autocoder.enableLineByLineAcceptance": {
"type": "boolean",
"default": false,
"description": "Enable line-by-line acceptance of the generated code."
}
}
},
"keybindings": [
{
"command": "fabelous-autocoder.handleTab",
"key": "tab",
"when": "editorTextFocus && !editorTabMovesFocus"
},
{
"command": "fabelous-autocoder.handleBackspace",
"key": "backspace",
"when": "editorTextFocus"
}
],
"commands": [
{
"command": "fabelous-autocoder.autocomplete",
"title": "Fabelous Autocompletion"
},
{
"command": "fabelous-autocoder.handleTab",
"title": "Handle Tab"
}
]
},
"scripts": {
"vscode:prepublish": "npm run compile",
"compile": "tsc --skipLibCheck -p ./",
"package": "npm run compile && vsce package",
"lint": "eslint \"src/**/*.ts\"",
"watch": "tsc --skipLibCheck -watch -p ./"
},
"devDependencies": {
"@types/node": "^20.12.8",
"@types/vscode": "^1.89.0",
"@typescript-eslint/eslint-plugin": "^7.8.0",
"@typescript-eslint/parser": "^7.8.0",
"eslint": "^8.57.0",
"typescript": "^5.4.5"
},
"dependencies": {
"axios": "^1.6.8"
}
}
src/extension.ts (416 lines changed)

@@ -1,206 +1,328 @@
import * as vscode from "vscode";
import axios from "axios";
import * as vscode from 'vscode';
import axios from 'axios';

let VSConfig: vscode.WorkspaceConfiguration;
let apiEndpoint: string;
let apiAuthentication: string;
let apiModel: string;
let apiTemperature: number;
let numPredict: number;
let promptWindowSize: number;
let completionKeys: string;
let responsePreview: boolean | undefined;
let responsePreviewMaxTokens: number;
let responsePreviewDelay: number;
let continueInline: boolean | undefined;
let keepAlive: number | undefined;
let topP: number | undefined;
let config: {
apiEndpoint: string;
apiAuthentication: string;
apiModel: string;
apiTemperature: number;
numPredict: number;
promptWindowSize: number;
completionKeys: string[];
responsePreview: boolean;
responsePreviewMaxTokens: number;
responsePreviewDelay: number;
continueInline: boolean;
keepAlive: number;
topP: number;
};

function updateVSConfig() {
VSConfig = vscode.workspace.getConfiguration("fabelous-autocoder");
apiEndpoint = VSConfig.get("endpoint") || "http://localhost:11434/api/generate";
apiAuthentication = VSConfig.get("authentication") || "";
apiModel = VSConfig.get("model") || "fabelous-coder:latest";
numPredict = VSConfig.get("max tokens predicted") || 1000;
promptWindowSize = VSConfig.get("prompt window size") || 2000;
completionKeys = VSConfig.get("completion keys") || " ";
responsePreview = VSConfig.get("response preview");
responsePreviewMaxTokens = VSConfig.get("preview max tokens") || 50;
responsePreviewDelay = VSConfig.get("preview delay") || 0;
continueInline = VSConfig.get("continue inline");
apiTemperature = VSConfig.get("temperature") || 0.7;
keepAlive = VSConfig.get("keep alive") || 30;
topP = VSConfig.get("top p") || 1;

let previewDecorationType: vscode.TextEditorDecorationType;
let activeCompletionManager: CompletionManager | null = null;

function updateConfig() {
const vsConfig = vscode.workspace.getConfiguration('fabelous-autocoder');
config = {
apiEndpoint: vsConfig.get('endpoint') || 'http://localhost:11434/api/generate',
apiAuthentication: vsConfig.get('authentication') || '',
apiModel: vsConfig.get('model') || 'fabelous-coder:latest',
apiTemperature: vsConfig.get('temperature') || 0.7,
numPredict: vsConfig.get('max tokens predicted') || 1000,
promptWindowSize: vsConfig.get('prompt window size') || 2000,
completionKeys: (vsConfig.get('completion keys') as string || ' ').split(''),
responsePreview: vsConfig.get('response preview') || false,
responsePreviewMaxTokens: vsConfig.get('preview max tokens') || 50,
responsePreviewDelay: vsConfig.get('preview delay') || 0,
continueInline: vsConfig.get('continue inline') || false,
keepAlive: vsConfig.get('keep alive') || 30,
topP: vsConfig.get('top p') || 1,
};
}

updateVSConfig();
vscode.workspace.onDidChangeConfiguration(updateVSConfig);

function createPreviewDecorationType() {
previewDecorationType = vscode.window.createTextEditorDecorationType({
after: {
color: '#888888',
fontStyle: 'italic',
},
textDecoration: 'none; display: none;',
});
}

function getContextLines(document: vscode.TextDocument, position: vscode.Position): string {
const lines = [];
const startLine = Math.max(0, position.line - 1);
const endLine = position.line;
for (let i = startLine; i <= endLine; i++) {
lines.push(document.lineAt(i).text);
}
return lines.join("\n");
return document.getText(new vscode.Range(startLine, 0, endLine, position.character));
}
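// Illustrative: the context sent to the model is the line above the cursor plus the current
// line (in the newer variant only up to the cursor column), i.e. one line of leading context.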

function createFIMPrompt(prefix: string, language: string): string {
return `<fim_prefix>${prefix}<fim_middle><fim_suffix>${language}\n`;
}
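// Illustrative: for the prefix "def add(a, b):" in a Python file this produces
// "<fim_prefix>def add(a, b):<fim_middle><fim_suffix>python\n"; the completion is whatever the
// model generates after this prompt, and generation stops at "<fim_suffix>".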

async function generateCompletion(prompt: string, cancellationToken: vscode.CancellationToken): Promise<string> {
const axiosCancelToken = new axios.CancelToken((c) => {
cancellationToken.onCancellationRequested(() => c('Request cancelled'));
});

async function autocompleteCommand(textEditor: vscode.TextEditor, cancellationToken?: vscode.CancellationToken) {
const document = textEditor.document;
const position = textEditor.selection.active;
const response = await axios.post(config.apiEndpoint, {
model: config.apiModel,
prompt: prompt,
stream: false,
raw: true,
options: {
num_predict: config.numPredict,
temperature: config.apiTemperature,
stop: ['<fim_suffix>', '```'],
keep_alive: config.keepAlive,
top_p: config.topP,
}
}, {
cancelToken: axiosCancelToken,
headers: {
'Authorization': config.apiAuthentication
}
});

const context = getContextLines(document, position);
return response.data.response.replace(/<fim_middle>|<fim_suffix>|<fim_prefix>/g, '').trim();
}

const fimPrompt = createFIMPrompt(context, document.languageId);

class CompletionManager {
private textEditor: vscode.TextEditor;
private document: vscode.TextDocument;
private startPosition: vscode.Position;
private completionText: string;
private insertedLineCount: number = 0; // Track the number of inserted lines

vscode.window.withProgress(
{
location: vscode.ProgressLocation.Notification,
title: "Fabelous Autocoder",
cancellable: true,
},
async (progress, progressCancellationToken) => {
try {
progress.report({ message: "Starting model..." });
constructor(textEditor: vscode.TextEditor, startPosition: vscode.Position, completionText: string) {
this.textEditor = textEditor;
this.document = textEditor.document;
this.startPosition = startPosition;
this.completionText = completionText;
}

let axiosCancelPost: () => void;
const axiosCancelToken = new axios.CancelToken((c) => {
axiosCancelPost = () => {
c("Autocompletion request terminated by user cancel");
};
if (cancellationToken) cancellationToken.onCancellationRequested(axiosCancelPost);
progressCancellationToken.onCancellationRequested(axiosCancelPost);
vscode.workspace.onDidCloseTextDocument(axiosCancelPost);
});

public async showPreview() {
if (!previewDecorationType) {
createPreviewDecorationType();
}

const response = await axios.post(apiEndpoint, {
model: apiModel,
prompt: fimPrompt,
stream: false,
raw: true,
options: {
num_predict: numPredict,
temperature: apiTemperature,
stop: ["<fim_suffix>", "```"]
}
}, {
cancelToken: axiosCancelToken,
headers: {
'Authorization': apiAuthentication
}
});
const completionLines = this.completionText.split('\n').length;

progress.report({ message: "Generating..." });
// Adjust the start position to line after the original start position
const adjustedStartPosition = this.startPosition.translate(0, 0);

let completionText = response.data.response;
// Remove any FIM tags and leading/trailing whitespace
completionText = completionText.replace(/<fim_middle>|<fim_suffix>|<fim_prefix>/g, '').trim();
// Step 1: Insert blank lines to make space for the preview
const edit = new vscode.WorkspaceEdit();
const linePadding = '\n'.repeat(completionLines + 1); // Include extra line break for visual separation
edit.insert(this.document.uri, adjustedStartPosition, linePadding);
await vscode.workspace.applyEdit(edit);

// Remove the context lines
const startLine = Math.max(0, position.line - 1);
const endLine = position.line;
const rangeToReplace = new vscode.Range(
this.insertedLineCount = completionLines + 1;

// Step 2: Apply decorations
const previewRanges: vscode.DecorationOptions[] = this.completionText.split('\n').map((line, index) => {
const lineNumber = adjustedStartPosition.line + index + 1; // Start preview one line later
return {
range: new vscode.Range(
new vscode.Position(lineNumber, 0),
new vscode.Position(lineNumber, 0)
),
renderOptions: {
after: {
contentText: line,
color: '#888888',
fontStyle: 'italic',
},
},
};
});

this.textEditor.setDecorations(previewDecorationType, previewRanges);
}

public async acceptCompletion() {
const edit = new vscode.WorkspaceEdit();
const completionLines = this.completionText.split('\n');
const numberOfLines = completionLines.length;

// Ensure the start position is never negative
const safeStartPosition = new vscode.Position(Math.max(0, this.startPosition.line - 1), 0);

// Prepare the range to replace
const rangeToReplace = new vscode.Range(
safeStartPosition,
this.startPosition.translate(numberOfLines, 0)
);

// Construct the content to insert
const contentToInsert = (safeStartPosition.line === 0 ? '' : '\n') + this.completionText + '\n';
edit.replace(this.document.uri, rangeToReplace, contentToInsert);

await vscode.workspace.applyEdit(edit);

// Clear the preview decorations
this.clearPreview();

// Set activeCompletionManager to null
activeCompletionManager = null;

// Calculate the new cursor position from the inserted content
const lastCompletionLine = completionLines[completionLines.length - 1];
const newPosition = new vscode.Position(
this.startPosition.line + numberOfLines - 1,
lastCompletionLine.length
);

// Set the new cursor position
this.textEditor.selection = new vscode.Selection(newPosition, newPosition);
}

public clearPreview() {
this.textEditor.setDecorations(previewDecorationType, []); // Remove all preview decorations
}

public async declineCompletion() {
this.clearPreview(); // Clear the preview decorations

try {
const document = this.textEditor.document;
const currentPosition = this.textEditor.selection.active;

// Calculate the range of lines to remove
const startLine = this.startPosition.line + 1;
const endLine = currentPosition.line;
if (endLine > startLine) {
const workspaceEdit = new vscode.WorkspaceEdit();

// Create a range from start of startLine to end of endLine
const range = new vscode.Range(
new vscode.Position(startLine, 0),
new vscode.Position(endLine, document.lineAt(endLine).text.length)
);

// Delete the range
workspaceEdit.delete(document.uri, range);

// Apply the edit
const edit = new vscode.WorkspaceEdit();
edit.replace(document.uri, rangeToReplace, completionText);
await vscode.workspace.applyEdit(edit);
await vscode.workspace.applyEdit(workspaceEdit);

// Move the cursor to the end of the inserted text
const newPosition = new vscode.Position(startLine + completionText.split('\n').length - 1, completionText.split('\n').pop()!.length);
textEditor.selection = new vscode.Selection(newPosition, newPosition);
// Move the cursor back to the original position
this.textEditor.selection = new vscode.Selection(this.startPosition, this.startPosition);

progress.report({ message: "Fabelous completion finished." });

} catch (err: any) {
vscode.window.showErrorMessage(
"Fabelous Autocoder encountered an error: " + err.message
);
console.log(err);
console.log(`Lines ${startLine + 1} to ${endLine + 1} removed successfully`);
activeCompletionManager = null;
} else {
console.log('No lines to remove');
}
} catch (error) {
console.error('Error declining completion:', error);
vscode.window.showErrorMessage(`Error removing lines: ${error}`);
}
);
}
}

async function provideCompletionItems(document: vscode.TextDocument, position: vscode.Position, cancellationToken: vscode.CancellationToken) {
const item = new vscode.CompletionItem("Fabelous autocompletion");
item.insertText = new vscode.SnippetString('${1:}');
async function autocompleteCommand(textEditor: vscode.TextEditor, edit: vscode.TextEditorEdit, ...args: any[]) {
const cancellationTokenSource = new vscode.CancellationTokenSource();
const cancellationToken = cancellationTokenSource.token;

if (responsePreview) {
await new Promise(resolve => setTimeout(resolve, responsePreviewDelay * 1000));
try {
const document = textEditor.document;
const position = textEditor.selection.active;
const context = getContextLines(document, position);
const fimPrompt = createFIMPrompt(context, document.languageId);

const completionText = await vscode.window.withProgress({
location: vscode.ProgressLocation.Notification,
title: 'Fabelous Autocoder',
cancellable: true,
}, async (progress, progressCancellationToken) => {
progress.report({ message: 'Generating...' });
return await generateCompletion(fimPrompt, progressCancellationToken);
});

console.log('Completion generated:', completionText);

const completionManager = new CompletionManager(textEditor, position, completionText);
await completionManager.showPreview();
activeCompletionManager = completionManager;

} catch (err: any) {
console.error('Error in autocompleteCommand:', err);
vscode.window.showErrorMessage(`Fabelous Autocoder encountered an error: ${err.message}`);
} finally {
cancellationTokenSource.dispose();
}
}

async function handleTab() {
if (activeCompletionManager) {
await activeCompletionManager.acceptCompletion();
} else {
await vscode.commands.executeCommand('tab');
}
}

async function handleBackspace() {
if (activeCompletionManager) {
await activeCompletionManager.declineCompletion();
} else {
await vscode.commands.executeCommand('deleteLeft');
}
}
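// When no completion preview is active, both handlers fall through to the editor's default
// commands ('tab' and 'deleteLeft'), so ordinary typing and deletion are unaffected.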

async function provideCompletionItems(document: vscode.TextDocument, position: vscode.Position, cancellationToken: vscode.CancellationToken) {
const item = new vscode.CompletionItem('Fabelous autocompletion');
item.insertText = new vscode.SnippetString('${1:}');
item.documentation = new vscode.MarkdownString('Press `Enter` to get an autocompletion from Fabelous Autocoder');

if (config.responsePreview) {
await new Promise(resolve => setTimeout(resolve, config.responsePreviewDelay * 1000));
if (cancellationToken.isCancellationRequested) {
return [ item ];
return [item];
}

const context = getContextLines(document, position);
const fimPrompt = createFIMPrompt(context, document.languageId);

try {
const response_preview = await axios.post(apiEndpoint, {
model: apiModel,
prompt: fimPrompt,
stream: false,
raw: true,
options: {
num_predict: responsePreviewMaxTokens,
temperature: apiTemperature,
stop: ['<fim_suffix>', '\n', '```'],
...(keepAlive && { keep_alive: keepAlive }),
...(topP && { top_p: topP }),
}
}, {
cancelToken: new axios.CancelToken((c) => {
cancellationToken.onCancellationRequested(() => c("Autocompletion request terminated by completion cancel"));
})
});
const result = await generateCompletion(fimPrompt, cancellationToken);
const preview = (result as any).preview;
if (preview) {
item.detail = preview.split('\n')[0];
}
} catch (error) {
console.error("Error fetching preview:", error);
console.error('Error fetching preview:', error);
}
}

item.documentation = new vscode.MarkdownString('Press `Enter` to get an autocompletion from Fabelous Autocoder');
if (continueInline || !responsePreview) {
if (config.continueInline || !config.responsePreview) {
item.command = {
command: 'fabelous-autocoder.autocomplete',
title: 'Fabelous Autocomplete',
arguments: [cancellationToken]
arguments: []
};
}

return [item];
}

export function activate(context: vscode.ExtensionContext) {
updateConfig();
createPreviewDecorationType();

function activate(context: vscode.ExtensionContext) {
const completionProvider = vscode.languages.registerCompletionItemProvider("*", {
provideCompletionItems
},
...completionKeys.split("")
context.subscriptions.push(
vscode.workspace.onDidChangeConfiguration(updateConfig),
vscode.languages.registerCompletionItemProvider('*', { provideCompletionItems }, ...config.completionKeys),
vscode.commands.registerTextEditorCommand('fabelous-autocoder.autocomplete', autocompleteCommand),
vscode.commands.registerCommand('fabelous-autocoder.handleTab', handleTab),
vscode.commands.registerCommand('fabelous-autocoder.handleBackspace', handleBackspace) // Add this line
);
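// Note: every character of the "completion keys" setting is passed as a separate trigger
// character to the completion provider, which is why changing that setting requires a reload.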
const externalAutocompleteCommand = vscode.commands.registerTextEditorCommand(
"fabelous-autocoder.autocomplete",
(textEditor, _, cancellationToken?) => {
autocompleteCommand(textEditor, cancellationToken);
}
);
context.subscriptions.push(completionProvider);
context.subscriptions.push(externalAutocompleteCommand);
}

function deactivate() { }

module.exports = {
activate,
deactivate,
};

export function deactivate() {}