Compare commits

No commits in common. "main" and "0.1.7" have entirely different histories.

2exe.txt (1 line changed)

@@ -1 +0,0 @@
- vsce package --baseContentUrl https://gitea.fabelous.app/fabel/Fabelous-Autocoder/src/branch/main --baseImagesUrl https://gitea.fabelous.app/fabel/Fabelous-Autocoder/src/branch/main
README.md (96 lines changed)

@@ -1,58 +1,76 @@

# Fabelous Autocoder

Fabelous Autocoder is a powerful VS Code extension that provides intelligent code completion using advanced language models. It offers seamless integration with your development workflow, allowing you to generate and preview code suggestions with ease.

Fabelous Autocoder is a Visual Studio Code extension that provides an easy-to-use interface for Ollama autocompletion. This extension allows developers to use Ollama's powerful language models to generate code completions as they type. It is highly customizable, allowing users to configure various settings to fit their needs.

![Fabelous Autocoder in Action](demo.gif)

## Features

- **Intelligent Code Completion**: Leverages advanced language models to provide context-aware code suggestions.
- **Preview Functionality**: View generated code completions before accepting them.
- **Easy Accept/Decline**: Use simple keyboard shortcuts to accept or decline suggestions.
- **Customizable**: Configure various parameters like API endpoint, model, and response behavior.
- **Language Agnostic**: Works with multiple programming languages.
- Autocompletion using Ollama language models
- Customizable completion keys
- Inline preview of generated completions
- Configurable maximum tokens predicted
- Configurable prompt window size
- Configurable response preview delay
- Configurable temperature for the model

## How It Works

## Installation

You can also download the extension from the release tab of the following Git repository:

1. Trigger the autocompletion by typing a completion key (configurable, default is space).
2. The extension sends your current code context to the configured API.
3. A code completion is generated and displayed as a preview.
4. Accept the completion with `Tab` or decline it with `Backspace`.
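
For reference, step 2 boils down to a single POST against the Ollama generate endpoint. A minimal TypeScript sketch, using the endpoint, model, and option names that appear elsewhere in this compare's `package.json` and `src/extension.ts` (your endpoint, model, and token may differ):

```ts
import axios from "axios";

// Minimal sketch: send the code context around the cursor to the Ollama
// /api/generate endpoint and return the raw completion text.
// The values below mirror this repository's defaults; adjust to your settings.
async function requestCompletion(prompt: string): Promise<string> {
  const response = await axios.post(
    "http://localhost:11434/api/generate", // fabelous-autocoder.endpoint
    {
      model: "fabelous-coder:latest",      // fabelous-autocoder.model
      prompt,                              // code context before the cursor
      stream: false,
      raw: true,
      options: {
        num_predict: 1000,                 // fabelous-autocoder.max tokens predicted
        temperature: 0.5,                  // fabelous-autocoder.temperature
        stop: ["<fim_suffix>"],            // truncate at the FIM suffix tag
      },
    },
    { headers: { Authorization: "" } }     // fabelous-autocoder.authentication
  );
  return response.data.response;
}
```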
[Fabelous-Autocoder Git Repository](https://gitea.fabelous.app/fabel/Fabelous-Autocoder.git)

## Usage

To do so, follow these steps:

![Fabelous Autocoder Showcase](demo.gif)

1. Visit the repository link.
2. Click on the "Releases" tab.
3. Look for the latest release and click on it.
4. Download the extension file compatible with your operating system.
5. Install the extension manually in Visual Studio Code.

1. **Trigger Completion**: Type normally and hit the completion key (space by default).
2. **Preview**: The suggested completion appears in light gray text.
3. **Accept**: Press `Tab` to accept the entire suggestion.
4. **Decline**: Press `Backspace` to remove the preview and decline the suggestion.
5. **Partial Accept**: You can continue typing to partially accept the suggestion.
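
The Accept/Decline shortcuts above are implemented as two commands bound to `Tab` and `Backspace` that fall back to the editor's default behaviour when no preview is active. A condensed sketch, following the `handleTab`/`handleBackspace` commands in the `src/extension.ts` included later in this compare (the `activeCompletionManager` state is simplified here):

```ts
import * as vscode from "vscode";

// Simplified extension state: the manager for the completion currently being
// previewed, or null when nothing is pending.
let activeCompletionManager: {
  acceptCompletion(): Promise<void>;
  declineCompletion(): Promise<void>;
} | null = null;

// Tab accepts an active preview; otherwise it falls through to the editor's
// normal Tab behaviour.
async function handleTab(): Promise<void> {
  if (activeCompletionManager) {
    await activeCompletionManager.acceptCompletion();
  } else {
    await vscode.commands.executeCommand("tab");
  }
}

// Backspace declines an active preview; otherwise it deletes to the left as usual.
async function handleBackspace(): Promise<void> {
  if (activeCompletionManager) {
    await activeCompletionManager.declineCompletion();
  } else {
    await vscode.commands.executeCommand("deleteLeft");
  }
}
```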
After installation, you'll be able to use the Fabelous Autocoder extension in your Visual Studio Code environment.

## Configuration

Customize Fabelous Autocoder through the VS Code settings:

Fabelous Autocoder is highly customizable, allowing users to configure various settings to fit their needs. To access the configuration settings, follow these steps:

- `fabelous-autocoder.endpoint`: API endpoint for the language model.
- `fabelous-autocoder.model`: Specify the model to use.
- `fabelous-autocoder.temperature`: Control the randomness of completions.
- `fabelous-autocoder.max tokens predicted`: Set the maximum length of completions.
- `fabelous-autocoder.prompt window size`: Adjust the context window size.
- `fabelous-autocoder.completion keys`: Set custom completion trigger keys.
- `fabelous-autocoder.response preview`: Toggle preview functionality.
- `fabelous-autocoder.preview max tokens`: Limit preview length.
- `fabelous-autocoder.preview delay`: Add a delay before showing the preview.
- `fabelous-autocoder.continue inline`: Control inline continuation behavior.

1. Open Visual Studio Code
2. Click on the Settings icon in the sidebar (or press `Ctrl+,`)
3. Search for "Fabelous Autocoder" in the search bar
4. Configure the desired settings
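
At runtime these values are read through `vscode.workspace.getConfiguration`. A minimal sketch of how a few of them are consumed (the setting names come from this compare's `package.json`; the fallback values mirror the defaults used in `src/extension.ts`, and the `Config` interface is illustrative only):

```ts
import * as vscode from "vscode";

// Illustrative shape for a subset of the settings listed above.
interface Config {
  endpoint: string;
  model: string;
  temperature: number;
  numPredict: number;
}

// Sketch: read the extension's settings, falling back to defaults when unset.
function readConfig(): Config {
  const cfg = vscode.workspace.getConfiguration("fabelous-autocoder");
  return {
    endpoint: cfg.get<string>("endpoint") ?? "http://localhost:11434/api/generate",
    model: cfg.get<string>("model") ?? "fabelous-coder:latest",
    temperature: cfg.get<number>("temperature") ?? 0.5,
    numPredict: cfg.get<number>("max tokens predicted") ?? 1000,
  };
}
```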
## Installation

Here are some of the available configuration options:

- `fabelous-autocoder.endpoint`: The endpoint of the Ollama REST API
- `fabelous-autocoder.authentication`: The authentication token for Ollama
- `fabelous-autocoder.model`: The model to use for generating completions
- `fabelous-autocoder.max tokens predicted`: The maximum number of tokens generated by the model
- `fabelous-autocoder.prompt window size`: The size of the prompt in characters
- `fabelous-autocoder.completion keys`: The characters that trigger the autocompletion item provider
- `fabelous-autocoder.response preview`: Whether to show a preview of the generated completion inline
- `fabelous-autocoder.preview max tokens`: The maximum number of tokens generated for the response preview
- `fabelous-autocoder.preview delay`: The time to wait before starting inline preview generation
- `fabelous-autocoder.continue inline`: Whether to continue autocompletion after the inline preview
- `fabelous-autocoder.temperature`: The temperature of the model
- `fabelous-autocoder.keep alive`: The time in minutes before Ollama unloads the model

1. [Click here to download the latest version](https://gitea.fabelous.app/Fabel/Fabelous-Autocoder/releases/download/latest/fabelous-autocoder-0.2.0.vsix)
2. Open Visual Studio Code
3. Go to Extensions (Ctrl+Shift+X)
4. Click on the three dots in the upper-right corner and select "Install from VSIX..."
5. Navigate to the location where you downloaded Fabelous Autocoder and select the .vsix file
6. Click "Install" to install the extension

Note that changing the `completion keys` setting requires a reload of Visual Studio Code.
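
That reload is needed because the trigger characters are read once, when the completion provider is registered during activation. A minimal sketch of that registration, loosely following the `activate` function in the `src/extension.ts` shown later in this compare (`provideCompletionItems` is only a placeholder here):

```ts
import * as vscode from "vscode";

// Sketch: the characters in "fabelous-autocoder.completion keys" become the
// completion provider's trigger characters. They are read once, at activation
// time, which is why changing the setting requires a window reload.
export function activate(context: vscode.ExtensionContext): void {
  const keys =
    vscode.workspace
      .getConfiguration("fabelous-autocoder")
      .get<string>("completion keys") ?? " ";

  const provider: vscode.CompletionItemProvider = {
    provideCompletionItems() {
      // Placeholder item; the real provider builds a preview-aware item.
      return [new vscode.CompletionItem("Fabelous autocompletion")];
    },
  };

  context.subscriptions.push(
    vscode.languages.registerCompletionItemProvider("*", provider, ...keys.split(""))
  );
}
```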
## Requirements

## Usage

- VS Code version 1.89.0 or higher
- Internet connection for API calls

To use Fabelous Autocoder, simply start typing in the editor. When the configured completion keys are pressed, the extension will generate a completion using the configured Ollama model. The completion will be displayed inline with a preview of the generated code. If the `continue inline` setting is enabled, the extension will continue generating completions after the inline preview.

To generate a multi-line completion, press `Enter` after the inline preview. This will open a new editor with the generated completion.
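
Internally, every completion starts from a fill-in-the-middle (FIM) prompt built from the text before the cursor. A short sketch of that prompt construction, mirroring the `createFIMPrompt` helper in this compare's `src/extension.ts` (the example input is made up):

```ts
// Sketch of the FIM prompt the extension sends to Ollama: the code before the
// cursor is the prefix, and the document's language id is appended after the
// suffix tag (mirrors createFIMPrompt in src/extension.ts).
function createFIMPrompt(prefix: string, language: string): string {
  return `<fim_prefix>${prefix}<fim_middle><fim_suffix>${language}\n`;
}

// Hypothetical example: a Python function body that ends mid-expression.
const prompt = createFIMPrompt("def add(a, b):\n    return ", "python");
// prompt === "<fim_prefix>def add(a, b):\n    return <fim_middle><fim_suffix>python\n"
```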
To customize the behavior of the extension, see the Configuration section above.

## License

Fabelous Autocoder is licensed under the CC BY-ND 4.0 license. See the [LICENSE](https://gitea.fabelous.app/fabel/Fabelous-Autocoder/src/branch/main/LICENSE) file for more information.

## Acknowledgments

Fabelous Autocoder was created by [Falko Habel](https://gitea.fabelous.app/fabel). It was inspired by the [Ollama](https://ollama.ai) project.

demo.gif (binary file not shown; before: 1.3 MiB, after: 213 KiB)
package.json (291 lines changed)

@@ -1,157 +1,136 @@

{
  "name": "fabelous-autocoder",
  "version": "0.2.0",
  "displayName": "Fabelous Autocoder",
  "description": "A simple to use Ollama autocompletion Plugin",
  "icon": "icon.png",
  "publisher": "Falko Habel",
  "license": "CC BY-ND 4.0",
  "bugs": {
    "url": "https://gitea.fabelous.app/fabel/Fabelous-Autocoder/issues"
  },
  "repository": {
    "type": "git",
    "url": "https://gitea.fabelous.app/fabel/Fabelous-Autocoder.git"
  },
  "engines": {
    "vscode": "^1.89.0"
  },
  "categories": [
    "Machine Learning",
    "Snippets",
    "Programming Languages"
  ],
  "keywords": [
    "ollama",
    "coding",
    "autocomplete",
    "open source",
    "assistant",
    "ai",
    "llm"
  ],
  "galleryBanner": {
    "color": "#133773"
  },
  "activationEvents": [
    "onStartupFinished"
  ],
  "main": "./out/extension.js",
  "contributes": {
    "configuration": {
      "title": "Fabelous Autocoder",
      "properties": {
        "fabelous-autocoder.endpoint": {
          "type": "string",
          "default": "http://localhost:11434/api/generate",
          "description": "The endpoint of the ollama REST API"
        },
        "fabelous-autocoder.authentication": {
          "type": "string",
          "default": "",
          "description": "Authorization Token for Ollama"
        },
        "fabelous-autocoder.model": {
          "type": "string",
          "default": "",
          "description": "The model to use for generating completions"
        },
        "fabelous-autocoder.max tokens predicted": {
          "type": "integer",
          "default": 1000,
          "description": "The maximum number of tokens generated by the model."
        },
        "fabelous-autocoder.prompt window size": {
          "type": "integer",
          "default": 2000,
          "description": "The size of the prompt in characters. NOT tokens, so can be set about 1.5-2x the max tokens of the model (varies)."
        },
        "fabelous-autocoder.completion keys": {
          "type": "string",
          "default": " ",
          "description": "Character that the autocompletion item provider appears on. Multiple characters will be treated as different entries. REQUIRES RELOAD"
        },
        "fabelous-autocoder.response preview": {
          "type": "boolean",
          "default": true,
          "description": "Inline completion label will be the first line of response. Max is 10 tokens, but this is unlikely to be reached. If the first line is empty, the default label will be used. Not streamable, disable on slow devices."
        },
        "fabelous-autocoder.preview max tokens": {
          "type": "integer",
          "default": 50,
          "description": "The maximum number of tokens generated by the model for the response preview. Typically not reached as the preview stops on newline. Recommended to keep very low due to computational cost."
        },
        "fabelous-autocoder.preview delay": {
          "type": "number",
          "default": 1,
          "description": "Time to wait in seconds before starting inline preview generation. Prevents Ollama server from running briefly every time the completion key is pressed, which causes unnecessary compute usage. If you are not on a battery powered device, set this to 0 for a more responsive experience."
        },
        "fabelous-autocoder.continue inline": {
          "type": "boolean",
          "default": true,
          "description": "Ollama continues autocompletion after what is previewed inline. Disabling disables that feature as some may find it irritating. Multiline completion is still accessible through the shortcut even after disabling."
        },
        "fabelous-autocoder.temperature": {
          "type": "number",
          "default": 0.5,
          "description": "Temperature of the model. It is recommended to set it lower than you would for dialogue."
        },
        "fabelous-autocoder.keep alive": {
          "type": "number",
          "default": 10,
          "description": "Time in minutes before Ollama unloads the model."
        },
        "fabelous-autocoder.top p": {
          "type": "number",
          "default": 1,
          "description": "Top p sampling for the model."
        },
        "fabelous-autocoder.enableLineByLineAcceptance": {
          "type": "boolean",
          "default": false,
          "description": "Enable line-by-line acceptance of the generated code."
        }
      }
    },
    "keybindings": [
      {
        "command": "fabelous-autocoder.handleTab",
        "key": "tab",
        "when": "editorTextFocus && !editorTabMovesFocus"
      },
      {
        "command": "fabelous-autocoder.handleBackspace",
        "key": "backspace",
        "when": "editorTextFocus"
      }
    ],
    "commands": [
      {
        "command": "fabelous-autocoder.autocomplete",
        "title": "Fabelous Autocompletion"
      },
      {
        "command": "fabelous-autocoder.handleTab",
        "title": "Handle Tab"
      }
    ]
  },
  "scripts": {
    "vscode:prepublish": "npm run compile",
    "compile": "tsc --skipLibCheck -p ./",
    "package": "npm run compile && vsce package",
    "lint": "eslint \"src/**/*.ts\"",
    "watch": "tsc --skipLibCheck -watch -p ./"
  },
  "devDependencies": {
    "@types/node": "^20.12.8",
    "@types/vscode": "^1.89.0",
    "@typescript-eslint/eslint-plugin": "^7.8.0",
    "@typescript-eslint/parser": "^7.8.0",
    "eslint": "^8.57.0",
    "typescript": "^5.4.5"
  },
  "dependencies": {
    "axios": "^1.6.8"
  }
}
"name": "fabelous-autocoder",
|
||||
"version": "0.1.7",
|
||||
"displayName": "Fabelous Autocoder",
|
||||
"description": "A simple to use Ollama autocompletion Plugin",
|
||||
"icon": "icon.png",
|
||||
"publisher": "fabel",
|
||||
"license": "CC BY-ND 4.0",
|
||||
"bugs": {
|
||||
"url": "https://gitea.fabelous.app/fabel/Fabelous-Autocoder/issues"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://gitea.fabelous.app/fabel/Fabelous-Autocoder.git"
|
||||
},
|
||||
"engines": {
|
||||
"vscode": "^1.89.0"
|
||||
},
|
||||
"categories": [
|
||||
"Machine Learning",
|
||||
"Snippets",
|
||||
"Programming Languages"
|
||||
],
|
||||
"keywords": [
|
||||
"ollama",
|
||||
"coding",
|
||||
"autocomplete",
|
||||
"open source",
|
||||
"assistant",
|
||||
"ai",
|
||||
"llm"
|
||||
],
|
||||
"galleryBanner": {
|
||||
"color": "#133773"
|
||||
},
|
||||
"activationEvents": [
|
||||
"onStartupFinished"
|
||||
],
|
||||
"main": "./out/extension.js",
|
||||
"contributes": {
|
||||
"configuration": {
|
||||
"title": "Fabelous Autocoder",
|
||||
"properties": {
|
||||
"fabelous-autocoder.endpoint": {
|
||||
"type": "string",
|
||||
"default": "http://localhost:11434/api/generate",
|
||||
"description": "The endpoint of the ollama REST API"
|
||||
},
|
||||
"fabelous-autocoder.authentication": {
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"description": "Authorization Token for Ollama"
|
||||
},
|
||||
"fabelous-autocoder.model": {
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"description": "The model to use for generating completions"
|
||||
},
|
||||
"fabelous-autocoder.max tokens predicted": {
|
||||
"type": "integer",
|
||||
"default": 1000,
|
||||
"description": "The maximum number of tokens generated by the model."
|
||||
},
|
||||
"fabelous-autocoder.prompt window size": {
|
||||
"type": "integer",
|
||||
"default": 2000,
|
||||
"description": "The size of the prompt in characters. NOT tokens, so can be set about 1.5-2x the max tokens of the model (varies)."
|
||||
},
|
||||
"fabelous-autocoder.completion keys": {
|
||||
"type": "string",
|
||||
"default": " ",
|
||||
"description": "Character that the autocompletion item provider appear on. Multiple characters will be treated as different entries. REQUIRES RELOAD"
|
||||
},
|
||||
"fabelous-autocoder.response preview": {
|
||||
"type": "boolean",
|
||||
"default": true,
|
||||
"description": "Inline completion label will be the first line of response. Max is 10 tokens, but this is unlikely to be reached. If the first line is empty, the default label will be used. Not streamable, disable on slow devices."
|
||||
},
|
||||
"fabelous-autocoder.preview max tokens": {
|
||||
"type": "integer",
|
||||
"default": 50,
|
||||
"description": "The maximum number of tokens generated by the model for the response preview. Typically not reached as the preview stops on newline. Recommended to keep very low due to computational cost."
|
||||
},
|
||||
"fabelous-autocoder.preview delay": {
|
||||
"type": "number",
|
||||
"default": 1,
|
||||
"description": "Time to wait in seconds before starting inline preview generation. Prevents Ollama server from running briefly every time the completion key is pressed, which causes unnecessary compute usage. If you are not on a battery powered device, set this to 0 for a more responsive experience."
|
||||
},
|
||||
"fabelous-autocoder.continue inline": {
|
||||
"type": "boolean",
|
||||
"default": true,
|
||||
"description": "Ollama continues autocompletion after what is previewed inline. Disabling disables that feature as some may find it irritating. Multiline completion is still accessible through the shortcut even after disabling."
|
||||
|
||||
},
|
||||
"fabelous-autocoder.temperature": {
|
||||
"type": "number",
|
||||
"default": 0.5,
|
||||
"description": "Temperature of the model. It is recommended to set it lower than you would for dialogue."
|
||||
},
|
||||
"fabelous-autocoder.keep alive": {
|
||||
"type": "number",
|
||||
"default": 10,
|
||||
"description": "Time in minutes before Ollama unloads the model."
|
||||
},
|
||||
"fabelous-autocoder.top p": {
|
||||
"type": "number",
|
||||
"description": "Top p sampling for the model."
|
||||
}
|
||||
}
|
||||
},
|
||||
"commands": [
|
||||
{
|
||||
"command": "fabelous-autocoder.autocomplete",
|
||||
"title": "Fabelous autocompletion"
|
||||
}
|
||||
]
|
||||
},
|
||||
"scripts": {
|
||||
"vscode:prepublish": "npm run compile",
|
||||
"compile": "tsc --skipLibCheck -p ./",
|
||||
"package": "npm run compile && vsce package",
|
||||
"lint": "eslint \"src/**/*.ts\"",
|
||||
"watch": "tsc --skipLibCheck -watch -p ./"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^20.12.8",
|
||||
"@types/vscode": "^1.89.0",
|
||||
"@typescript-eslint/eslint-plugin": "^7.8.0",
|
||||
"@typescript-eslint/parser": "^7.8.0",
|
||||
"eslint": "^8.57.0",
|
||||
"typescript": "^5.4.5"
|
||||
},
|
||||
"dependencies": {
|
||||
"axios": "^1.6.8"
|
||||
}
|
||||
}
|
src/extension.ts (466 lines changed)

@@ -1,328 +1,206 @@
import * as vscode from 'vscode';
import axios from 'axios';
import * as vscode from "vscode";
import axios from "axios";

let config: {
apiEndpoint: string;
apiAuthentication: string;
apiModel: string;
apiTemperature: number;
numPredict: number;
promptWindowSize: number;
completionKeys: string[];
responsePreview: boolean;
responsePreviewMaxTokens: number;
responsePreviewDelay: number;
continueInline: boolean;
keepAlive: number;
topP: number;
};
let VSConfig: vscode.WorkspaceConfiguration;
let apiEndpoint: string;
let apiAuthentication: string;
let apiModel: string;
let apiTemperature: number;
let numPredict: number;
let promptWindowSize: number;
let completionKeys: string;
let responsePreview: boolean | undefined;
let responsePreviewMaxTokens: number;
let responsePreviewDelay: number;
let continueInline: boolean | undefined;
let keepAlive: number | undefined;
let topP: number | undefined;

let previewDecorationType: vscode.TextEditorDecorationType;
let activeCompletionManager: CompletionManager | null = null;

function updateConfig() {
const vsConfig = vscode.workspace.getConfiguration('fabelous-autocoder');
config = {
apiEndpoint: vsConfig.get('endpoint') || 'http://localhost:11434/api/generate',
apiAuthentication: vsConfig.get('authentication') || '',
apiModel: vsConfig.get('model') || 'fabelous-coder:latest',
apiTemperature: vsConfig.get('temperature') || 0.7,
numPredict: vsConfig.get('max tokens predicted') || 1000,
promptWindowSize: vsConfig.get('prompt window size') || 2000,
completionKeys: (vsConfig.get('completion keys') as string || ' ').split(''),
responsePreview: vsConfig.get('response preview') || false,
responsePreviewMaxTokens: vsConfig.get('preview max tokens') || 50,
responsePreviewDelay: vsConfig.get('preview delay') || 0,
continueInline: vsConfig.get('continue inline') || false,
keepAlive: vsConfig.get('keep alive') || 30,
topP: vsConfig.get('top p') || 1,
};
function updateVSConfig() {
VSConfig = vscode.workspace.getConfiguration("fabelous-autocoder");
apiEndpoint = VSConfig.get("endpoint") || "http://localhost:11434/api/generate";
apiAuthentication = VSConfig.get("authentication") || "";
apiModel = VSConfig.get("model") || "fabelous-coder:latest";
numPredict = VSConfig.get("max tokens predicted") || 1000;
promptWindowSize = VSConfig.get("prompt window size") || 2000;
completionKeys = VSConfig.get("completion keys") || " ";
responsePreview = VSConfig.get("response preview");
responsePreviewMaxTokens = VSConfig.get("preview max tokens") || 50;
responsePreviewDelay = VSConfig.get("preview delay") || 0;
continueInline = VSConfig.get("continue inline");
apiTemperature = VSConfig.get("temperature") || 0.7;
keepAlive = VSConfig.get("keep alive") || 30;
topP = VSConfig.get("top p") || 1;
}

function createPreviewDecorationType() {
previewDecorationType = vscode.window.createTextEditorDecorationType({
after: {
color: '#888888',
fontStyle: 'italic',
},
textDecoration: 'none; display: none;',
});
}
updateVSConfig();
vscode.workspace.onDidChangeConfiguration(updateVSConfig);

function getContextLines(document: vscode.TextDocument, position: vscode.Position): string {
const lines = [];
const startLine = Math.max(0, position.line - 1);
const endLine = position.line;
return document.getText(new vscode.Range(startLine, 0, endLine, position.character));
for (let i = startLine; i <= endLine; i++) {
lines.push(document.lineAt(i).text);
}
return lines.join("\n");
}

function createFIMPrompt(prefix: string, language: string): string {
return `<fim_prefix>${prefix}<fim_middle><fim_suffix>${language}\n`;
}

async function generateCompletion(prompt: string, cancellationToken: vscode.CancellationToken): Promise<string> {
const axiosCancelToken = new axios.CancelToken((c) => {
cancellationToken.onCancellationRequested(() => c('Request cancelled'));
});
const response = await axios.post(config.apiEndpoint, {
model: config.apiModel,
prompt: prompt,
stream: false,
raw: true,
options: {
num_predict: config.numPredict,
temperature: config.apiTemperature,
stop: ['<fim_suffix>', '```'],
keep_alive: config.keepAlive,
top_p: config.topP,
}
}, {
cancelToken: axiosCancelToken,
headers: {
'Authorization': config.apiAuthentication
}
});
async function autocompleteCommand(textEditor: vscode.TextEditor, cancellationToken?: vscode.CancellationToken) {
const document = textEditor.document;
const position = textEditor.selection.active;
return response.data.response.replace(/<fim_middle>|<fim_suffix>|<fim_prefix>/g, '').trim();
}
const context = getContextLines(document, position);

class CompletionManager {
private textEditor: vscode.TextEditor;
private document: vscode.TextDocument;
private startPosition: vscode.Position;
private completionText: string;
private insertedLineCount: number = 0; // Track the number of inserted lines
const fimPrompt = createFIMPrompt(context, document.languageId);
constructor(textEditor: vscode.TextEditor, startPosition: vscode.Position, completionText: string) {
this.textEditor = textEditor;
this.document = textEditor.document;
this.startPosition = startPosition;
this.completionText = completionText;
}
vscode.window.withProgress(
{
location: vscode.ProgressLocation.Notification,
title: "Fabelous Autocoder",
cancellable: true,
},
async (progress, progressCancellationToken) => {
try {
progress.report({ message: "Starting model..." });
public async showPreview() {
if (!previewDecorationType) {
createPreviewDecorationType();
}
const completionLines = this.completionText.split('\n').length;
// Adjust the start position to line after the original start position
const adjustedStartPosition = this.startPosition.translate(0, 0);
// Step 1: Insert blank lines to make space for the preview
const edit = new vscode.WorkspaceEdit();
const linePadding = '\n'.repeat(completionLines + 1); // Include extra line break for visual separation
edit.insert(this.document.uri, adjustedStartPosition, linePadding);
await vscode.workspace.applyEdit(edit);
this.insertedLineCount = completionLines + 1;
// Step 2: Apply decorations
const previewRanges: vscode.DecorationOptions[] = this.completionText.split('\n').map((line, index) => {
const lineNumber = adjustedStartPosition.line + index + 1; // Start preview one line later
return {
range: new vscode.Range(
new vscode.Position(lineNumber, 0),
new vscode.Position(lineNumber, 0)
),
renderOptions: {
after: {
contentText: line,
color: '#888888',
fontStyle: 'italic',
},
},
};
});
this.textEditor.setDecorations(previewDecorationType, previewRanges);
}
let axiosCancelPost: () => void;
const axiosCancelToken = new axios.CancelToken((c) => {
axiosCancelPost = () => {
c("Autocompletion request terminated by user cancel");
};
if (cancellationToken) cancellationToken.onCancellationRequested(axiosCancelPost);
progressCancellationToken.onCancellationRequested(axiosCancelPost);
vscode.workspace.onDidCloseTextDocument(axiosCancelPost);
});
public async acceptCompletion() {
const edit = new vscode.WorkspaceEdit();
const completionLines = this.completionText.split('\n');
const numberOfLines = completionLines.length;
// Ensure the start position is never negative
const safeStartPosition = new vscode.Position(Math.max(0, this.startPosition.line - 1), 0);
const response = await axios.post(apiEndpoint, {
model: apiModel,
prompt: fimPrompt,
stream: false,
raw: true,
options: {
num_predict: numPredict,
temperature: apiTemperature,
stop: ["<fim_suffix>", "```"]
}
}, {
cancelToken: axiosCancelToken,
headers: {
'Authorization': apiAuthentication
}
});
// Prepare the range to replace
const rangeToReplace = new vscode.Range(
safeStartPosition,
this.startPosition.translate(numberOfLines, 0)
);
progress.report({ message: "Generating..." });
// Construct the content to insert
const contentToInsert = (safeStartPosition.line === 0 ? '' : '\n') + this.completionText + '\n';
edit.replace(this.document.uri, rangeToReplace, contentToInsert);
await vscode.workspace.applyEdit(edit);
// Clear the preview decorations
this.clearPreview();
let completionText = response.data.response;
// Remove any FIM tags and leading/trailing whitespace
completionText = completionText.replace(/<fim_middle>|<fim_suffix>|<fim_prefix>/g, '').trim();
// Set activeCompletionManager to null
activeCompletionManager = null;
// Calculate the new cursor position from the inserted content
const lastCompletionLine = completionLines[completionLines.length - 1];
const newPosition = new vscode.Position(
this.startPosition.line + numberOfLines - 1,
lastCompletionLine.length
);
// Set the new cursor position
this.textEditor.selection = new vscode.Selection(newPosition, newPosition);
}
public clearPreview() {
this.textEditor.setDecorations(previewDecorationType, []); // Remove all preview decorations
}
public async declineCompletion() {
this.clearPreview(); // Clear the preview decorations
try {
const document = this.textEditor.document;
const currentPosition = this.textEditor.selection.active;
// Calculate the range of lines to remove
const startLine = this.startPosition.line + 1;
const endLine = currentPosition.line;
if (endLine > startLine) {
const workspaceEdit = new vscode.WorkspaceEdit();
// Create a range from start of startLine to end of endLine
const range = new vscode.Range(
// Remove the context lines
const startLine = Math.max(0, position.line - 1);
const endLine = position.line;
const rangeToReplace = new vscode.Range(
new vscode.Position(startLine, 0),
new vscode.Position(endLine, document.lineAt(endLine).text.length)
);
// Delete the range
workspaceEdit.delete(document.uri, range);
// Apply the edit
await vscode.workspace.applyEdit(workspaceEdit);
// Move the cursor back to the original position
this.textEditor.selection = new vscode.Selection(this.startPosition, this.startPosition);
console.log(`Lines ${startLine + 1} to ${endLine + 1} removed successfully`);
activeCompletionManager = null;
} else {
console.log('No lines to remove');
const edit = new vscode.WorkspaceEdit();
edit.replace(document.uri, rangeToReplace, completionText);
await vscode.workspace.applyEdit(edit);
// Move the cursor to the end of the inserted text
const newPosition = new vscode.Position(startLine + completionText.split('\n').length - 1, completionText.split('\n').pop()!.length);
textEditor.selection = new vscode.Selection(newPosition, newPosition);
progress.report({ message: "Fabelous completion finished." });
} catch (err: any) {
vscode.window.showErrorMessage(
"Fabelous Autocoder encountered an error: " + err.message
);
console.log(err);
}
} catch (error) {
console.error('Error declining completion:', error);
vscode.window.showErrorMessage(`Error removing lines: ${error}`);
}
}
}

async function autocompleteCommand(textEditor: vscode.TextEditor, edit: vscode.TextEditorEdit, ...args: any[]) {
const cancellationTokenSource = new vscode.CancellationTokenSource();
const cancellationToken = cancellationTokenSource.token;
try {
const document = textEditor.document;
const position = textEditor.selection.active;
const context = getContextLines(document, position);
const fimPrompt = createFIMPrompt(context, document.languageId);
const completionText = await vscode.window.withProgress({
location: vscode.ProgressLocation.Notification,
title: 'Fabelous Autocoder',
cancellable: true,
}, async (progress, progressCancellationToken) => {
progress.report({ message: 'Generating...' });
return await generateCompletion(fimPrompt, progressCancellationToken);
});
console.log('Completion generated:', completionText);
const completionManager = new CompletionManager(textEditor, position, completionText);
await completionManager.showPreview();
activeCompletionManager = completionManager;
} catch (err: any) {
console.error('Error in autocompleteCommand:', err);
vscode.window.showErrorMessage(`Fabelous Autocoder encountered an error: ${err.message}`);
} finally {
cancellationTokenSource.dispose();
}
}

async function handleTab() {
if (activeCompletionManager) {
await activeCompletionManager.acceptCompletion();
} else {
await vscode.commands.executeCommand('tab');
}
}

async function handleBackspace() {
if (activeCompletionManager) {
await activeCompletionManager.declineCompletion();
} else {
await vscode.commands.executeCommand('deleteLeft');
}
}

async function provideCompletionItems(document: vscode.TextDocument, position: vscode.Position, cancellationToken: vscode.CancellationToken) {
const item = new vscode.CompletionItem('Fabelous autocompletion');
item.insertText = new vscode.SnippetString('${1:}');
item.documentation = new vscode.MarkdownString('Press `Enter` to get an autocompletion from Fabelous Autocoder');
if (config.responsePreview) {
await new Promise(resolve => setTimeout(resolve, config.responsePreviewDelay * 1000));
if (cancellationToken.isCancellationRequested) {
return [item];
}
const context = getContextLines(document, position);
const fimPrompt = createFIMPrompt(context, document.languageId);
try {
const result = await generateCompletion(fimPrompt, cancellationToken);
const preview = (result as any).preview;
if (preview) {
item.detail = preview.split('\n')[0];
}
} catch (error) {
console.error('Error fetching preview:', error);
}
}
if (config.continueInline || !config.responsePreview) {
item.command = {
command: 'fabelous-autocoder.autocomplete',
title: 'Fabelous Autocomplete',
arguments: []
};
}
return [item];
}
export function activate(context: vscode.ExtensionContext) {
updateConfig();
createPreviewDecorationType();
context.subscriptions.push(
vscode.workspace.onDidChangeConfiguration(updateConfig),
vscode.languages.registerCompletionItemProvider('*', { provideCompletionItems }, ...config.completionKeys),
vscode.commands.registerTextEditorCommand('fabelous-autocoder.autocomplete', autocompleteCommand),
vscode.commands.registerCommand('fabelous-autocoder.handleTab', handleTab),
vscode.commands.registerCommand('fabelous-autocoder.handleBackspace', handleBackspace) // Add this line
);
}

async function provideCompletionItems(document: vscode.TextDocument, position: vscode.Position, cancellationToken: vscode.CancellationToken) {
const item = new vscode.CompletionItem("Fabelous autocompletion");
item.insertText = new vscode.SnippetString('${1:}');
if (responsePreview) {
await new Promise(resolve => setTimeout(resolve, responsePreviewDelay * 1000));
if (cancellationToken.isCancellationRequested) {
return [ item ];
}
export function deactivate() {}
const context = getContextLines(document, position);
const fimPrompt = createFIMPrompt(context, document.languageId);
try {
const response_preview = await axios.post(apiEndpoint, {
model: apiModel,
prompt: fimPrompt,
stream: false,
raw: true,
options: {
num_predict: responsePreviewMaxTokens,
temperature: apiTemperature,
stop: ['<fim_suffix>', '\n', '```'],
...(keepAlive && { keep_alive: keepAlive }),
...(topP && { top_p: topP }),
}
}, {
cancelToken: new axios.CancelToken((c) => {
cancellationToken.onCancellationRequested(() => c("Autocompletion request terminated by completion cancel"));
})
});
} catch (error) {
console.error("Error fetching preview:", error);
}
}
item.documentation = new vscode.MarkdownString('Press `Enter` to get an autocompletion from Fabelous Autocoder');
if (continueInline || !responsePreview) {
item.command = {
command: 'fabelous-autocoder.autocomplete',
title: 'Fabelous Autocomplete',
arguments: [cancellationToken]
};
}
return [item];
}

function activate(context: vscode.ExtensionContext) {
const completionProvider = vscode.languages.registerCompletionItemProvider("*", {
provideCompletionItems
},
...completionKeys.split("")
);
const externalAutocompleteCommand = vscode.commands.registerTextEditorCommand(
"fabelous-autocoder.autocomplete",
(textEditor, _, cancellationToken?) => {
autocompleteCommand(textEditor, cancellationToken);
}
);
context.subscriptions.push(completionProvider);
context.subscriptions.push(externalAutocompleteCommand);
}

function deactivate() { }

module.exports = {
activate,
deactivate,
};