From 4dc355b2cb0d297bad5b93e4b876a217774feb70 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sun, 19 May 2024 14:03:20 +0200 Subject: [PATCH] fixed bug related to not given config --- project.py | 24 ++++++++---------------- scripts/BaseOllama.py | 5 ++++- test_project.py | 36 ++++++++++++------------------------ 3 files changed, 24 insertions(+), 41 deletions(-) diff --git a/project.py b/project.py index 475b707..9f51f75 100644 --- a/project.py +++ b/project.py @@ -31,14 +31,9 @@ def main(): elif config["mode"] == "terminal": handle_terminal(args) elif config["mode"] == "gui": - try: - config["ollamaConfig"]["base_header"] = json.loads(config["ollamaConfig"]["base_header"]) - config["ollamaConfig"]["embeddings_header"] = json.loads(config["ollamaConfig"]["embeddings_header"]) - except json.decoder.JSONDecodeError: - """can be ignored if no header needed""" - pass # start gui try: + print(config["ollamaConfig"]["embeddings_header"] ) gui = ChatGUI(**config["ollamaConfig"]) gui.mainloop() except TypeError: @@ -58,11 +53,14 @@ def configure(): embeddings_url = input("Enter embeddings URL (standard: http://localhost:11434): ") or "http://localhost:11434" base_model = input("Enter base model (standard: 'mistral'): ") or "mistral" embeddings_model = input("Enter embeddings model (standard: 'mxbai-embed-large'): ") or "mxbai-embed-large" - base_header = input("Authentication for base model (standard: empty): ") or "" - embeddings_header = input("Authentication for embeddings model (standard: empty): ") or "" + base_header_key = input("Authentication Key for base model (standard: empty): ") or "" + base_header_value = input("Authentication Value for base model (standard: empty): ") or "" + embeddings_header_key = input("Authentication Key for embeddings model (standard: empty): ") or "" + embeddings_header_value = input("Authentication Value for embeddings model (standard: empty): ") or "" - return {"mode": mode, "ollamaConfig":{ "base_url": base_llm_url, 
"embeddings_url": embeddings_url, "base_model": base_model, - "embeddings_model": embeddings_model, "base_header": base_header, "embeddings_header": embeddings_header}} + return {"mode": mode, "ollamaConfig":{"base_url": base_llm_url, "embeddings_url": embeddings_url, "base_model": base_model, + "embeddings_model": embeddings_model, "base_header":{base_header_key: base_header_value} + ,"embeddings_header" :{embeddings_header_key: embeddings_header_value}}} def read_config(): if not os.path.exists(CONFIG_FILE): @@ -88,12 +86,6 @@ def handle_change_mode(args): def handle_terminal(args): config = read_config() - try: - config["ollamaConfig"]["base_header"] = json.loads(config["ollamaConfig"]["base_header"]) - config["ollamaConfig"]["embeddings_header"] = json.loads(config["ollamaConfig"]["embeddings_header"]) - except json.decoder.JSONDecodeError: - """can be ignored if no header needed""" - pass if args.p: try: diff --git a/scripts/BaseOllama.py b/scripts/BaseOllama.py index 4402669..1eb6ed9 100644 --- a/scripts/BaseOllama.py +++ b/scripts/BaseOllama.py @@ -27,4 +27,7 @@ class OllamaChatBot: messanges = messanges[:5] else: messanges = self.messanges - return self.ollama.invoke(messanges).content + try: + return self.ollama.invoke(messanges).content + except ValueError: + return "An unexpected Error occuried" diff --git a/test_project.py b/test_project.py index 05ef5bd..0e951c3 100644 --- a/test_project.py +++ b/test_project.py @@ -10,17 +10,9 @@ CONFIG_FILE = 'tests/config.json' def setup_config(): """Fixture to create a dummy config file before each test and remove it after.""" # Create the config file - initial_config = { - "mode": "terminal", - "ollamaConfig": { - "base_url": "http://localhost:11434", - "embeddings_url": "http://localhost:11434", - "base_model": "mistral", - "embeddings_model": "mxbai-embed-large", - "base_header": "", - "embeddings_header": "" - } - } + initial_config = {"mode": "terminal", + "ollamaConfig":{"base_url": 
'https://ai.fabelous.app/v1/ollama/generic', "embeddings_url": 'http://localhost:11434', "base_model": 'mistral', + "embeddings_model": 'mxbai-embed-large', "base_header":{'': ''},"embeddings_header" :{'': ''}}} with open(CONFIG_FILE, 'w') as f: json.dump(initial_config, f) @@ -38,8 +30,10 @@ def test_configure(monkeypatch): 'http://localhost:11434', # Embeddings URL 'mistral', # Base model 'mxbai-embed-large', # Embeddings model - '{"Authorization": "Token xzy"}', # Base header for authentication - '{"Authorization": "Token xzy"}', # Embeddings header for authentication + 'Authorization', # Base Model authentication key + 'Token xzy', # Base Model authentication value + 'Authorization', # Embeddings key for authentication + 'Token xzy'# Embeddings value for authentication ]) monkeypatch.setattr('builtins.input', lambda _: next(inputs)) config = configure() # Expected configurations based on the inputs - expected_config = { - "mode": "terminal", - "ollamaConfig": { - "base_url": "https://ai.fabelous.app/v1/ollama/generic", - "embeddings_url": "http://localhost:11434", - "base_model": "mistral", - "embeddings_model": "mxbai-embed-large", - "base_header": '{"Authorization": "Token xzy"}', - "embeddings_header": '{"Authorization": "Token xzy"}' - } - } + expected_config = {"mode": "terminal", + "ollamaConfig":{"base_url": 'https://ai.fabelous.app/v1/ollama/generic', "embeddings_url": 'http://localhost:11434', "base_model": 'mistral', + "embeddings_model": 'mxbai-embed-large', "base_header":{'Authorization': 'Token xzy'} + ,"embeddings_header" :{'Authorization': 'Token xzy'}}} + assert config['mode'] == expected_config['mode'], "Mode configuration does not match." assert config['ollamaConfig'] == expected_config['ollamaConfig'], "OllamaConfig does not match."