Compare commits


No commits in common. "0d1153503fe61f8082fa50d18471c88a3cfeae89" and "6a9f4ce20382ef1256dfc2433c0d236ecdb78d96" have entirely different histories.

6 changed files with 72 additions and 84 deletions

View File

@@ -5,11 +5,7 @@
         "embeddings_url": "http://localhost:11434",
         "base_model": "mistral",
         "embeddings_model": "mxbai-embed-large",
-        "base_header": {
-            "": ""
-        },
-        "embeddings_header": {
-            "": ""
-        }
+        "base_header": "",
+        "embeddings_header": ""
     }
 }
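
A note on the new format: the header fields change from nested JSON objects to JSON-encoded strings, so "" means no auth and a real header is stored as a string like '{"Authorization": "Token xzy"}'. A minimal sketch of the intended round trip (values are placeholders):

    import json

    raw_header = '{"Authorization": "Token xzy"}'  # new-style config value; "" when no auth is needed
    try:
        header = json.loads(raw_header)  # -> {"Authorization": "Token xzy"}
    except json.JSONDecodeError:
        header = ""  # empty or non-JSON string: treat as "no header"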

View File

@@ -31,9 +31,14 @@ def main():
     elif config["mode"] == "terminal":
         handle_terminal(args)
     elif config["mode"] == "gui":
+        try:
+            config["ollamaConfig"]["base_header"] = json.loads(config["ollamaConfig"]["base_header"])
+            config["ollamaConfig"]["embeddings_header"] = json.loads(config["ollamaConfig"]["embeddings_header"])
+        except json.decoder.JSONDecodeError:
+            """can be ignored if no header needed"""
+            pass
         # start gui
         try:
+            print(config["ollamaConfig"]["embeddings_header"] )
             gui = ChatGUI(**config["ollamaConfig"])
             gui.mainloop()
         except TypeError:
@@ -53,14 +58,11 @@ def configure():
     embeddings_url = input("Enter embeddings URL (standard: http://localhost:11434): ") or "http://localhost:11434"
     base_model = input("Enter base model (standard: 'mistral'): ") or "mistral"
     embeddings_model = input("Enter embeddings model (standard: 'mxbai-embed-large'): ") or "mxbai-embed-large"
-    base_header_key = input("Authentication Key for base model (standard: empty): ") or ""
-    base_header_value = input("Authentication Value for base model (standard: empty): ") or ""
-    embeddings_header_key = input("Authentication Key for embeddings model (standard: empty): ") or ""
-    embeddings_header_value = input("Authentication Value for embeddings model (standard: empty): ") or ""
+    base_header = input("Authentication for base model (standard: empty): ") or ""
+    embeddings_header = input("Authentication for embeddings model (standard: empty): ") or ""
 
-    return {"mode": mode, "ollamaConfig":{"base_url": base_llm_url, "embeddings_url": embeddings_url, "base_model": base_model,
-            "embeddings_model": embeddings_model, "base_header":{base_header_key: base_header_value}
-            ,"embeddings_header" :{embeddings_header_key: embeddings_header_value}}}
+    return {"mode": mode, "ollamaConfig":{ "base_url": base_llm_url, "embeddings_url": embeddings_url, "base_model": base_model,
+            "embeddings_model": embeddings_model, "base_header": base_header, "embeddings_header": embeddings_header}}
 
 def read_config():
     if not os.path.exists(CONFIG_FILE):
@@ -86,6 +88,12 @@ def handle_change_mode(args):
 
 def handle_terminal(args):
     config = read_config()
+    try:
+        config["ollamaConfig"]["base_header"] = json.loads(config["ollamaConfig"]["base_header"])
+        config["ollamaConfig"]["embeddings_header"] = json.loads(config["ollamaConfig"]["embeddings_header"])
+    except json.decoder.JSONDecodeError:
+        """can be ignored if no header needed"""
+        pass
     if args.p:
         try:
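
The same json.loads fallback now appears in both the gui and terminal branches; it could be factored into a single helper. A sketch under that assumption (decode_headers is hypothetical, not part of the diff):

    import json

    def decode_headers(ollama_config):
        """Decode the JSON-string header fields in place; leave them untouched if not valid JSON."""
        for key in ("base_header", "embeddings_header"):
            try:
                ollama_config[key] = json.loads(ollama_config[key])
            except json.JSONDecodeError:
                pass  # an empty string means no auth header is needed

    # usage: decode_headers(config["ollamaConfig"])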

View File

@@ -4,10 +4,7 @@ class OllamaChatBot:
     def __init__(self, base_url, model, headers):
         self.base_url = base_url
         self.model = model
-        if self.is_empty(headers):
-            self.headers = ""
-        else:
-            self.headers = headers
+        self.headers = headers
         self.messanges = []
 
         if headers is None:
@@ -21,10 +18,6 @@ class OllamaChatBot:
             model=self.model,
             headers = self.headers
         )
 
-    def is_empty(self, dictionary):
-        return len(dictionary) == 1 and list(dictionary.keys())[0] == '' and list(dictionary.values())[0] == ''
-
     def get_request(self, prompt):
@@ -34,7 +27,4 @@ class OllamaChatBot:
             messanges = messanges[:5]
         else:
             messanges = self.messanges
-        try:
-            return self.ollama.invoke(messanges).content
-        except ValueError:
-            return "An unexpected Error occuried"
+        return self.ollama.invoke(messanges).content

View File

@@ -25,22 +25,19 @@ class ChatGUI(CTk.CTk):
         self.start_message_processing_thread()
 
     def get_response_from_ollama(self, prompt, context):
-        try:
-            if context != "":
-                if self.context != context:
-                    checks = self.rag.receive_data(file_path=context)
-                    if checks[0]:
-                        return checks[1]
-                    else:
-                        self.context = context
-                        self.rag.init_ollama()
-                return self.rag.get_request(prompt=prompt)
-            else:
-                return self.bot.get_request(prompt=prompt)
-        except ValueError:
-            return "An unexpected Error occuried"
+        if context != "":
+            if self.context != context:
+                checks = self.rag.receive_data(file_path=context)
+                if checks[0]:
+                    return checks[1]
+                else:
+                    self.context = context
+                    self.rag.init_ollama()
+            return self.rag.get_request(prompt=prompt)
+        else:
+            return self.bot.get_request(prompt=prompt)
 
     def on_send(self, event=None):
         message = self.entry_bar.get().strip()
@@ -68,8 +65,7 @@ class ChatGUI(CTk.CTk):
     def select_file(self):
         file_path = filedialog.askopenfilename()
-        self.file_entry.delete(0, "end")
-        self.file_entry.insert(0, file_path)
+        self.file_entry.insert(1, file_path)
 
     def create_widgets(self):
         self.geometry("900x600")
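
The select_file hunk above swaps a clear-then-insert for a bare insert; the two tkinter Entry calls behave differently on repeated selections. A minimal sketch of the difference (entry and file_path are placeholders):

    entry.delete(0, "end")       # one side: clear the whole field first
    entry.insert(0, file_path)   # then insert at the start -> field shows only the new path

    entry.insert(1, file_path)   # other side: splices the path into any existing text at index 1
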
@@ -113,8 +109,6 @@ class ChatGUI(CTk.CTk):
         for message in self.history:
             message.pack_forget()
         self.history = []
-        self.bot.messanges = []
-        self.rag.init_ollama()

View File

@@ -6,6 +6,8 @@ from langchain_community.embeddings import OllamaEmbeddings
 from langchain_community.vectorstores import Chroma
 from langchain_community.chat_models import ChatOllama
 from langchain.chains import RetrievalQA
+from pathlib import Path
+import json
@@ -19,15 +21,8 @@ class Rag:
         self.base_url_llm = base_url_llm
         self.base_url_embed = base_url_embed
-        if self.is_empty(base_header):
-            self.base_header = ""
-        else:
-            self.base_header = base_header
-        if self.is_empty(embeddings_header):
-            self.embeddings_header = ""
-        else:
-            self.embeddings_header = embeddings_header
+        self.base_header = base_header
+        self.embeddings_header = embeddings_header
         self.embeddings = OllamaEmbeddings(model=embeddings, headers=self.embeddings_header, base_url=self.base_url_embed)
 
     def init_ollama(self):
@@ -54,6 +49,8 @@ class Rag:
             case "html":  # Corrected the typo in the variable name
                 loader = UnstructuredHTMLLoader(file_path=file_path)
                 data = loader.load()
+            case "json":
+                data = json.loads(Path(file_path).read_text())
             case "md":
                 loader = UnstructuredMarkdownLoader(file_path=file_path)
                 data = loader.load()
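
Note that unlike the loader branches, json.loads yields plain dicts or lists rather than Document objects, while split_documents in receive_data expects Documents. One hedged way to keep the types consistent (assuming langchain_core is available; the file name is a placeholder):

    import json
    from pathlib import Path
    from langchain_core.documents import Document

    raw = Path("data.json").read_text()
    data = [Document(page_content=raw, metadata={"source": "data.json"})]  # splittable
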
@@ -70,26 +67,17 @@ class Rag:
             return True
 
-    def is_empty(self, dictionary):
-        return len(dictionary) == 1 and list(dictionary.keys())[0] == '' and list(dictionary.values())[0] == ''
-
     def receive_data(self, file_path):
-        try:
-            if self.get_file(file_path):
-                text_splitter = RecursiveCharacterTextSplitter(chunk_size=250, chunk_overlap=0)
-                splitted = text_splitter.split_documents(self.data)
-                self.retriever = Chroma.from_documents(documents=splitted, embedding=self.embeddings).as_retriever()
-                return (False, "Success")
-            else:
-                return (True, f"'{file_path}' unsupported, read documentation for more information")
-        except (ValueError, AttributeError):
-            return (True, "An unexpected Error occuried")
+        if self.get_file(file_path):
+            text_splitter = RecursiveCharacterTextSplitter(chunk_size=250, chunk_overlap=0)
+            splitted = text_splitter.split_documents(self.data)
+            self.retriever = Chroma.from_documents(documents=splitted, embedding=self.embeddings).as_retriever()
+            return (False, "Success")
+        else:
+            return (True, f"'{file_path}' unsupported, read documentation for more information")
 
     def get_request(self, prompt):
         qachain=RetrievalQA.from_chain_type(self.chat_ollama, retriever=self.retriever)
-        try:
-            return qachain.invoke({"query": prompt})["result"]
-        except ValueError:
-            return (True, "An unexpected Error occuried")
+        return qachain.invoke({"query": prompt})["result"]
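
receive_data reports failure through an (error, message) tuple instead of raising, mirroring how get_response_from_ollama consumes it earlier in this diff. A sketch of a caller (rag stands for a configured Rag instance; the file name is a placeholder):

    checks = rag.receive_data(file_path="notes.md")
    if checks[0]:
        print("error:", checks[1])  # e.g. unsupported file type
    else:
        print(rag.get_request(prompt="Summarise the notes"))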

View File

@@ -10,9 +10,17 @@ CONFIG_FILE = 'tests/config.json'
 def setup_config():
     """Fixture to create a dummy config file before each test and remove it after."""
     # Create the config file
-    initial_config = {"mode": "terminal",
-                      "ollamaConfig":{"base_url": 'https://ai.fabelous.app/v1/ollama/generic', "embeddings_url": 'http://localhost:11434', "base_model": 'mistral',
-                      "embeddings_model": 'mxbai-embed-large', "base_header":{'': ''},"" :{'': ''}}}
+    initial_config = {
+        "mode": "terminal",
+        "ollamaConfig": {
+            "base_url": "http://localhost:11434",
+            "embeddings_url": "http://localhost:11434",
+            "base_model": "mistral",
+            "embeddings_model": "mxbai-embed-large",
+            "base_header": "",
+            "embeddings_header": ""
+        }
+    }
     with open(CONFIG_FILE, 'w') as f:
         json.dump(initial_config, f)
@@ -30,10 +38,8 @@ def test_configure(monkeypatch):
         'http://localhost:11434',  # Embeddings URL
         'mistral',  # Base model
         'mxbai-embed-large',  # Embeddings model
-        'Authorization',  # Base Model authentication key
-        'Token xzy',  # Base Model authentication value
-        'Authorization',  # Embeddings key for authentication
-        'Token xzy'  # Embeddings value for authentication
+        '{"Authorization": "Token xzy"}',  # Base header for authentication
+        '{"Authorization": "Token xzy"}',  # Embeddings header for authentication
     ])
 
     monkeypatch.setattr('builtins.input', lambda _: next(inputs))
@@ -41,11 +47,17 @@ def test_configure(monkeypatch):
     config = configure()
 
     # Expected configurations based on the inputs
-    expected_config = {"mode": "terminal",
-                       "ollamaConfig":{"base_url": 'https://ai.fabelous.app/v1/ollama/generic', "embeddings_url": 'http://localhost:11434', "base_model": 'mistral',
-                       "embeddings_model": 'mxbai-embed-large', "base_header":{'Authorization': 'Token xzy'}
-                       ,"embeddings_header" :{'Authorization': 'Token xzy'}}}
+    expected_config = {
+        "mode": "terminal",
+        "ollamaConfig": {
+            "base_url": "https://ai.fabelous.app/v1/ollama/generic",
+            "embeddings_url": "http://localhost:11434",
+            "base_model": "mistral",
+            "embeddings_model": "mxbai-embed-large",
+            "base_header": '{"Authorization": "Token xzy"}',
+            "embeddings_header": '{"Authorization": "Token xzy"}'
+        }
+    }
 
     assert config['mode'] == expected_config['mode'], "Mode configuration does not match."
     assert config['ollamaConfig'] == expected_config['ollamaConfig'], "OllamaConfig does not match."
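
The test drives configure() by monkeypatching builtins.input with an iterator, so each prompt consumes the next canned answer. The same pattern in a self-contained sketch (run with pytest; ask_name is hypothetical):

    def ask_name():
        return input("Name: ")

    def test_ask_name(monkeypatch):
        inputs = iter(["Ada"])
        monkeypatch.setattr('builtins.input', lambda _: next(inputs))
        assert ask_name() == "Ada"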