Compare commits
No commits in common. "0d1153503fe61f8082fa50d18471c88a3cfeae89" and "6a9f4ce20382ef1256dfc2433c0d236ecdb78d96" have entirely different histories.
0d1153503f ... 6a9f4ce203
@@ -5,11 +5,7 @@
     "embeddings_url": "http://localhost:11434",
     "base_model": "mistral",
     "embeddings_model": "mxbai-embed-large",
-    "base_header": {
-        "": ""
-    },
-    "embeddings_header": {
-        "": ""
-    }
+    "base_header": "",
+    "embeddings_header": ""
     }
 }
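Note on the schema change above: the two header fields move from nested key/value objects (with {"": ""} as the empty placeholder) to flat strings that are expected to hold a JSON-encoded header dict, or to stay empty. A minimal sketch of how such a value is decoded, mirroring the try/except pattern that project.py introduces below (variable names here are illustrative, not from the repo):

import json

# New-style config values: headers are JSON strings; "" means "no header".
for raw in ('{"Authorization": "Token xzy"}', ""):
    try:
        headers = json.loads(raw)  # the dict the Ollama clients expect
    except json.decoder.JSONDecodeError:
        headers = ""  # empty or non-JSON value: treated as "no header"
    print(headers)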
project.py (22 changed lines)
@@ -31,9 +31,14 @@ def main():
     elif config["mode"] == "terminal":
         handle_terminal(args)
     elif config["mode"] == "gui":
+        try:
+            config["ollamaConfig"]["base_header"] = json.loads(config["ollamaConfig"]["base_header"])
+            config["ollamaConfig"]["embeddings_header"] = json.loads(config["ollamaConfig"]["embeddings_header"])
+        except json.decoder.JSONDecodeError:
+            """can be ignored if no header needed"""
+            pass
         # start gui
         try:
-            print(config["ollamaConfig"]["embeddings_header"] )
             gui = ChatGUI(**config["ollamaConfig"])
             gui.mainloop()
         except TypeError:

@@ -53,14 +58,11 @@ def configure():
     embeddings_url = input("Enter embeddings URL (standard: http://localhost:11434): ") or "http://localhost:11434"
     base_model = input("Enter base model (standard: 'mistral'): ") or "mistral"
     embeddings_model = input("Enter embeddings model (standard: 'mxbai-embed-large'): ") or "mxbai-embed-large"
-    base_header_key = input("Authentication Key for base model (standard: empty): ") or ""
-    base_header_value = input("Authentication Value for base model (standard: empty): ") or ""
-    embeddings_header_key = input("Authentication Key for embeddings model (standard: empty): ") or ""
-    embeddings_header_value = input("Authentication Value for embeddings model (standard: empty): ") or ""
+    base_header = input("Authentication for base model (standard: empty): ") or ""
+    embeddings_header = input("Authentication for embeddings model (standard: empty): ") or ""

     return {"mode": mode, "ollamaConfig":{ "base_url": base_llm_url, "embeddings_url": embeddings_url, "base_model": base_model,
-                                          "embeddings_model": embeddings_model, "base_header":{base_header_key: base_header_value}
-                                          ,"embeddings_header" :{embeddings_header_key: embeddings_header_value}}}
+                                          "embeddings_model": embeddings_model, "base_header": base_header, "embeddings_header": embeddings_header}}

 def read_config():
     if not os.path.exists(CONFIG_FILE):

@@ -86,6 +88,12 @@ def handle_change_mode(args):

 def handle_terminal(args):
     config = read_config()
+    try:
+        config["ollamaConfig"]["base_header"] = json.loads(config["ollamaConfig"]["base_header"])
+        config["ollamaConfig"]["embeddings_header"] = json.loads(config["ollamaConfig"]["embeddings_header"])
+    except json.decoder.JSONDecodeError:
+        """can be ignored if no header needed"""
+        pass

     if args.p:
         try:
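Note: the same decode-and-ignore block now appears twice, in the gui branch of main() and in handle_terminal(). A hypothetical helper (not part of this diff) that would keep the behaviour in one place:

import json

def parse_headers(ollama_config):
    """Decode the JSON header strings in place; leave them untouched if empty or invalid."""
    for key in ("base_header", "embeddings_header"):
        try:
            ollama_config[key] = json.loads(ollama_config[key])
        except json.decoder.JSONDecodeError:
            pass  # can be ignored if no header needed
    return ollama_config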
@@ -4,9 +4,6 @@ class OllamaChatBot:
     def __init__(self, base_url, model, headers):
         self.base_url = base_url
         self.model = model
-        if self.is_empty(headers):
-            self.headers = ""
-        else:
         self.headers = headers
         self.messanges = []

@@ -22,10 +19,6 @@ class OllamaChatBot:
             headers = self.headers
         )

-    def is_empty(self, dictionary):
-        return len(dictionary) == 1 and list(dictionary.keys())[0] == '' and list(dictionary.values())[0] == ''
-
-

     def get_request(self, prompt):
         messanges = []

@@ -34,7 +27,4 @@ class OllamaChatBot:
             messanges = messanges[:5]
         else:
             messanges = self.messanges
-        try:
         return self.ollama.invoke(messanges).content
-        except ValueError:
-            return "An unexpected Error occuried"
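Note: since project.py now hands OllamaChatBot either a parsed dict or an empty string, the constructor no longer needs the is_empty() placeholder check, and get_request() drops its broad except ValueError. After these hunks the constructor reduces to plain assignments, roughly as follows (identifiers as in the repo, including the messanges spelling):

class OllamaChatBot:
    def __init__(self, base_url, model, headers):
        self.base_url = base_url
        self.model = model
        # headers arrive pre-parsed (dict) or as "" when unset; no more
        # is_empty({"": ""}) placeholder handling.
        self.headers = headers
        self.messanges = []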
@@ -25,7 +25,6 @@ class ChatGUI(CTk.CTk):
         self.start_message_processing_thread()

     def get_response_from_ollama(self, prompt, context):
-        try:
         if context != "":
             if self.context != context:
                 checks = self.rag.receive_data(file_path=context)

@@ -39,8 +38,6 @@ class ChatGUI(CTk.CTk):

         else:
             return self.bot.get_request(prompt=prompt)
-        except ValueError:
-            return "An unexpected Error occuried"

     def on_send(self, event=None):
         message = self.entry_bar.get().strip()

@@ -68,8 +65,7 @@ class ChatGUI(CTk.CTk):

     def select_file(self):
         file_path = filedialog.askopenfilename()
-        self.file_entry.delete(0, "end")
-        self.file_entry.insert(0, file_path)
+        self.file_entry.insert(1, file_path)

     def create_widgets(self):
         self.geometry("900x600")

@@ -113,8 +109,6 @@ class ChatGUI(CTk.CTk):
         for message in self.history:
             message.pack_forget()
         self.history = []
-        self.bot.messanges = []
-        self.rag.init_ollama()


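Note on select_file(): the new version inserts at index 1 without first clearing the entry. In Tkinter entry semantics that splices the path into any existing text rather than replacing it, so repeated file selections may accumulate; the removed delete(0, "end") call was what reset the field. For comparison, the pre-change behaviour quoted from the removed lines:

def select_file(self):
    file_path = filedialog.askopenfilename()
    self.file_entry.delete(0, "end")      # clear any previous path
    self.file_entry.insert(0, file_path)  # insert the new one at the start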
@@ -6,6 +6,8 @@ from langchain_community.embeddings import OllamaEmbeddings
 from langchain_community.vectorstores import Chroma
 from langchain_community.chat_models import ChatOllama
 from langchain.chains import RetrievalQA
+from pathlib import Path
+import json



@@ -19,14 +21,7 @@ class Rag:

         self.base_url_llm = base_url_llm
         self.base_url_embed = base_url_embed
-
-        if self.is_empty(base_header):
-            self.base_header = ""
-        else:
         self.base_header = base_header
-        if self.is_empty(embeddings_header):
-            self.embeddings_header = ""
-        else:
         self.embeddings_header = embeddings_header
         self.embeddings = OllamaEmbeddings(model=embeddings, headers=self.embeddings_header, base_url=self.base_url_embed)

@@ -54,6 +49,8 @@ class Rag:
             case "html": # Corrected the typo in the variable name
                 loader = UnstructuredHTMLLoader(file_path=file_path)
                 data = loader.load()
+            case "json":
+                data = json.loads(Path(file_path).read_text())
             case "md":
                 loader = UnstructuredMarkdownLoader(file_path=file_path)
                 data = loader.load()

@@ -70,13 +67,8 @@ class Rag:
             return True


-    def is_empty(self, dictionary):
-        return len(dictionary) == 1 and list(dictionary.keys())[0] == '' and list(dictionary.values())[0] == ''
-
-

     def receive_data(self, file_path):
-        try:
         if self.get_file(file_path):
             text_splitter = RecursiveCharacterTextSplitter(chunk_size=250, chunk_overlap=0)
             splitted = text_splitter.split_documents(self.data)

@@ -84,12 +76,8 @@ class Rag:
             return (False, "Success")
         else:
             return (True, f"'{file_path}' unsupported, read documentation for more information")
-        except (ValueError, AttributeError):
-            return (True, "An unexpected Error occuried")
+
     def get_request(self, prompt):
         qachain=RetrievalQA.from_chain_type(self.chat_ollama, retriever=self.retriever)
-        try:
         return qachain.invoke({"query": prompt})["result"]
-        except ValueError:
-            return (True, "An unexpected Error occuried")

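Note: the new "json" branch in Rag reads the file directly via the freshly imported pathlib.Path and json instead of an Unstructured* document loader. A standalone sketch of that branch (hypothetical helper name):

import json
from pathlib import Path

def load_json(file_path):
    # Mirrors the new "json" case in Rag: parse the raw file text directly.
    return json.loads(Path(file_path).read_text())

Unlike the loader branches, this yields plain Python objects rather than LangChain Document instances, so the later text_splitter.split_documents(self.data) call may need to account for that.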
@@ -10,9 +10,17 @@ CONFIG_FILE = 'tests/config.json'
 def setup_config():
     """Fixture to create a dummy config file before each test and remove it after."""
     # Create the config file
-    initial_config = {"mode": "terminal",
-                      "ollamaConfig":{"base_url": 'https://ai.fabelous.app/v1/ollama/generic', "embeddings_url": 'http://localhost:11434', "base_model": 'mistral',
-                                      "embeddings_model": 'mxbai-embed-large', "base_header":{'': ''},"embeddings_header" :{'': ''}}}
+    initial_config = {
+        "mode": "terminal",
+        "ollamaConfig": {
+            "base_url": "http://localhost:11434",
+            "embeddings_url": "http://localhost:11434",
+            "base_model": "mistral",
+            "embeddings_model": "mxbai-embed-large",
+            "base_header": "",
+            "embeddings_header": ""
+        }
+    }
     with open(CONFIG_FILE, 'w') as f:
         json.dump(initial_config, f)

@@ -30,10 +38,8 @@ def test_configure(monkeypatch):
         'http://localhost:11434', # Embeddings URL
         'mistral', # Base model
         'mxbai-embed-large', # Embeddings model
-        'Authorization', # Base Model authentication key
-        'Token xzy', # Base Model authentication value
-        'Authorization', # Embeddings key for authentication
-        'Token xzy' # Embeddings value for authentication
+        '{"Authorization": "Token xzy"}', # Base header for authentication
+        '{"Authorization": "Token xzy"}', # Embeddings header for authentication
     ])

     monkeypatch.setattr('builtins.input', lambda _: next(inputs))

@@ -41,11 +47,17 @@ def test_configure(monkeypatch):
     config = configure()

     # Expected configurations based on the inputs
-    expected_config = {"mode": "terminal",
-                       "ollamaConfig":{"base_url": 'https://ai.fabelous.app/v1/ollama/generic', "embeddings_url": 'http://localhost:11434', "base_model": 'mistral',
-                                       "embeddings_model": 'mxbai-embed-large', "base_header":{'Authorization': 'Token xzy'}
-                                       ,"embeddings_header" :{'Authorization': 'Token xzy'}}}
-
+    expected_config = {
+        "mode": "terminal",
+        "ollamaConfig": {
+            "base_url": "https://ai.fabelous.app/v1/ollama/generic",
+            "embeddings_url": "http://localhost:11434",
+            "base_model": "mistral",
+            "embeddings_model": "mxbai-embed-large",
+            "base_header": '{"Authorization": "Token xzy"}',
+            "embeddings_header": '{"Authorization": "Token xzy"}'
+        }
+    }

     assert config['mode'] == expected_config['mode'], "Mode configuration does not match."
     assert config['ollamaConfig'] == expected_config['ollamaConfig'], "OllamaConfig does not match."
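Note: the updated test keeps the header values as JSON strings end to end, matching the new configure() contract, with parsing deferred to project.py. A quick illustration of the invariant the assertions rely on (hypothetical snippet):

import json

stored = '{"Authorization": "Token xzy"}'  # what configure() returns and the test expects
assert json.loads(stored) == {"Authorization": "Token xzy"}  # what project.py later builds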