working llm response and changed to respond in the article's language

Falko Victor Habel 2024-10-08 17:17:30 +02:00
parent d6bbdc3767
commit 2436eab591
2 changed files with 81 additions and 9 deletions

View File

@@ -10,16 +10,15 @@ class ArticleRater:
            token = f.read().strip()
        return {"Authorization": f"Token {token}"}

-    def get_response(self, article, result):
+    def get_response(self, article, result, confidence):
        ollama_params = {
            "base_url": self.client,
            "model": "mistral-nemo:12b-instruct-2407-q8_0",
            "headers": self.headers,
-            "system": "Give a short explanation max 1-3 sentence why this article is rated like that. Begin the RESPONSE ALWAYS with the RESULT and the confidence"
+            "system": "ANTWORTE in der SPRACHE, die auch der ARTIKEL hat. Erkläre in 1-3 Sätzen, warum es sich um solch einen Artikel handeln könnte und woran man es erkennen kann. Beginne die NACHRICHT immer mit dem Resultat und der Wahrscheinlichkeit"
        }
-        message = (f"A Machine Learning Model labeled an article the following: "
-                   f"Result: {result['result']}, Confidence: {result['confidence']}, The Article: {article}")
+        message = (f"A Machine Learning Model labeled an article the following, but RESPOND in the LANGUAGE FROM the ARTICLE: Result: {result} Confidence: {confidence}, The Article: {article}")

        # Initialize the Ollama object with the prepared parameters
        llm = Ollama(**ollama_params)
@@ -44,9 +43,10 @@ Derzeit ist noch unklar, wie es nach dem schweren Cringe-Attentat um den Präsid
Der Secret Service und das FBI wollen jetzt Untersuchungen einleiten, wie es zu dem für alle Beteiligten höchst unangenehmen Vorfall kommen konnte.
Bizarr: Elon Musk befindet sich trotz seiner Tat nach wie vor auf freiem Fuß und könnte jederzeit wieder auf republikanischen Wahlkampfveranstaltungen für peinliche Momente sorgen."""
-result = {"result": "FAKE", "confidence": 0.9996}
+result = "FAKE"
+confidence = 0.9996

# Capture the stream response
-response_stream = article_rater.get_response(article, result)
+response_stream = article_rater.get_response(article, result, confidence=confidence)
for chunk in response_stream:
    print(chunk, end='', flush=True)
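
The hunk above cuts off right after the Ollama constructor, so the diff never shows what get_response returns; the test loop implies an iterator of text chunks. A minimal sketch of the presumed tail, assuming langchain_community's Ollama wrapper (whose stream() lazily yields str chunks) — only the final return line is new, the rest mirrors the diff:

    # Sketch: presumed completion of ArticleRater.get_response (elided in the diff)
    from langchain_community.llms import Ollama

    def get_response(self, article, result, confidence):
        ollama_params = {
            "base_url": self.client,
            "model": "mistral-nemo:12b-instruct-2407-q8_0",
            "headers": self.headers,
            "system": "...",  # the German system prompt from the diff above
        }
        message = (f"A Machine Learning Model labeled an article the following, but RESPOND "
                   f"in the LANGUAGE FROM the ARTICLE: Result: {result} Confidence: {confidence}, "
                   f"The Article: {article}")
        llm = Ollama(**ollama_params)
        # stream() yields chunks as they are generated, so the caller can
        # print or render the explanation incrementally.
        return llm.stream(message)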

View File

@@ -1,6 +1,7 @@
from views.mainScreen import MainFrame
from models.data import TextData
from Ai.interence import VeraMindInference
+from Ai.llm import ArticleRater
class MainFrameController:
@@ -8,7 +9,7 @@ class MainFrameController:
    def __init__(self,frame:MainFrame) -> None:
        self.frame = frame
        self.model_inference = VeraMindInference('VeraMind-Mini')
+        self.rater = ArticleRater()

    def get_textdata(self) -> TextData:
        text_data = TextData()
@@ -20,12 +21,83 @@ class MainFrameController:
    def press_check_button(self):
        text_data = self.get_textdata()
-        print(f"text:{text_data.text}")
+        print(f"text:{text_data}")
        self.prediction(text_data)
        self.frame.output_textbox.configure(state="normal")
        self.frame.output_textbox.delete("0.0", "end")
        self.frame.output_textbox.insert("0.0",f"{text_data.get_output()}")
+        response_stream = self.rater.get_response(text_data.text, text_data.result, text_data.confidence)
+        display_chunks = []
+        highlight_chunks = []
+        DISPLAY_CHUNK_SIZE = 1  # Display as each chunk arrives
+        HIGHLIGHT_BATCH_SIZE = 5
+        OVERLAP_SIZE = 2
+
+        for chunk in response_stream:
+            display_chunks.append(chunk)
+            highlight_chunks.append(chunk)
+
+            # Display chunk
+            if len(display_chunks) == DISPLAY_CHUNK_SIZE:
+                start_index = self.frame.output_textbox.index("end-1c")
+                self.frame.output_textbox.insert("end", ''.join(display_chunks))
+                self.frame.output_textbox.see("end")  # Scroll to the end
+                self.frame.update_idletasks()  # Update the textbox immediately
+                display_chunks = []
+
+            # Process highlighting
+            if len(highlight_chunks) >= HIGHLIGHT_BATCH_SIZE:
+                start_index = self.frame.output_textbox.index(f"end-1c-{sum(len(c) for c in highlight_chunks)}c")
+                end_index = self.frame.output_textbox.index("end-1c")
+                self.highlight_words(start_index, end_index)
+                # Maintain overlap and reset for next batch
+                highlight_chunks = highlight_chunks[-OVERLAP_SIZE:]
+
+        # Highlight remaining chunks if any
+        if highlight_chunks:
+            start_index = self.frame.output_textbox.index(f"end-1c-{sum(len(c) for c in highlight_chunks)}c")
+            end_index = self.frame.output_textbox.index("end-1c")
+            self.highlight_words(start_index, end_index)

        self.frame.output_textbox.configure(state="disabled")
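
The batch bookkeeping above counts characters backwards from the end of the textbox. A Tk Text widget always keeps one implicit trailing newline, so the last streamed character sits at "end-1c" and the last n characters begin at "end-1c-{n}c" — hence the extra -1c in the index arithmetic. A tiny self-contained demonstration (the sample string is hypothetical):

    import tkinter as tk

    root = tk.Tk()
    box = tk.Text(root)
    box.insert("end", "Result: FAKE, Confidence: 0.9996")

    tail = "FAKE, Confidence: 0.9996"
    # The last visible char is at "end-1c" because of the implicit trailing "\n".
    start = box.index(f"end-1c-{len(tail)}c")
    assert box.get(start, "end-1c") == tail
    root.destroy()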
+    def highlight_words(self, start_index, end_index):
+        content = self.frame.output_textbox.get(start_index, end_index)
+        print(content)
+        current_index = start_index
+        while True:
+            # Find "FAKE"
+            fake_index = content.find("FAKE")
+            if fake_index != -1:
+                fake_pos_start = f"{current_index}+{fake_index}c"
+                fake_pos_end = f"{fake_pos_start}+4c"
+                self.frame.output_textbox.tag_add("fake_color", fake_pos_start, fake_pos_end)
+                self.frame.output_textbox.tag_config("fake_color", foreground="red")
+                content = content[fake_index + 4:]
+                current_index = fake_pos_end
+            else:
+                break
+
+        current_index = start_index
+        content = self.frame.output_textbox.get(start_index, end_index)
+        while True:
+            # Find "REAL"
+            real_index = content.find("REAL")
+            if real_index != -1:
+                real_pos_start = f"{current_index}+{real_index}c"
+                real_pos_end = f"{real_pos_start}+4c"
+                self.frame.output_textbox.tag_add("real_color", real_pos_start, real_pos_end)
+                self.frame.output_textbox.tag_config("real_color", foreground="green")
+                content = content[real_index + 4:]
+                current_index = real_pos_end
+            else:
+                break
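
The FAKE and REAL loops are near-duplicates; tkinter's built-in Text.search can express the same scan once per term. A sketch of that variant (the helper name highlight_term is mine, not in the repo):

    def highlight_term(textbox, term, tag, color, start_index, end_index):
        # Text.search returns the index of the next match, or "" when exhausted.
        textbox.tag_config(tag, foreground=color)
        index = start_index
        while True:
            index = textbox.search(term, index, stopindex=end_index)
            if not index:
                break
            match_end = f"{index}+{len(term)}c"
            textbox.tag_add(tag, index, match_end)
            index = match_end

    # Usage mirroring highlight_words:
    # highlight_term(self.frame.output_textbox, "FAKE", "fake_color", "red", start_index, end_index)
    # highlight_term(self.frame.output_textbox, "REAL", "real_color", "green", start_index, end_index)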
    def prediction(self, text_data:TextData) -> TextData:
        result = self.model_inference.predict(text_data.text)
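
The diff ends mid-method. Judging from the dict keys the old test harness read (result['result'], result['confidence']) and the text_data.result / text_data.confidence fields consumed in press_check_button, prediction presumably copies the model output onto the TextData object; a hedged guess at the remainder:

    # Presumed remainder of prediction(); the key names come from the old test
    # harness in the first file, the field names from press_check_button.
    def prediction(self, text_data: TextData) -> TextData:
        result = self.model_inference.predict(text_data.text)
        text_data.result = result["result"]          # e.g. "REAL" or "FAKE"
        text_data.confidence = result["confidence"]  # e.g. 0.9996
        return text_data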