llm optional gemacht

This commit is contained in:
Falko Victor Habel 2024-10-15 21:57:24 +02:00
parent 7a1aae3d45
commit 69fb3eb406
1 changed file with 20 additions and 52 deletions

View File

@ -7,12 +7,7 @@ from utils.database.database import FakeNewsChecker
from models.provider import Provider
from collections import Counter
from Ai.llm import ArticleRater
from Ai.Token import get_token
BAD_WORDS = ["FAKE", "SATIRE", "Fake", "fake", "fake news", "Fake News", "FakeNews"]
GOOD_WORDS = ["REAL", "real ", "Real", "Reale News", "reale", "reale News", "realen", "Real News"]
BAD_COLOR = "#ff8080"
GOOD_COLOR = "#80ff8f"
WORDS = BAD_WORDS + GOOD_WORDS
class MainFrameController:
@ -47,63 +42,36 @@ class MainFrameController:
return text_data
def press_check_button(self):
self.frame.result_label.configure(text="", fg_color="#333333")
self.frame.confidence_label.configure(text="", fg_color="#333333")
text_data = self.get_text_data()
if not text_data.text.strip():
return
text_data = self._predict(text_data)
self._add_to_db(text_data)
self.frame.output_textbox.configure(state="normal")
self.frame.output_textbox.delete("0.0", "end")
response_stream = self.rater.get_response(text_data.text, text_data.result, float(f"{text_data.confidence * 100:.2f}")) confidence = text_data.confidence * 100
self.frame.confidence_label.configure(text=f"{confidence:.2f}%")
highlight_buffer = deque(maxlen=5) result_color = "green" if text_data.result == "REAL" else "red"
self.frame.result_label.configure(text=text_data.result, fg_color=result_color)
for chunk in response_stream: confidence_color = "green" if confidence > 80 else ("orange" if confidence > 50 else "red")
# Display the chunk immediately self.frame.confidence_label.configure(fg_color=confidence_color)
self.frame.output_textbox.insert("end", chunk)
self.frame.output_textbox.see("end") if get_token().strip():
self.frame.update_idletasks() response_stream = self.rater.get_response(text_data.text, text_data.result, confidence)
# Add to highlight buffer for chunk in response_stream:
highlight_buffer.append(chunk) self.frame.output_textbox.insert("end", chunk)
self.frame.output_textbox.see("end")
self.frame.update_idletasks()
# Process highlighting when buffer is full
if len(highlight_buffer) == 5:
self._process_highlighting(highlight_buffer)
# Process any remaining chunks in the buffer
if highlight_buffer:
self._process_highlighting(highlight_buffer)
self.frame.output_textbox.configure(state="disabled")
self.update_provider_list()
def _process_highlighting(self, highlight_buffer):
start_index = self.frame.output_textbox.index(f"end-{sum(len(c) for c in highlight_buffer)}c")
end_index = self.frame.output_textbox.index("end")
self._highlight_words(start_index, end_index)
# Keep overlap of 2 chunks
highlight_buffer = deque(list(highlight_buffer)[-3:], maxlen=5)
def _highlight_words(self, start_index, end_index):
content = self.frame.output_textbox.get(start_index, end_index)
for word in WORDS:
start = 0
while True:
pos = content.find(word, start)
if pos == -1:
break
word_start = f"{start_index}+{pos}c"
word_end = f"{word_start}+{len(word)}c"
tag_name = f"{word.lower()}_color"
self.frame.output_textbox.tag_add(tag_name, word_start, word_end)
if word in BAD_WORDS:
self.frame.output_textbox.tag_config(tag_name, foreground=BAD_COLOR)
elif word in GOOD_WORDS:
self.frame.output_textbox.tag_config(tag_name, foreground=GOOD_COLOR)
start = pos + len(word)
def _predict(self, text_data: TextData) -> TextData:
"""
Make a prediction using the VeraMind model.