diff --git a/README.md b/README.md
index 84c0e48..bdc49e7 100644
--- a/README.md
+++ b/README.md
@@ -40,4 +40,4 @@ To use the Fake News Checker application, follow these steps:
 
 ## License
 
-This application is licensed under the MIT license. See the [LICENSE](LICENSE) file for more details.
\ No newline at end of file
+This application is licensed under the Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International license. See the [LICENSE](LICENSE) file for more details.
\ No newline at end of file
diff --git a/docs/use-case-diagram.png b/docs/use-case-diagram.png
new file mode 100644
index 0000000..56927d8
Binary files /dev/null and b/docs/use-case-diagram.png differ
diff --git a/src/Ai/llm.py b/src/Ai/llm.py
index f2a566c..c907087 100644
--- a/src/Ai/llm.py
+++ b/src/Ai/llm.py
@@ -1,4 +1,5 @@
-from langchain_community.llms import Ollama
+from langchain_ollama import ChatOllama
+from langchain_core.messages import AIMessage
 import os
 
 class ArticleRater:
@@ -6,6 +7,8 @@ class ArticleRater:
         self.client = "https://ai.fabelous.app/v1/ollama/generic"
         self.token = self._get_token()
         self.headers = {"Authorization": f"Token {self.token}"}
+        self.model = "phi3.5:3.8b-mini-instruct-q4_K_M"
+        self.llm = ChatOllama(model=self.model, client_kwargs={'headers': self.headers}, base_url=self.client)
 
     def _get_token(self):
         if os.path.exists("Token/Token.txt"):
@@ -15,22 +18,16 @@ class ArticleRater:
             return None
 
     def get_response(self, article, result, confidence):
-        ollama_params = {
-            "base_url": self.client,
-            "model": "mistral-nemo:12b-instruct-2407-q8_0",
-            "headers": self.headers,
-            "system": """A machine learning model has evaluated a text to determine whether it is fake news or real news.
-                         Explain in 1-2 sentences why the model reached this decision.
-                         DO NOT EXPLAIN ANY HEADINGS or the like. You are given a TEXT and should explain how the RESULT came about."""
-        }
+        messages = [
+            ("system", """A machine learning model has evaluated a text to determine whether it is fake news or real news.
+                          Explain in 1-2 sentences why the model reached this decision.
+                          DO NOT EXPLAIN ANY HEADINGS or the like. You are given a TEXT and should explain how the RESULT came about."""),
+            ("human", f"{article}, result: {result}, confidence {confidence}")
+        ]
 
-        message = (f"{article}, result: {result}, confidence {confidence}")
-
-        # Initialize the Ollama object with the prepared parameters
-        llm = Ollama(**ollama_params)
 
         # Return the response stream
-        return llm.stream(message)
+        return self.llm.stream(messages)
 
 # Usage
 if __name__ == "__main__":
@@ -43,4 +40,4 @@ if __name__ == "__main__":
     # Capture the stream response
     response_stream = article_rater.get_response(article, result, confidence=confidence)
     for chunk in response_stream:
-        print(chunk, end='', flush=True)
+        print(chunk.content, end="")
diff --git a/src/controller/mainFrameController.py b/src/controller/mainFrameController.py
index bf51879..d990722 100644
--- a/src/controller/mainFrameController.py
+++ b/src/controller/mainFrameController.py
@@ -67,7 +67,7 @@ class MainFrameController:
         response_stream = self.rater.get_response(text_data.text, text_data.result, confidence)
 
         for chunk in response_stream:
-            self.frame.output_textbox.insert("end", chunk)
+            self.frame.output_textbox.insert("end", chunk.content)
             self.frame.output_textbox.see("end")
             self.frame.update_idletasks()
 
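
Note on the API change: the switch from `chunk` to `chunk.content` at both call sites follows directly from the migration. The old `langchain_community.llms.Ollama` class streams plain strings, while `langchain_ollama.ChatOllama` streams `AIMessageChunk` objects. A minimal sketch of the new streaming interface, assuming a plain local Ollama server on the default port (the model name is taken from the diff; the base URL and prompt text are illustrative, and the real code instead routes through the authenticated fabelous proxy via `client_kwargs={'headers': ...}`):

```python
from langchain_ollama import ChatOllama

# Assumed local setup for illustration only; the application code in this
# diff points base_url at https://ai.fabelous.app/v1/ollama/generic and
# passes an Authorization header through client_kwargs.
llm = ChatOllama(
    model="phi3.5:3.8b-mini-instruct-q4_K_M",
    base_url="http://localhost:11434",
)

messages = [
    ("system", "Explain in 1-2 sentences why the classifier reached its decision."),
    ("human", "some article text, result: REAL, confidence 0.93"),
]

# ChatOllama.stream() yields AIMessageChunk objects rather than plain
# strings, so the generated text lives in each chunk's .content attribute.
for chunk in llm.stream(messages):
    print(chunk.content, end="", flush=True)
```

The same pattern explains the controller change: the Tk textbox `insert` call receives `chunk.content` because inserting the chunk object itself would no longer produce the raw text.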