diff --git a/.gitignore b/.gitignore
index e18b131..6c16453 100644
--- a/.gitignore
+++ b/.gitignore
@@ -155,6 +155,7 @@ cython_debug/
 
 #ML
 VeraMind-Mini/
+Token.txt
 
 # OS generated files #
 ######################
diff --git a/src/Ai/llm.py b/src/Ai/llm.py
new file mode 100644
index 0000000..8343f19
--- /dev/null
+++ b/src/Ai/llm.py
@@ -0,0 +1,44 @@
+from langchain_community.llms import Ollama
+
+class ArticleRater:
+    """Ask a remote Ollama LLM for a short explanation of a model's article rating."""
+
+    def __init__(self):
+        self.client = "https://ai.fabelous.app/v1/ollama/generic"
+        self.headers = self._load_token("Token.txt")
+
+    def _load_token(self, token_path):
+        """Read the API token from *token_path* and return the auth header dict."""
+        with open(token_path, "r") as f:
+            token = f.read().strip()
+        return {"Authorization": f"Token {token}"}
+
+    def get_response(self, article, result):
+        """Return a streaming LLM response explaining *result* for *article*."""
+        ollama_params = {
+            "base_url": self.client,
+            "model": "mistral-nemo:12b-instruct-2407-q8_0",
+            "headers": self.headers,
+            "system": "Give a short explanation max 1-3 sentence why this article is rated like that"
+        }
+
+        message = (f"A Machine Learning Model labeled an article the following: "
+                   f"Result: {result['result']}, Confidence: {result['confidence']}, The Article: {article}")
+
+        # Initialize the Ollama object with the prepared parameters
+        llm = Ollama(**ollama_params)
+
+        # Return the response stream
+        return llm.stream(message)
+
+# Usage
+if __name__ == "__main__":  # was "main": the guard could never match, so the demo never ran
+    article_rater = ArticleRater()
+
+    article = "Example article content."
+    result = {"result": "REAL", "confidence": 0.754}
+
+    # Capture the stream response
+    response_stream = article_rater.get_response(article, result)
+    for chunk in response_stream:
+        print(chunk, end='', flush=True)