Merge pull request 'llm integration' (#10) from llm_integration into develop

Reviewed-on: Berufsschule/Veracity_AI#10
Falko Victor Habel 2024-10-08 10:19:05 +00:00
commit 3d1f602b44
2 changed files with 41 additions and 0 deletions

.gitignore

@@ -155,6 +155,7 @@ cython_debug/
 #ML
 VeraMind-Mini/
+Token.txt
 # OS generated files #
 ######################
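Note (editor's addition, not part of the diff): the newly ignored Token.txt is the file that the llm.py module below reads its API token from. A minimal sketch, assuming the token is a single string on one line; the file name matches the code, the token value is a placeholder:

    # make_token.py - hypothetical helper, not part of this commit.
    # Writes Token.txt in the format _load_token() in llm.py expects:
    # a single raw token string on one line.
    with open("Token.txt", "w") as f:
        f.write("my-secret-api-token\n")  # placeholder; use the real API token here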

src/Ai/llm.py

@@ -0,0 +1,40 @@
from langchain_community.llms import Ollama


class ArticleRater:
    def __init__(self):
        self.client = "https://ai.fabelous.app/v1/ollama/generic"
        self.headers = self._load_token("Token.txt")

    def _load_token(self, token_path):
        # Read the API token from disk and build the Authorization header
        with open(token_path, "r") as f:
            token = f.read().strip()
        return {"Authorization": f"Token {token}"}

    def get_response(self, article, result):
        ollama_params = {
            "base_url": self.client,
            "model": "mistral-nemo:12b-instruct-2407-q8_0",
            "headers": self.headers,
            "system": "Give a short explanation, max 1-3 sentences, of why this article is rated like that."
        }
        message = (f"A machine learning model labeled an article as follows: "
                   f"Result: {result['result']}, Confidence: {result['confidence']}, The Article: {article}")
        # Initialize the Ollama object with the prepared parameters
        llm = Ollama(**ollama_params)
        # Return the response stream
        return llm.stream(message)


# Usage
if __name__ == "__main__":
    article_rater = ArticleRater()
    article = "Example article content."
    result = {"result": "REAL", "confidence": 0.754}
    # Capture the stream response
    response_stream = article_rater.get_response(article, result)
    for chunk in response_stream:
        print(chunk, end='', flush=True)
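
A minimal test sketch (editor's addition, not part of this commit) showing how _load_token could be verified with pytest's tmp_path fixture. The import path and test file name are assumptions about how the module would be made importable:

    # test_llm.py - hypothetical test sketch, assuming llm.py is importable
    from llm import ArticleRater


    def test_load_token(tmp_path):
        # Write a fake token file, then check the Authorization header built from it
        token_file = tmp_path / "Token.txt"
        token_file.write_text("my-secret-token\n")
        # Bypass __init__, which reads Token.txt from the working directory
        rater = ArticleRater.__new__(ArticleRater)
        assert rater._load_token(str(token_file)) == {"Authorization": "Token my-secret-token"}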