Merge pull request 'docs_with_new_ui' (#16) from docs_with_new_ui into develop
Reviewed-on: Berufsschule/Veracity_AI#16
commit 5c7d3211b0
@@ -155,7 +155,7 @@ cython_debug/
 #ML
 VeraMind-Mini/
-Token.py
+Token.txt
 
 # OS generated files #
 ######################
 
README.md
@@ -1,3 +1,43 @@

The previous three-line README ("# VeracityAI" / "Projekt zum erkennen von Fake News.", German for "Project for detecting fake news.") is replaced with the following documentation:

# Veracity_AI

## Overview

Veracity_AI is designed to analyze articles and determine their authenticity using Natural Language Processing (NLP) techniques and machine learning models. The application consists of two main components: a user interface for entering URLs or text, and a backend system that processes the data and predicts the veracity of the article.
## User Interface

![UI of the Application](images/UI.png)

The user interface is built using CustomTkinter, a modern and customizable Python UI library. The main screen is divided into three sections (a minimal layout sketch follows the list):
1. **Input Section**: Allows users to enter the URL of an article or paste the text directly into the input box.
2. **Result Section**: Displays the result of the analysis (Real or Fake) and the confidence level as a percentage.
3. **Leaderboard Section**: Shows a list of news providers along with their fake news percentages, sorted by the highest fake news rate.
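As a rough illustration of this three-part layout, here is a minimal CustomTkinter sketch; the widget names and the `on_check` callback are placeholders, not the repository's actual code.

```python
import customtkinter as ctk

class MainFrame(ctk.CTk):
    """Illustrative three-section layout; widget names are not from the repo."""

    def __init__(self):
        super().__init__()
        self.title("Veracity_AI")

        # Input section: URL or raw article text plus a "Check" button
        self.input_box = ctk.CTkTextbox(self, height=120)
        self.input_box.pack(fill="x", padx=10, pady=5)
        self.check_button = ctk.CTkButton(self, text="Check", command=self.on_check)
        self.check_button.pack(pady=5)

        # Result section: verdict and confidence
        self.result_label = ctk.CTkLabel(self, text="Result: -")
        self.result_label.pack(pady=2)
        self.confidence_label = ctk.CTkLabel(self, text="Confidence: -")
        self.confidence_label.pack(pady=2)

        # Leaderboard section: providers sorted by fake-news rate
        self.leaderboard = ctk.CTkTextbox(self, height=150)
        self.leaderboard.pack(fill="both", expand=True, padx=10, pady=5)

    def on_check(self):
        # The real controller would run the VeraMind model here.
        pass

if __name__ == "__main__":
    MainFrame().mainloop()
```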
## Backend System

The backend system is responsible for processing user input, communicating with the database, and making predictions using the VeraMind model. Here's an overview of its components:
### Data Models

- **TextData**: Stores the URL, text content, provider, result, confidence, and a flag indicating whether the news is fake.
- **Provider**: Represents a news provider with attributes for name, total article count, fake article count, and a list of associated TextData objects. (A sketch of both models follows this list.)
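A minimal sketch of these two models as Python dataclasses; the field names are inferred from the descriptions above and may not match the actual classes under `models/`.

```python
from dataclasses import dataclass, field
from typing import List

@dataclass
class TextData:
    # Field names are inferred from the description above, not from the repo.
    url: str = ""
    text: str = ""
    provider: str = ""
    result: str = ""         # "Real" or "Fake"
    confidence: float = 0.0  # 0-100 percent
    is_fake_news: bool = False

@dataclass
class Provider:
    name: str
    article_count: int = 0
    fake_article_count: int = 0
    articles: List[TextData] = field(default_factory=list)

    @property
    def fake_percentage(self) -> float:
        """Share of analyzed articles flagged as fake, used for the leaderboard."""
        if self.article_count == 0:
            return 0.0
        return 100.0 * self.fake_article_count / self.article_count
```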
### Database

The application uses a DuckDB database to store analyzed data. The `FakeNewsChecker` class manages database operations such as inserting new records and fetching existing data.
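For illustration, a minimal `FakeNewsChecker` sketch built on DuckDB; the table schema and method names are assumptions, only the class name and the use of DuckDB come from this README.

```python
import duckdb

class FakeNewsChecker:
    """Illustrative sketch; the real schema and method names may differ."""

    def __init__(self, db_path: str = "veracity.duckdb"):
        self.con = duckdb.connect(db_path)
        self.con.execute("""
            CREATE TABLE IF NOT EXISTS text_data (
                url TEXT,
                provider TEXT,
                result TEXT,
                confidence DOUBLE,
                is_fake_news BOOLEAN
            )
        """)

    def insert_data(self, url, provider, result, confidence, is_fake_news):
        # Store one analyzed article.
        self.con.execute(
            "INSERT INTO text_data VALUES (?, ?, ?, ?, ?)",
            [url, provider, result, confidence, is_fake_news],
        )

    def fetch_data(self):
        # Return all stored rows, e.g. to rebuild the leaderboard.
        return self.con.execute("SELECT * FROM text_data").fetchall()
```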
### Machine Learning Model

- **VeraMindInference**: An inference engine for the VeraMind model, used to predict whether an article is real or fake news based on its text content.
- **ArticleRater**: A class that uses a Large Language Model (LLM) to generate a response based on the analyzed text data. (A usage sketch follows this list.)
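A hedged usage sketch of these two classes: the `VeraMindInference` import path and `predict()` signature are assumptions, while `get_response(article, result, confidence)` and the streamed chunks match the controller code in the diff further down.

```python
from Ai.llm import ArticleRater
# The VeraMindInference import path is assumed for illustration.
from Ai.VeraMindInference import VeraMindInference

article_text = "Example article text to analyze..."

# Classify the article; the predict() name and return shape are assumptions.
model = VeraMindInference()
result, confidence = model.predict(article_text)  # e.g. ("Fake", 87.5)

# Have the LLM explain the verdict; get_response streams chunks of text,
# matching the controller's for-loop further down.
rater = ArticleRater()
for chunk in rater.get_response(article_text, result, confidence):
    print(chunk, end="", flush=True)
```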
## Usage

To use the Fake News Checker application, follow these steps:

1. Enter an article URL or paste the text directly into the input box.
2. Click the "Check" button to start the analysis (what happens behind this button is sketched in code after this list).
3. Once the analysis is complete, the result (Real or Fake) and the confidence level are displayed in the result section.
4. The leaderboard section updates automatically with the latest fake news percentages per provider.
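For readers wiring this up programmatically, the sketch below condenses what the controller does behind the "Check" button, based on the controller diff further down; `analyze()`, `append_response()`, and `update_leaderboard()` are illustrative names, while the colour thresholds and the token guard mirror the diff.

```python
def on_check_clicked(controller, user_input: str):
    # URL or raw text -> TextData; analyze() is an illustrative helper name.
    text_data = controller.analyze(user_input)
    confidence = text_data.confidence

    # Colour-code the confidence label exactly as in the controller diff.
    confidence_color = "green" if confidence > 80 else ("orange" if confidence > 50 else "red")
    controller.frame.confidence_label.configure(fg_color=confidence_color)

    # Only query the LLM when an API token is configured.
    if controller.rater.token:
        for chunk in controller.rater.get_response(text_data.text, text_data.result, confidence):
            controller.frame.append_response(chunk)  # append_response is illustrative

    controller.update_leaderboard()  # refresh provider fake-news percentages
```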
## License

This application is licensed under the MIT license. See the [LICENSE](LICENSE) file for more details.
(Binary image file not shown; size after change: 45 KiB.)
@@ -1,11 +1,15 @@
 from langchain_community.llms import Ollama
-from Ai.Token import get_token
 
 
 class ArticleRater:
     def __init__(self):
         self.client = "https://ai.fabelous.app/v1/ollama/generic"
-        self.headers = {"Authorization": f"Token {get_token()}"}
+        self.token = self._get_token()
+        self.headers = {"Authorization": f"Token {self.token}"}
+
+    def _get_token(self):
+        with open("Token/Token.txt", "r") as t:
+            return t.readline().strip()
     def get_response(self, article, result, confidence):
         ollama_params = {
             "base_url": self.client,
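The hunk above cuts off inside `ollama_params`. As a hedged sketch of how the call might be completed, assuming the LangChain `Ollama` wrapper is constructed from these parameters and streamed; the model name, prompt wording, and `headers` keyword are assumptions, only `base_url` and the Authorization header appear in the diff.

```python
from langchain_community.llms import Ollama

def stream_rating(article: str, result: str, confidence: float,
                  base_url: str, headers: dict):
    """Illustrative completion of get_response; only base_url and the
    Authorization header come from the diff above."""
    llm = Ollama(base_url=base_url, model="llama3", headers=headers)
    prompt = (
        f"The following article was classified as {result} "
        f"with {confidence}% confidence. Explain briefly why.\n\n{article}"
    )
    # llm.stream() yields the answer chunk by chunk, which matches the
    # controller's `for chunk in response_stream:` loop further down.
    return llm.stream(prompt)
```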
@@ -7,7 +7,6 @@ from utils.database.database import FakeNewsChecker
 from models.provider import Provider
 from collections import Counter
 from Ai.llm import ArticleRater
-from Ai.Token import get_token
 
 
 class MainFrameController:
@@ -63,7 +62,7 @@ class MainFrameController:
         confidence_color = "green" if confidence > 80 else ("orange" if confidence > 50 else "red")
         self.frame.confidence_label.configure(fg_color=confidence_color)
 
-        if get_token().strip():
+        if self.rater.token:
             response_stream = self.rater.get_response(text_data.text, text_data.result, confidence)
 
             for chunk in response_stream:
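Since the controller keeps the `Counter` import and the leaderboard ranks providers by fake-news rate, here is a hedged sketch of how that tally could be computed; the function name and record shape are illustrative, only `Counter` and the sorting rule come from the docs above.

```python
from collections import Counter

def fake_news_leaderboard(records):
    """records is assumed to be an iterable of (provider, is_fake_news) pairs,
    e.g. fetched via FakeNewsChecker."""
    totals = Counter()
    fakes = Counter()
    for provider, is_fake in records:
        totals[provider] += 1
        if is_fake:
            fakes[provider] += 1

    leaderboard = [
        (provider, 100.0 * fakes[provider] / totals[provider])
        for provider in totals
    ]
    # Sort by the highest fake-news rate, as shown in the leaderboard section.
    return sorted(leaderboard, key=lambda item: item[1], reverse=True)

# Example result: [("example-news.com", 66.7), ("other-site.org", 25.0)]
```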