# Veracity_AI/tests/Ai/test_ml_inference.py (45 lines, 1.7 KiB, Python)
import pytest
import torch
import os
import sys

# Add the src directory to the Python path so the `Ai` package resolves
# when the tests run from the repository checkout without installation.
src_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'src'))
sys.path.insert(0, src_dir)

# NOTE(review): module name "interence" looks like a typo of "inference" —
# confirm it matches the actual filename under src/Ai before renaming anything.
from Ai.interence import VeraMindInference
@pytest.fixture
def model_fixture():
    """Provide a VeraMindInference instance loaded from the VeraMind-mini
    checkpoint with a 512-token maximum sequence length."""
    return VeraMindInference("VeraMind-mini", 512)
def test_init(model_fixture):
assert model_fixture.device.type == "cuda" if torch.cuda.is_available() else "cpu"
def test_predict_fake(model_fixture):
    """A sample text expected to be classified as FAKE yields the FAKE label,
    a confidence above 0.5, and a truthy is_fake flag."""
    prediction = model_fixture.predict("Das ist sehr traurig")
    assert prediction["result"] == "FAKE", f"Expected FAKE, got {prediction['result']}"
    assert prediction["confidence"] > 0.5, f"Confidence {prediction['confidence']} is not > 0.5"
    assert prediction["is_fake"] in (True, 1), f"Expected is_fake to be True, got {prediction['is_fake']}"
def test_predict_real(model_fixture):
    """A sample text expected to be classified as REAL yields the REAL label,
    a confidence above 0.5, and a falsy is_fake flag."""
    prediction = model_fixture.predict("Das sind die Freitag Abend Nachrichten")
    assert prediction["result"] == "REAL", f"Expected REAL, got {prediction['result']}"
    assert prediction["confidence"] > 0.5, f"Confidence {prediction['confidence']} is not > 0.5"
    assert prediction["is_fake"] in (False, 0), f"Expected is_fake to be False or 0, got {prediction['is_fake']}"
def test_predict_confidence_range(model_fixture):
    """Confidence scores stay within [0, 1] across repeated predictions."""
    # Same input every iteration; the loop probes prediction stability.
    sample = "Insert a random text for testing"
    for _ in range(5):
        prediction = model_fixture.predict(sample)
        confidence = prediction["confidence"]
        assert 0 <= confidence <= 1, f"Confidence {prediction['confidence']} is not between 0 and 1"
# Allow running this test module directly (outside a pytest invocation).
if __name__ == "__main__":
    pytest.main([__file__])