added tests

This commit is contained in:
Falko Victor Habel 2025-02-10 18:13:54 +01:00
parent 39661bf788
commit 25e0d4d72d
5 changed files with 204 additions and 0 deletions

2
pytest.ini Normal file
View File

@@ -0,0 +1,2 @@
[pytest]
testpaths = tests

2
requirements.txt Normal file
View File

@@ -0,0 +1,2 @@
pytest
pytest-cpp

138
result.py Normal file
View File

@@ -0,0 +1,138 @@
import timeit
import random
import statistics
import matplotlib.pyplot as plt
import numpy as np
from fabelous_math import is_even, is_odd
def generate_test_numbers(count: int, min_val: int, max_val: int):
    """Draw *count* distinct random integers from the half-open range [min_val, max_val).

    Raises ValueError (from random.sample) if the range holds fewer than
    *count* values.
    """
    population = range(min_val, max_val)
    return random.sample(population, count)
def run_benchmark(numbers, func, iterations=100):
    """Time *iterations* full passes of calling *func* on every element of *numbers*.

    Returns a list of per-pass wall-clock durations in seconds (one entry
    per iteration).
    """
    timer = timeit.default_timer  # hoist the attribute lookup out of the loop
    durations = []
    for _ in range(iterations):
        started = timer()
        for value in numbers:
            func(value)
        durations.append(timer() - started)
    return durations
def create_visualization(results, title):
    """Render a bar chart plus a summary table for benchmark timings.

    Parameters:
        results: mapping of method label -> list of per-pass durations in
            seconds. Each list needs at least two samples, because
            statistics.stdev raises on fewer.
        title: text used as the figure super-title.

    Returns the matplotlib.pyplot module so the caller can savefig()/show()
    the current figure.
    """
    # Prepare data
    methods = list(results.keys())
    means = [statistics.mean(times) * 1000 for times in results.values()]  # Convert to milliseconds
    stds = [statistics.stdev(times) * 1000 for times in results.values()]  # Convert to milliseconds
    # Create figure with two subplots: bar chart on top, table below
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 10), height_ratios=[2, 1])
    fig.suptitle(title, fontsize=14)
    plt.subplots_adjust(top=0.9)  # Adjust spacing for title
    # Bar plot with stdev error bars
    x = np.arange(len(methods))
    width = 0.35
    bars = ax1.bar(x, means, width, yerr=stds, capsize=5)
    # Customize bar plot
    ax1.set_ylabel('Time (milliseconds)')
    ax1.set_xticks(x)
    ax1.set_xticklabels(methods, rotation=0)
    ax1.grid(True, axis='y', linestyle='--', alpha=0.7)
    # Add value labels on bars (mean time, centered above each bar)
    for bar in bars:
        height = bar.get_height()
        ax1.text(bar.get_x() + bar.get_width()/2., height,
                 f'{height:.3f}ms',
                 ha='center', va='bottom')
    # Create table rows: per-method mean/stdev/min/max, all in milliseconds
    cell_text = []
    for method, times in results.items():
        mean_time = statistics.mean(times) * 1000
        std_dev = statistics.stdev(times) * 1000
        min_time = min(times) * 1000
        max_time = max(times) * 1000
        cell_text.append([
            method,
            f"{mean_time:.3f}",
            f"{std_dev:.3f}",
            f"{min_time:.3f}",
            f"{max_time:.3f}"
        ])
    # Add table to the lower subplot (its axes are hidden; only the table shows)
    ax2.axis('tight')
    ax2.axis('off')
    columns = ['Method', 'Mean (ms)', 'Std Dev (ms)', 'Min (ms)', 'Max (ms)']
    table = ax2.table(cellText=cell_text,
                      colLabels=columns,
                      loc='center',
                      cellLoc='center')
    # Adjust table appearance
    table.auto_set_font_size(False)
    table.set_fontsize(9)
    table.scale(1.2, 1.5)
    plt.tight_layout()
    return plt
def main():
    """Benchmark fabelous_math's is_even/is_odd against plain modulo checks.

    Generates two random samples (small and very large integers), times all
    four implementations on each, saves one comparison chart per sample as a
    PNG, and prints a per-method summary of mean timings.
    """
    # Test parameters
    SAMPLE_SIZE = 5000
    LOW_RANGE = (1, 10000000)
    HIGH_RANGE = (1000000000000, 1000000010000000)
    ITERATIONS = 100
    print(f"Running benchmarks with {SAMPLE_SIZE} numbers, {ITERATIONS} iterations each...")
    # Generate test numbers
    low_numbers = generate_test_numbers(SAMPLE_SIZE, *LOW_RANGE)
    high_numbers = generate_test_numbers(SAMPLE_SIZE, *HIGH_RANGE)
    # The four implementations under comparison, keyed by display label.
    candidates = {
        'Fabelous Even': is_even,
        'Fabelous Odd': is_odd,
        'Modulo Even': lambda x: x % 2 == 0,
        'Modulo Odd': lambda x: x % 2 == 1,
    }
    # Run benchmarks for both samples
    print("\nTesting low numbers...")
    low_results = {label: run_benchmark(low_numbers, fn, ITERATIONS)
                   for label, fn in candidates.items()}
    print("Testing high numbers...")
    high_results = {label: run_benchmark(high_numbers, fn, ITERATIONS)
                    for label, fn in candidates.items()}
    # Create, save, and display one visualization per sample
    print("\nGenerating visualizations...")
    for results, title, filename in (
        (low_results, 'Performance Comparison - Low Numbers', 'low_numbers_comparison.png'),
        (high_results, 'Performance Comparison - High Numbers', 'high_numbers_comparison.png'),
    ):
        figure = create_visualization(results, title)
        figure.savefig(filename)
        figure.show()
    # Print summary of mean timings per method
    print("\nSummary of Findings:")
    print("-------------------")
    print("1. Low Numbers Performance:")
    for method, times in low_results.items():
        print(f" - {method}: {statistics.mean(times) * 1000:.3f}ms average")
    print("\n2. High Numbers Performance:")
    for method, times in high_results.items():
        print(f" - {method}: {statistics.mean(times) * 1000:.3f}ms average")


if __name__ == "__main__":
    main()

View File

View File

@@ -0,0 +1,62 @@
import pytest
from fabelous_math import is_even, is_odd
def test_is_even():
    """is_even must return True exactly for integers divisible by two."""
    # Positive and negative even numbers, including zero and a large magnitude.
    for value in (0, 2, 4, 100, -2, -4, -100, 1000000):
        assert is_even(value) == True
    # Positive and negative odd numbers, including a large magnitude.
    for value in (1, 3, 99, -1, -3, -99, -1000001):
        assert is_even(value) == False
def test_is_odd():
    """is_odd must return True exactly for integers not divisible by two."""
    # Positive and negative odd numbers, including a large magnitude.
    for value in (1, 3, 99, -1, -3, -99, 1000001):
        assert is_odd(value) == True
    # Positive and negative even numbers, including zero and a large magnitude.
    for value in (0, 2, 4, 100, -2, -4, -100, -1000000):
        assert is_odd(value) == False
def test_is_even_is_odd_complementary():
    """For every sampled integer, exactly one of is_even/is_odd holds."""
    for num in (0, 1, -1, 2, -2, 99, -99, 1000000, -1000001):
        assert is_even(num) != is_odd(num), \
            f"Failed for number {num}: is_even and is_odd should be opposite"