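"""Benchmark fabelous_math.is_even/is_odd against plain modulo parity checks and chart the results."""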
import random
import statistics
import timeit

import matplotlib.pyplot as plt
import numpy as np

from fabelous_math import is_even, is_odd

def generate_test_numbers(count: int, min_val: int, max_val: int):
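    """Return `count` distinct random integers drawn from [min_val, max_val)."""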
    return random.sample(range(min_val, max_val), count)

def run_benchmark(numbers, func, iterations=100):
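    """Time `iterations` passes of `func` over every value in `numbers`; return the per-pass durations in seconds."""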
    times = []
    for _ in range(iterations):
        start_time = timeit.default_timer()
        for num in numbers:
            func(num)
        end_time = timeit.default_timer()
        times.append(end_time - start_time)
    return times

def create_visualization(results, title):
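    """Draw a bar chart with error bars plus a summary table for `results` ({method: [seconds]}); return the pyplot module."""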
    # Prepare data (convert seconds to milliseconds)
    methods = list(results.keys())
    means = [statistics.mean(times) * 1000 for times in results.values()]
    stds = [statistics.stdev(times) * 1000 for times in results.values()]

    # Create figure with two subplots: bar chart on top, summary table below
    # (the height_ratios keyword of plt.subplots requires Matplotlib >= 3.6)
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 10), height_ratios=[2, 1])
    fig.suptitle(title, fontsize=14)
    plt.subplots_adjust(top=0.9)  # Adjust spacing for title

    # Bar plot
    x = np.arange(len(methods))
    width = 0.35
    bars = ax1.bar(x, means, width, yerr=stds, capsize=5)

    # Customize bar plot
    ax1.set_ylabel('Time (milliseconds)')
    ax1.set_xticks(x)
    ax1.set_xticklabels(methods, rotation=0)
    ax1.grid(True, axis='y', linestyle='--', alpha=0.7)

    # Add value labels on bars
    for bar in bars:
        height = bar.get_height()
        ax1.text(bar.get_x() + bar.get_width() / 2., height,
                 f'{height:.3f}ms',
                 ha='center', va='bottom')

    # Build table rows: mean, std dev, min, and max per method, in milliseconds
    cell_text = []
    for method, times in results.items():
        mean_time = statistics.mean(times) * 1000
        std_dev = statistics.stdev(times) * 1000
        min_time = min(times) * 1000
        max_time = max(times) * 1000

        cell_text.append([
            method,
            f"{mean_time:.3f}",
            f"{std_dev:.3f}",
            f"{min_time:.3f}",
            f"{max_time:.3f}",
        ])

    # Add table
    ax2.axis('tight')
    ax2.axis('off')
    columns = ['Method', 'Mean (ms)', 'Std Dev (ms)', 'Min (ms)', 'Max (ms)']
    table = ax2.table(cellText=cell_text,
                      colLabels=columns,
                      loc='center',
                      cellLoc='center')

    # Adjust table appearance
    table.auto_set_font_size(False)
    table.set_fontsize(9)
    table.scale(1.2, 1.5)

    plt.tight_layout()
    return plt

def main():
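    """Run both benchmark suites, save the charts, and print a summary."""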
    # Test parameters
    SAMPLE_SIZE = 5000
    LOW_RANGE = (1, 10_000_000)
    HIGH_RANGE = (1_000_000_000_000, 1_000_000_010_000_000)
    ITERATIONS = 100

    print(f"Running benchmarks with {SAMPLE_SIZE} numbers, {ITERATIONS} iterations each...")

    # Generate test numbers
    low_numbers = generate_test_numbers(SAMPLE_SIZE, *LOW_RANGE)
    high_numbers = generate_test_numbers(SAMPLE_SIZE, *HIGH_RANGE)

    # Run benchmarks for low numbers
    print("\nTesting low numbers...")
    low_results = {
        'Fabelous Even': run_benchmark(low_numbers, is_even, ITERATIONS),
        'Fabelous Odd': run_benchmark(low_numbers, is_odd, ITERATIONS),
        'Modulo Even': run_benchmark(low_numbers, lambda x: x % 2 == 0, ITERATIONS),
        'Modulo Odd': run_benchmark(low_numbers, lambda x: x % 2 == 1, ITERATIONS),
    }

    # Run benchmarks for high numbers
    print("Testing high numbers...")
    high_results = {
        'Fabelous Even': run_benchmark(high_numbers, is_even, ITERATIONS),
        'Fabelous Odd': run_benchmark(high_numbers, is_odd, ITERATIONS),
        'Modulo Even': run_benchmark(high_numbers, lambda x: x % 2 == 0, ITERATIONS),
        'Modulo Odd': run_benchmark(high_numbers, lambda x: x % 2 == 1, ITERATIONS),
    }

    # Create and save visualizations
    print("\nGenerating visualizations...")
    plt_low = create_visualization(low_results, 'Performance Comparison - Low Numbers')
    plt_low.savefig('low_numbers_comparison.png')
    plt_low.show()

    plt_high = create_visualization(high_results, 'Performance Comparison - High Numbers')
    plt_high.savefig('high_numbers_comparison.png')
    plt_high.show()

    # Print summary
    print("\nSummary of Findings:")
    print("-------------------")
    print("1. Low Numbers Performance:")
    for method, times in low_results.items():
        mean_time = statistics.mean(times) * 1000
        print(f" - {method}: {mean_time:.3f}ms average")

    print("\n2. High Numbers Performance:")
    for method, times in high_results.items():
        mean_time = statistics.mean(times) * 1000
        print(f" - {method}: {mean_time:.3f}ms average")


if __name__ == "__main__":
    main()