From 90bcdd346a2f300655cf81fa0e2a7d8285608e6b Mon Sep 17 00:00:00 2001
From: Falko Habel
Date: Sat, 22 Feb 2025 10:45:30 +0100
Subject: [PATCH] even more downsized

---
 src/aiunn/finetune.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py
index 13cef3e..8ef0fa1 100644
--- a/src/aiunn/finetune.py
+++ b/src/aiunn/finetune.py
@@ -15,7 +15,7 @@ class UpscaleDataset(Dataset):
         combined_df = pd.DataFrame()
         for parquet_file in parquet_files:
             # Load data with chunking for memory efficiency
-            df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(2500)
+            df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(1250)
             combined_df = pd.concat([combined_df, df], ignore_index=True)
 
         # Validate data format
@@ -94,7 +94,7 @@ from torch.utils.data import DataLoader
 # Create your dataset and dataloader
 dataset = UpscaleDataset(["/root/training_data/vision-dataset/image_upscaler.parquet",
                           "/root/training_data/vision-dataset/image_vec_upscaler.parquet"], transform=transform)
-data_loader = DataLoader(dataset, batch_size=2, shuffle=True)
+data_loader = DataLoader(dataset, batch_size=1, shuffle=True)
 
 # Define a loss function and optimizer
 criterion = nn.MSELoss()
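
Two follow-up sketches, not part of the patch. First, pd.read_parquet(...).head(1250) still decodes each full parquet file into memory before truncating; streaming record batches with pyarrow and stopping at 1,250 rows would bound peak memory to roughly one batch. The 1250-row cap and the column names come from the patch; the helper name load_head and batch_size=256 are assumptions:

import pyarrow as pa
import pyarrow.parquet as pq

def load_head(parquet_file, n_rows=1250, columns=("image_512", "image_1024")):
    """Read only the first n_rows of the selected columns into a DataFrame."""
    pf = pq.ParquetFile(parquet_file)
    batches = []
    remaining = n_rows
    for batch in pf.iter_batches(batch_size=256, columns=list(columns)):
        # Keep at most the rows still needed, then stop early.
        batches.append(batch.slice(0, remaining))
        remaining -= batches[-1].num_rows
        if remaining <= 0:
            break
    return pa.Table.from_batches(batches).to_pandas()

Second, dropping batch_size from 2 to 1 halves activation memory but also halves the effective batch size; accumulating gradients over two steps would restore it with no extra peak memory. This is a sketch that assumes the dataset yields (low_res, high_res) tensor pairs and that model, criterion, and optimizer are the objects built in finetune.py:

def train_one_epoch(model, data_loader, criterion, optimizer,
                    accum_steps=2, device="cuda"):
    model.train()
    optimizer.zero_grad()
    for step, (low_res, high_res) in enumerate(data_loader):
        output = model(low_res.to(device))
        # Scale the loss so the summed gradients average over accum_steps
        # size-1 batches, matching the old batch_size=2 update.
        loss = criterion(output, high_res.to(device)) / accum_steps
        loss.backward()
        if (step + 1) % accum_steps == 0:
            optimizer.step()
            optimizer.zero_grad()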