From 443b9f5589d25575fc6a44b1a50aa04742ea968f Mon Sep 17 00:00:00 2001
From: Falko Habel
Date: Mon, 24 Feb 2025 15:44:28 +0100
Subject: [PATCH] use cpu

---
 src/aiunn/finetune.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py
index ec18832..59e3bf9 100644
--- a/src/aiunn/finetune.py
+++ b/src/aiunn/finetune.py
@@ -40,7 +40,7 @@ class UpscaleDataset(Dataset):
     def __init__(self, parquet_files: list, transform=None):
         combined_df = pd.DataFrame()
         for parquet_file in parquet_files:
-            df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(500)
+            df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(2500)
             combined_df = pd.concat([combined_df, df], ignore_index=True)
 
         self.df = combined_df.apply(self._validate_row, axis=1)
@@ -97,7 +97,7 @@ pretrained_model_path = "/root/vision/AIIA/AIIA-base-512"
 base_model = AIIABase.load(pretrained_model_path, precision="bf16")
 model = Upsampler(base_model)
 
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+device = torch.device("cpu")
 
 # Move model to device using channels_last memory format.
 model = model.to(device, memory_format=torch.channels_last)
@@ -109,7 +109,7 @@ dataset = UpscaleDataset([
     "/root/training_data/vision-dataset/image_upscaler.parquet",
     "/root/training_data/vision-dataset/image_vec_upscaler.parquet"
 ], transform=transform)
-data_loader = DataLoader(dataset, batch_size=1, shuffle=True) # Consider adjusting num_workers if needed.
+data_loader = DataLoader(dataset, batch_size=2, shuffle=True) # Consider adjusting num_workers if needed.
 
 # Define loss function and optimizer.
 criterion = nn.MSELoss()