From 4af12873f27acb2c9b57569e5a9acbcacbe4fe62 Mon Sep 17 00:00:00 2001
From: Falko Habel
Date: Fri, 21 Feb 2025 21:54:33 +0100
Subject: [PATCH] cpu training

---
 src/aiunn/finetune.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py
index 6300266..d675057 100644
--- a/src/aiunn/finetune.py
+++ b/src/aiunn/finetune.py
@@ -13,7 +13,7 @@ class UpscaleDataset(Dataset):
         combined_df = pd.DataFrame()
         for parquet_file in parquet_files:
             # Load data with chunking for memory efficiency
-            df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(4000)
+            df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(10000)
             combined_df = pd.concat([combined_df, df], ignore_index=True)

         # Validate data format
@@ -87,14 +87,14 @@ pretrained_model_path = "/root/vision/AIIA/AIIA-base-512"
 # Load the model using the AIIA.load class method (the implementation copied in your query)
 model = AIIABase.load(pretrained_model_path)

-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+device = 'cpu' #torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model = model.to(device)
 from torch import nn, optim
 from torch.utils.data import DataLoader

 # Create your dataset and dataloader
 dataset = UpscaleDataset(["/root/training_data/vision-dataset/image_upscaler.parquet", "/root/training_data/vision-dataset/image_vec_upscaler.parquet"], transform=transform)
-data_loader = DataLoader(dataset, batch_size=1, shuffle=True)
+data_loader = DataLoader(dataset, batch_size=4, shuffle=True)

 # Define a loss function and optimizer
 criterion = nn.MSELoss()
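
Note (commentary after the patch, not part of the diff): hardcoding device = 'cpu' gets CPU training running, but it also disables CUDA for every future run until the line is edited again. A minimal alternative sketch keeps the CUDA fallback and forces CPU through an environment flag instead; the AIUNN_FORCE_CPU name below is a hypothetical illustration, not something finetune.py defines:

import os

import torch


def select_device() -> torch.device:
    # Force CPU when the (hypothetical) AIUNN_FORCE_CPU flag is set to "1";
    # otherwise prefer CUDA when a GPU is available, falling back to CPU.
    if os.environ.get("AIUNN_FORCE_CPU", "0") == "1":
        return torch.device("cpu")
    return torch.device("cuda" if torch.cuda.is_available() else "cpu")

With this, the training script would call model = model.to(select_device()), and a CPU-only run becomes AIUNN_FORCE_CPU=1 python src/aiunn/finetune.py with no source edit needed.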