even more downsized
This commit is contained in:
parent f576a0d789
commit 90bcdd346a
@@ -15,7 +15,7 @@ class UpscaleDataset(Dataset):
         combined_df = pd.DataFrame()
         for parquet_file in parquet_files:
             # Load data with chunking for memory efficiency
-            df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(2500)
+            df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(1250)
             combined_df = pd.concat([combined_df, df], ignore_index=True)
 
         # Validate data format
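The commit halves the per-file row cap from 2500 to 1250, but `pd.read_parquet(...).head(n)` still materializes every row of the selected columns before the cap is applied. Below is a minimal sketch of a streamed cap that stops reading once enough rows arrive, assuming pyarrow is available; the helper name `load_capped` is hypothetical and not part of this repository.

import pandas as pd
import pyarrow.parquet as pq

def load_capped(path, columns=("image_512", "image_1024"), max_rows=1250):
    # Hypothetical alternative to read_parquet().head(): stream record
    # batches so only ~batch_size rows are resident before the cap hits.
    frames = []
    remaining = max_rows
    for batch in pq.ParquetFile(path).iter_batches(batch_size=256, columns=list(columns)):
        chunk = batch.to_pandas().head(remaining)
        frames.append(chunk)
        remaining -= len(chunk)
        if remaining <= 0:
            break
    return pd.concat(frames, ignore_index=True)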
@@ -94,7 +94,7 @@ from torch.utils.data import DataLoader
 
 # Create your dataset and dataloader
 dataset = UpscaleDataset(["/root/training_data/vision-dataset/image_upscaler.parquet", "/root/training_data/vision-dataset/image_vec_upscaler.parquet"], transform=transform)
-data_loader = DataLoader(dataset, batch_size=2, shuffle=True)
+data_loader = DataLoader(dataset, batch_size=1, shuffle=True)
 
 # Define a loss function and optimizer
 criterion = nn.MSELoss()
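With batch_size=1 after this commit, each step sees a single 512px/1024px image pair. A rough training-loop sketch under assumptions: the stand-in model, the Adam optimizer, and the tensor shapes are illustrative only and are not shown in this diff.

import torch.nn as nn
import torch.optim as optim

# Stand-in upscaler (assumption): the real model is not part of this commit.
model = nn.Sequential(
    nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False),
    nn.Conv2d(3, 3, kernel_size=3, padding=1),
)
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=1e-4)

# Assumes data_loader yields (low_res, high_res) float tensors with 3 channels.
for low_res, high_res in data_loader:
    optimizer.zero_grad()
    loss = criterion(model(low_res), high_res)
    loss.backward()
    optimizer.step()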