develop #4
@@ -4,6 +4,8 @@ from PIL import Image
 from torch.utils.data import Dataset
 from torchvision import transforms
 from aiia import AIIA
+import csv
+

 class UpscaleDataset(Dataset):
     def __init__(self, parquet_file, transform=None):
@@ -54,6 +56,16 @@ optimizer = optim.Adam(model.parameters(), lr=1e-4)
 num_epochs = 10
 model.train() # Set model in training mode

+
+csv_file = 'losses.csv'
+
+# Create or open the CSV file and write the header if it doesn't exist
+with open(csv_file, mode='a', newline='') as file:
+    writer = csv.writer(file)
+    # Write the header only if the file is empty
+    if file.tell() == 0:
+        writer.writerow(['Epoch', 'Train Loss'])
+
 for epoch in range(num_epochs):
     epoch_loss = 0.0
     for low_res, high_res in data_loader:
@@ -66,7 +78,14 @@ for epoch in range(num_epochs):
         loss.backward()
         optimizer.step()
         epoch_loss += loss.item()
-    print(f"Epoch {epoch + 1}, Loss: {epoch_loss / len(data_loader)}")
+
+    avg_epoch_loss = epoch_loss / len(data_loader)
+    print(f"Epoch {epoch + 1}, Loss: {avg_epoch_loss}")
+
+    # Append the training loss to the CSV file
+    with open(csv_file, mode='a', newline='') as file:
+        writer = csv.writer(file)
+        writer.writerow([epoch + 1, avg_epoch_loss])

 # Optionally, save the finetuned model to a new directory
 finetuned_model_path = "aiuNN"
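
As a quick sanity check of the new logging (a sketch, not part of this diff), the appended losses.csv can be read back with the standard library's csv module, for example to report the epoch with the lowest recorded training loss:

import csv

# Read the per-epoch rows appended by the training loop above.
# Assumes the header ['Epoch', 'Train Loss'] was written on the first run.
with open('losses.csv', newline='') as file:
    reader = csv.DictReader(file)
    rows = [(int(row['Epoch']), float(row['Train Loss'])) for row in reader]

# Report the epoch with the lowest average training loss.
best_epoch, best_loss = min(rows, key=lambda r: r[1])
print(f"Best epoch: {best_epoch} (train loss {best_loss:.4f})")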