From 2f14301550e8580f40e1c0c9de69a6ae500581d1 Mon Sep 17 00:00:00 2001
From: Falko Habel
Date: Tue, 7 Jan 2025 09:23:31 +0100
Subject: [PATCH 001/100] added init files for all different aiunn versions

---
 src/aiunn/EDSR/__init__.py   | 0
 src/aiunn/ESRGAN/__init__.py | 0
 src/aiunn/SRCNN/__init__.py  | 0
 src/aiunn/__init__.py        | 0
 4 files changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 src/aiunn/EDSR/__init__.py
 create mode 100644 src/aiunn/ESRGAN/__init__.py
 create mode 100644 src/aiunn/SRCNN/__init__.py
 create mode 100644 src/aiunn/__init__.py

diff --git a/src/aiunn/EDSR/__init__.py b/src/aiunn/EDSR/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/aiunn/ESRGAN/__init__.py b/src/aiunn/ESRGAN/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/aiunn/SRCNN/__init__.py b/src/aiunn/SRCNN/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/aiunn/__init__.py b/src/aiunn/__init__.py
new file mode 100644
index 0000000..e69de29

From d85faadcc14bd9c927fcb69995adc3b25daba489 Mon Sep 17 00:00:00 2001
From: Falko Habel
Date: Wed, 29 Jan 2025 19:26:32 +0100
Subject: [PATCH 002/100] first finetune script

---
 src/aiunn/finetune.py | 144 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 144 insertions(+)
 create mode 100644 src/aiunn/finetune.py

diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py
new file mode 100644
index 0000000..a911f7f
--- /dev/null
+++ b/src/aiunn/finetune.py
@@ -0,0 +1,144 @@
+import torch
+import pandas as pd
+from PIL import Image
+import io
+from torch import nn
+from torch.utils.data import Dataset, DataLoader
+import torchvision.transforms as transforms
+from aiia.model import AIIABase
+from sklearn.model_selection import train_test_split
+
+
+# Step 1: Define Custom Dataset Class
+class ImageDataset(Dataset):
+    def __init__(self, dataframe, transform=None):
+        self.dataframe = dataframe
+        self.transform = transform
+
+    def __len__(self):
+        return len(self.dataframe)
+
+    def __getitem__(self, idx):
+        row = self.dataframe.iloc[idx]
+
+        # Decode image_512 from bytes
+        img_bytes = row['image_512']
+        img_stream = io.BytesIO(img_bytes)
+        low_res_image = Image.open(img_stream).convert('RGB')
+
+        # Decode image_1024 from bytes
+        high_res_bytes = row['image_1024']
+        high_stream = io.BytesIO(high_res_bytes)
+        high_res_image = Image.open(high_stream).convert('RGB')
+
+        # Apply transformations if specified
+        if self.transform:
+            low_res_image = self.transform(low_res_image)
+            high_res_image = self.transform(high_res_image)
+
+        return {'low_res': low_res_image, 'high_res': high_res_image}
+
+
+
+# Step 2: Load and Preprocess Data
+# Read the dataset (assuming it's a DataFrame with columns 'image_512' and 'image_1024')
+df1 = pd.read_parquet('/root/training_data/vision-dataset/image_upscaler.parquet')
+df2 = pd.read_parquet('/root/training_data/vision-dataset/image_vec_upscaler.parquet')
+
+# Combine the two datasets into one DataFrame
+df = pd.concat([df1, df2], ignore_index=True)
+
+# Split into training and validation sets
+train_df, val_df = train_test_split(df, test_size=0.2, random_state=42)
+
+# Define preprocessing transforms
+transform = transforms.Compose([
+    transforms.ToTensor(),
+    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+])
+
+train_dataset = ImageDataset(train_df, transform=transform)
+val_dataset = ImageDataset(val_df, transform=transform)
+
+# Create DataLoaders
+batch_size = 2
+train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
+val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
+
+# Step 3: Load Pre-trained Model and Modify for Upscaling
+model = AIIABase.load("AIIA-Base-512")
+
+# Freeze original CNN layers to prevent catastrophic forgetting
+for param in model.cnn.parameters():
+    param.requires_grad = False
+
+# Add upsample module
+hidden_size = model.config.hidden_size  # Assuming this is defined in your model's config
+model.upsample = torch.nn.Sequential(
+    nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
+    nn.Conv2d(hidden_size, 3, kernel_size=3, padding=1)
+)
+
+# Step 4: Define Loss Function and Optimizer
+criterion = torch.nn.MSELoss()
+optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)  # Adjust learning rate as needed
+
+# Alternatively, if you want to train only the new layers:
+params_to_update = []
+for name, param in model.named_parameters():
+    if 'upsample' in name:
+        params_to_update.append(param)
+optimizer = torch.optim.Adam(params_to_update, lr=0.001)
+
+# Step 5: Training Loop
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+model.to(device)
+
+best_val_loss = float('inf')
+num_epochs = 10  # Adjust as needed
+
+for epoch in range(num_epochs):
+    model.train()
+    running_loss = 0.0
+
+    for batch in train_loader:
+        low_res = batch['low_res'].to(device)
+        high_res = batch['high_res'].to(device)
+
+        # Forward pass
+        features = model.cnn(low_res)
+        outputs = model.upsample(features)
+
+        loss = criterion(outputs, high_res)
+
+        # Backward pass and optimize
+        optimizer.zero_grad()
+        loss.backward()
+        optimizer.step()
+
+        running_loss += loss.item()
+
+    epoch_loss = running_loss / len(train_loader)
+    print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {epoch_loss:.4f}')
+
+    # Validation Step
+    model.eval()
+    val_loss = 0.0
+
+    with torch.no_grad():
+        for batch in val_loader:
+            low_res = batch['low_res'].to(device)
+            high_res = batch['high_res'].to(device)
+
+            features = model.cnn(low_res)
+            outputs = model.upsample(features)
+
+            loss = criterion(outputs, high_res)
+            val_loss += loss.item()
+
+    print(f"Validation Loss: {val_loss:.4f}")
+
+    if val_loss < best_val_loss:
+        best_val_loss = val_loss
+        model.save("AIIA-base-512-upscaler")
+        print("Best model saved!")
\ No newline at end of file

From 71da7ed2f1c7c6ecb45b88eded70eda45b931b4c Mon Sep 17 00:00:00 2001
From: Falko Habel
Date: Wed, 29 Jan 2025 22:30:35 +0100
Subject: [PATCH 003/100] removed old scripts

---
 src/aiunn/EDSR/__init__.py   | 0
 src/aiunn/ESRGAN/__init__.py | 0
 src/aiunn/SRCNN/__init__.py  | 0
 3 files changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 src/aiunn/EDSR/__init__.py
 delete mode 100644 src/aiunn/ESRGAN/__init__.py
 delete mode 100644 src/aiunn/SRCNN/__init__.py

diff --git a/src/aiunn/EDSR/__init__.py b/src/aiunn/EDSR/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/aiunn/ESRGAN/__init__.py b/src/aiunn/ESRGAN/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/aiunn/SRCNN/__init__.py b/src/aiunn/SRCNN/__init__.py
deleted file mode 100644
index e69de29..0000000

From 914d0026026c1d9b47313a0f1d1ebda7a01b60b9 Mon Sep 17 00:00:00 2001
From: Falko Habel
Date: Wed, 29 Jan 2025 22:30:57 +0100
Subject: [PATCH 004/100] added new aiiun script with first draft for pip project

---
 pyproject.toml         |  14 ++
 requirements.txt       |   5 +
 setup.py               |  25 ++++
 src/aiunn/__init__.py  |   6 +
 src/aiunn/finetune.py  | 313 ++++++++++++++++++++++++++++-------------
 src/aiunn/inference.py |  73 ++++++++++
 6 files
changed, 337 insertions(+), 99 deletions(-) create mode 100644 pyproject.toml create mode 100644 requirements.txt create mode 100644 setup.py create mode 100644 src/aiunn/inference.py diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..4c8acdb --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,14 @@ +[build-system] +requires = ["setuptools>=45", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "aiunn" +version = "0.1.0" +description = "A brief description of your package" +readme = "README.md" +requires-python = ">=3.7" +license = {file = "LICENSE"} +authors = [ + {name = "Your Name", email = "your.email@example.com"}, +] \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..8e47744 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,5 @@ +torch +aiia +pillow +torchvision +sklearn \ No newline at end of file diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..aa53ea8 --- /dev/null +++ b/setup.py @@ -0,0 +1,25 @@ +from setuptools import setup, find_packages + +setup( + name="aiunn", + version="0.1.0", + packages=find_packages(where="src"), + package_dir={"": "src"}, + install_requires=[ + line.strip() + for line in open("requirements.txt") + if line.strip() and not line.startswith("#") + ], + author="Falko Habel", + author_email="falko.habel@gmx.de", + description="Finetuner for image upscaling using AIIA", + long_description=open("README.md").read(), + long_description_content_type="text/markdown", + url="https://github.com/yourusername/aiunn", + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", + ], + python_requires=">=3.7", +) diff --git a/src/aiunn/__init__.py b/src/aiunn/__init__.py index e69de29..a8013f3 100644 --- a/src/aiunn/__init__.py +++ b/src/aiunn/__init__.py @@ -0,0 +1,6 @@ + +from .finetune import * +from .inference import UpScaler + +__version__ = "0.1.0" + diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index a911f7f..1644662 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -7,138 +7,253 @@ from torch.utils.data import Dataset, DataLoader import torchvision.transforms as transforms from aiia.model import AIIABase from sklearn.model_selection import train_test_split +from typing import Dict, List, Union -# Step 1: Define Custom Dataset Class class ImageDataset(Dataset): def __init__(self, dataframe, transform=None): self.dataframe = dataframe self.transform = transform - + def __len__(self): return len(self.dataframe) - + def __getitem__(self, idx): row = self.dataframe.iloc[idx] - + # Decode image_512 from bytes img_bytes = row['image_512'] img_stream = io.BytesIO(img_bytes) low_res_image = Image.open(img_stream).convert('RGB') - + # Decode image_1024 from bytes high_res_bytes = row['image_1024'] high_stream = io.BytesIO(high_res_bytes) high_res_image = Image.open(high_stream).convert('RGB') - + # Apply transformations if specified if self.transform: low_res_image = self.transform(low_res_image) high_res_image = self.transform(high_res_image) - + return {'low_res': low_res_image, 'high_res': high_res_image} -# Step 2: Load and Preprocess Data -# Read the dataset (assuming it's a DataFrame with columns 'image_512' and 'image_1024') -df1 = pd.read_parquet('/root/training_data/vision-dataset/image_upscaler.parquet') -df2 = pd.read_parquet('/root/training_data/vision-dataset/image_vec_upscaler.parquet') -# Combine the two datasets into one 
DataFrame -df = pd.concat([df1, df2], ignore_index=True) - -# Split into training and validation sets -train_df, val_df = train_test_split(df, test_size=0.2, random_state=42) - -# Define preprocessing transforms -transform = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) -]) - -train_dataset = ImageDataset(train_df, transform=transform) -val_dataset = ImageDataset(val_df, transform=transform) - -# Create DataLoaders -batch_size = 2 -train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) -val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=4) - -# Step 3: Load Pre-trained Model and Modify for Upscaling -model = AIIABase.load("AIIA-Base-512") - -# Freeze original CNN layers to prevent catastrophic forgetting -for param in model.cnn.parameters(): - param.requires_grad = False - -# Add upsample module -hidden_size = model.config.hidden_size # Assuming this is defined in your model's config -model.upsample = torch.nn.Sequential( - nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False), - nn.Conv2d(hidden_size, 3, kernel_size=3, padding=1) -) - -# Step 4: Define Loss Function and Optimizer -criterion = torch.nn.MSELoss() -optimizer = torch.optim.Adam(model.parameters(), lr=0.0001) # Adjust learning rate as needed - -# Alternatively, if you want to train only the new layers: -params_to_update = [] -for name, param in model.named_parameters(): - if 'upsample' in name: - params_to_update.append(param) -optimizer = torch.optim.Adam(params_to_update, lr=0.001) - -# Step 5: Training Loop -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -model.to(device) - -best_val_loss = float('inf') -num_epochs = 10 # Adjust as needed - -for epoch in range(num_epochs): - model.train() - running_loss = 0.0 - - for batch in train_loader: - low_res = batch['low_res'].to(device) - high_res = batch['high_res'].to(device) +class TrainingBase: + def __init__(self, + model_name: str, + dataset_paths: Union[List[str], Dict[str, str]], + batch_size: int = 32, + learning_rate: float = 0.001, + num_workers: int = 4, + train_ratio: float = 0.8): + """ + Base class for training models with multiple dataset support - # Forward pass - features = model.cnn(low_res) - outputs = model.upsample(features) + Args: + model_name (str): Name of the model to initialize + dataset_paths (Union[List[str], Dict[str, str]]): Paths to datasets (train and optional validation) + batch_size (int): Batch size for training + learning_rate (float): Learning rate for optimizer + num_workers (int): Number of workers for data loading + train_ratio (float): Ratio of data to use for training (rest goes to validation) + """ + self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + self.batch_size = batch_size + self.num_workers = num_workers - loss = criterion(outputs, high_res) + # Initialize datasets and loaders + self.dataset_paths = dataset_paths + self._initialize_datasets() - # Backward pass and optimize - optimizer.zero_grad() - loss.backward() - optimizer.step() + # Initialize model and training parameters + self.model_name = model_name + self.learning_rate = learning_rate + self._initialize_model() - running_loss += loss.item() + def _initialize_datasets(self): + """Helper method to initialize datasets""" + raise NotImplementedError("This method should be implemented in child classes") - epoch_loss = running_loss / len(train_loader) - print(f'Epoch 
[{epoch+1}/{num_epochs}], Loss: {epoch_loss:.4f}') + def _initialize_model(self): + """Helper method to initialize model architecture""" + raise NotImplementedError("This method should be implemented in child classes") - # Validation Step - model.eval() - val_loss = 0.0 - - with torch.no_grad(): - for batch in val_loader: - low_res = batch['low_res'].to(device) - high_res = batch['high_res'].to(device) + def train(self, num_epochs: int = 10): + """Train the model for specified number of epochs""" + self.model.to(self.device) + + for epoch in range(num_epochs): + print(f"Epoch {epoch+1}/{num_epochs}") - features = model.cnn(low_res) - outputs = model.upsample(features) + # Train phase + self._train_epoch() - loss = criterion(outputs, high_res) - val_loss += loss.item() - - print(f"Validation Loss: {val_loss:.4f}") + # Validation phase + self._validate_epoch() + + # Save best model based on validation loss + if self.current_val_loss < self.best_val_loss: + self.save_model() - if val_loss < best_val_loss: - best_val_loss = val_loss - model.save("AIIA-base-512-upscaler") - print("Best model saved!") \ No newline at end of file + def _train_epoch(self): + """Train model for one epoch""" + raise NotImplementedError("This method should be implemented in child classes") + + def _validate_epoch(self): + """Validate model performance""" + raise NotImplementedError("This method should be implemented in child classes") + + def save_model(self): + """Save current best model""" + torch.save({ + 'model_state_dict': self.model.state_dict(), + 'optimizer_state_dict': self.optimizer.state_dict(), + 'best_val_loss': self.best_val_loss + }, f"{self.model_name}_best.pth") + +class Finetuner(TrainingBase): + def __init__(self, + model_name: str = "AIIA-Base-512", + dataset_paths: Union[List[str], Dict[str, str]] = None, + batch_size: int = 32, + learning_rate: float = 0.001, + num_workers: int = 4, + train_ratio: float = 0.8): + """ + Specialized trainer for image super resolution tasks + + Args: + Same as TrainingBase + """ + super().__init__(model_name, dataset_paths, batch_size, learning_rate, num_workers, train_ratio) + + def _initialize_datasets(self): + """Initialize image datasets""" + # Load dataframes from parquet files + if isinstance(self.dataset_paths, dict): + df_train = pd.read_parquet(self.dataset_paths['train']) + df_val = pd.read_parquet(self.dataset_paths['val']) if 'val' in self.dataset_paths else None + elif isinstance(self.dataset_paths, list): + df_train = pd.concat([pd.read_parquet(path) for path in self.dataset_paths], ignore_index=True) + df_val = None + else: + raise ValueError("Invalid dataset_paths format") + + # Split into train and validation sets if needed + if df_val is None: + df_train, df_val = train_test_split(df_train, test_size=1 - self.train_ratio, random_state=42) + + # Define preprocessing transforms + self.transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + ]) + + # Create datasets and dataloaders + self.train_dataset = ImageDataset(df_train, transform=self.transform) + self.val_dataset = ImageDataset(df_val, transform=self.transform) + + self.train_loader = DataLoader( + self.train_dataset, + batch_size=self.batch_size, + shuffle=True, + num_workers=self.num_workers + ) + + self.val_loader = DataLoader( + self.val_dataset, + batch_size=self.batch_size, + shuffle=False, + num_workers=self.num_workers + ) + + def _initialize_model(self): + """Initialize and modify the super resolution model""" + # 
Load base model + self.model = AIIABase.load(self.model_name) + + # Freeze CNN layers + for param in self.model.cnn.parameters(): + param.requires_grad = False + + # Add upscaling layer + hidden_size = self.model.config.hidden_size + self.model.upsample = nn.Sequential( + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False), + nn.Conv2d(hidden_size, 3, kernel_size=3, padding=1) + ) + + # Initialize optimizer and loss function + self.criterion = nn.MSELoss() + self.optimizer = torch.optim.Adam( + [param for param in self.model.parameters() if 'upsample' in str(param)], + lr=self.learning_rate + ) + + self.best_val_loss = float('inf') + + def _train_epoch(self): + """Train model for one epoch""" + self.model.train() + running_loss = 0.0 + + for batch in self.train_loader: + low_res = batch['low_res'].to(self.device) + high_res = batch['high_res'].to(self.device) + + # Forward pass + features = self.model.cnn(low_res) + outputs = self.model.upsample(features) + + loss = self.criterion(outputs, high_res) + + # Backward pass and optimize + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + + running_loss += loss.item() + + epoch_loss = running_loss / len(self.train_loader) + print(f"Train Loss: {epoch_loss:.4f}") + + def _validate_epoch(self): + """Validate model performance""" + self.model.eval() + val_loss = 0.0 + + with torch.no_grad(): + for batch in self.val_loader: + low_res = batch['low_res'].to(self.device) + high_res = batch['high_res'].to(self.device) + + features = self.model.cnn(low_res) + outputs = self.model.upsample(features) + + loss = self.criterion(outputs, high_res) + val_loss += loss.item() + + avg_val_loss = val_loss / len(self.val_loader) + print(f"Validation Loss: {avg_val_loss:.4f}") + + # Update best model + if avg_val_loss < self.best_val_loss: + self.best_val_loss = avg_val_loss + + def __repr__(self): + return f"Model ({self.model_name}, batch_size={self.batch_size})" + + +# Example usage: +if __name__ == "__main__": + finetuner = Finetuner( + train_parquet_path="/root/training_data/vision-dataset/image_upscaler.parquet", + val_parquet_path="/root/training_data/vision-dataset/image_vec_upscaler.parquet", + batch_size=2, + learning_rate=0.001 + ) + + finetuner.train_model(num_epochs=10) \ No newline at end of file diff --git a/src/aiunn/inference.py b/src/aiunn/inference.py new file mode 100644 index 0000000..12b2b76 --- /dev/null +++ b/src/aiunn/inference.py @@ -0,0 +1,73 @@ +import torch +from PIL import Image +import torchvision.transforms as T +from torch.nn import functional as F +from aiia.model import AIIABase + +class UpScaler: + def __init__(self, model_path="AIIA-base-512-upscaler", device="cuda"): + self.device = torch.device(device) + self.model = AIIABase.load(model_path).to(self.device) + self.model.eval() + + # Preprocessing transforms + self.preprocess = T.Compose([ + T.Lambda(lambda img: self._pad_to_square(img)), + T.Resize(512), + T.ToTensor(), + T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + ]) + + def _pad_to_square(self, pil_img): + """Pad image to square while maintaining aspect ratio""" + w, h = pil_img.size + max_side = max(w, h) + hp = (max_side - w) // 2 + vp = (max_side - h) // 2 + padding = (hp, vp, max_side - w - hp, max_side - h - vp) + return T.functional.pad(pil_img, padding, 0, 'constant') + + def _remove_padding(self, tensor, original_size): + """Remove padding added during preprocessing""" + _, _, h, w = tensor.shape + orig_w, orig_h = original_size + + # Calculate scale factor + scale = 
512 / max(orig_w, orig_h) + new_w = int(orig_w * scale) + new_h = int(orig_h * scale) + + # Calculate padding offsets + pad_w = (512 - new_w) // 2 + pad_h = (512 - new_h) // 2 + + # Remove padding + unpad = tensor[:, :, pad_h:pad_h+new_h, pad_w:pad_w+new_w] + + # Resize to target 2x resolution + return F.interpolate(unpad, size=(orig_h*2, orig_w*2), mode='bilinear', align_corners=False) + + def upscale(self, input_image): + # Preprocess + original_size = input_image.size + input_tensor = self.preprocess(input_image).unsqueeze(0).to(self.device) + + # Inference + with torch.no_grad(): + features = self.model.cnn(input_tensor) + output = self.model.upsample(features) + + # Postprocess + output = self._remove_padding(output, original_size) + + # Convert to PIL Image + output = output.squeeze(0).cpu().detach() + output = (output * 0.5 + 0.5).clamp(0, 1) + return T.functional.to_pil_image(output) + +# Usage example +if __name__ == "__main__": + upscaler = UpScaler() + input_image = Image.open("input.jpg") + output_image = upscaler.upscale(input_image) + output_image.save("output_2x.jpg") From 4a60045320175355760fc3c583da4c4ef3ef21f8 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Wed, 29 Jan 2025 23:06:49 +0100 Subject: [PATCH 005/100] first finetune try --- src/aiunn/finetune.py | 186 ++++++++++++++++++------------------------ 1 file changed, 79 insertions(+), 107 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 1644662..336f38c 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -41,83 +41,10 @@ class ImageDataset(Dataset): -class TrainingBase: - def __init__(self, - model_name: str, - dataset_paths: Union[List[str], Dict[str, str]], - batch_size: int = 32, - learning_rate: float = 0.001, - num_workers: int = 4, - train_ratio: float = 0.8): - """ - Base class for training models with multiple dataset support - - Args: - model_name (str): Name of the model to initialize - dataset_paths (Union[List[str], Dict[str, str]]): Paths to datasets (train and optional validation) - batch_size (int): Batch size for training - learning_rate (float): Learning rate for optimizer - num_workers (int): Number of workers for data loading - train_ratio (float): Ratio of data to use for training (rest goes to validation) - """ - self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - self.batch_size = batch_size - self.num_workers = num_workers - - # Initialize datasets and loaders - self.dataset_paths = dataset_paths - self._initialize_datasets() - - # Initialize model and training parameters - self.model_name = model_name - self.learning_rate = learning_rate - self._initialize_model() - - def _initialize_datasets(self): - """Helper method to initialize datasets""" - raise NotImplementedError("This method should be implemented in child classes") - - def _initialize_model(self): - """Helper method to initialize model architecture""" - raise NotImplementedError("This method should be implemented in child classes") - - def train(self, num_epochs: int = 10): - """Train the model for specified number of epochs""" - self.model.to(self.device) - - for epoch in range(num_epochs): - print(f"Epoch {epoch+1}/{num_epochs}") - - # Train phase - self._train_epoch() - - # Validation phase - self._validate_epoch() - - # Save best model based on validation loss - if self.current_val_loss < self.best_val_loss: - self.save_model() - - def _train_epoch(self): - """Train model for one epoch""" - raise NotImplementedError("This method should be implemented in child 
classes") - - def _validate_epoch(self): - """Validate model performance""" - raise NotImplementedError("This method should be implemented in child classes") - - def save_model(self): - """Save current best model""" - torch.save({ - 'model_state_dict': self.model.state_dict(), - 'optimizer_state_dict': self.optimizer.state_dict(), - 'best_val_loss': self.best_val_loss - }, f"{self.model_name}_best.pth") - -class Finetuner(TrainingBase): +class ModelTrainer: def __init__(self, model_name: str = "AIIA-Base-512", - dataset_paths: Union[List[str], Dict[str, str]] = None, + dataset_paths: List[str] = None, batch_size: int = 32, learning_rate: float = 0.001, num_workers: int = 4, @@ -126,25 +53,42 @@ class Finetuner(TrainingBase): Specialized trainer for image super resolution tasks Args: - Same as TrainingBase + model_name (str): Name of the model to initialize + dataset_paths (List[str]): Paths to datasets + batch_size (int): Batch size for training + learning_rate (float): Learning rate for optimizer + num_workers (int): Number of workers for data loading + train_ratio (float): Ratio of data to use for training (rest goes to validation) """ - super().__init__(model_name, dataset_paths, batch_size, learning_rate, num_workers, train_ratio) + self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + self.batch_size = batch_size + self.num_workers = num_workers + self.dataset_paths = dataset_paths + self.model_name = model_name + self.learning_rate = learning_rate + self.train_ratio = train_ratio + # Initialize datasets and loaders + self._initialize_datasets() + + # Initialize model and training parameters + self._initialize_model() + def _initialize_datasets(self): - """Initialize image datasets""" - # Load dataframes from parquet files - if isinstance(self.dataset_paths, dict): - df_train = pd.read_parquet(self.dataset_paths['train']) - df_val = pd.read_parquet(self.dataset_paths['val']) if 'val' in self.dataset_paths else None - elif isinstance(self.dataset_paths, list): + """ + Helper method to initialize datasets + """ + # Read training data based on input format + if isinstance(self.dataset_paths, list): df_train = pd.concat([pd.read_parquet(path) for path in self.dataset_paths], ignore_index=True) - df_val = None else: - raise ValueError("Invalid dataset_paths format") + raise ValueError("Invalid dataset_paths format. 
Must be a list or dictionary.") - # Split into train and validation sets if needed - if df_val is None: - df_train, df_val = train_test_split(df_train, test_size=1 - self.train_ratio, random_state=42) + df_train, df_val = train_test_split( + df_train, + test_size=1 - self.train_ratio, + random_state=42 + ) # Define preprocessing transforms self.transform = transforms.Compose([ @@ -168,10 +112,12 @@ class Finetuner(TrainingBase): batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers - ) + ) if df_val is not None else None def _initialize_model(self): - """Initialize and modify the super resolution model""" + """ + Helper method to initialize model architecture and training parameters + """ # Load base model self.model = AIIABase.load(self.model_name) @@ -181,9 +127,10 @@ class Finetuner(TrainingBase): # Add upscaling layer hidden_size = self.model.config.hidden_size + kernel_size = self.model.config.kernel_size self.model.upsample = nn.Sequential( nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False), - nn.Conv2d(hidden_size, 3, kernel_size=3, padding=1) + nn.Conv2d(hidden_size, 3, kernel_size=kernel_size, padding=1) ) # Initialize optimizer and loss function @@ -195,14 +142,36 @@ class Finetuner(TrainingBase): self.best_val_loss = float('inf') + def train(self, num_epochs: int = 10): + """ + Train the model for specified number of epochs + """ + self.model.to(self.device) + + for epoch in range(num_epochs): + print(f"Epoch {epoch+1}/{num_epochs}") + + # Train phase + self._train_epoch() + + # Validation phase + if self.val_loader is not None: + self._validate_epoch() + + # Save best model based on validation loss + if self.val_loader is not None and self.current_val_loss < self.best_val_loss: + self.model.save("aiuNN-base") + def _train_epoch(self): - """Train model for one epoch""" + """ + Train model for one epoch + """ self.model.train() running_loss = 0.0 for batch in self.train_loader: - low_res = batch['low_res'].to(self.device) - high_res = batch['high_res'].to(self.device) + low_res = batch['low_ress'].to(self.device) + high_res = batch['high_ress'].to(self.device) # Forward pass features = self.model.cnn(low_res) @@ -221,14 +190,16 @@ class Finetuner(TrainingBase): print(f"Train Loss: {epoch_loss:.4f}") def _validate_epoch(self): - """Validate model performance""" + """ + Validate model performance + """ self.model.eval() - val_loss = 0.0 + val_oss = 0.0 with torch.no_grad(): for batch in self.val_loader: - low_res = batch['low_res'].to(self.device) - high_res = batch['high_res'].to(self.device) + low_res = batch['low_ress'].to(self.device) + high_res = batch['high_ress'].to(self.device) features = self.model.cnn(low_res) outputs = self.model.upsample(features) @@ -236,24 +207,25 @@ class Finetuner(TrainingBase): loss = self.criterion(outputs, high_res) val_loss += loss.item() - avg_val_loss = val_loss / len(self.val_loader) + avg_val_loss = val_loss / len(self.val_loader) if self.val_loader else 0 print(f"Validation Loss: {avg_val_loss:.4f}") # Update best model if avg_val_loss < self.best_val_loss: self.best_val_loss = avg_val_loss - + def __repr__(self): return f"Model ({self.model_name}, batch_size={self.batch_size})" - - -# Example usage: + if __name__ == "__main__": - finetuner = Finetuner( - train_parquet_path="/root/training_data/vision-dataset/image_upscaler.parquet", - val_parquet_path="/root/training_data/vision-dataset/image_vec_upscaler.parquet", + trainer = ModelTrainer( + model_name="/root/vision/AIIA/AIIA-base-512/", + dataset_paths=[ 
+ "/root/training_data/vision-dataset/image_upscaler.parquet", + "/root/training_data/vision-dataset/image_vec_upscaler.parquet" + ], batch_size=2, learning_rate=0.001 ) - finetuner.train_model(num_epochs=10) \ No newline at end of file + trainer.train(num__epochs=3) \ No newline at end of file From 2121316e3b452b97a89360e2918f5c1928698bb1 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Thu, 30 Jan 2025 10:36:15 +0100 Subject: [PATCH 006/100] finetune improvement --- src/aiunn/finetune.py | 145 ++++++++++++++++++++++-------------------- 1 file changed, 75 insertions(+), 70 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 336f38c..5174d87 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -5,10 +5,9 @@ import io from torch import nn from torch.utils.data import Dataset, DataLoader import torchvision.transforms as transforms -from aiia.model import AIIABase +from aiia.model import AIIABase, AIIA from sklearn.model_selection import train_test_split -from typing import Dict, List, Union - +from typing import Dict, List, Union, Optional class ImageDataset(Dataset): def __init__(self, dataframe, transform=None): @@ -36,24 +35,21 @@ class ImageDataset(Dataset): low_res_image = self.transform(low_res_image) high_res_image = self.transform(high_res_image) - return {'low_res': low_res_image, 'high_res': high_res_image} - - - + return {'low_ress': low_res_image, 'high_ress': high_res_image} class ModelTrainer: def __init__(self, - model_name: str = "AIIA-Base-512", - dataset_paths: List[str] = None, + model: AIIA, + dataset_paths: List[str], batch_size: int = 32, learning_rate: float = 0.001, num_workers: int = 4, train_ratio: float = 0.8): """ Specialized trainer for image super resolution tasks - + Args: - model_name (str): Name of the model to initialize + model (nn.Module): Model instance to finetune dataset_paths (List[str]): Paths to datasets batch_size (int): Batch size for training learning_rate (float): Learning rate for optimizer @@ -64,120 +60,126 @@ class ModelTrainer: self.batch_size = batch_size self.num_workers = num_workers self.dataset_paths = dataset_paths - self.model_name = model_name self.learning_rate = learning_rate self.train_ratio = train_ratio - + self.model = model + # Initialize datasets and loaders self._initialize_datasets() - - # Initialize model and training parameters - self._initialize_model() - + + # Initialize training parameters + self._initialize_training() + def _initialize_datasets(self): """ Helper method to initialize datasets """ - # Read training data based on input format if isinstance(self.dataset_paths, list): df_train = pd.concat([pd.read_parquet(path) for path in self.dataset_paths], ignore_index=True) else: - raise ValueError("Invalid dataset_paths format. Must be a list or dictionary.") - + raise ValueError("Invalid dataset_paths format. 
Must be a list.") + df_train, df_val = train_test_split( df_train, test_size=1 - self.train_ratio, random_state=42 ) - + # Define preprocessing transforms self.transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) ]) - + # Create datasets and dataloaders self.train_dataset = ImageDataset(df_train, transform=self.transform) self.val_dataset = ImageDataset(df_val, transform=self.transform) - + self.train_loader = DataLoader( self.train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers ) - + self.val_loader = DataLoader( self.val_dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers ) if df_val is not None else None - - def _initialize_model(self): + + def _initialize_training(self): """ - Helper method to initialize model architecture and training parameters + Helper method to initialize training parameters """ - # Load base model - self.model = AIIABase.load(self.model_name) - - # Freeze CNN layers - for param in self.model.cnn.parameters(): - param.requires_grad = False - - # Add upscaling layer - hidden_size = self.model.config.hidden_size - kernel_size = self.model.config.kernel_size - self.model.upsample = nn.Sequential( - nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False), - nn.Conv2d(hidden_size, 3, kernel_size=kernel_size, padding=1) - ) - + # Freeze CNN layers (if applicable) + try: + for param in self.model.cnn.parameters(): + param.requires_grad = False + except AttributeError: + pass # If model doesn't have a 'cnn' attribute, just continue + + # Add upscaling layer if not already present + if not hasattr(self.model, 'upsample'): + hidden_size = self.model.config.hidden_size + kernel_size = self.model.config.kernel_size + self.model.upsample = nn.Sequential( + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False), + nn.Conv2d(hidden_size, 3, kernel_size=kernel_size, padding=1) + ) + # Initialize optimizer and loss function self.criterion = nn.MSELoss() + + # Get parameters of the upsample layer for training + params = [p for p in self.model.upsample.parameters() if p.requires_grad] + if not params: + raise ValueError("No parameters found in upsample layer to optimize") + self.optimizer = torch.optim.Adam( - [param for param in self.model.parameters() if 'upsample' in str(param)], + params, lr=self.learning_rate ) - + self.best_val_loss = float('inf') - + def train(self, num_epochs: int = 10): """ Train the model for specified number of epochs """ self.model.to(self.device) - + for epoch in range(num_epochs): print(f"Epoch {epoch+1}/{num_epochs}") - + # Train phase self._train_epoch() - + # Validation phase if self.val_loader is not None: self._validate_epoch() - + # Save best model based on validation loss if self.val_loader is not None and self.current_val_loss < self.best_val_loss: - self.model.save("aiuNN-base") - + self.model.save("aiuNN-finetuned") + def _train_epoch(self): """ Train model for one epoch """ self.model.train() running_loss = 0.0 - + for batch in self.train_loader: - low_res = batch['low_ress'].to(self.device) - high_res = batch['high_ress'].to(self.device) + low_ress = batch['low_ress'].to(self.device) + high_ress = batch['high_ress'].to(self.device) # Forward pass - features = self.model.cnn(low_res) + features = self.model.cnn(low_ress) if hasattr(self.model, 'cnn') else self.model.extract_features(low_ress) outputs = self.model.upsample(features) - loss = self.criterion(outputs, high_res) + loss = 
self.criterion(outputs, high_ress) # Backward pass and optimize self.optimizer.zero_grad() @@ -185,41 +187,44 @@ class ModelTrainer: self.optimizer.step() running_loss += loss.item() - + epoch_loss = running_loss / len(self.train_loader) print(f"Train Loss: {epoch_loss:.4f}") - + def _validate_epoch(self): """ Validate model performance """ self.model.eval() - val_oss = 0.0 + val_loss = 0.0 with torch.no_grad(): for batch in self.val_loader: - low_res = batch['low_ress'].to(self.device) - high_res = batch['high_ress'].to(self.device) + low_ress = batch['low_ress'].to(self.device) + high_ress = batch['high_ress'].to(self.device) - features = self.model.cnn(low_res) + features = self.model.cnn(low_ress) if hasattr(self.model, 'cnn') else self.model.extract_features(low_ress) outputs = self.model.upsample(features) - loss = self.criterion(outputs, high_res) + loss = self.criterion(outputs, high_ress) val_loss += loss.item() avg_val_loss = val_loss / len(self.val_loader) if self.val_loader else 0 print(f"Validation Loss: {avg_val_loss:.4f}") - + # Update best model if avg_val_loss < self.best_val_loss: self.best_val_loss = avg_val_loss - + def __repr__(self): - return f"Model ({self.model_name}, batch_size={self.batch_size})" - + return f"ModelTrainer (model={type(self.model).__name__}, batch_size={self.batch_size})" + if __name__ == "__main__": + # Load your model first + model = AIIABase.load("/root/vision/AIIA/AIIA-base-512/") + trainer = ModelTrainer( - model_name="/root/vision/AIIA/AIIA-base-512/", + model=model, dataset_paths=[ "/root/training_data/vision-dataset/image_upscaler.parquet", "/root/training_data/vision-dataset/image_vec_upscaler.parquet" @@ -227,5 +232,5 @@ if __name__ == "__main__": batch_size=2, learning_rate=0.001 ) - - trainer.train(num__epochs=3) \ No newline at end of file + + trainer.train(num_epochs=3) \ No newline at end of file From 73d52f733ce6259d463fe4a10c92b3e90bab4438 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Thu, 30 Jan 2025 10:41:37 +0100 Subject: [PATCH 007/100] updated config for updated model --- src/aiunn/finetune.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 5174d87..ce1b07a 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -122,12 +122,17 @@ class ModelTrainer: # Add upscaling layer if not already present if not hasattr(self.model, 'upsample'): + # Get existing configuration values or set defaults if necessary hidden_size = self.model.config.hidden_size kernel_size = self.model.config.kernel_size + self.model.upsample = nn.Sequential( nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False), nn.Conv2d(hidden_size, 3, kernel_size=kernel_size, padding=1) ) + # Update the model's configuration with new parameters + self.model.config.upsample_hidden_size = hidden_size + self.model.config.upsample_kernel_size = kernel_size # Initialize optimizer and loss function self.criterion = nn.MSELoss() From 1f33f22beaab5eb1fecf77fcdf759cf1f314dbb6 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Thu, 30 Jan 2025 10:45:18 +0100 Subject: [PATCH 008/100] convert the bytes first --- src/aiunn/finetune.py | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index ce1b07a..7af2695 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -8,6 +8,8 @@ import torchvision.transforms as transforms from aiia.model import AIIABase, AIIA from sklearn.model_selection import train_test_split 
from typing import Dict, List, Union, Optional +import base64 + class ImageDataset(Dataset): def __init__(self, dataframe, transform=None): @@ -20,23 +22,30 @@ class ImageDataset(Dataset): def __getitem__(self, idx): row = self.dataframe.iloc[idx] - # Decode image_512 from bytes - img_bytes = row['image_512'] - img_stream = io.BytesIO(img_bytes) - low_res_image = Image.open(img_stream).convert('RGB') + # Convert string to bytes and handle decoding + try: + # Decode base64 string to bytes + low_res_bytes = base64.b64decode(row['image_512']) + high_res_bytes = base64.b64decode(row['image_1024']) + except Exception as e: + raise ValueError(f"Error decoding base64 string: {str(e)}") - # Decode image_1024 from bytes - high_res_bytes = row['image_1024'] - high_stream = io.BytesIO(high_res_bytes) - high_res_image = Image.open(high_stream).convert('RGB') + # Create image streams + low_res_stream = io.BytesIO(low_res_bytes) + high_res_stream = io.BytesIO(high_res_bytes) - # Apply transformations if specified + # Open images + low_res_image = Image.open(low_res_stream).convert('RGB') + high_res_image = Image.open(high_res_stream).convert('RGB') + + # Apply transformations if self.transform: low_res_image = self.transform(low_res_image) high_res_image = self.transform(high_res_image) return {'low_ress': low_res_image, 'high_ress': high_res_image} + class ModelTrainer: def __init__(self, model: AIIA, From be5bb536201c143e2b4a16778a64b3ffe8f359e7 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Thu, 30 Jan 2025 11:56:17 +0100 Subject: [PATCH 009/100] working transform --- src/aiunn/finetune.py | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 7af2695..0d49d85 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -22,23 +22,23 @@ class ImageDataset(Dataset): def __getitem__(self, idx): row = self.dataframe.iloc[idx] - # Convert string to bytes and handle decoding try: - # Decode base64 string to bytes - low_res_bytes = base64.b64decode(row['image_512']) - high_res_bytes = base64.b64decode(row['image_1024']) + # Directly use bytes data for PNG images + low_res_bytes = row['image_512'] + high_res_bytes = row['image_1024'] + + # Create in-memory streams + low_res_stream = io.BytesIO(low_res_bytes) + high_res_stream = io.BytesIO(high_res_bytes) + + # Open images with explicit RGB conversion + low_res_image = Image.open(low_res_stream).convert('RGB') + high_res_image = Image.open(high_res_stream).convert('RGB') + except Exception as e: - raise ValueError(f"Error decoding base64 string: {str(e)}") + raise ValueError(f"Image loading failed: {str(e)}") - # Create image streams - low_res_stream = io.BytesIO(low_res_bytes) - high_res_stream = io.BytesIO(high_res_bytes) - - # Open images - low_res_image = Image.open(low_res_stream).convert('RGB') - high_res_image = Image.open(high_res_stream).convert('RGB') - - # Apply transformations + # Apply transformations if specified if self.transform: low_res_image = self.transform(low_res_image) high_res_image = self.transform(high_res_image) @@ -46,6 +46,7 @@ class ImageDataset(Dataset): return {'low_ress': low_res_image, 'high_ress': high_res_image} + class ModelTrainer: def __init__(self, model: AIIA, From 4037a0776451695305ec8cba37059fef7fd168cc Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Thu, 30 Jan 2025 12:39:24 +0100 Subject: [PATCH 010/100] updated process --- src/aiunn/finetune.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) 
diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 0d49d85..05fde00 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -23,22 +23,25 @@ class ImageDataset(Dataset): row = self.dataframe.iloc[idx] try: - # Directly use bytes data for PNG images - low_res_bytes = row['image_512'] - high_res_bytes = row['image_1024'] + # Convert string data to bytes if needed + low_res_bytes = row['image_512'].encode() if isinstance(row['image_512'], str) else row['image_512'] + high_res_bytes = row['image_1024'].encode() if isinstance(row['image_1024'], str) else row['image_1024'] - # Create in-memory streams + # Create BytesIO objects low_res_stream = io.BytesIO(low_res_bytes) high_res_stream = io.BytesIO(high_res_bytes) - # Open images with explicit RGB conversion + # Open images low_res_image = Image.open(low_res_stream).convert('RGB') high_res_image = Image.open(high_res_stream).convert('RGB') + # Close the streams + low_res_stream.close() + high_res_stream.close() + except Exception as e: raise ValueError(f"Image loading failed: {str(e)}") - # Apply transformations if specified if self.transform: low_res_image = self.transform(low_res_image) high_res_image = self.transform(high_res_image) From 9b7a182782bb7f6a2a1e6420d9769b581be59f2c Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Thu, 30 Jan 2025 12:52:57 +0100 Subject: [PATCH 011/100] debug mode --- src/aiunn/finetune.py | 43 ++++++++++++++++++++++++++----------------- 1 file changed, 26 insertions(+), 17 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 05fde00..22a4efa 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -1,6 +1,6 @@ import torch import pandas as pd -from PIL import Image +from PIL import Image, ImageFile import io from torch import nn from torch.utils.data import Dataset, DataLoader @@ -21,31 +21,40 @@ class ImageDataset(Dataset): def __getitem__(self, idx): row = self.dataframe.iloc[idx] - + try: - # Convert string data to bytes if needed - low_res_bytes = row['image_512'].encode() if isinstance(row['image_512'], str) else row['image_512'] - high_res_bytes = row['image_1024'].encode() if isinstance(row['image_1024'], str) else row['image_1024'] + # Verify data is valid before creating BytesIO + if not isinstance(row['image_512'], bytes) or not isinstance(row['image_1024'], bytes): + raise ValueError("Image data must be in bytes format") + + low_res_stream = io.BytesIO(row['image_512']) + high_res_stream = io.BytesIO(row['image_1024']) + + # Reset stream position + low_res_stream.seek(0) + high_res_stream.seek(0) + + # Enable loading of truncated images if necessary + ImageFile.LOAD_TRUNCATED_IMAGES = True - # Create BytesIO objects - low_res_stream = io.BytesIO(low_res_bytes) - high_res_stream = io.BytesIO(high_res_bytes) - - # Open images low_res_image = Image.open(low_res_stream).convert('RGB') high_res_image = Image.open(high_res_stream).convert('RGB') - - # Close the streams - low_res_stream.close() - high_res_stream.close() - + + # Verify images are valid + low_res_image.verify() + high_res_image.verify() + except Exception as e: raise ValueError(f"Image loading failed: {str(e)}") - + + finally: + low_res_stream.close() + high_res_stream.close() + if self.transform: low_res_image = self.transform(low_res_image) high_res_image = self.transform(high_res_image) - + return {'low_ress': low_res_image, 'high_ress': high_res_image} From 34c547fb23dff78ff86f68d1f298c91fce48f4ed Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Thu, 30 Jan 2025 13:14:45 +0100 
Subject: [PATCH 012/100] next try --- src/aiunn/finetune.py | 89 ++++++++++++++++++++++++++++++++----------- 1 file changed, 67 insertions(+), 22 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 22a4efa..4bdf821 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -11,8 +11,22 @@ from typing import Dict, List, Union, Optional import base64 +import torch +from torch.utils.data import Dataset +from PIL import Image, ImageFile +import io +import base64 +import pandas as pd + class ImageDataset(Dataset): def __init__(self, dataframe, transform=None): + """ + Initialize the dataset with a dataframe containing image data + + Args: + dataframe (pd.DataFrame): DataFrame containing 'image_512' and 'image_1024' columns + transform (callable, optional): Optional transform to be applied to both images + """ self.dataframe = dataframe self.transform = transform @@ -20,44 +34,75 @@ class ImageDataset(Dataset): return len(self.dataframe) def __getitem__(self, idx): + """ + Get a pair of low and high resolution images + + Args: + idx (int): Index of the data point + + Returns: + dict: Contains 'low_ress' and 'high_ress' PIL images or transformed tensors + """ row = self.dataframe.iloc[idx] try: + # Handle both bytes and base64 encoded strings + low_res_data = row['image_512'] + high_res_data = row['image_1024'] + + if isinstance(low_res_data, str): + # Decode base64 string to bytes + low_res_data = base64.b64decode(low_res_data) + high_res_data = base64.b64decode(high_res_data) + # Verify data is valid before creating BytesIO - if not isinstance(row['image_512'], bytes) or not isinstance(row['image_1024'], bytes): - raise ValueError("Image data must be in bytes format") + if not isinstance(low_res_data, bytes) or not isinstance(high_res_data, bytes): + raise ValueError(f"Invalid image data format at index {idx}") - low_res_stream = io.BytesIO(row['image_512']) - high_res_stream = io.BytesIO(row['image_1024']) + # Create image streams + low_res_stream = io.BytesIO(low_res_data) + high_res_stream = io.BytesIO(high_res_data) - # Reset stream position - low_res_stream.seek(0) - high_res_stream.seek(0) - - # Enable loading of truncated images if necessary + # Enable loading of truncated images ImageFile.LOAD_TRUNCATED_IMAGES = True + # Load and convert images to RGB low_res_image = Image.open(low_res_stream).convert('RGB') high_res_image = Image.open(high_res_stream).convert('RGB') + # Create fresh copies for verify() since it modifies the image object + low_res_verify = low_res_image.copy() + high_res_verify = high_res_image.copy() + # Verify images are valid - low_res_image.verify() - high_res_image.verify() + try: + low_res_verify.verify() + high_res_verify.verify() + except Exception as e: + raise ValueError(f"Image verification failed at index {idx}: {str(e)}") + finally: + low_res_verify.close() + high_res_verify.close() + + # Apply transforms if specified + if self.transform: + low_res_image = self.transform(low_res_image) + high_res_image = self.transform(high_res_image) + + return { + 'low_ress': low_res_image, # Note: Using 'low_ress' to match ModelTrainer + 'high_ress': high_res_image # Note: Using 'high_ress' to match ModelTrainer + } except Exception as e: - raise ValueError(f"Image loading failed: {str(e)}") + raise RuntimeError(f"Error loading images at index {idx}: {str(e)}") finally: - low_res_stream.close() - high_res_stream.close() - - if self.transform: - low_res_image = self.transform(low_res_image) - high_res_image = self.transform(high_res_image) 
- - return {'low_ress': low_res_image, 'high_ress': high_res_image} - - + # Ensure streams are closed + if 'low_res_stream' in locals(): + low_res_stream.close() + if 'high_res_stream' in locals(): + high_res_stream.close() class ModelTrainer: def __init__(self, From 0484ae01b1e0dec32702f1a4b0bf8a67abf354fb Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Thu, 30 Jan 2025 13:21:56 +0100 Subject: [PATCH 013/100] added tqdm and removed doubles --- src/aiunn/finetune.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 4bdf821..f26ef93 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -9,14 +9,9 @@ from aiia.model import AIIABase, AIIA from sklearn.model_selection import train_test_split from typing import Dict, List, Union, Optional import base64 +from tqdm import tqdm -import torch -from torch.utils.data import Dataset -from PIL import Image, ImageFile -import io -import base64 -import pandas as pd class ImageDataset(Dataset): def __init__(self, dataframe, transform=None): @@ -222,7 +217,7 @@ class ModelTrainer: """ self.model.to(self.device) - for epoch in range(num_epochs): + for epoch in tqdm(num_epochs): print(f"Epoch {epoch+1}/{num_epochs}") # Train phase @@ -243,7 +238,7 @@ class ModelTrainer: self.model.train() running_loss = 0.0 - for batch in self.train_loader: + for batch in tqdm(self.train_loader): low_ress = batch['low_ress'].to(self.device) high_ress = batch['high_ress'].to(self.device) From 8c49cc7e013b4c856ff2374c05913d063c46ff74 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Thu, 30 Jan 2025 13:51:49 +0100 Subject: [PATCH 014/100] fixed tqdm in finetuning --- src/aiunn/finetune.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index f26ef93..545de8c 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -217,7 +217,7 @@ class ModelTrainer: """ self.model.to(self.device) - for epoch in tqdm(num_epochs): + for epoch in tqdm(range(num_epochs), desc="Training"): print(f"Epoch {epoch+1}/{num_epochs}") # Train phase @@ -238,7 +238,7 @@ class ModelTrainer: self.model.train() running_loss = 0.0 - for batch in tqdm(self.train_loader): + for batch in tqdm(self.train_loader, desc="Training"): low_ress = batch['low_ress'].to(self.device) high_ress = batch['high_ress'].to(self.device) @@ -266,7 +266,7 @@ class ModelTrainer: val_loss = 0.0 with torch.no_grad(): - for batch in self.val_loader: + for batch in tqdm(self.val_loader, desc="Validation"): low_ress = batch['low_ress'].to(self.device) high_ress = batch['high_ress'].to(self.device) From 9ec563e86d15a1742ac0e3c83901fbb065d7ff3a Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Thu, 30 Jan 2025 20:49:34 +0100 Subject: [PATCH 015/100] updated Finetuner to correctly log losses --- src/aiunn/finetune.py | 104 ++++++++++++++++++++++++++++++------------ 1 file changed, 75 insertions(+), 29 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 545de8c..4cf3d2e 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -10,7 +10,7 @@ from sklearn.model_selection import train_test_split from typing import Dict, List, Union, Optional import base64 from tqdm import tqdm - +import os class ImageDataset(Dataset): @@ -99,14 +99,15 @@ class ImageDataset(Dataset): if 'high_res_stream' in locals(): high_res_stream.close() -class ModelTrainer: +class FineTuner: def __init__(self, model: AIIA, dataset_paths: List[str], batch_size: int = 32, 
learning_rate: float = 0.001, num_workers: int = 4, - train_ratio: float = 0.8): + train_ratio: float = 0.8, + output_dir: str = "./training_logs"): """ Specialized trainer for image super resolution tasks @@ -117,6 +118,7 @@ class ModelTrainer: learning_rate (float): Learning rate for optimizer num_workers (int): Number of workers for data loading train_ratio (float): Ratio of data to use for training (rest goes to validation) + output_dir (str): Directory to save training logs and model checkpoints """ self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") self.batch_size = batch_size @@ -125,6 +127,22 @@ class ModelTrainer: self.learning_rate = learning_rate self.train_ratio = train_ratio self.model = model + self.output_dir = output_dir + + # Create output directory if it doesn't exist + os.makedirs(output_dir, exist_ok=True) + + # Initialize history tracking + self.train_losses = [] + self.val_losses = [] + self.best_val_loss = float('inf') + self.current_val_loss = float('inf') + + # Initialize CSV logging + self.log_file = os.path.join(output_dir, 'training_log.csv') + if not os.path.exists(self.log_file): + with open(self.log_file, 'w') as f: + f.write('epoch,train_loss,val_loss,best_val_loss\n') # Initialize datasets and loaders self._initialize_datasets() @@ -171,6 +189,19 @@ class ModelTrainer: num_workers=self.num_workers ) if df_val is not None else None + def _log_metrics(self, epoch: int, train_loss: float, val_loss: float): + """ + Log training metrics to CSV file + + Args: + epoch (int): Current epoch number + train_loss (float): Training loss for the epoch + val_loss (float): Validation loss for the epoch + """ + with open(self.log_file, 'a') as f: + f.write(f'{epoch},{train_loss:.6f},{val_loss:.6f},{self.best_val_loss:.6f}\n') + + def _initialize_training(self): """ Helper method to initialize training parameters @@ -211,29 +242,11 @@ class ModelTrainer: self.best_val_loss = float('inf') - def train(self, num_epochs: int = 10): - """ - Train the model for specified number of epochs - """ - self.model.to(self.device) - - for epoch in tqdm(range(num_epochs), desc="Training"): - print(f"Epoch {epoch+1}/{num_epochs}") - - # Train phase - self._train_epoch() - - # Validation phase - if self.val_loader is not None: - self._validate_epoch() - - # Save best model based on validation loss - if self.val_loader is not None and self.current_val_loss < self.best_val_loss: - self.model.save("aiuNN-finetuned") - def _train_epoch(self): """ Train model for one epoch + Returns: + float: Average training loss for the epoch """ self.model.train() running_loss = 0.0 @@ -256,11 +269,15 @@ class ModelTrainer: running_loss += loss.item() epoch_loss = running_loss / len(self.train_loader) + self.train_losses.append(epoch_loss) print(f"Train Loss: {epoch_loss:.4f}") + return epoch_loss def _validate_epoch(self): """ Validate model performance + Returns: + float: Average validation loss for the epoch """ self.model.eval() val_loss = 0.0 @@ -276,12 +293,41 @@ class ModelTrainer: loss = self.criterion(outputs, high_ress) val_loss += loss.item() - avg_val_loss = val_loss / len(self.val_loader) if self.val_loader else 0 - print(f"Validation Loss: {avg_val_loss:.4f}") + self.current_val_loss = val_loss / len(self.val_loader) + self.val_losses.append(self.current_val_loss) + print(f"Validation Loss: {self.current_val_loss:.4f}") + return self.current_val_loss - # Update best model - if avg_val_loss < self.best_val_loss: - self.best_val_loss = avg_val_loss + def train(self, 
num_epochs: int = 10): + """ + Train the model for specified number of epochs + Args: + num_epochs (int): Number of epochs to train for + """ + self.model.to(self.device) + + print(f"Training metrics will be logged to: {self.log_file}") + + for epoch in range(num_epochs): + print(f"\nEpoch {epoch+1}/{num_epochs}") + + # Train phase + train_loss = self._train_epoch() + + # Validation phase + if self.val_loader is not None: + val_loss = self._validate_epoch() + + # Log metrics + self._log_metrics(epoch + 1, train_loss, val_loss) + + # Save best model based on validation loss + if self.current_val_loss < self.best_val_loss: + print(f"Validation loss improved from {self.best_val_loss:.4f} to {self.current_val_loss:.4f}") + self.best_val_loss = self.current_val_loss + model_save_path = os.path.join(self.output_dir, "aiuNN-finetuned") + self.model.save(model_save_path) + print(f"Model saved to: {model_save_path}") def __repr__(self): return f"ModelTrainer (model={type(self.model).__name__}, batch_size={self.batch_size})" @@ -290,7 +336,7 @@ if __name__ == "__main__": # Load your model first model = AIIABase.load("/root/vision/AIIA/AIIA-base-512/") - trainer = ModelTrainer( + trainer = FineTuner( model=model, dataset_paths=[ "/root/training_data/vision-dataset/image_upscaler.parquet", From 13fb2b76c1657e0153b362e9b65c9aeb0d8070b7 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 31 Jan 2025 16:03:28 +0100 Subject: [PATCH 016/100] updated finetuning --- src/aiunn/finetune.py | 146 ++++++++++++++++++++++++++++------------- src/aiunn/inference.py | 2 +- 2 files changed, 103 insertions(+), 45 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 4cf3d2e..9dc5b2e 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -99,6 +99,8 @@ class ImageDataset(Dataset): if 'high_res_stream' in locals(): high_res_stream.close() + + class FineTuner: def __init__(self, model: AIIA, @@ -127,7 +129,7 @@ class FineTuner: self.learning_rate = learning_rate self.train_ratio = train_ratio self.model = model - self.output_dir = output_dir + self.ouptut_dir = output_dir # Create output directory if it doesn't exist os.makedirs(output_dir, exist_ok=True) @@ -150,6 +152,17 @@ class FineTuner: # Initialize training parameters self._initialize_training() + def _freeze_layers(self): + """Freeze all layers except the upsample layer""" + try: + # Try to freeze layers based on their names + for name, param in self.model.named_parameters(): + if 'upsample' not in name: + param.requires_grad = False + except Exception as e: + print(f"Warning: Couldn't freeze layers - {str(e)}") + pass + def _initialize_datasets(self): """ Helper method to initialize datasets @@ -159,15 +172,18 @@ class FineTuner: else: raise ValueError("Invalid dataset_paths format. 
Must be a list.") + # Split into train and validation sets df_train, df_val = train_test_split( df_train, test_size=1 - self.train_ratio, random_state=42 ) - # Define preprocessing transforms + # Define preprocessing transforms with augmentation self.transform = transforms.Compose([ transforms.ToTensor(), + transforms.RandomResizedCrop(256), + transforms.RandomHorizontalFlip(), transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) ]) @@ -200,47 +216,67 @@ class FineTuner: """ with open(self.log_file, 'a') as f: f.write(f'{epoch},{train_loss:.6f},{val_loss:.6f},{self.best_val_loss:.6f}\n') - - + def _initialize_training(self): """ Helper method to initialize training parameters """ - # Freeze CNN layers (if applicable) - try: - for param in self.model.cnn.parameters(): - param.requires_grad = False - except AttributeError: - pass # If model doesn't have a 'cnn' attribute, just continue + # Freeze all layers except upsample layer + self._freeze_layers() # Add upscaling layer if not already present if not hasattr(self.model, 'upsample'): - # Get existing configuration values or set defaults if necessary - hidden_size = self.model.config.hidden_size - kernel_size = self.model.config.kernel_size + # Try to get existing configuration or set defaults + try: + hidden_size = self.model.config.hidden_size + kernel_size = 3 # Use odd-sized kernel for better performance + except AttributeError: + # Fallback values if config isn't available + hidden_size = 512 + kernel_size = 3 self.model.upsample = nn.Sequential( - nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False), - nn.Conv2d(hidden_size, 3, kernel_size=kernel_size, padding=1) + nn.ConvTranspose2d(hidden_size, + hidden_size//2, + kernel_size=kernel_size, + stride=2, + padding=1, + output_padding=1), + nn.ReLU(inplace=True), + nn.ConvTranspose2d(hidden_size//2, + 3, + kernel_size=kernel_size, + stride=2, + padding=1, + output_padding=1) ) - # Update the model's configuration with new parameters self.model.config.upsample_hidden_size = hidden_size self.model.config.upsample_kernel_size = kernel_size - # Initialize optimizer and loss function - self.criterion = nn.MSELoss() + # Initialize optimizer and scheduler + params_to_optimize = [p for p in self.model.parameters() if p.requires_grad] - # Get parameters of the upsample layer for training - params = [p for p in self.model.upsample.parameters() if p.requires_grad] - if not params: - raise ValueError("No parameters found in upsample layer to optimize") + if not params_to_optimize: + raise ValueError("No parameters found to optimize") + # Use Adam with weight decay for better regularization self.optimizer = torch.optim.Adam( - params, - lr=self.learning_rate + params_to_optimize, + lr=self.learning_rate, + weight_decay=1e-4 # Add L2 regularization ) - self.best_val_loss = float('inf') + # Reduce learning rate when validation loss plateaus + self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( + self.optimizer, + factor=0.1, # Multiply LR by this factor on plateau + patience=3, # Number of epochs to wait before reducing LR + verbose=True + ) + + # Use a combination of L1 and L2 losses for better performance + self.criterion = nn.L1Loss() + self.mse_criterion = nn.MSELoss() def _train_epoch(self): """ @@ -255,18 +291,26 @@ class FineTuner: low_ress = batch['low_ress'].to(self.device) high_ress = batch['high_ress'].to(self.device) - # Forward pass - features = self.model.cnn(low_ress) if hasattr(self.model, 'cnn') else self.model.extract_features(low_ress) + try: + # Try 
using CNN layer if available + features = self.model.cnn(low_ress) + except AttributeError: + # Fallback to extract_features method + features = self.model.extract_features(low_ress) + outputs = self.model.upsample(features) - loss = self.criterion(outputs, high_ress) + # Calculate loss with different scaling for L1 and MSE components + l1_loss = self.criterion(outputs, high_ress) * 0.5 + mse_loss = self.mse_criterion(outputs, high_ress) * 0.5 + total_loss = l1_loss + mse_loss # Backward pass and optimize self.optimizer.zero_grad() - loss.backward() + total_loss.backward() self.optimizer.step() - running_loss += loss.item() + running_loss += total_loss.item() epoch_loss = running_loss / len(self.train_loader) self.train_losses.append(epoch_loss) @@ -287,11 +331,19 @@ class FineTuner: low_ress = batch['low_ress'].to(self.device) high_ress = batch['high_ress'].to(self.device) - features = self.model.cnn(low_ress) if hasattr(self.model, 'cnn') else self.model.extract_features(low_ress) + try: + features = self.model.cnn(low_ress) + except AttributeError: + features = self.model.extract_features(low_ress) + outputs = self.model.upsample(features) - loss = self.criterion(outputs, high_ress) - val_loss += loss.item() + # Calculate same loss combination + l1_loss = self.criterion(outputs, high_ress) * 0.5 + mse_loss = self.mse_criterion(outputs, high_ress) * 0.5 + total_loss = l1_loss + mse_loss + + val_loss += total_loss.item() self.current_val_loss = val_loss / len(self.val_loader) self.val_losses.append(self.current_val_loss) @@ -318,6 +370,9 @@ class FineTuner: if self.val_loader is not None: val_loss = self._validate_epoch() + # Update learning rate scheduler based on validation loss + self.scheduler.step(val_loss) + # Log metrics self._log_metrics(epoch + 1, train_loss, val_loss) @@ -325,25 +380,28 @@ class FineTuner: if self.current_val_loss < self.best_val_loss: print(f"Validation loss improved from {self.best_val_loss:.4f} to {self.current_val_loss:.4f}") self.best_val_loss = self.current_val_loss - model_save_path = os.path.join(self.output_dir, "aiuNN-finetuned") + model_save_path = os.path.join(self.ouptut_dir, "aiuNN-optimized") self.model.save(model_save_path) print(f"Model saved to: {model_save_path}") - def __repr__(self): - return f"ModelTrainer (model={type(self.model).__name__}, batch_size={self.batch_size})" - + # After training, save the final model + final_model_path = os.path.join(self.ouptut_dir, "aiuNN-final") + self.model.save(final_model_path) + print(f"\nFinal model saved to: {final_model_path}") + + if __name__ == "__main__": # Load your model first - model = AIIABase.load("/root/vision/AIIA/AIIA-base-512/") - + model = AIIABase.load("/root/vision/dataset/AIIA-base-512/") + trainer = FineTuner( model=model, dataset_paths=[ - "/root/training_data/vision-dataset/image_upscaler.parquet", - "/root/training_data/vision-dataset/image_vec_upscaler.parquet" + "/root/training_data/vision-dataset/image_upscaler.0", + "/root/training_data/vision-dataset/image_vec_upscaler.0" ], - batch_size=2, - learning_rate=0.001 + batch_size=8, # Increased batch size + learning_rate=1e-4 # Reduced initial LR ) - trainer.train(num_epochs=3) \ No newline at end of file + trainer.train(num_epochs=10) # Extended training time \ No newline at end of file diff --git a/src/aiunn/inference.py b/src/aiunn/inference.py index 12b2b76..c86905b 100644 --- a/src/aiunn/inference.py +++ b/src/aiunn/inference.py @@ -5,7 +5,7 @@ from torch.nn import functional as F from aiia.model import AIIABase class 
UpScaler: - def __init__(self, model_path="AIIA-base-512-upscaler", device="cuda"): + def __init__(self, model_path="aiuNN-finetuned", device="cuda"): self.device = torch.device(device) self.model = AIIABase.load(model_path).to(self.device) self.model.eval() From 6b354630274c0a0b4d200daafc98488f3bfcb2b4 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 31 Jan 2025 16:04:54 +0100 Subject: [PATCH 017/100] updated path for base model --- src/aiunn/finetune.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 9dc5b2e..385d8db 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -392,7 +392,7 @@ class FineTuner: if __name__ == "__main__": # Load your model first - model = AIIABase.load("/root/vision/dataset/AIIA-base-512/") + model = AIIABase.load("/root/vision/dataset/AIIA-base-512") trainer = FineTuner( model=model, From 0b04ebab7a0e72161c2207235e547ed839e3fa56 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 31 Jan 2025 16:06:12 +0100 Subject: [PATCH 018/100] corrected path again --- src/aiunn/finetune.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 385d8db..3e7687a 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -392,7 +392,7 @@ class FineTuner: if __name__ == "__main__": # Load your model first - model = AIIABase.load("/root/vision/dataset/AIIA-base-512") + model = AIIABase.load("/root/vision/AIIA/AIIA-base-512") trainer = FineTuner( model=model, From 804882d7a2a85535776db4cfbd629530fb44b962 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 31 Jan 2025 16:11:21 +0100 Subject: [PATCH 019/100] corrected dataset paths --- src/aiunn/finetune.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 3e7687a..1fefbe4 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -397,8 +397,8 @@ if __name__ == "__main__": trainer = FineTuner( model=model, dataset_paths=[ - "/root/training_data/vision-dataset/image_upscaler.0", - "/root/training_data/vision-dataset/image_vec_upscaler.0" + "/root/training_data/vision-dataset/image_upscaler.parquet", + "/root/training_data/vision-dataset/image_vec_upscaler.parquet" ], batch_size=8, # Increased batch size learning_rate=1e-4 # Reduced initial LR From 50fa103579a2933a1f9d24aaf4009e7bad7b1582 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 31 Jan 2025 16:57:13 +0100 Subject: [PATCH 020/100] printing --- src/aiunn/finetune.py | 91 ++++++++++++++++--------------------------- 1 file changed, 33 insertions(+), 58 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 1fefbe4..e9c13d6 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -100,7 +100,6 @@ class ImageDataset(Dataset): high_res_stream.close() - class FineTuner: def __init__(self, model: AIIA, @@ -129,7 +128,7 @@ class FineTuner: self.learning_rate = learning_rate self.train_ratio = train_ratio self.model = model - self.ouptut_dir = output_dir + self.output_dir = output_dir # Create output directory if it doesn't exist os.makedirs(output_dir, exist_ok=True) @@ -153,12 +152,22 @@ class FineTuner: self._initialize_training() def _freeze_layers(self): - """Freeze all layers except the upsample layer""" + """ + Freeze all layers except those that are part of the decoder or upsampling + We'll assume the last few layers are responsible for upsampling/reconstruction + """ try: - # Try to freeze layers based on
their names + # Try to identify encoder layers and freeze them for name, param in self.model.named_parameters(): - if 'upsample' not in name: + if 'encoder' in name: param.requires_grad = False + + # Unfreeze certain layers (example: last 3 decoder layers) + # Modify this based on your actual model architecture + for name, param in self.model.named_parameters(): + if 'decoder' in name and 'block4' in name or 'block5' in name: + param.requires_grad = True + except Exception as e: print(f"Warning: Couldn't freeze layers - {str(e)}") pass @@ -221,38 +230,9 @@ class FineTuner: """ Helper method to initialize training parameters """ - # Freeze all layers except upsample layer + # Freeze layers except those we want to finetune self._freeze_layers() - # Add upscaling layer if not already present - if not hasattr(self.model, 'upsample'): - # Try to get existing configuration or set defaults - try: - hidden_size = self.model.config.hidden_size - kernel_size = 3 # Use odd-sized kernel for better performance - except AttributeError: - # Fallback values if config isn't available - hidden_size = 512 - kernel_size = 3 - - self.model.upsample = nn.Sequential( - nn.ConvTranspose2d(hidden_size, - hidden_size//2, - kernel_size=kernel_size, - stride=2, - padding=1, - output_padding=1), - nn.ReLU(inplace=True), - nn.ConvTranspose2d(hidden_size//2, - 3, - kernel_size=kernel_size, - stride=2, - padding=1, - output_padding=1) - ) - self.model.config.upsample_hidden_size = hidden_size - self.model.config.upsample_kernel_size = kernel_size - # Initialize optimizer and scheduler params_to_optimize = [p for p in self.model.parameters() if p.requires_grad] @@ -269,8 +249,8 @@ class FineTuner: # Reduce learning rate when validation loss plateaus self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( self.optimizer, - factor=0.1, # Multiply LR by this factor on plateau - patience=3, # Number of epochs to wait before reducing LR + factor=0.1, # Multiply LR by this factor on plateau + patience=3, # Number of epochs to wait before reducing LR verbose=True ) @@ -291,18 +271,15 @@ class FineTuner: low_ress = batch['low_ress'].to(self.device) high_ress = batch['high_ress'].to(self.device) + # Forward pass (we'll use the model's existing architecture without adding layers) try: - # Try using CNN layer if available - features = self.model.cnn(low_ress) - except AttributeError: - # Fallback to extract_features method - features = self.model.extract_features(low_ress) - - outputs = self.model.upsample(features) + features = self.model(low_ress) + except Exception as e: + raise RuntimeError(f"Error during forward pass: {str(e)}") # Calculate loss with different scaling for L1 and MSE components - l1_loss = self.criterion(outputs, high_ress) * 0.5 - mse_loss = self.mse_criterion(outputs, high_ress) * 0.5 + l1_loss = self.criterion(features, high_ress) * 0.5 + mse_loss = self.mse_criterion(features, high_ress) * 0.5 total_loss = l1_loss + mse_loss # Backward pass and optimize @@ -332,15 +309,13 @@ class FineTuner: high_ress = batch['high_ress'].to(self.device) try: - features = self.model.cnn(low_ress) - except AttributeError: - features = self.model.extract_features(low_ress) - - outputs = self.model.upsample(features) + features = self.model(low_ress) + except Exception as e: + raise RuntimeError(f"Error during validation forward pass: {str(e)}") # Calculate same loss combination - l1_loss = self.criterion(outputs, high_ress) * 0.5 - mse_loss = self.mse_criterion(outputs, high_ress) * 0.5 + l1_loss = 
self.criterion(features, high_ress) * 0.5 + mse_loss = self.mse_criterion(features, high_ress) * 0.5 total_loss = l1_loss + mse_loss val_loss += total_loss.item() @@ -380,12 +355,12 @@ class FineTuner: if self.current_val_loss < self.best_val_loss: print(f"Validation loss improved from {self.best_val_loss:.4f} to {self.current_val_loss:.4f}") self.best_val_loss = self.current_val_loss - model_save_path = os.path.join(self.ouptut_dir, "aiuNN-optimized") + model_save_path = os.path.join(self.output_dir, "aiuNN-optimized") self.model.save(model_save_path) print(f"Model saved to: {model_save_path}") # After training, save the final model - final_model_path = os.path.join(self.ouptut_dir, "aiuNN-final") + final_model_path = os.path.join(self.output_dir, "aiuNN-final") self.model.save(final_model_path) print(f"\nFinal model saved to: {final_model_path}") @@ -400,8 +375,8 @@ if __name__ == "__main__": "/root/training_data/vision-dataset/image_upscaler.parquet", "/root/training_data/vision-dataset/image_vec_upscaler.parquet" ], - batch_size=8, # Increased batch size - learning_rate=1e-4 # Reduced initial LR + batch_size=8, # Increased batch size + learning_rate=1e-4 # Reduced initial LR ) - trainer.train(num_epochs=10) # Extended training time \ No newline at end of file + trainer.train(num_epochs=10) # Extended training time \ No newline at end of file From 325faef9d515c0543c0cf2b746d8ce391120ed1f Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 31 Jan 2025 17:02:35 +0100 Subject: [PATCH 021/100] push --- src/aiunn/finetune.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index e9c13d6..36ad40a 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -375,7 +375,7 @@ if __name__ == "__main__": "/root/training_data/vision-dataset/image_upscaler.parquet", "/root/training_data/vision-dataset/image_vec_upscaler.parquet" ], - batch_size=8, # Increased batch size + batch_size=2, # Increased batch size learning_rate=1e-4 # Reduced initial LR ) From 9187ebe01289e6f346a64bb7e8e4e9c5e2b777d4 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 31 Jan 2025 17:14:28 +0100 Subject: [PATCH 022/100] debug print --- src/aiunn/finetune.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 36ad40a..50acea1 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -274,6 +274,8 @@ class FineTuner: # Forward pass (we'll use the model's existing architecture without adding layers) try: features = self.model(low_ress) + print("Features shape:", features.shape) # Check output dimensions + print("High-res shape:", high_ress.shape) # Check target dimensions except Exception as e: raise RuntimeError(f"Error during forward pass: {str(e)}") From be74658ceb3043537f7947e4706c56472b3ff622 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 31 Jan 2025 17:30:41 +0100 Subject: [PATCH 023/100] added decoeder model --- src/aiunn/finetune.py | 76 ++++++++++++++++++++++++++++--------------- 1 file changed, 50 insertions(+), 26 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 50acea1..6121e22 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -99,6 +99,27 @@ class ImageDataset(Dataset): if 'high_res_stream' in locals(): high_res_stream.close() +class SuperResolutionModel(AIIA): + def __init__(self, base_model): + super(SuperResolutionModel, self).__init__() + # Use base model as encoder + self.encoder = base_model + for param in 
self.encoder.parameters(): + param.requires_grad = False # Freeze encoder layers + + # Add decoder layers to reconstruct image + self.decoder = nn.Sequential( + nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1), + nn.ReLU(), + nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1), + nn.ReLU(), + nn.ConvTranspose2d(128, 3, kernel_size=3, padding=1) + ) + + def forward(self, x): + features = self.encoder(x) + output = self.decoder(features) + return output class FineTuner: def __init__(self, @@ -296,36 +317,39 @@ class FineTuner: print(f"Train Loss: {epoch_loss:.4f}") return epoch_loss - def _validate_epoch(self): - """ - Validate model performance - Returns: - float: Average validation loss for the epoch - """ - self.model.eval() - val_loss = 0.0 + def _train_epoch(self): + """Train model for one epoch""" + self.model.train() + running_loss = 0.0 - with torch.no_grad(): - for batch in tqdm(self.val_loader, desc="Validation"): - low_ress = batch['low_ress'].to(self.device) - high_ress = batch['high_ress'].to(self.device) + for batch in tqdm(self.train_loader, desc="Training"): + low_ress = batch['low_ress'].to(self.device) + high_ress = batch['high_ress'].to(self.device) - try: - features = self.model(low_ress) - except Exception as e: - raise RuntimeError(f"Error during validation forward pass: {str(e)}") + # Forward pass + try: + outputs = self.model(low_ress) # Now outputs are images + print("Output shape:", outputs.shape) + print("High-res shape:", high_ress.shape) + except Exception as e: + raise RuntimeError(f"Error during forward pass: {str(e)}") - # Calculate same loss combination - l1_loss = self.criterion(features, high_ress) * 0.5 - mse_loss = self.mse_criterion(features, high_ress) * 0.5 - total_loss = l1_loss + mse_loss + # Calculate loss + l1_loss = self.criterion(outputs, high_ress) * 0.5 + mse_loss = self.mse_criterion(outputs, high_ress) * 0.5 + total_loss = l1_loss + mse_loss - val_loss += total_loss.item() + # Backward pass and optimize + self.optimizer.zero_grad() + total_loss.backward() + self.optimizer.step() - self.current_val_loss = val_loss / len(self.val_loader) - self.val_losses.append(self.current_val_loss) - print(f"Validation Loss: {self.current_val_loss:.4f}") - return self.current_val_loss + running_loss += total_loss.item() + + epoch_loss = running_loss / len(self.train_loader) + self.train_lossess.append(epoch_loss) + print(f"Train Loss: {epoch_loss:.4f}") + return epoch_loss def train(self, num_epochs: int = 10): """ @@ -369,7 +393,7 @@ class FineTuner: if __name__ == "__main__": # Load your model first - model = AIIABase.load("/root/vision/AIIA/AIIA-base-512") + model = SuperResolutionModel(base_model=AIIABase.load("/root/vision/AIIA/AIIA-base-512")) trainer = FineTuner( model=model, From bb22d0a6da807568e7bd9d3214409455d8b75713 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 31 Jan 2025 17:34:33 +0100 Subject: [PATCH 024/100] added decoder models fits --- src/aiunn/finetune.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 6121e22..be54fe2 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -5,7 +5,7 @@ import io from torch import nn from torch.utils.data import Dataset, DataLoader import torchvision.transforms as transforms -from aiia.model import AIIABase, AIIA +from aiia.model import AIIABase, AIIA, AIIAConfig from sklearn.model_selection import train_test_split from typing import Dict, List, Union, Optional import base64 @@ 
-100,8 +100,8 @@ class ImageDataset(Dataset): high_res_stream.close() class SuperResolutionModel(AIIA): - def __init__(self, base_model): - super(SuperResolutionModel, self).__init__() + def __init__(self, base_model: AIIA, config: AIIAConfig): + super(SuperResolutionModel, self).__init__(config=config) # Use base model as encoder self.encoder = base_model for param in self.encoder.parameters(): @@ -393,7 +393,8 @@ class FineTuner: if __name__ == "__main__": # Load your model first - model = SuperResolutionModel(base_model=AIIABase.load("/root/vision/AIIA/AIIA-base-512")) + config = AIIAConfig.load("/root/vision/AIIA/AIIA-base-512") + model = SuperResolutionModel(base_model=AIIABase.load("/root/vision/AIIA/AIIA-base-512"), config=config) trainer = FineTuner( model=model, From de6a67cb4e0d544c709d17988d8f315fea490641 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Wed, 5 Feb 2025 16:16:31 +0100 Subject: [PATCH 025/100] updated finetuning script --- src/aiunn/finetune.py | 536 +++++++++++------------------------------- 1 file changed, 141 insertions(+), 395 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index be54fe2..4fde964 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -1,409 +1,155 @@ import torch import pandas as pd -from PIL import Image, ImageFile -import io -from torch import nn -from torch.utils.data import Dataset, DataLoader -import torchvision.transforms as transforms -from aiia.model import AIIABase, AIIA, AIIAConfig -from sklearn.model_selection import train_test_split -from typing import Dict, List, Union, Optional -import base64 -from tqdm import tqdm +import numpy as np +import cv2 import os +from albumentations import ( + Compose, Resize, Normalize, RandomBrightnessContrast, + HorizontalFlip, VerticalFlip, Rotate, GaussianBlur +) +from albumentations.pytorch import ToTensorV2 +from torch import nn +# Import the model and config from your existing code +from aiia import AIIA, AIIAConfig, AIIABase, AIIABaseShared, AIIAmoe, AIIAchunked, AIIArecursive - -class ImageDataset(Dataset): - def __init__(self, dataframe, transform=None): - """ - Initialize the dataset with a dataframe containing image data +class aiuNNDataset(torch.utils.data.Dataset): + def __init__(self, parquet_path, config=None): + # Read the Parquet file + self.df = pd.read_parquet(parquet_path) - Args: - dataframe (pd.DataFrame): DataFrame containing 'image_512' and 'image_1024' columns - transform (callable, optional): Optional transform to be applied to both images - """ - self.dataframe = dataframe - self.transform = transform - - def __len__(self): - return len(self.dataframe) - - def __getitem__(self, idx): - """ - Get a pair of low and high resolution images - - Args: - idx (int): Index of the data point - - Returns: - dict: Contains 'low_ress' and 'high_ress' PIL images or transformed tensors - """ - row = self.dataframe.iloc[idx] - - try: - # Handle both bytes and base64 encoded strings - low_res_data = row['image_512'] - high_res_data = row['image_1024'] - - if isinstance(low_res_data, str): - # Decode base64 string to bytes - low_res_data = base64.b64decode(low_res_data) - high_res_data = base64.b64decode(high_res_data) - - # Verify data is valid before creating BytesIO - if not isinstance(low_res_data, bytes) or not isinstance(high_res_data, bytes): - raise ValueError(f"Invalid image data format at index {idx}") - - # Create image streams - low_res_stream = io.BytesIO(low_res_data) - high_res_stream = io.BytesIO(high_res_data) - - # Enable loading of 
truncated images - ImageFile.LOAD_TRUNCATED_IMAGES = True - - # Load and convert images to RGB - low_res_image = Image.open(low_res_stream).convert('RGB') - high_res_image = Image.open(high_res_stream).convert('RGB') - - # Create fresh copies for verify() since it modifies the image object - low_res_verify = low_res_image.copy() - high_res_verify = high_res_image.copy() - - # Verify images are valid - try: - low_res_verify.verify() - high_res_verify.verify() - except Exception as e: - raise ValueError(f"Image verification failed at index {idx}: {str(e)}") - finally: - low_res_verify.close() - high_res_verify.close() - - # Apply transforms if specified - if self.transform: - low_res_image = self.transform(low_res_image) - high_res_image = self.transform(high_res_image) - - return { - 'low_ress': low_res_image, # Note: Using 'low_ress' to match ModelTrainer - 'high_ress': high_res_image # Note: Using 'high_ress' to match ModelTrainer - } - - except Exception as e: - raise RuntimeError(f"Error loading images at index {idx}: {str(e)}") - - finally: - # Ensure streams are closed - if 'low_res_stream' in locals(): - low_res_stream.close() - if 'high_res_stream' in locals(): - high_res_stream.close() - -class SuperResolutionModel(AIIA): - def __init__(self, base_model: AIIA, config: AIIAConfig): - super(SuperResolutionModel, self).__init__(config=config) - # Use base model as encoder - self.encoder = base_model - for param in self.encoder.parameters(): - param.requires_grad = False # Freeze encoder layers - - # Add decoder layers to reconstruct image - self.decoder = nn.Sequential( - nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1), - nn.ReLU(), - nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1), - nn.ReLU(), - nn.ConvTranspose2d(128, 3, kernel_size=3, padding=1) - ) - - def forward(self, x): - features = self.encoder(x) - output = self.decoder(features) - return output - -class FineTuner: - def __init__(self, - model: AIIA, - dataset_paths: List[str], - batch_size: int = 32, - learning_rate: float = 0.001, - num_workers: int = 4, - train_ratio: float = 0.8, - output_dir: str = "./training_logs"): - """ - Specialized trainer for image super resolution tasks - - Args: - model (nn.Module): Model instance to finetune - dataset_paths (List[str]): Paths to datasets - batch_size (int): Batch size for training - learning_rate (float): Learning rate for optimizer - num_workers (int): Number of workers for data loading - train_ratio (float): Ratio of data to use for training (rest goes to validation) - output_dir (str): Directory to save training logs and model checkpoints - """ - self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - self.batch_size = batch_size - self.num_workers = num_workers - self.dataset_paths = dataset_paths - self.learning_rate = learning_rate - self.train_ratio = train_ratio - self.model = model - self.output_dir = output_dir - - # Create output directory if it doesn't exist - os.makedirs(output_dir, exist_ok=True) - - # Initialize history tracking - self.train_losses = [] - self.val_losses = [] - self.best_val_loss = float('inf') - self.current_val_loss = float('inf') - - # Initialize CSV logging - self.log_file = os.path.join(output_dir, 'training_log.csv') - if not os.path.exists(self.log_file): - with open(self.log_file, 'w') as f: - f.write('epoch,train_loss,val_loss,best_val_loss\n') - - # Initialize datasets and loaders - self._initialize_datasets() - - # Initialize training parameters - self._initialize_training() - - 
def _freeze_layers(self): - """ - Freeze all layers except those that are part of the decoder or upsampling - We'll assume the last few layers are responsible for upsampling/reconstruction - """ - try: - # Try to identify encoder layers and freeze them - for name, param in self.model.named_parameters(): - if 'encoder' in name: - param.requires_grad = False - - # Unfreeze certain layers (example: last 3 decoder layers) - # Modify this based on your actual model architecture - for name, param in self.model.named_parameters(): - if 'decoder' in name and 'block4' in name or 'block5' in name: - param.requires_grad = True - - except Exception as e: - print(f"Warning: Couldn't freeze layers - {str(e)}") - pass - - def _initialize_datasets(self): - """ - Helper method to initialize datasets - """ - if isinstance(self.dataset_paths, list): - df_train = pd.concat([pd.read_parquet(path) for path in self.dataset_paths], ignore_index=True) - else: - raise ValueError("Invalid dataset_paths format. Must be a list.") - - # Split into train and validation sets - df_train, df_val = train_test_split( - df_train, - test_size=1 - self.train_ratio, - random_state=42 - ) - - # Define preprocessing transforms with augmentation - self.transform = transforms.Compose([ - transforms.ToTensor(), - transforms.RandomResizedCrop(256), - transforms.RandomHorizontalFlip(), - transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + # Data augmentation pipeline + self.augmentation = Compose([ + Resize(height=512, width=512), + RandomBrightnessContrast(), + HorizontalFlip(p=0.5), + VerticalFlip(p=0.5), + Rotate(limit=45), + GaussianBlur(p=0.3), + Normalize(mean=[0.5], std=[0.5]), + ToTensorV2() ]) - - # Create datasets and dataloaders - self.train_dataset = ImageDataset(df_train, transform=self.transform) - self.val_dataset = ImageDataset(df_val, transform=self.transform) - - self.train_loader = DataLoader( - self.train_dataset, - batch_size=self.batch_size, - shuffle=True, - num_workers=self.num_workers - ) - - self.val_loader = DataLoader( - self.val_dataset, - batch_size=self.batch_size, - shuffle=False, - num_workers=self.num_workers - ) if df_val is not None else None - - def _log_metrics(self, epoch: int, train_loss: float, val_loss: float): - """ - Log training metrics to CSV file - Args: - epoch (int): Current epoch number - train_loss (float): Training loss for the epoch - val_loss (float): Validation loss for the epoch - """ - with open(self.log_file, 'a') as f: - f.write(f'{epoch},{train_loss:.6f},{val_loss:.6f},{self.best_val_loss:.6f}\n') + def __len__(self): + return len(self.df) - def _initialize_training(self): - """ - Helper method to initialize training parameters - """ - # Freeze layers except those we want to finetune - self._freeze_layers() - - # Initialize optimizer and scheduler - params_to_optimize = [p for p in self.model.parameters() if p.requires_grad] + def __getitem__(self, idx): + # Get the byte strings + low_res_bytes = self.df.iloc[idx]['low_res'] + high_res_bytes = self.df.iloc[idx]['high_res'] - if not params_to_optimize: - raise ValueError("No parameters found to optimize") - - # Use Adam with weight decay for better regularization - self.optimizer = torch.optim.Adam( - params_to_optimize, - lr=self.learning_rate, - weight_decay=1e-4 # Add L2 regularization - ) - - # Reduce learning rate when validation loss plateaus - self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( - self.optimizer, - factor=0.1, # Multiply LR by this factor on plateau - patience=3, # Number of epochs 
to wait before reducing LR - verbose=True - ) - - # Use a combination of L1 and L2 losses for better performance - self.criterion = nn.L1Loss() - self.mse_criterion = nn.MSELoss() - - def _train_epoch(self): - """ - Train model for one epoch - Returns: - float: Average training loss for the epoch - """ - self.model.train() - running_loss = 0.0 - - for batch in tqdm(self.train_loader, desc="Training"): - low_ress = batch['low_ress'].to(self.device) - high_ress = batch['high_ress'].to(self.device) - - # Forward pass (we'll use the model's existing architecture without adding layers) - try: - features = self.model(low_ress) - print("Features shape:", features.shape) # Check output dimensions - print("High-res shape:", high_ress.shape) # Check target dimensions - except Exception as e: - raise RuntimeError(f"Error during forward pass: {str(e)}") - - # Calculate loss with different scaling for L1 and MSE components - l1_loss = self.criterion(features, high_ress) * 0.5 - mse_loss = self.mse_criterion(features, high_ress) * 0.5 - total_loss = l1_loss + mse_loss - - # Backward pass and optimize - self.optimizer.zero_grad() - total_loss.backward() - self.optimizer.step() - - running_loss += total_loss.item() - - epoch_loss = running_loss / len(self.train_loader) - self.train_losses.append(epoch_loss) - print(f"Train Loss: {epoch_loss:.4f}") - return epoch_loss - - def _train_epoch(self): - """Train model for one epoch""" - self.model.train() - running_loss = 0.0 - - for batch in tqdm(self.train_loader, desc="Training"): - low_ress = batch['low_ress'].to(self.device) - high_ress = batch['high_ress'].to(self.device) - - # Forward pass - try: - outputs = self.model(low_ress) # Now outputs are images - print("Output shape:", outputs.shape) - print("High-res shape:", high_ress.shape) - except Exception as e: - raise RuntimeError(f"Error during forward pass: {str(e)}") - - # Calculate loss - l1_loss = self.criterion(outputs, high_ress) * 0.5 - mse_loss = self.mse_criterion(outputs, high_ress) * 0.5 - total_loss = l1_loss + mse_loss - - # Backward pass and optimize - self.optimizer.zero_grad() - total_loss.backward() - self.optimizer.step() - - running_loss += total_loss.item() - - epoch_loss = running_loss / len(self.train_loader) - self.train_lossess.append(epoch_loss) - print(f"Train Loss: {epoch_loss:.4f}") - return epoch_loss - - def train(self, num_epochs: int = 10): - """ - Train the model for specified number of epochs - Args: - num_epochs (int): Number of epochs to train for - """ - self.model.to(self.device) + # Convert bytes to numpy arrays + low_res = cv2.imdecode(np.frombuffer(low_res_bytes, np.uint8), -1) + high_res = cv2.imdecode(np.frombuffer(high_res_bytes, np.uint8), -1) - print(f"Training metrics will be logged to: {self.log_file}") - - for epoch in range(num_epochs): - print(f"\nEpoch {epoch+1}/{num_epochs}") - - # Train phase - train_loss = self._train_epoch() - - # Validation phase - if self.val_loader is not None: - val_loss = self._validate_epoch() - - # Update learning rate scheduler based on validation loss - self.scheduler.step(val_loss) - - # Log metrics - self._log_metrics(epoch + 1, train_loss, val_loss) - - # Save best model based on validation loss - if self.current_val_loss < self.best_val_loss: - print(f"Validation loss improved from {self.best_val_loss:.4f} to {self.current_val_loss:.4f}") - self.best_val_loss = self.current_val_loss - model_save_path = os.path.join(self.output_dir, "aiuNN-optimized") - self.model.save(model_save_path) - print(f"Model saved to: 
{model_save_path}") - - # After training, save the final model - final_model_path = os.path.join(self.output_dir, "aiuNN-final") - self.model.save(final_model_path) - print(f"\nFinal model saved to: {final_model_path}") + # Apply augmentation and normalization + augmented = self.augmentation(image=low_res, mask=high_res) + low_res = augmented['image'] + high_res = augmented['mask'] - -if __name__ == "__main__": - # Load your model first - config = AIIAConfig.load("/root/vision/AIIA/AIIA-base-512") - model = SuperResolutionModel(base_model=AIIABase.load("/root/vision/AIIA/AIIA-base-512"), config=config) + return { + 'low_res': low_res, + 'high_res': high_res + } - trainer = FineTuner( - model=model, - dataset_paths=[ - "/root/training_data/vision-dataset/image_upscaler.parquet", - "/root/training_data/vision-dataset/image_vec_upscaler.parquet" - ], - batch_size=2, # Increased batch size - learning_rate=1e-4 # Reduced initial LR + +def finetune_model(model: AIIA, train_parquet_path, val_parquet_path, batch_size=8, epochs = 10): + # Initialize dataset and dataloader + train_dataset = aiuNNDataset(train_parquet_path) + val_dataset = aiuNNDataset(val_parquet_path) + + train_loader = torch.utils.data.DataLoader( + train_dataset, + batch_size=batch_size, + shuffle=True, + num_workers=4 ) + + val_loader = torch.utils.data.DataLoader( + val_dataset, + batch_size=batch_size, + shuffle=False, + num_workers=4 + ) + + # Set device + device = 'cuda' if torch.cuda.is_available() else 'cpu' + model.to(device) + + # Define loss function and optimizer + criterion = nn.MSELoss() + optimizer = torch.optim.Adam(model.parameters(), lr=model.config.learning_rate) + + best_val_loss = float('inf') + + for epoch in range(epochs): + model.train() + + train_loss = 0.0 + + for batch_idx, batch in enumerate(train_loader): + low_res = batch['low_res'].to(device) + high_res = batch['high_res'].to(device) + + # Forward pass + outputs = model(low_res) + + # Calculate loss + loss = criterion(outputs, high_res.permute(0, 3, 1, 2)) # Adjust for channel dimensions + + # Backward pass and optimize + optimizer.zero_grad() + loss.backward() + optimizer.step() + + train_loss += loss.item() + + avg_train_loss = train_loss / len(train_loader) + + print(f"Epoch {epoch+1}, Training Loss: {avg_train_loss:.4f}") + + # Validation + model.eval() + val_loss = 0.0 + + with torch.no_grad(): + for batch in val_loader: + low_res = batch['low_res'].to(device) + high_res = batch['high_res'].to(device) + + outputs = model(low_res) + loss = criterion(outputs, high_res.permute(0, 3, 1, 2)) + + val_loss += loss.item() + + avg_val_loss = val_loss / len(val_loader) + + print(f"Epoch {epoch+1}, Validation Loss: {avg_val_loss:.4f}") + + # Save best model + if avg_val_loss < best_val_loss: + best_val_loss = avg_val_loss + model.save("best_model") + + return model - trainer.train(num_epochs=10) # Extended training time \ No newline at end of file +def main(): + # Paths to your data + train_parquet_path = "/root/training_data/vision-dataset/image_upscaler.parquet" + val_parquet_path = "/root/training_data/vision-dataset/image_vec_upscaler.parquet" + + # Load pretrained model + model = AIIA.load("/root/vision/AIIA/AIIA-base-512") + + # Add final upsampling layer if needed (depending on your specific architecture) + if hasattr(model, 'chunked_'): + model.add_module('final_upsample', nn.Upsample(scale_factor=2, mode='bilinear')) + + # Fine-tune + finetune_model( + model, + train_parquet_path, + val_parquet_path + ) + +if __name__ == '__main__': + main() \ No 
newline at end of file From e298ac4c453ee9798a41d8a357e54348473b36bd Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Wed, 5 Feb 2025 21:27:42 +0100 Subject: [PATCH 026/100] manually loading the correct model again --- src/aiunn/finetune.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 4fde964..7956384 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -138,7 +138,7 @@ def main(): val_parquet_path = "/root/training_data/vision-dataset/image_vec_upscaler.parquet" # Load pretrained model - model = AIIA.load("/root/vision/AIIA/AIIA-base-512") + model = AIIABase.load("/root/vision/AIIA/AIIA-base-512") # Add final upsampling layer if needed (depending on your specific architecture) if hasattr(model, 'chunked_'): From 1a66169b3737d6951a2c2294d6b634eaa3c3b0c1 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Wed, 5 Feb 2025 21:33:33 +0100 Subject: [PATCH 027/100] updated keys --- src/aiunn/finetune.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 7956384..cb56565 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -34,8 +34,8 @@ class aiuNNDataset(torch.utils.data.Dataset): def __getitem__(self, idx): # Get the byte strings - low_res_bytes = self.df.iloc[idx]['low_res'] - high_res_bytes = self.df.iloc[idx]['high_res'] + low_res_bytes = self.df.iloc[idx]['low_ress'] + high_res_bytes = self.df.iloc[idx]['high_ress'] # Convert bytes to numpy arrays low_res = cv2.imdecode(np.frombuffer(low_res_bytes, np.uint8), -1) From 72a5959bc13a64bd059be6492acec5eae62295e4 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Wed, 5 Feb 2025 22:32:39 +0100 Subject: [PATCH 028/100] updated batchsize and fixed key names --- src/aiunn/finetune.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index cb56565..34a08c3 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -34,8 +34,8 @@ class aiuNNDataset(torch.utils.data.Dataset): def __getitem__(self, idx): # Get the byte strings - low_res_bytes = self.df.iloc[idx]['low_ress'] - high_res_bytes = self.df.iloc[idx]['high_ress'] + low_res_bytes = self.df.iloc[idx]['image_512'] + high_res_bytes = self.df.iloc[idx]['image_1024'] # Convert bytes to numpy arrays low_res = cv2.imdecode(np.frombuffer(low_res_bytes, np.uint8), -1) From bac4a9010a6ea7628ad1a9cfe094507efc58c474 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Thu, 6 Feb 2025 18:33:43 +0100 Subject: [PATCH 029/100] updated dataset loading --- src/aiunn/finetune.py | 82 +++++++++++++++++++++++++++++++------------ 1 file changed, 60 insertions(+), 22 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 34a08c3..23434cd 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -1,58 +1,96 @@ import torch import pandas as pd -import numpy as np -import cv2 -import os from albumentations import ( Compose, Resize, Normalize, RandomBrightnessContrast, HorizontalFlip, VerticalFlip, Rotate, GaussianBlur ) from albumentations.pytorch import ToTensorV2 +from PIL import Image, ImageFile +import io +import base64 from torch import nn # Import the model and config from your existing code from aiia import AIIA, AIIAConfig, AIIABase, AIIABaseShared, AIIAmoe, AIIAchunked, AIIArecursive class aiuNNDataset(torch.utils.data.Dataset): - def __init__(self, parquet_path, config=None): + def __init__(self, parquet_path): # Read the Parquet file
self.df = pd.read_parquet(parquet_path) - + # Data augmentation pipeline self.augmentation = Compose([ - Resize(height=512, width=512), + Resize((512, 512)), RandomBrightnessContrast(), HorizontalFlip(p=0.5), VerticalFlip(p=0.5), - Rotate(limit=45), - GaussianBlur(p=0.3), + Rotate(degrees=45), + GaussianBlur(kernel_size=3, sigma=(0.1, 2.0)), Normalize(mean=[0.5], std=[0.5]), ToTensorV2() ]) - + def __len__(self): return len(self.df) - + + def load_image(self, image_data): + try: + # Handle both bytes and base64 encoded strings + if isinstance(image_data, str): + # Decode base64 string to bytes + image_data = base64.b64decode(image_data) + + # Verify data is valid before creating BytesIO + if not isinstance(image_data, bytes): + raise ValueError("Invalid image data format") + + # Create image stream + image_stream = io.BytesIO(image_data) + + # Enable loading of truncated images + ImageFile.LOAD_TRUNCATED_IMAGES = True + + # Load and convert image to RGB + image = Image.open(image_stream).convert('RGB') + + # Create fresh copy for verify() since it modifies the image object + image_verify = image.copy() + + # Verify image is valid + try: + image_verify.verify() + except Exception as e: + raise ValueError(f"Image verification failed: {str(e)}") + finally: + image_verify.close() + + return image + + except Exception as e: + raise RuntimeError(f"Error loading image: {str(e)}") + + finally: + # Ensure stream is closed + if 'image_stream' in locals(): + image_stream.close() + def __getitem__(self, idx): - # Get the byte strings - low_res_bytes = self.df.iloc[idx]['image_512'] - high_res_bytes = self.df.iloc[idx]['image_1024'] - - # Convert bytes to numpy arrays - low_res = cv2.imdecode(np.frombuffer(low_res_bytes, np.uint8), -1) - high_res = cv2.imdecode(np.frombuffer(high_res_bytes, np.uint8), -1) - + row = self.df.iloc[idx] + + # Load images using the new method + low_res_image = self.load_image(row['image_512']) + high_res_image = self.load_image(row['image_1024']) + # Apply augmentation and normalization - augmented = self.augmentation(image=low_res, mask=high_res) + augmented = self.augmentation(image=low_res_image, mask=high_res_image) low_res = augmented['image'] high_res = augmented['mask'] - + return { 'low_res': low_res, 'high_res': high_res } - -def finetune_model(model: AIIA, train_parquet_path, val_parquet_path, batch_size=8, epochs = 10): +def finetune_model(model: AIIA, train_parquet_path, val_parquet_path, batch_size=2, epochs = 10): # Initialize dataset and dataloader train_dataset = aiuNNDataset(train_parquet_path) val_dataset = aiuNNDataset(val_parquet_path) From 39efdff09ed25eb76c573eda0faebcca15972d81 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Thu, 6 Feb 2025 20:54:05 +0100 Subject: [PATCH 030/100] updated datasets --- src/aiunn/finetune.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 23434cd..6ce67b4 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -13,13 +13,12 @@ from torch import nn from aiia import AIIA, AIIAConfig, AIIABase, AIIABaseShared, AIIAmoe, AIIAchunked, AIIArecursive class aiuNNDataset(torch.utils.data.Dataset): - def __init__(self, parquet_path, config=None): + def __init__(self, parquet_path): # Read the Parquet file self.df = pd.read_parquet(parquet_path) - # Data augmentation pipeline + # Data augmentation pipeline without Resize as it's redundant self.augmentation = Compose([ - Resize((512, 512)), RandomBrightnessContrast(), 
HorizontalFlip(p=0.5), VerticalFlip(p=0.5), @@ -81,19 +80,25 @@ class aiuNNDataset(torch.utils.data.Dataset): high_res_image = self.load_image(row['image_1024']) # Apply augmentation and normalization - augmented = self.augmentation(image=low_res_image, mask=high_res_image) - low_res = augmented['image'] - high_res = augmented['mask'] + augmented_low = self.augmentation(image=low_res_image) + low_res = augmented_low['image'] + + augmented_high = self.augmentation(image=high_res_image) + high_res = augmented_high['image'] return { 'low_res': low_res, 'high_res': high_res } +from torch.utils.data.dataset import ConcatDataset -def finetune_model(model: AIIA, train_parquet_path, val_parquet_path, batch_size=2, epochs = 10): - # Initialize dataset and dataloader - train_dataset = aiuNNDataset(train_parquet_path) - val_dataset = aiuNNDataset(val_parquet_path) +def finetune_model(model: AIIA, datasets:list[str], batch_size=2, epochs=10): + # Load all datasets and concatenate them + loaded_datasets = [aiuNNDataset(d) for d in datasets] + combined_dataset = ConcatDataset(loaded_datasets) + + # Split into training and validation sets + train_dataset, val_dataset = combined_dataset.train_val_split() train_loader = torch.utils.data.DataLoader( train_dataset, From a48bad5a4977f3e473f84e8f89604f237937b181 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 7 Feb 2025 15:26:57 +0100 Subject: [PATCH 031/100] limit dataset --- src/aiunn/finetune.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 6ce67b4..0226db4 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -15,7 +15,7 @@ from aiia import AIIA, AIIAConfig, AIIABase, AIIABaseShared, AIIAmoe, AIIAchunke class aiuNNDataset(torch.utils.data.Dataset): def __init__(self, parquet_path): # Read the Parquet file - self.df = pd.read_parquet(parquet_path) + self.df = pd.read_parquet(parquet_path).head(2500) # Data augmentation pipeline without Resize as it's redundant self.augmentation = Compose([ From 20dd9f68ed15af00eaa3ef42fd6c4a776cf30f5a Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sat, 8 Feb 2025 12:52:48 +0100 Subject: [PATCH 032/100] added tqdm to improve code --- src/aiunn/finetune.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 0226db4..e155e5e 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -15,7 +15,7 @@ from aiia import AIIA, AIIAConfig, AIIABase, AIIABaseShared, AIIAmoe, AIIAchunke class aiuNNDataset(torch.utils.data.Dataset): def __init__(self, parquet_path): # Read the Parquet file - self.df = pd.read_parquet(parquet_path).head(2500) + self.df = pd.read_parquet(parquet_path).head(1250) # Data augmentation pipeline without Resize as it's redundant self.augmentation = Compose([ @@ -124,12 +124,15 @@ def finetune_model(model: AIIA, datasets:list[str], batch_size=2, epochs=10): best_val_loss = float('inf') + from tqdm import tqdm + for epoch in range(epochs): model.train() - + train_loss = 0.0 - - for batch_idx, batch in enumerate(train_loader): + + for batch_idx, batch in enumerate(tqdm(train_loader)): + # Your training code here low_res = batch['low_res'].to(device) high_res = batch['high_res'].to(device) @@ -155,7 +158,7 @@ def finetune_model(model: AIIA, datasets:list[str], batch_size=2, epochs=10): val_loss = 0.0 with torch.no_grad(): - for batch in val_loader: + for batch in tqdm(val_loader, desc="Validation"): low_res = batch['low_res'].to(device) 
high_res = batch['high_res'].to(device) From 8fafbebe45860314fc32bddf30e62ba44be1a783 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sat, 8 Feb 2025 16:55:54 +0100 Subject: [PATCH 033/100] fixed loading --- src/aiunn/finetune.py | 124 +++++++++++++++--------------------------- 1 file changed, 43 insertions(+), 81 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index e155e5e..f40296e 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -8,23 +8,22 @@ from albumentations.pytorch import ToTensorV2 from PIL import Image, ImageFile import io import base64 +import numpy as np from torch import nn -# Import the model and config from your existing code +from torch.utils.data import random_split from aiia import AIIA, AIIAConfig, AIIABase, AIIABaseShared, AIIAmoe, AIIAchunked, AIIArecursive class aiuNNDataset(torch.utils.data.Dataset): def __init__(self, parquet_path): - # Read the Parquet file - self.df = pd.read_parquet(parquet_path).head(1250) + self.df = pd.read_parquet(parquet_path, columns=['image_512', 'image_1024']) - # Data augmentation pipeline without Resize as it's redundant self.augmentation = Compose([ - RandomBrightnessContrast(), + RandomBrightnessContrast(p=0.5), HorizontalFlip(p=0.5), VerticalFlip(p=0.5), - Rotate(degrees=45), - GaussianBlur(kernel_size=3, sigma=(0.1, 2.0)), - Normalize(mean=[0.5], std=[0.5]), + Rotate(limit=45, p=0.5), + GaussianBlur(blur_limit=(3, 7), p=0.5), + Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), ToTensorV2() ]) @@ -33,92 +32,66 @@ class aiuNNDataset(torch.utils.data.Dataset): def load_image(self, image_data): try: - # Handle both bytes and base64 encoded strings if isinstance(image_data, str): - # Decode base64 string to bytes image_data = base64.b64decode(image_data) - # Verify data is valid before creating BytesIO if not isinstance(image_data, bytes): raise ValueError("Invalid image data format") - # Create image stream image_stream = io.BytesIO(image_data) - - # Enable loading of truncated images ImageFile.LOAD_TRUNCATED_IMAGES = True - # Load and convert image to RGB image = Image.open(image_stream).convert('RGB') + image_array = np.array(image) - # Create fresh copy for verify() since it modifies the image object - image_verify = image.copy() - - # Verify image is valid - try: - image_verify.verify() - except Exception as e: - raise ValueError(f"Image verification failed: {str(e)}") - finally: - image_verify.close() - - return image - + return image_array except Exception as e: raise RuntimeError(f"Error loading image: {str(e)}") - finally: - # Ensure stream is closed if 'image_stream' in locals(): image_stream.close() def __getitem__(self, idx): row = self.df.iloc[idx] - # Load images using the new method low_res_image = self.load_image(row['image_512']) high_res_image = self.load_image(row['image_1024']) - # Apply augmentation and normalization augmented_low = self.augmentation(image=low_res_image) - low_res = augmented_low['image'] - augmented_high = self.augmentation(image=high_res_image) - high_res = augmented_high['image'] - return { - 'low_res': low_res, - 'high_res': high_res + 'low_res': augmented_low['image'], + 'high_res': augmented_high['image'] } -from torch.utils.data.dataset import ConcatDataset - -def finetune_model(model: AIIA, datasets:list[str], batch_size=2, epochs=10): - # Load all datasets and concatenate them +def finetune_model(model: AIIA, datasets: list[str], batch_size=2, epochs=10): loaded_datasets = [aiuNNDataset(d) for d in datasets] - combined_dataset = 
ConcatDataset(loaded_datasets) + combined_dataset = torch.utils.data.ConcatDataset(loaded_datasets) - # Split into training and validation sets - train_dataset, val_dataset = combined_dataset.train_val_split() + train_size = int(0.8 * len(combined_dataset)) + val_size = len(combined_dataset) - train_size + train_dataset, val_dataset = random_split(combined_dataset, [train_size, val_size]) train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=batch_size, shuffle=True, - num_workers=4 + num_workers=4, + pin_memory=True, + persistent_workers=True ) val_loader = torch.utils.data.DataLoader( val_dataset, batch_size=batch_size, shuffle=False, - num_workers=4 + num_workers=4, + pin_memory=True, + persistent_workers=True ) - # Set device - device = 'cuda' if torch.cuda.is_available() else 'cpu' - model.to(device) + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + model = model.to(device) - # Define loss function and optimizer criterion = nn.MSELoss() optimizer = torch.optim.Adam(model.parameters(), lr=model.config.learning_rate) @@ -128,73 +101,62 @@ def finetune_model(model: AIIA, datasets:list[str], batch_size=2, epochs=10): for epoch in range(epochs): model.train() - train_loss = 0.0 - for batch_idx, batch in enumerate(tqdm(train_loader)): - # Your training code here + for batch in tqdm(train_loader, desc=f"Epoch {epoch+1}/Training"): + if torch.cuda.is_available(): + torch.cuda.empty_cache() low_res = batch['low_res'].to(device) high_res = batch['high_res'].to(device) - # Forward pass - outputs = model(low_res) - - # Calculate loss - loss = criterion(outputs, high_res.permute(0, 3, 1, 2)) # Adjust for channel dimensions - - # Backward pass and optimize optimizer.zero_grad() + outputs = model(low_res) + loss = criterion(outputs, high_res) + loss.backward() optimizer.step() - train_loss += loss.item() - - avg_train_loss = train_loss / len(train_loader) + avg_train_loss = train_loss / len(train_loader) print(f"Epoch {epoch+1}, Training Loss: {avg_train_loss:.4f}") - # Validation model.eval() val_loss = 0.0 with torch.no_grad(): for batch in tqdm(val_loader, desc="Validation"): + if torch.cuda.is_available(): + torch.cuda.empty_cache() + low_res = batch['low_res'].to(device) high_res = batch['high_res'].to(device) outputs = model(low_res) - loss = criterion(outputs, high_res.permute(0, 3, 1, 2)) - + loss = criterion(outputs, high_res) val_loss += loss.item() avg_val_loss = val_loss / len(val_loader) - print(f"Epoch {epoch+1}, Validation Loss: {avg_val_loss:.4f}") - - # Save best model if avg_val_loss < best_val_loss: best_val_loss = avg_val_loss - model.save("best_model") + torch.save(model.state_dict(), "best_model.pth") return model def main(): - # Paths to your data - train_parquet_path = "/root/training_data/vision-dataset/image_upscaler.parquet" - val_parquet_path = "/root/training_data/vision-dataset/image_vec_upscaler.parquet" - - # Load pretrained model + BATCH_SIZE = 1 model = AIIABase.load("/root/vision/AIIA/AIIA-base-512") - # Add final upsampling layer if needed (depending on your specific architecture) if hasattr(model, 'chunked_'): model.add_module('final_upsample', nn.Upsample(scale_factor=2, mode='bilinear')) - # Fine-tune finetune_model( - model, - train_parquet_path, - val_parquet_path + model=model, + datasets=[ + "/root/training_data/vision-dataset/image_upscaler.parquet", + "/root/training_data/vision-dataset/image_vec_upscaler.parquet" + ], + batch_size=BATCH_SIZE ) if __name__ == '__main__': From 19e5b727241024f3c9ee397ab8d52216b83b091b 
Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Wed, 12 Feb 2025 06:59:18 +0100 Subject: [PATCH 034/100] simplified code --- src/aiunn/finetune.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index f40296e..284c606 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -15,7 +15,7 @@ from aiia import AIIA, AIIAConfig, AIIABase, AIIABaseShared, AIIAmoe, AIIAchunke class aiuNNDataset(torch.utils.data.Dataset): def __init__(self, parquet_path): - self.df = pd.read_parquet(parquet_path, columns=['image_512', 'image_1024']) + self.df = pd.read_parquet(parquet_path, columns=['image_512', 'image_1024']).head(2500) self.augmentation = Compose([ RandomBrightnessContrast(p=0.5), @@ -144,7 +144,7 @@ def finetune_model(model: AIIA, datasets: list[str], batch_size=2, epochs=10): return model def main(): - BATCH_SIZE = 1 + BATCH_SIZE = 2 model = AIIABase.load("/root/vision/AIIA/AIIA-base-512") if hasattr(model, 'chunked_'): From b4dd550f8dd0e8711e527783c49a27692d93cd9f Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 14 Feb 2025 21:33:47 +0100 Subject: [PATCH 035/100] downsized params --- src/aiunn/finetune.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 284c606..5b2f8fb 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -15,7 +15,7 @@ from aiia import AIIA, AIIAConfig, AIIABase, AIIABaseShared, AIIAmoe, AIIAchunke class aiuNNDataset(torch.utils.data.Dataset): def __init__(self, parquet_path): - self.df = pd.read_parquet(parquet_path, columns=['image_512', 'image_1024']).head(2500) + self.df = pd.read_parquet(parquet_path, columns=['image_512', 'image_1024']).head(2000) self.augmentation = Compose([ RandomBrightnessContrast(p=0.5), @@ -139,7 +139,7 @@ def finetune_model(model: AIIA, datasets: list[str], batch_size=2, epochs=10): print(f"Epoch {epoch+1}, Validation Loss: {avg_val_loss:.4f}") if avg_val_loss < best_val_loss: best_val_loss = avg_val_loss - torch.save(model.state_dict(), "best_model.pth") + model.save("best_model") return model From ca44dd8a77cea716fba4c6acb18e50edc6378b3e Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 14 Feb 2025 21:48:52 +0100 Subject: [PATCH 036/100] added torch.amp support --- src/aiunn/finetune.py | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 5b2f8fb..d994f75 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -63,6 +63,7 @@ class aiuNNDataset(torch.utils.data.Dataset): 'low_res': augmented_low['image'], 'high_res': augmented_high['image'] } + def finetune_model(model: AIIA, datasets: list[str], batch_size=2, epochs=10): loaded_datasets = [aiuNNDataset(d) for d in datasets] combined_dataset = torch.utils.data.ConcatDataset(loaded_datasets) @@ -95,6 +96,9 @@ def finetune_model(model: AIIA, datasets: list[str], batch_size=2, epochs=10): criterion = nn.MSELoss() optimizer = torch.optim.Adam(model.parameters(), lr=model.config.learning_rate) + # Initialize GradScaler for AMP + scaler = torch.amp.GradScaler() + best_val_loss = float('inf') from tqdm import tqdm @@ -110,11 +114,16 @@ def finetune_model(model: AIIA, datasets: list[str], batch_size=2, epochs=10): high_res = batch['high_res'].to(device) optimizer.zero_grad() - outputs = model(low_res) - loss = criterion(outputs, high_res) - - loss.backward() - optimizer.step() + # Use AMP autocast for lower precision computations + 
with torch.cuda.amp.autocast(): + outputs = model(low_res) + loss = criterion(outputs, high_res) + + # Scale the loss for backward pass + scaler.scale(loss).backward() + scaler.step(optimizer) + scaler.update() + train_loss += loss.item() avg_train_loss = train_loss / len(train_loader) @@ -131,8 +140,9 @@ def finetune_model(model: AIIA, datasets: list[str], batch_size=2, epochs=10): low_res = batch['low_res'].to(device) high_res = batch['high_res'].to(device) - outputs = model(low_res) - loss = criterion(outputs, high_res) + with torch.amp.autocast(): + outputs = model(low_res) + loss = criterion(outputs, high_res) val_loss += loss.item() avg_val_loss = val_loss / len(val_loader) @@ -160,4 +170,4 @@ def main(): ) if __name__ == '__main__': - main() \ No newline at end of file + main() From e7e7e960010a2d432d32a7ed199368b3ddff0414 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 14 Feb 2025 22:03:04 +0100 Subject: [PATCH 037/100] max gpu usage added --- src/aiunn/finetune.py | 33 ++++++++++++--------------------- 1 file changed, 12 insertions(+), 21 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index d994f75..84f6b69 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -10,8 +10,10 @@ import io import base64 import numpy as np from torch import nn -from torch.utils.data import random_split +from torch.utils.data import random_split, DataLoader from aiia import AIIA, AIIAConfig, AIIABase, AIIABaseShared, AIIAmoe, AIIAchunked, AIIArecursive +from torch.amp import autocast, GradScaler +from tqdm import tqdm class aiuNNDataset(torch.utils.data.Dataset): def __init__(self, parquet_path): @@ -72,7 +74,7 @@ def finetune_model(model: AIIA, datasets: list[str], batch_size=2, epochs=10): val_size = len(combined_dataset) - train_size train_dataset, val_dataset = random_split(combined_dataset, [train_size, val_size]) - train_loader = torch.utils.data.DataLoader( + train_loader = DataLoader( train_dataset, batch_size=batch_size, shuffle=True, @@ -81,7 +83,7 @@ def finetune_model(model: AIIA, datasets: list[str], batch_size=2, epochs=10): persistent_workers=True ) - val_loader = torch.utils.data.DataLoader( + val_loader = DataLoader( val_dataset, batch_size=batch_size, shuffle=False, @@ -91,39 +93,33 @@ def finetune_model(model: AIIA, datasets: list[str], batch_size=2, epochs=10): ) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + # Limit VRAM usage to 95% of available memory (reducing risk of overflow) + if device.type == 'cuda': + torch.cuda.set_per_process_memory_fraction(0.95, device=device) + model = model.to(device) criterion = nn.MSELoss() optimizer = torch.optim.Adam(model.parameters(), lr=model.config.learning_rate) - # Initialize GradScaler for AMP - scaler = torch.amp.GradScaler() - + scaler = GradScaler() best_val_loss = float('inf') - from tqdm import tqdm - for epoch in range(epochs): model.train() train_loss = 0.0 - for batch in tqdm(train_loader, desc=f"Epoch {epoch+1}/Training"): if torch.cuda.is_available(): torch.cuda.empty_cache() low_res = batch['low_res'].to(device) high_res = batch['high_res'].to(device) - optimizer.zero_grad() - # Use AMP autocast for lower precision computations - with torch.cuda.amp.autocast(): + with autocast(): outputs = model(low_res) loss = criterion(outputs, high_res) - - # Scale the loss for backward pass scaler.scale(loss).backward() scaler.step(optimizer) scaler.update() - train_loss += loss.item() avg_train_loss = train_loss / len(train_loader) @@ -131,26 +127,21 @@ def 
finetune_model(model: AIIA, datasets: list[str], batch_size=2, epochs=10): model.eval() val_loss = 0.0 - with torch.no_grad(): for batch in tqdm(val_loader, desc="Validation"): if torch.cuda.is_available(): torch.cuda.empty_cache() - low_res = batch['low_res'].to(device) high_res = batch['high_res'].to(device) - - with torch.amp.autocast(): + with autocast(): outputs = model(low_res) loss = criterion(outputs, high_res) val_loss += loss.item() - avg_val_loss = val_loss / len(val_loader) print(f"Epoch {epoch+1}, Validation Loss: {avg_val_loss:.4f}") if avg_val_loss < best_val_loss: best_val_loss = avg_val_loss model.save("best_model") - return model def main(): From 5a6680178ac23b5ccfb2be1c394cdc78466ae102 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 14 Feb 2025 22:10:07 +0100 Subject: [PATCH 038/100] try checkpoints --- src/aiunn/finetune.py | 67 +++++++++++++++++++++++++++---------------- 1 file changed, 43 insertions(+), 24 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 84f6b69..817e080 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -12,13 +12,12 @@ import numpy as np from torch import nn from torch.utils.data import random_split, DataLoader from aiia import AIIA, AIIAConfig, AIIABase, AIIABaseShared, AIIAmoe, AIIAchunked, AIIArecursive -from torch.amp import autocast, GradScaler +from torch.cuda.amp import autocast, GradScaler from tqdm import tqdm class aiuNNDataset(torch.utils.data.Dataset): def __init__(self, parquet_path): self.df = pd.read_parquet(parquet_path, columns=['image_512', 'image_1024']).head(2000) - self.augmentation = Compose([ RandomBrightnessContrast(p=0.5), HorizontalFlip(p=0.5), @@ -28,37 +27,31 @@ class aiuNNDataset(torch.utils.data.Dataset): Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), ToTensorV2() ]) - + def __len__(self): return len(self.df) - + def load_image(self, image_data): try: if isinstance(image_data, str): image_data = base64.b64decode(image_data) - if not isinstance(image_data, bytes): raise ValueError("Invalid image data format") - image_stream = io.BytesIO(image_data) ImageFile.LOAD_TRUNCATED_IMAGES = True - image = Image.open(image_stream).convert('RGB') image_array = np.array(image) - return image_array except Exception as e: raise RuntimeError(f"Error loading image: {str(e)}") finally: if 'image_stream' in locals(): image_stream.close() - + def __getitem__(self, idx): row = self.df.iloc[idx] - low_res_image = self.load_image(row['image_512']) high_res_image = self.load_image(row['image_1024']) - augmented_low = self.augmentation(image=low_res_image) augmented_high = self.augmentation(image=high_res_image) return { @@ -66,10 +59,10 @@ class aiuNNDataset(torch.utils.data.Dataset): 'high_res': augmented_high['image'] } -def finetune_model(model: AIIA, datasets: list[str], batch_size=2, epochs=10): +def finetune_model(model: AIIA, datasets: list[str], batch_size=1, epochs=10, accumulation_steps=8, use_checkpoint=False): + # Load and concatenate datasets. 
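The loading code that follows concatenates the parquet-backed datasets, splits them 80/20 with random_split, and builds DataLoaders with pinned memory. Below is a minimal, self-contained sketch of that pattern, using small in-memory TensorDatasets as stand-ins for aiuNNDataset (the patch additionally uses num_workers=4 and persistent_workers=True, which requires num_workers > 0):

import torch
from torch.utils.data import TensorDataset, ConcatDataset, DataLoader, random_split

# Stand-ins for the parquet-backed datasets used in the patches.
ds_a = TensorDataset(torch.randn(60, 3, 8, 8), torch.randn(60, 3, 16, 16))
ds_b = TensorDataset(torch.randn(40, 3, 8, 8), torch.randn(40, 3, 16, 16))

combined = ConcatDataset([ds_a, ds_b])
train_size = int(0.8 * len(combined))          # 80/20 split, as in the patch
val_size = len(combined) - train_size
train_set, val_set = random_split(combined, [train_size, val_size])

train_loader = DataLoader(train_set, batch_size=2, shuffle=True,
                          num_workers=0, pin_memory=torch.cuda.is_available())
val_loader = DataLoader(val_set, batch_size=2, shuffle=False,
                        num_workers=0, pin_memory=torch.cuda.is_available())
print(len(train_set), len(val_set))            # 80 20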
loaded_datasets = [aiuNNDataset(d) for d in datasets] combined_dataset = torch.utils.data.ConcatDataset(loaded_datasets) - train_size = int(0.8 * len(combined_dataset)) val_size = len(combined_dataset) - train_size train_dataset, val_dataset = random_split(combined_dataset, [train_size, val_size]) @@ -93,38 +86,57 @@ def finetune_model(model: AIIA, datasets: list[str], batch_size=2, epochs=10): ) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - # Limit VRAM usage to 95% of available memory (reducing risk of overflow) if device.type == 'cuda': torch.cuda.set_per_process_memory_fraction(0.95, device=device) - model = model.to(device) criterion = nn.MSELoss() optimizer = torch.optim.Adam(model.parameters(), lr=model.config.learning_rate) - scaler = GradScaler() best_val_loss = float('inf') + # Import checkpoint if gradient checkpointing is desired + from torch.utils.checkpoint import checkpoint + for epoch in range(epochs): model.train() train_loss = 0.0 - for batch in tqdm(train_loader, desc=f"Epoch {epoch+1}/Training"): + optimizer.zero_grad() + # Gradient accumulation over several steps (effective batch size = accumulation_steps) + for i, batch in enumerate(tqdm(train_loader, desc=f"Epoch {epoch+1}/Training"), start=1): if torch.cuda.is_available(): torch.cuda.empty_cache() low_res = batch['low_res'].to(device) high_res = batch['high_res'].to(device) - optimizer.zero_grad() + with autocast(): - outputs = model(low_res) - loss = criterion(outputs, high_res) + if use_checkpoint: + # Wrap the forward pass with checkpointing to save memory. + outputs = checkpoint(lambda x: model(x), low_res) + else: + outputs = model(low_res) + # Divide loss to average over accumulation steps. + loss = criterion(outputs, high_res) / accumulation_steps + scaler.scale(loss).backward() + train_loss += loss.item() * accumulation_steps # recover actual loss value + + # Update the optimizer every accumulation_steps iterations. + if i % accumulation_steps == 0: + scaler.step(optimizer) + scaler.update() + optimizer.zero_grad() + + # In case remaining gradients are present from an incomplete accumulation round. + if (i % accumulation_steps) != 0: scaler.step(optimizer) scaler.update() - train_loss += loss.item() + optimizer.zero_grad() avg_train_loss = train_loss / len(train_loader) print(f"Epoch {epoch+1}, Training Loss: {avg_train_loss:.4f}") + # Validation loop (without accumulation, using standard precision) model.eval() val_loss = 0.0 with torch.no_grad(): @@ -139,15 +151,19 @@ def finetune_model(model: AIIA, datasets: list[str], batch_size=2, epochs=10): val_loss += loss.item() avg_val_loss = val_loss / len(val_loader) print(f"Epoch {epoch+1}, Validation Loss: {avg_val_loss:.4f}") + if avg_val_loss < best_val_loss: best_val_loss = avg_val_loss model.save("best_model") + return model def main(): - BATCH_SIZE = 2 - model = AIIABase.load("/root/vision/AIIA/AIIA-base-512") + BATCH_SIZE = 1 # Use a batch size of 1. + ACCUMULATION_STEPS = 8 # Accumulate gradients over 8 iterations for an effective batch size of 8. + USE_CHECKPOINT = False # Set to True to enable gradient checkpointing instead. 
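The accumulation loop above divides each per-batch loss by accumulation_steps, lets gradients add up across backward calls, and only steps the optimizer every accumulation_steps iterations (plus one flush for a trailing partial round). A stripped-down sketch of that pattern together with the torch.amp scaler follows; the small convolutional model and random batches are placeholders, not repository code:

import torch
from torch import nn
from torch.amp import autocast, GradScaler

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
use_amp = device.type == 'cuda'

model = nn.Conv2d(3, 3, kernel_size=3, padding=1).to(device)   # placeholder model
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
scaler = GradScaler(enabled=use_amp)
accumulation_steps = 4

batches = [(torch.randn(1, 3, 32, 32), torch.randn(1, 3, 32, 32)) for _ in range(10)]

optimizer.zero_grad()
for i, (low_res, high_res) in enumerate(batches, start=1):
    low_res, high_res = low_res.to(device), high_res.to(device)
    with autocast(device_type=device.type, enabled=use_amp):
        loss = criterion(model(low_res), high_res) / accumulation_steps  # average over the virtual batch
    scaler.scale(loss).backward()   # gradients accumulate across iterations
    if i % accumulation_steps == 0:
        scaler.step(optimizer)      # unscale and apply the accumulated gradient
        scaler.update()
        optimizer.zero_grad()

if len(batches) % accumulation_steps != 0:  # flush a trailing, incomplete accumulation round
    scaler.step(optimizer)
    scaler.update()
    optimizer.zero_grad()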
+ model = AIIABase.load("/root/vision/AIIA/AIIA-base-512") if hasattr(model, 'chunked_'): model.add_module('final_upsample', nn.Upsample(scale_factor=2, mode='bilinear')) @@ -157,7 +173,10 @@ def main(): "/root/training_data/vision-dataset/image_upscaler.parquet", "/root/training_data/vision-dataset/image_vec_upscaler.parquet" ], - batch_size=BATCH_SIZE + batch_size=BATCH_SIZE, + epochs=10, + accumulation_steps=ACCUMULATION_STEPS, + use_checkpoint=USE_CHECKPOINT ) if __name__ == '__main__': From 9645e1da23800a04aabb2f7a1a27e5ea640e16b8 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 14 Feb 2025 22:14:09 +0100 Subject: [PATCH 039/100] fixed checkpoint --- src/aiunn/finetune.py | 38 ++++++++++++++------------------------ 1 file changed, 14 insertions(+), 24 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 817e080..0141a6d 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -14,6 +14,7 @@ from torch.utils.data import random_split, DataLoader from aiia import AIIA, AIIAConfig, AIIABase, AIIABaseShared, AIIAmoe, AIIAchunked, AIIArecursive from torch.cuda.amp import autocast, GradScaler from tqdm import tqdm +from torch.utils.checkpoint import checkpoint class aiuNNDataset(torch.utils.data.Dataset): def __init__(self, parquet_path): @@ -86,57 +87,48 @@ def finetune_model(model: AIIA, datasets: list[str], batch_size=1, epochs=10, ac ) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + # Fix: Pass the current device index (an integer) rather than a torch.device without index. if device.type == 'cuda': - torch.cuda.set_per_process_memory_fraction(0.95, device=device) - model = model.to(device) + current_device = torch.cuda.current_device() + torch.cuda.set_per_process_memory_fraction(0.95, device=current_device) + model = model.to(device) criterion = nn.MSELoss() optimizer = torch.optim.Adam(model.parameters(), lr=model.config.learning_rate) scaler = GradScaler() best_val_loss = float('inf') - # Import checkpoint if gradient checkpointing is desired - from torch.utils.checkpoint import checkpoint - for epoch in range(epochs): model.train() train_loss = 0.0 optimizer.zero_grad() - # Gradient accumulation over several steps (effective batch size = accumulation_steps) for i, batch in enumerate(tqdm(train_loader, desc=f"Epoch {epoch+1}/Training"), start=1): if torch.cuda.is_available(): torch.cuda.empty_cache() low_res = batch['low_res'].to(device) high_res = batch['high_res'].to(device) - with autocast(): if use_checkpoint: - # Wrap the forward pass with checkpointing to save memory. + # Use checkpointing to save intermediate activations if needed. outputs = checkpoint(lambda x: model(x), low_res) else: outputs = model(low_res) - # Divide loss to average over accumulation steps. loss = criterion(outputs, high_res) / accumulation_steps - scaler.scale(loss).backward() - train_loss += loss.item() * accumulation_steps # recover actual loss value - - # Update the optimizer every accumulation_steps iterations. + train_loss += loss.item() * accumulation_steps if i % accumulation_steps == 0: scaler.step(optimizer) scaler.update() optimizer.zero_grad() - - # In case remaining gradients are present from an incomplete accumulation round. 
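The use_checkpoint branch relies on torch.utils.checkpoint, which saves activation memory by recomputing the wrapped forward pass during backward. A standalone sketch with a placeholder network is below; note that gradients only flow through a checkpointed segment if at least one tensor input requires grad, which is what the later "fixed checkpoints tensors" patch addresses with requires_grad_():

import torch
from torch import nn
from torch.utils.checkpoint import checkpoint

model = nn.Sequential(                      # placeholder network
    nn.Conv2d(3, 8, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.Conv2d(8, 3, kernel_size=3, padding=1),
)
x = torch.randn(1, 3, 32, 32).requires_grad_()   # checkpointed inputs must require grad

out = checkpoint(model, x, use_reentrant=False)  # activations are recomputed during backward
out.mean().backward()                            # gradients flow through the recomputed graph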
+ # Handle leftover gradients if (i % accumulation_steps) != 0: scaler.step(optimizer) scaler.update() optimizer.zero_grad() - + avg_train_loss = train_loss / len(train_loader) print(f"Epoch {epoch+1}, Training Loss: {avg_train_loss:.4f}") - # Validation loop (without accumulation, using standard precision) model.eval() val_loss = 0.0 with torch.no_grad(): @@ -151,22 +143,20 @@ def finetune_model(model: AIIA, datasets: list[str], batch_size=1, epochs=10, ac val_loss += loss.item() avg_val_loss = val_loss / len(val_loader) print(f"Epoch {epoch+1}, Validation Loss: {avg_val_loss:.4f}") - if avg_val_loss < best_val_loss: best_val_loss = avg_val_loss model.save("best_model") - return model def main(): - BATCH_SIZE = 1 # Use a batch size of 1. - ACCUMULATION_STEPS = 8 # Accumulate gradients over 8 iterations for an effective batch size of 8. - USE_CHECKPOINT = False # Set to True to enable gradient checkpointing instead. + BATCH_SIZE = 2 # Use a batch size of 2. + ACCUMULATION_STEPS = 8 # Accumulate gradients to simulate a larger batch. + USE_CHECKPOINT = True # Set to True to enable gradient checkpointing if needed. model = AIIABase.load("/root/vision/AIIA/AIIA-base-512") if hasattr(model, 'chunked_'): model.add_module('final_upsample', nn.Upsample(scale_factor=2, mode='bilinear')) - + finetune_model( model=model, datasets=[ @@ -178,6 +168,6 @@ def main(): accumulation_steps=ACCUMULATION_STEPS, use_checkpoint=USE_CHECKPOINT ) - + if __name__ == '__main__': main() From ca29ee748b597324f70d0074a90ccd5632874931 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 14 Feb 2025 22:23:38 +0100 Subject: [PATCH 040/100] fixed rsults --- src/aiunn/finetune.py | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 0141a6d..92fa954 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -12,7 +12,7 @@ import numpy as np from torch import nn from torch.utils.data import random_split, DataLoader from aiia import AIIA, AIIAConfig, AIIABase, AIIABaseShared, AIIAmoe, AIIAchunked, AIIArecursive -from torch.cuda.amp import autocast, GradScaler +from torch.amp import autocast, GradScaler from tqdm import tqdm from torch.utils.checkpoint import checkpoint @@ -87,7 +87,6 @@ def finetune_model(model: AIIA, datasets: list[str], batch_size=1, epochs=10, ac ) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - # Fix: Pass the current device index (an integer) rather than a torch.device without index. if device.type == 'cuda': current_device = torch.cuda.current_device() torch.cuda.set_per_process_memory_fraction(0.95, device=current_device) @@ -109,7 +108,6 @@ def finetune_model(model: AIIA, datasets: list[str], batch_size=1, epochs=10, ac high_res = batch['high_res'].to(device) with autocast(): if use_checkpoint: - # Use checkpointing to save intermediate activations if needed. outputs = checkpoint(lambda x: model(x), low_res) else: outputs = model(low_res) @@ -120,7 +118,6 @@ def finetune_model(model: AIIA, datasets: list[str], batch_size=1, epochs=10, ac scaler.step(optimizer) scaler.update() optimizer.zero_grad() - # Handle leftover gradients if (i % accumulation_steps) != 0: scaler.step(optimizer) scaler.update() @@ -149,14 +146,24 @@ def finetune_model(model: AIIA, datasets: list[str], batch_size=1, epochs=10, ac return model def main(): - BATCH_SIZE = 2 # Use a batch size of 2. - ACCUMULATION_STEPS = 8 # Accumulate gradients to simulate a larger batch. 
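The VRAM cap used in these patches, torch.cuda.set_per_process_memory_fraction, limits how much of the device's memory this process may allocate; the follow-up fix passes an integer device index from torch.cuda.current_device(). A guarded sketch of that call:

import torch

if torch.cuda.is_available():
    device_index = torch.cuda.current_device()   # integer index of the active GPU
    torch.cuda.set_per_process_memory_fraction(0.95, device=device_index)  # cap this process at ~95% of VRAM
    print(f"Memory fraction capped on cuda:{device_index}")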
- USE_CHECKPOINT = True # Set to True to enable gradient checkpointing if needed. - + BATCH_SIZE = 2 + ACCUMULATION_STEPS = 8 + USE_CHECKPOINT = True + model = AIIABase.load("/root/vision/AIIA/AIIA-base-512") + + # Upsample output if a 'chunked_' attribute exists, ensuring spatial dimensions match the high resolution images. if hasattr(model, 'chunked_'): model.add_module('final_upsample', nn.Upsample(scale_factor=2, mode='bilinear')) + # Append a final convolutional layer using values from model.config. + # This converts the hidden feature maps (512 channels) to the 3 required output channels. + final_conv = nn.Conv2d(model.config.hidden_size, model.config.num_channels, kernel_size=3, padding=1) + model.add_module('final_layer', final_conv) + + print("Modified model architecture:") + print(model.config) + finetune_model( model=model, datasets=[ From 1234fc5beb0ee725a8fc7c993ce1d8b421f7b13a Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 14 Feb 2025 22:27:19 +0100 Subject: [PATCH 041/100] added device type to autocast --- src/aiunn/finetune.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 92fa954..774da0c 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -106,7 +106,7 @@ def finetune_model(model: AIIA, datasets: list[str], batch_size=1, epochs=10, ac torch.cuda.empty_cache() low_res = batch['low_res'].to(device) high_res = batch['high_res'].to(device) - with autocast(): + with autocast(device_type="cuda"): if use_checkpoint: outputs = checkpoint(lambda x: model(x), low_res) else: @@ -134,7 +134,7 @@ def finetune_model(model: AIIA, datasets: list[str], batch_size=1, epochs=10, ac torch.cuda.empty_cache() low_res = batch['low_res'].to(device) high_res = batch['high_res'].to(device) - with autocast(): + with autocast(device_type="cuda"): outputs = model(low_res) loss = criterion(outputs, high_res) val_loss += loss.item() From 75be3291d3d849ca3eb8be046012e505887247cb Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 14 Feb 2025 22:39:20 +0100 Subject: [PATCH 042/100] added custom upscaler model --- src/aiunn/finetune.py | 56 ++++++++++++++++++++++++++++++------------- 1 file changed, 39 insertions(+), 17 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 774da0c..6d24bb3 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -60,7 +60,33 @@ class aiuNNDataset(torch.utils.data.Dataset): 'high_res': augmented_high['image'] } -def finetune_model(model: AIIA, datasets: list[str], batch_size=1, epochs=10, accumulation_steps=8, use_checkpoint=False): +class Upscaler(nn.Module): + """ + Wraps the base model to perform upsampling and a final convolution. + The base model produces a feature map of size 512x512. + We then upsample by a factor of 2 (to get 1024x1024) + and use a convolution to map the hidden features to 3 output channels. 
+ """ + def __init__(self, base_model: AIIABase): + super(Upscaler, self).__init__() + self.base_model = base_model + self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) + self.final_conv = nn.Conv2d( + base_model.config.hidden_size, + base_model.config.num_channels, + kernel_size=3, + padding=1 + ) + + def forward(self, x): + # Get the feature maps from the base model (expected shape: [B, 512, 512, 512]) + features = self.base_model(x) + # Upsample the features to match high resolution (1024x1024) + upsampled = self.upsample(features) + # Convert from hidden features to output channels + return self.final_conv(upsampled) + +def finetune_model(model: nn.Module, datasets: list[str], batch_size=1, epochs=10, accumulation_steps=8, use_checkpoint=False): # Load and concatenate datasets. loaded_datasets = [aiuNNDataset(d) for d in datasets] combined_dataset = torch.utils.data.ConcatDataset(loaded_datasets) @@ -93,7 +119,7 @@ def finetune_model(model: AIIA, datasets: list[str], batch_size=1, epochs=10, ac model = model.to(device) criterion = nn.MSELoss() - optimizer = torch.optim.Adam(model.parameters(), lr=model.config.learning_rate) + optimizer = torch.optim.Adam(model.parameters(), lr=model.base_model.config.learning_rate) scaler = GradScaler() best_val_loss = float('inf') @@ -108,10 +134,11 @@ def finetune_model(model: AIIA, datasets: list[str], batch_size=1, epochs=10, ac high_res = batch['high_res'].to(device) with autocast(device_type="cuda"): if use_checkpoint: - outputs = checkpoint(lambda x: model(x), low_res) + # Use checkpointing if requested. + features = checkpoint(lambda x: model(x), low_res) else: - outputs = model(low_res) - loss = criterion(outputs, high_res) / accumulation_steps + features = model(low_res) + loss = criterion(features, high_res) / accumulation_steps scaler.scale(loss).backward() train_loss += loss.item() * accumulation_steps if i % accumulation_steps == 0: @@ -142,7 +169,7 @@ def finetune_model(model: AIIA, datasets: list[str], batch_size=1, epochs=10, ac print(f"Epoch {epoch+1}, Validation Loss: {avg_val_loss:.4f}") if avg_val_loss < best_val_loss: best_val_loss = avg_val_loss - model.save("best_model") + model.base_model.save("best_model") return model def main(): @@ -150,19 +177,14 @@ def main(): ACCUMULATION_STEPS = 8 USE_CHECKPOINT = True - model = AIIABase.load("/root/vision/AIIA/AIIA-base-512") + # Load the base model using the config values (hidden_size=512, num_channels=3, etc.) + base_model = AIIABase.load("/root/vision/AIIA/AIIA-base-512") - # Upsample output if a 'chunked_' attribute exists, ensuring spatial dimensions match the high resolution images. - if hasattr(model, 'chunked_'): - model.add_module('final_upsample', nn.Upsample(scale_factor=2, mode='bilinear')) + # Wrap the base model in our Upscaler so that the output is upsampled to 1024x1024 + model = Upscaler(base_model) - # Append a final convolutional layer using values from model.config. - # This converts the hidden feature maps (512 channels) to the 3 required output channels. 
- final_conv = nn.Conv2d(model.config.hidden_size, model.config.num_channels, kernel_size=3, padding=1) - model.add_module('final_layer', final_conv) - - print("Modified model architecture:") - print(model.config) + print("Modified model architecture with upsampling wrapper:") + print(base_model.config) finetune_model( model=model, From e69d0e90ec254785704837bb48bcbbe62d4230b8 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sat, 15 Feb 2025 13:08:09 +0100 Subject: [PATCH 043/100] fixed checkpoints tensors --- src/aiunn/finetune.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 6d24bb3..d3279ba 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -135,6 +135,7 @@ def finetune_model(model: nn.Module, datasets: list[str], batch_size=1, epochs=1 with autocast(device_type="cuda"): if use_checkpoint: # Use checkpointing if requested. + low_res = batch['low_res'].to(device).requires_grad_() features = checkpoint(lambda x: model(x), low_res) else: features = model(low_res) From 619e17c32c20c856748195f6567921a60c47a01b Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sat, 15 Feb 2025 13:17:17 +0100 Subject: [PATCH 044/100] going with batchsize 1 --- src/aiunn/finetune.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index d3279ba..a0e67b2 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -174,9 +174,9 @@ def finetune_model(model: nn.Module, datasets: list[str], batch_size=1, epochs=1 return model def main(): - BATCH_SIZE = 2 + BATCH_SIZE = 1 ACCUMULATION_STEPS = 8 - USE_CHECKPOINT = True + USE_CHECKPOINT = False # Load the base model using the config values (hidden_size=512, num_channels=3, etc.) base_model = AIIABase.load("/root/vision/AIIA/AIIA-base-512") From 4c840869327dd8dd4f492e336a153cc5e2e38efe Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sat, 15 Feb 2025 13:27:30 +0100 Subject: [PATCH 045/100] updated script --- src/aiunn/finetune.py | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index a0e67b2..e616bd0 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -62,29 +62,27 @@ class aiuNNDataset(torch.utils.data.Dataset): class Upscaler(nn.Module): """ - Wraps the base model to perform upsampling and a final convolution. + Transforms the base model's final feature map using a transposed convolution. The base model produces a feature map of size 512x512. - We then upsample by a factor of 2 (to get 1024x1024) - and use a convolution to map the hidden features to 3 output channels. + This layer upsamples by a factor of 2 (yielding 1024x1024) and maps the hidden features + to the output channels using a single ConvTranspose2d layer. """ def __init__(self, base_model: AIIABase): super(Upscaler, self).__init__() self.base_model = base_model - self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) - self.final_conv = nn.Conv2d( - base_model.config.hidden_size, - base_model.config.num_channels, - kernel_size=3, - padding=1 + # Instead of adding separate upsampling and convolutional layers, we use a ConvTranspose2d layer. 
+ self.last_transform = nn.ConvTranspose2d( + in_channels=base_model.config.hidden_size, + out_channels=base_model.config.num_channels, + kernel_size=base_model.config.kernel_size, + stride=2, + padding=1, + output_padding=1 ) def forward(self, x): features = self.base_model(x) - # Upsample the features to match high resolution (1024x1024) - upsampled = self.upsample(features) - # Convert from hidden features to output channels - return self.final_conv(upsampled) + return self.last_transform(features) def finetune_model(model: nn.Module, datasets: list[str], batch_size=1, epochs=10, accumulation_steps=8, use_checkpoint=False): # Load and concatenate datasets. @@ -134,7 +132,6 @@ def finetune_model(model: nn.Module, datasets: list[str], batch_size=1, epochs=1 high_res = batch['high_res'].to(device) with autocast(device_type="cuda"): if use_checkpoint: - # Use checkpointing if requested. low_res = batch['low_res'].to(device).requires_grad_() features = checkpoint(lambda x: model(x), low_res) else: @@ -178,13 +175,13 @@ def main(): ACCUMULATION_STEPS = 8 USE_CHECKPOINT = False - # Load the base model using the config values (hidden_size=512, num_channels=3, etc.) + # Load the base model using the provided configuration (e.g., hidden_size=512, num_channels=3, etc.) base_model = AIIABase.load("/root/vision/AIIA/AIIA-base-512") - # Wrap the base model in our Upscaler so that the output is upsampled to 1024x1024 + # Wrap the base model with our modified Upscaler that transforms its last layer. model = Upscaler(base_model) - print("Modified model architecture with upsampling wrapper:") + print("Modified model architecture with transformed final layer:") print(base_model.config) finetune_model( From 62825e9731e3bace14799271732eced5584a8945 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sat, 15 Feb 2025 13:29:50 +0100 Subject: [PATCH 046/100] increased image size --- src/aiunn/finetune.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index e616bd0..b0dd634 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -18,7 +18,7 @@ from torch.utils.checkpoint import checkpoint class aiuNNDataset(torch.utils.data.Dataset): def __init__(self, parquet_path): - self.df = pd.read_parquet(parquet_path, columns=['image_512', 'image_1024']).head(2000) + self.df = pd.read_parquet(parquet_path, columns=['image_512', 'image_1024']).head(10000) self.augmentation = Compose([ RandomBrightnessContrast(p=0.5), HorizontalFlip(p=0.5), From 704ad6106d954c75f410312728d37c74db8f46f8 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Mon, 17 Feb 2025 16:22:47 +0100 Subject: [PATCH 047/100] updated inference class and test image --- input.jpg | Bin 0 -> 160671 bytes src/aiunn/inference.py | 179 ++++++++++++++++++++++++++++------------- 2 files changed, 121 insertions(+), 58 deletions(-) create mode 100644 input.jpg diff --git a/input.jpg b/input.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0426a63edad307bde0bc3fd73fce8903113156bc GIT binary patch literal 160671 [binary image data for input.jpg omitted]
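For the ConvTranspose2d head introduced in PATCH 045, the output size follows out = (in - 1) * stride - 2 * padding + kernel_size + output_padding, so stride=2, padding=1, output_padding=1 doubles the spatial size exactly when kernel_size is 3 (other kernel sizes shift the result by kernel_size - 3). A quick check with placeholder channel counts:

import torch
from torch import nn

hidden_size, num_channels, kernel_size = 512, 3, 3   # placeholder config values, as quoted in the patches
deconv = nn.ConvTranspose2d(
    in_channels=hidden_size,
    out_channels=num_channels,
    kernel_size=kernel_size,
    stride=2,
    padding=1,
    output_padding=1,
)

features = torch.randn(1, hidden_size, 64, 64)
out = deconv(features)
# (64 - 1) * 2 - 2 * 1 + 3 + 1 = 128, so the spatial size is doubled for kernel_size=3
print(out.shape)   # torch.Size([1, 3, 128, 128])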
zB_HcomNK`iYyJ4DQ=>(*0?Wp5**+7RAnLsH3Hypa*Fbagsg4S;Up&WJ>MW8x^}&*N&@f%;iRM@^ZqPZ% zIR{76)NF*_93 z?Vmc(^k!S04I-ct@btM{_9KYuI|2o{#3tI0RPP8^gQu14WerSl0L!HzVR+5*+~>-fT>>GW@*&S2`P>lS!zsA=gqhCb(p+pgI4x4O}pk zeU|@wM?x^%x$C#2Ies)zqtGfEczf|WEJ>A>9R$3}{W|kP#d6{(1;+VirP-ZV{DYRaT`WtFJ>KyY#$5Df79+;=R-{(S->Y z(Gn=UT^Q?=NcA@-+mDob0+wQkbn9QTbSTktI9u6Ala}x<9&)qvI!FU8J}3gGXSVSG z#hfiAyR7z=Tj$atIHpCRO93Qe%(m6G50v_u$*KieTxPmdNgPHu?r}WpgjpzcXV0@f zDXWF?Ro=Ki@1w^TQ!*B>**CdCDZoFqa}b~}G}-&ZOzpa%DhKqV3n)`zl_Fr%94|LP zyzTYOlcMI81fjgvyKjC>18My@5J&cHm6_QhY5asK&+Hw(RUs`A0OM&CY%t6s-!sOQ z;hJ9{6uoBK&(@GxNXm;?N&a{EdnaX-ohLV&G2vjAlOBBwwEzjsPDE*NuAwL3x3Nw4 z8HKN4A)&Roz0qL{)C-f(*5u2K{61>KgkLmKv+0J7r z#tv9bXRG9K--*`?6!`!*H`*RZ4wYQzQ=(6{Yw{@QtUe~6Tao_~Wdhv;AQr7?1Iv5= zolqSqiaiI!fP49UOgo_(%z2iY5q63Y6>m}88= zS_?f{DzN6SvJ-yc9Wf@<{^=G;IvNb?BGQW^-^-@U(Fg}>Y3!-w2m^Kq2pdrR6-~B7 zU91|eJf}%JD9v=IrXueTe!$(!ILLR|();c;P@6vhDlkzeuGUE$E@ccguO5k^fiGCmdH(=CgZQx$0 zJYDY?aX6UZGp6VT)+|UCO##l7#%?0f{<4thYNHnA4im~j+sqUu&BoSYt4#;7zFMvG z%%-7hk+a+4YKX&<&}SdL!_ zs;5tJSr#HIa#Do2`&8APuYa-$NB?VD_eo;lp5|ieeOOD(Tx{mmB6F0xqpfZ$90cZ> zc1zz&_l&0>*zZ06K5oudGel2)vUW0S;ud`Vh5B0#dI$@3|G(<}U8$mx;v7MaeYmB7 zjK3ia25@#KCW-c=*i<4wPRWnh#0P4{SFom4;dhQ%#PH#x1M0m7?*b3koJyCA`M!g> zu`!Nx14TmXwMez#rn!Z5v1FF&OyRVl(>EP67pabOEOAl4>)gX)&izQuHQvY0&YDb@ zY1-^3y%N5*!GjT|A1`*fAbb|=Op>>lt$l#Pw9VL$TefY`_S$c{PrQ3m;|E=fPAoBswVouklbGpM1z>iF?G z8ns9hJemvoqHHvNdL!R0?1qfwUw_N=8C5CLZ)VJj|47yYwYdh%gh62kaZO6X0`uC- za@9eepR2%UQqfPPA5uUFPJdZnQB-eN<0h{o8w}7r|T#r@u|`B`zlNE+k1%?J4)Nv60oXY zN8CiL(AMSNe*i>jE+{iM)nk1|=nrCgjRBJ}-pAYor)u}%yF5gBIi_nZuGnWP46FnxtwxZY#m*c&#l5!P&(dtc)lLZSSnb&fj^p*@`j^ z%x35ijbDt-G}d;cT~rQtmHQJtJ3io3`9W24{sYX9=gFHjmRqVC8MucF6{IoLk}}mv zgH*{Z7_n?(Q&Su7I$IHL|p zQdPcBH?$Ns*`8EXB$2SLmT7yLe8n7--3?c~a0iiGIdl!h+We`vlJQ-nWHmSX(na9u zWy8=Ps1nfAno9Dg73#o1v*r7?Pl&75lG9eRRw|IWm{A3P^&*=vN44cz&~4zIjN~0HrQupE4ypv zy^$qg>6=+j|JDPEW2eP@3o6DngH+t9SJC6@9IMCi{|2!Hq|J;ol12Gq1UkU|mquw~#l-rNPw{FJtSb}i4cej5Ub1v6j zO!@fK6j-$-Wq#y%Eh3f-f;|GBbMQ9XXaq?#8`4W<5FkYbqzJR!$1A9?TSWaVJ{&(c z9pe-*OhvyPbzH^lr(0*gEACaH5}EKcTDxE)O}((BNNzkh!)>Ehz={a~oCyK!7<)cg zKrkSYii4Szh6uo=U%5KDR|A12qDui&qiD`)4{R^k=9)jQyt?@na6?Cy^hT#1PIqsG5|Ahcc;Kf!c#9`T*3>3O) z9~`O*ar#-on5gGp=aFXBNNm1fp{8j&6sYMQ*XbD}uJjW=Tm(e_c!`+`RCGPSu?RAK zDOexpTcN4q>lFm6TzOzTMQ`2Kd;e(q?xCRQl#^o`tg*)3nPPU^gc=OZEU zU)2NA#x^^e&F^b?-E5Xx-!80}cy~1{={;%jc4&!stQea`zB+mw+r=bDnz^tRYPVC- z`AZQ(l358{aydFzZ06rfd{3WKeQk|wR_M`-nyLDJU6QX=9n`LwQb0SHD5E#WYk3tn zwGBk_>b|3>vz+@S6l1>#N5P1YO8QQ0W)!+C&uex)Ra%_xddkn8bn-U0X?Hri>en3o z*l2GP=kG~M^`#V5gE@Z=<%3ws08Q?qmDv9Pl+MRPXyeDJfzyL2>3UM#pH#!&xH@e# z-kH1cT7iJW=qlULoho+Xnmc*cpRc@+m0Fm>Q+oQg+34|h=r;bB9F}#Mut=-A=^#jt zfz=7!s`_3&P&MR2Si2zVe%;(jJho=wa)=}(H5EA)Nm4(D_Hu$wH5+Z)kDI(g17nO7mxk>t0(57AF44?1!1_SaDH%mS!-n9=lVizAp(1Z~^*SK8x;C}`oV zB3lV0c`ZZ2igRKjCA(!ndUv@Quk*}P8+p{c>2&K=YAQ+TYaW^I>IK0uEWrMV_TwY> zppThPIz)@L(vB>2XVBwElqBr~>Cm&tzL^^l_sZ{-3^Hu5%@7Ga?_)51+WL(2I$5N| zpU-St6V(zAvzZ=!b>ARNpp>>zIg3ElDF-{@^`tLnOB%w8YD@{a%%kIibu^*p+;yUP zaiALq&l)wxrp<~k1bVyaTd=DL{PL7yOWbva@braN9>=Yr3$I;-C$j0}V{$OM=N-H0zr z0X0-(D(#2te9x6M`C9e+e*Xv19oXB_IAl&DEEz!wp)+8CqDdyap?&@_twVtZ&loIF z4-~Cjj<2HqF`A=M<0#~Kx;N3@lD0v_uL!aLkK(~Ny#pK?Zm%L`71ATelsQpg@LV+f zE_HxtqLXSh{dNTc-}6_UoVuB?uMLyhCZ~n9D2*YuHQ;+$_DIMT8s}6 zV0_j2D1HB)JUNf#XYzxkM{f6P9HA`);&0FKf)Oq}b3_-U-j6}4RT|idP42SL4&$aq zLe}GyQi2^|u;W0wPaeb5m5-lP{%s6_Z2hiwoW{(LdT9s z3*hQs!uQ}FeT2r+9_L5#aF{~(Ybg0$@%t?mF}HP-Z73In5r0U3#yfh?(X^;dE8-#7 z>XFSds#wU&nrL~MiE#r^#h@znJF3`V)UPAyd>=KuqJBIM@Kn9c5b;b)9}QF-Y3v~R z)6pR;J*hnD;bMK3p{8waO$Q~N+a~yMB|TU=w4Xh>N{dZ)fi^oK@i6nGxY%IDq{XI8 
zSuMIBk@Oig+pKn%7WtP(;)r}?CPCN%4~L`d8|}i7i4?-O0Zc|-12u60t5s-9jU0ql zP3!R;#&t`oYDuc#wgM2w>RI=7L3dj`r99l+P*a685iT}5)3SybH)LxhE9)taSxf{tSc`B znPe$ilq@GNZVA0Yaj?1y>D}qnn85noNL=~ppJgRej)nxSW+eJg_HVV_RUuzToa`+( z>+MubvfF~?Epe#^M>BI)hjY`G6ITdxYGzfjoa1oLLjb+ae)tJrK-zGx!g72<*t`N` zSgS;?M8dc3no&M-n6;W_-nUnFN+2I&T^MEDL)I3n2X_a<<8@-+nxt#+|LowinLlUi z;bL&pZcG<-cnc}rE*Pi}+3#~W!8{g`|BFn|`yBC>?gSTkSN?jj+tsGBvpG#mi`i$p z^mCEYjtAAI5wopjyXL4qx>VX5Dde;rzXRlV@ewm_2ZntfL$pKe;tTq!ds2>XB6V3~ zq@=SLAk#xye>n_qV|M^v+y4RT?jQYBJn3QSd`!P&h_SY_IvZQEzvmQ-Pfri!0L5VO zq*vS%$t^-r+$=U-gp~d)MstI(HnJ9Tj8;Et)sQX^DwN=#-OBWIiifB>a`kg~d_Q72{_ucYtB0H1< zW|Fut?WlCA6k6e}#1%^1&W|&s{7jlV>C-8}<*3X0}x*r zqlu=5CLXQ(VVO(q#{$ye`PPxw-mCikX6y9c&Wt&eF<=#8(ur#=;(H!>%T)-Z1M7-l zYwqRFpC-8#j?9MviMd3-Q88wZd>= z8q}8Rg;%%?#XNjb_fA8QlMl0L>Zla|WU{I&RFaL|sth`OZ-71f`&a8jLx+Ha*R%3* z9b@D>_&$q0=|DKxn04=z>*8TTw9WWuupIVE@0oz#4R9vSA`%WAl#0iUOiW_y8oZ!^ z3wkajXY$2PosHAP%?$FgvOCeq;1T+NaxU6S)aL~+W(`?E{{YD6O}gDb?Ei&!9%ipAY2W1DFz41vumiQ` zC>HjOsI>BG77V4xH5lXHY9+zSM}|a(zA5RuV~U|q0PR6W0k{s~bN>MfgY6|IL}h!0 zsN2{S$7F@QtcRQ5fmcuZ!kulVUrx&AaRXQTyisceWfrM2vSFfHoUiA#Dr4bl7b!Lz zr&xmQ7#1On*99C!#+XpKMt;gC$#6#0QV7eRLg`gg+s;<-|;$YZ}@`aW}HcV z`(9)5^`7VDfvLE~J%s^m`f8e8oqz=2pyOaGO8zcg8dQzZqOPwoB33;5U}3t&L8K!u zr#YsHPGh;8F()ORLee(c0X@ZV>b3+Aqth7GbCfuLRg~A(jm82qFlc`YfKY#M+Ix-d zPOr8@fS&wJ8kvZsPzvJDj86;}rp=F!G&HYcZs6|aOBNy3hRpGaZl><3k1w+eAA4>7~DBJQ#}cAHXNZ3tw|%Y1?2)_#N=f zi_+^qfO)JR!!!SzA{49}9pw{=HChEV9521KF^A`@qjPt&+w84=jpr;K&aYo1keOUK zLTH_6e;y9%f-gjeUmxA}@=2st(UWG(H40Gn4sK z@g^wv9yxb9JEhGm>-4-LX5Uv&KvnH*@tvedg7JW(NJuvMA6*ZHcAMI&aB^lZRrnK! z7`6c}(gt5~P!cF9zgGGh@aRKhwe*CsD3i^e|McCK5$XTYb(U>$bWs)s5`w$CHVz5y z?u|F@!GpU64Hn$Hao5J(-JRg>7OaB>3-Au}edej(P*wNbv-eu>a&)yPk{>QeqVV$r zY#cvde7-*LU1!<+J z(M`GbGL)z8lM4^M@Tm>m!338R>s#(nlvZP1!tE5Qntv!8BPb7>nKiJ5kaSz9lkQ+U z!n7izIw3tocJQ#(eD5COlnKR_xHP4p@N;zwk*QDAKa8KPo*3kUC%Gj5wR|OHxmQ|m z%ap^RuTSDh-J|2T;mzJ0rfm@rK32LXJ~}B$94^XIssjA^j~kOZPEg3d#H_mrc!;WRYpDs~A`7f)G4~I} zk3jk!d+G$dK`GXWC!zDe^ixWx9Brv-ysF3)9MN^@dPzT>-|qEpw-C*BB_z_|NN$L5 zvzbO2OJbLm?XxH|ah##m)e98;?!NGRZ?i@cy)o*d7jFmV^dun-XYSj(A*y8sUg^h0 zKNAe=5=sN7c}ya)d`tF7V>LSn8XABSsM0a@q)1M>=P*9q_g6vfapp5uZY>4cyRTNJ zFTD8JKsAiI@pQ_*OCt~~h~T1CniKNPs$c>}Fm}XY)37vuFxPAEVi~5$GwqLtWiAl@ z%@0iuJ-d!ZV7cQ`_t~%Tz(^#Jw8l#06gO^%1QQsnF>V3xp!{I%_h)5@?)l<-RDa#2!UiR-Tj8DwBAJ)e*#)ArBouHCT8^KrM*dM^FKJU9QuCK z^+m46VQE0}R=a3x`lw-Bls6is*z`nDbG7uwU+sEPpH4I`Y&JPxCjr4DBzUcw;3X6b z7LaN&1asPc!x*tD}gEk50wp2wVp#{TZ7`4ed-_nsGnZMev)@HisQmPuvZdw@SCAazMCuJ()qEb zSC#SwSy(%gF?XS|lq#&ESw9xcI;5WxAZ=Q^19=K*5k+0?$3!0Gp`~K;ozTpU!Uv zaj%)zNCdk`pN5wrX5Y(ajv=9xwGJ~156@c!H&@>D#wlY{g9#EJNFP5yL??IOV>qKD z)p`-QP-av}>=_|7#yHcgk&G3sCm5ufGOQ33N%0uSAHJqiB=<#ra)Vk_hJ`x@yAIu2=K zW~N8jJuRmaUbQ8tUhBQtSbr8xFzMkGvnF58L^63X8Jdx}iR)s02KX9hU--P+$5Njr zQvhkYNQy*T`Q?G&>tP+7`?`poLn?}U8h01XxN9Af<9imP7b7nd&JG;o zeUhqg1~W|R@qM+iCHfb1MfW~Yd^{ahbhD9wBXrfjb zrXo;(HP^wJWLg}3B8$f|8<25_kCwW74&~eaACo1Bpzye8$1_EL^6lO{gZa4?RG%<=Vf~Xe~+y}=SSkYf@tS4kyD<0}DG+nLfDO=K zq)m-2SONc7tZRPg*%+L)aXf4fd8&Wpj;jrxZuiSZN41(qRr07Vv|0!#f$ohx z)^W!B+JN9U?%^!*K4?8NXsYWp9Vx1X(2ZoPU9Rkst}%n$mBkYFA6!QTm|03<;#51z ziur}Mf0gCBql$3%Cv)WJA#Fxk#371hs|Yo&Tun|D+9ylK_#I055G*=_o{F%AMGk2L zeTU(uo4~H;b8^N$IdVy5MO8%dhLv3k6XDu)&N^+q=BC4x z`ztg<%-!rqw6UAYS*7%KKFpf5F7u!K%8Q9FHt?Ibq`LPKcVemDOjV=0IN?fPp&A%B zYHqiw7WE@coW+`K&unPQ&79hN9qL{DgS(qP%~s(+nf>cwOlF)F1m?Bi&~Ew_{2|RE zF>FJJ9#+Az%xR^nyyj*#QJT8erQi6&%)j9Bop%EVagP&fzxEc_evU~Tj#~h2Y8!yj zzf0>LPBTMg*>rc+o-@{0Aq%riY-ShQ7jVIt&8CiB&QbRnj9Aq?rO^-wD6GPzp+R_v zetMRh1+`xtRbdQ4wmFtAwo@i!C_PdkGZ;{U0%qo8rsnC>uE?=WGE4n}W$va1eE!q% 
zT_e$GgvwR)1A4Q;TKr}`eaujt;jvv&p}stV6kSquJ{yv^>vHSGd4j+~FhgPB2YpQr zF!!(ay8=ydEDKPTVWxOYup$OCu zoDS=Fj{7brL3lZTEQlhjqhl!ITS;dz{#_579C{a1CPwr{_z-Kuc{fU+c@rJ<2J9iTl+C!(o;U|1n za3BP4kM>ou`o^8`KT$?Vre4{))Z)x-OtodHg_~XqzwxrP$?~>!)7l6C>ct@nk2Km@ z4St_5xj$X<|FpCaFz@ULPA%++1Nx>e+!R9@rPNIa3O{UO@AC(Jylx>I(F@wQC4%^; zY%MBir-s*Y2$o1n#@Hc88U8u6w;FN8p!xqE!jbEDm)k#YK+>RwQG{F<`jw7vEb?gR zU!F?*sfwQ=eirVdA{ZL`cxTMt>?)XT0!xHl9o*L)e`jNC3(`g zWz65eoNjYDwu;QsE#`cFO}@~_&a+R3zE0p}G?=j34gMuR?w7E4hw&IyKUas!Q1c zt;ow2P|x6>;zpaSa1}&txwbqS|K+et4N>LAECV0smJb&PK}L`|nr*WmZHh6ei0=K|mx;JLU6f z&$s|?NKI1E)@dTk>7sJ8m3szjy`MwSw?0l0)qGtc;1WRjx$c;csw5{}#--QJ4~uP? zZBpcxTaCV<&X(RAu(j59W1QaT^B^|(XMV8>;oUEinzA(sN6*^_-?Y9lBzl!eX`qxh z_4&~F?&f~^i<-eb?BBK_pM#njf7V6SFR+zztSe7XlR`X(8SN}wq{6VGn;zybKhLDy z{rKDh)T%^q(U<9rCclIg$cj5j8oYTMCRR*oS(L5Jvx$;ru#tTR!?g9UXSt$>(!IyI zN9S#oCH6rV*ia}M=6V|UH@zA2;=u*K&#EHDp2xwGJPem zOoPB96Oq>i`p!Dqbz8F_LwQ@X-e;?IIRaLs60Q=!?F1y09aVXl+e*hUxqnskCsTbO ziO>n9+Wx@yIa!l9ge8~f^O@|`?C|u``kBCSHXB7O*m%S+mEAILE z5V;#Veib;i>0>tAD)Jv(MAOB%!n=?)lM4z+ec4g%wJ{F#n?WtfY}XtpnSwNgfZrVY zZm5agjiBC(sud2ftji6<1aB+c2$rs-6>oJwJ^lO@E-`C_T&AkH-zgM^T{(GCrVV0Z z;rtlI(+3uW<7?&Lp1yJxnDyZgyLRG&zAz9eLx+W?EWpCaO4$GG$WNQhOgFX=9>e-g z1|{3;NXi?)A(NUXEa=4)yTng5mdU{R5j{#`P92klhy4ddR>Hl%rItf%B@7n?`xFw- zxjlX-Kd)53T+9?29Zi&HFeU0T)A(wAs`{{VS){XrO@%DHQo2Ugj?rrnu-0NAm)w2f z;b{G8Fx1F$SUGt1B?&+9=76{e+)hhR$y@g&m$q!^m+&DAa1{$@^E(34Lbk^WhU*q1 zu96INak8Y6=JU3I=O-_Zn09nAoY^a3(2-b`Wg&z)nY*=t)s~!ijsZu3TK@;-UgtlN z1Z#3PUfn1tO4%Pg427@-^(4QKm&Ou#5((PrfB1zzMG_KBx^cL}IYVEfkgmj9n)`6u{Ol1gv70C2`uYy_ z{||1tD?JrTD45WiyHkCz6r4z&we~86VR35YJ}ct)ODq6jplnvYmoZ z+dQYX;>lI-LdElTw3jG0pU@ypA>p6s93@@Izz1f_WACa3KFBv)%|!KlVj&$=?YS|L zY9bpAp@qdB=z6^4pN`9uZf;lBmd59{Sw6|9BJTr3Ns2Q ze2q=f>o#=8%$UCdJIc5`ydrI6-&&Zy(iNwq$mQ**?kJ znR|1nQM5J`D<+LB*IlnP_cKPp{o@w2Ify?Nd6U)zhcBq+Y^zGi8RJ(bi@Z1czL;>h z*E#UjKna@hlS^k(Nzg#D!8CCr%cN^fF`#fi{n&7a0G1o0PA}~UWO?^-5txL@FL*CE zUlF*#lU2LoH$srAR`S3QH^z7Iv9vC&MU@m4TFWKN6k3^Pb}MDv)MBILcY}}|d0~Ku z?(W-C^+8hug~F^>L0#^pl3(_|b-Z%Y$SA$DQ?sw|npwlIhbs3bF669`Uu_g+*!!sK z#b$pM3lgD?U}k+X~p!>hB zl&wPc5aQSO1vx9DDQ`I6H`!*`keK39NNXuT|lest+PE*-?hPZve=z=sa!P4^sXj@^g&(6<4)wa8`YPqZ$$ch zIrwd1C8gO|@7141tGeFz8xqg{HnDg|E%1KD3s3t3E(%TL$G8~EuXoOBVy}je9gmfR zFf@IA$K!hl=FxcgcgKWQHF!r0ulO#P;(2&Gz8^TTb5oTz&1JH(f&I(rrbuQ5DCt%w z4kh&5yg8#eQO9&OJ~j=o*<(!wE?wG+Aa6D(=y=koPmie)VP%n%P9aR&8yA*aYVp8u zm$Ix}bDY4O$5ZI69OJE$h4zu{rqv<2_v#@T|4LEJ6Rh%CQ@t_)i+OF9#RB+A!AxciR*>36Q#k72*x#!g|&9&w?YDZDww4ZIQ!N2FoQPxJH39wxM>? 
z)4F)qMJ&w#@`bGVZQ`hmT8qL5Z zP^X%l5VNUfVHE!@T{^Ljl($t7C!d@B`}vp%V^0eV>&L>LN0HEbe^RN!3{KD{%|1-rSzAdN zW*nQW!qdl1NjBxi2su2*DNoD*b2soGv$tkB9@WkZ*fg~t;2tQ0+;GQ~|I*OVP9-~$ z{iPfcHr6=v+B}_Eug}{OlpD5K>|(l7Ke7E-5{=9WeaQf z52)hNXYoGHux`5Msqg2}A<+taE=jY4`G82dP@z}6+rr@$#!`t*O;bmL2BSKJkC<7q zMG18kRl)>1B$_J8g`gYUtYcw(NSmmbu$946mj~al?;jo8q?HG$DYHpz{lZjVT2pih zx}^0@EYeZCE8m7>0$xgjH&e{)jJ&CH;>8Z;RsXt-F#{q&@dHK09&6fB-M}mlPB+7; z{aoUbN?PF5h(JMDg2sXPnu&0?lmIzcWA^VdKpf4$nw5J}YWs{*<+k%JNm}#g3aEH# zam>JqeTpOF49UY)^#&>t>tm?js z8H>KYGPoKd&v*)^!Pc5Iyso6m%$1Rm*>U_xiLf z3;TEyIXGU_LT?1gU=UG0)IX;>xdR0-bYT+(FjYUI68xypI;sXU3(eT#Z zS8crS{cWYWIy5D07B9q5g(8?jZW&>&U!WD+ysg?(+vxP%Xpq$t^=1g^Jl$}Y`$2GZ^ybHOvfq$R@g2A9I)(Svjmy(XH zqBDM9V!k0?Sg+`xNykoB2lIQcl>x_Ck>%r}$TEEFm(2lt(aK56Cv9v2t~3!ZO#JZM zK1M@rQR8>aEp{5Uk^ABaP-Zy|}JNPS=&f6`Y9A(oehr`hBk0_HwkZ5+3c1Ss2Z47N&)6i$wrg zpt|mlaq;gE!rLg4nDfBjm8$;M!o0RE_iDe0Tt}p0c#@t~r6~g&D8=_LDdnks=nS&R zFD8*{OnmR$riO5v*z)cDUOfLc8BH5|WXKsNCiCzIZFt)^Svp2_^KFUt8wy%4sK;}` zk|s$y3eTaKguLJMAQ;S8n?BTTSVD_k8f)DWr5P;o8Yb$6xT(!GfjY5vNygg@3`_6E z4aSiR#0T&D#-|a5Gk(qT?}pxVtv8h^av@{iKQr;OY+{%;fbeh0Euw@q_Qi`_umoz6 zuRhL08(dMP*{oek0KqnnvI!ZTx67w-TCwBWEDOBMehf#0#{DgCJ#$_jwUs0)v$+EQ z5=RkvDp^Oro zl^L2_i{XUk6Ll+wd1;?DdesIuYD9%j7uO;i`52*2mkR$-X&}7e0?x{asAA@d1pRT* zSKU==AMO2ul*pwAyRZfyJ6vDn@-Xr=MDbA1N((^*>?oFDHtJKA(0K||$kC8jkok*7 z@NQExZH1j}=?sbn3m-LHpTqHeBabs$SvP3iSh3*tpZV=-EMtXvJ#mQj&&6+n<^unA zdZ@H1#WfU7?mQkqNN&6SvZ1a3>Y9H0L)`^3lm-TX?SmrUIfZ1{fu0vHUGLx-o|q|5 zQc+_B!>qHwIQ5O-(iz9xzyAlkP8a<(4zDVwgc!TOZ8xD;v2dWcWLGbBZYqe0dfReUDOQdKMQ9r1@~LSTh? zFtB&omLP8UlQo4THiLvr3BAQI^PWDX4pWMNQ$Z~kUZfn}_XY(C^o4`GfX9u1Aqn0c z7X>7L>;muhfl59o)~y-+MXpqVd%13J9v>rE9LqRhe{|NEuwocJg^tv%ahP;0{&c%W zkHEx7io6$IH|1nw$IOgegh|y_VQg>ple@Wp0{fG!$AlFM0B9SH35Q`my0nvpsgck? zLaJrQTNS>_tQ{q-WIYsx;j{9-D{fTKrioMB?X1%nyPYMSW^#F6o48Wc@D!w{;JI0t zXJ)$UQmyeaFi@;SW%~^&;jCN4@;)ctU|)R8I#80gDbGR`B*%@+8ic2h4_(s%^vsT5 z&&I3OE&T`gmQ$0*mOAEw=fcmJxbwa~hh9uZRUU>^v6L5t|7#C*ev$l@)yp5_$C%dV z#G)$Zjg~UH28u02fryz2mHK=Fl5$Qs7YCE4H&y;{w0m9wPC5XlMh$LJ)nV%HhY|?MK>Fn2$vIn%&f0954S6iHW|Y*Uc~u zF>|5=6iQMAr5u5B>i#m|5H{oLlW(D-U$>2=&Uzeg{@wCZ028@my$DHyp}&;v8E zh1a(6`0V=Nk_++!h(Ey@ZDteSkdqfGs~T)_S2F*pG&?v+5Z(F(MhYDz&s$mb?V1@p zs?gETAVgaWX*YDcT;9Nlkk^!z=EYFYa2H>KAawo3n%`b09`mW``YTSP&pg`6g}1D^ z$KEl8>idlKJy*-U4`cTBoKkmgIi7*29B)Wkd<=T*JRPnxXtVS%0n;o>}CsW?S+Xefl*FIQzR7GFcRXj)f|S)b<}7>f>!iyB@Gqi8-^O-H-mV zG{NsY;HP|a^aN1ZJZvvLte#KN6>Kpe&0lMhHC&s1CJ(Bxsv}W`dBIFdi2$mv0yZgc z$?mF)USXg}QLb@j)wK@_C_2C!FY~0UB$aYp^MV1YBd*k>!o?EG2?(xOnDSf@^AF4ub zb{x;p49UMkh_#ll(xdk7_jaD!F+9Me1ny!Y2xBSrh@OBfwD+?@-?hoq6^jY!Q2Et` zD4xAd!5ptN&&*X7UQ*^3$vR-!`+(I_Ypc2CcxTjjb}sY#UsGni^dvxf#WntCJdtxU ze%o(RxL!of@V>MF@vxbE~>iT9p_3$vBCaa4+{sw~+;mAKV4X~3pZhjEVy_?!G> z3R!6yY5~-hMUlo@?_-*Xfd>}72$Dsi^iBHm8c~IvpYjxg9ek)~n<4N>A(qOD92Y1{ zuu-ME9cO|0B_7QZt?9CKPyw@@oY{YH!^-=AntN6J3Z2Ps??Obd%5xPD;>EVbO;`Su zTaVF)uoacxpS0igB;dMnXuy*8KiYjbwFYc#Z$lynuQeA%PSoyA7y+%^T>A|zTwH84 ztWA0R@ehUx^O}v9D2oo7^1~c8f~=54(MYg2?&vV+|(Q4tJZHIAeTZ@Gggixjn^;I zm<@t+KbUP@#Z{y93`zxX025P>^^pF9W86r3bS9%!q3;+6;%*4x6)e^rE%m?_dlfqe zu6z$>H$ws@17#@XR|8(o;`Q}15<}1u*#h4YTr1{BVk6VDYH>voYsi(v2Z@Hc z@b@Qw>7O3nQrss!>9qGw7icJ3NGIN%xbHELE$FpMw@;;J!cctF50`9>CruobNzu}a^MTfqbIMF zrm+Ef18yxTfD`qjV|r?1O9WrH9-AEGW5e6Bm&{e^SB!77MbIgI^M5wX%a-qrWN%%hEWJeb8rhzdrsS&iSlB-bsj|L~?24>* zlEEO>xwOLR;AE*#`xt60D=#LqMER~4;M=ttXl_{K7_4m+O;JFN?&lasO%k=toSh$X0A{QkE8Z$YzkC^jUs4^Dh zVpqK8dP%^GT^7{z?)FXL`JzcO+~xE(llOtGbTCF6u|y6Y zk2Fe3)AND;>@%-CzuvZZ^S-rpU10sRKcQin!M4iZ+A@>=vY=!nH~4+{S|#@}XvgE; zs=8g5H8cf7jaf%w3Jl}5WoXo=)3*c{Xn!hV`a}s6d-HVacUk}b><&uF0E)53u)3DE 
zzB-d){q&;*Ixx>v9o4Yav=cs^@>jJIP0y94)MRAGi&rSW3R->91q}LGa_)G zs~YO8AzS0{%)F&GKHvkr1A}wa^jBFMhfEa&=3o+Bd^wP#Yvt2sjB!(a+)3Xie1wZJ z?a>%YmZm`0dTbBiTXxaA#U|9wH@()uG^(f4qo|ayrS#K{W?4IZ(zS0X%s-wM*@o4Q z+#cm4XLUwls^YmMSDudQuiDi~yQ;kk(hO)0mx0##l<|*E?w-p@zjMlifb^?3!Po^0 z?M1i6c3Z*Zg1vOTdfhihHHLMrox#?T^}AZUF3x`wxtcC(GZ_u<-H}PfZ^x8^NgqpP zz!ot*Q^^QXQvk3iA^Ds0&8Fus8^#$wX^w zhLu_lA1Q(m+`t65Nm*VyjtA~J3}?%yXWLt3qb`@D>%=c%rOKbUhVtGD%wxi7hl^-X z3l?i_ut&$SMn$lBy1kJV2CTi^Y)}@GD}%SGMAcUOKE*R0I3W%AC}9(OV%XW+G^%nn zp)uaH)zj{6MIMo;qBkWPhI}(z)mg<+jCK*EYUm!h|Gcu)+ddWJtp_Dve%kRaY+j%Nw93lVXcBnn~Pu-CQG$I zMTXU+HRnCEEZ2N8^S}Ad4aH)J0Junt3k9!YiHD z#PT}MFb#=5yGRqQ?OQpE$BA67U7a@9H`9c$EMT6ZI@XHm9@y(rK%ui-i(?8w0AOCR%!ZgB#s41$ctR0S^Qd{ zK->}^g*FaBN|O$;lNrJDCQOXVJ#s}*DVuVsiIt*Bettb%OL(Nq^8D}tx6DVGx;?!R zlHS*yU+%NPYK$qPN0pwAgdTO>Ri6%pSDlFsY?O_PC(#)6mpJ-odrDfxORZ0r2R1j3 zBS15sgciC<=pr>Uhcwg#MJT`G3USK9P7;x?{ND$YR;qmi4$55Svw1)`gj+_hSc_~(b4=H)7~A{7^vUS7P#1RhcJuzX6m9Np!6Zx879!-(RAdZ;u~rI?_Y* zfmhv;jV+yiPyWaE9I#bx-BFkWZ#1MBb!e_uw^CBON ziCip^T;cYAd1C(ai{|)~q9*~Y3<>GB`Hg=j?{UnljQZA&s>*-X@sVzFJ5fO$w>cg+ z=B;fgrD((Gsmekp3*Kf$0^W=CJ5V>kg0f;3D@)!RwexfnRhPU3VH30b985w4rf^HU zm*?}?nAwR(P-Lzg;54)Lu4K0T<;PmL3pN`Q&BwbyepnK<#!!o#;9@C$pQ?xi!`q$9 zoxFo$|sy2+b|RFh;K)K)a5C& zMrX-!4sU-RP-L44gvIYG|DYG3rWs5M_*A?6{xyin+h?7ynoJXONK8%}Q>de^w?TXh znWp^Fiz=SL4vyHYcfc&P{KVit{H?xCsnO`&@~-qrYM`t43+(ErxgWi+6eRGMa7NU+ zag5oW5qjnYi^o#f%3`S{csgCVsgf2O5M28nAbrO5_$1U@rz$vjHNNo0U$v|=OG}-; z#&Rx2G(ty1P?6)z%e~&`FBxb^ryye5m(cE>Cf)}S7h0SIthgSFEv{rO`lJJr{D`fv zqJ)6_k?5lUor?$O8dUw=)nUv6FA)=h>>{5YzucW*_J7_@(B`D(k}h7%ibSDv_Fd99 ze36lbOV&?-*S)3x@suOgfD0zThEt?Z?t|k{h{d_j^YY-o|TJO9$vfDdpXRr4!*{~1#_Imw` zMMOo#H3zIzCc3?>%P3R>zy$sRM5JSDNDh_!kSXGmX?WHVx1f*$KnsoYO1yz$*AtrVTF=wy>QnF#P zpM3p=q`(gH+=_~V@&Yq;eoX$4wiQTE)hzjw{(Im376DL#{5kQ1IdN;{jww^jJ#?ks zT>4Bu+WTX1{d)bs_Rcm+QmMGnMrNI7#a;J)G7yLm8imM~SkS!o_BYm5vjT@-3R{Ow zQ1sb$%#qanad=A{w=&EsJDH`6K~c5RvT)Z@IQ~wjGEkCxMS|2WD6=9vuSFr;7{Tm zP2n!_;&DA@pdVTzrrTDguhg8Z@7yd)sE@&ZH~RKSjmzViPuJ^6_;7Z^tjjDSuK$Bm z6vl)<*mTeI=F-@MXJ+F8O7udiWS`u-JP63wwcgX5&S_f+Yyc&d_$;JW)*Zv)ox1y3 zb^iE^0S9!B>nY97v_=GCG8*vR^^#>GJ)|nKKyIVb>S*EzTnX_Ny=Hk_5=~0&FIP}N&GBw~TM+wYm7B|3DPVHg4fK_# zyey5M#ky2~I+ZE7cJ_EU%uKBGMHEI69yxx<_E(g!S-~>tlUx;S;a(2ibN-thOH{E- zg`gA4SK{jv-DfuM>q#)gM7zP=&gmq8=Olp6_bR~&uPxrA!cbW$f1yw>XHGZ_TmXW#d|w3KjTE`st|T)#4k6|JpG3v9b-O z;+ZJ_>;B(_5(m%1(Z1x9n!^bA z^mezm-T(`doM8dt8XYQboJ1D&LVWONrjL3ZF@XN_dHNvL9-JHDEPD%YvE6Y|_!LtR zxiV+i*q}bxK=6oL=2xwcGY&RDdMpX4rPL~mGc6qehp_I~tK$FAWVg+886YBTlu6E| zT(Mb6Ykq5^gs_bZj%#(l@)_AO_)U0t+8PK-^oPJv5z;3IhKc!gQD%3iYNMiOC8nZi>yIW5hX%*QPGo;IrLxd<9^O<`b4=& zqoy6amYOHej(0SOPY#@t2|dt;%h+1zoQ$y{^;8hie4}L0pyOLnOYCmqoNo3Pjw+1v z30uPh>I_XylzIx)NH|7e`q~o4YgTS}D&dQNlXmFvZ>%I9a={d|V9PC| zyS&Sz3IH)Pq0PzmAf@2dLiRa4jL=Lq2HkzaQ8dyxtfhE$Bbhpl5+>{HH)=2+W6l{> z==zSodv?_8a<$f)o4~lfX&`{QX_Zsihb>y$^#Q8;A=3Ea61n`^aM}wM26JBstki}0 z*-2M=-_t2%N)1kg`+}_Cm@!(}&%Rq#?205A(K#f!r$Tk)MOvzw-4h?yHn)Cs58U|q z9>9=#H*#btLl> zA}Z5ftcR0Mfy~m~9cWfq3i*C|98TY$7Y^T3b0GK16WiWvXMRqv4J}PdsG0<>tEdSB+(7ew0XF$8BaL{_@9n3Y`-)O4~-mkR_u0-zU1jXTX`;?~E z$-~TVSP}*4lAvehnH8QuRuUXAvm!jbUhv4MijzmLiQ8q`TTYf#bm~-cm8@mL^(-DR z)Csrw<5os6TY$w^WZ?B-p4bbfh^Zm#IDWt>q56A3EKVwu2h&-0S`z1x4R6hA9FoGj~)1-J>mMhHE2Uebe_LIxwNfZOyY zXEz;`zJY4w%ZlaGOl6(??+pU|n9G8l1tsTSTX|QyIOHfcET&WT5*Af=>06Jp2KX`Jc|ICt8%>;;SU!V$VSqP*0yf18I}mI;LVwoNM}#a=h4LLp=(`gi>Md~QcWdY4+zr5 zA2FSrnk>Csw81D|N8X8wEng+5Q!0P_JFit}z7HSNYP-j+Fwas^U}mO9a<;ZOUuJiX z+^(l=_C$OT;guC}L=W)ipfTc~QjzOgW-DIqH!mDiaq=`Hj{g!vHMg~CxcLEHK$O;! 
zfS|m*T35KvIauW0mG}?pZgQI+TAD1oOudF?q8>D2`RkyP@Cd^0JI@u%`JL0dbGe)F zFjMQUAgD;aHT_C^V8klbTm`u=ze3PaFN z=yeF4CC74->n>*+3r-vv+5zo+H-yIi#7~a$;F&~$J5p*Xc{aLR`8L`X(4IEkTar-1 zb3V%8^wBdhU~o0Hib?)(9?PP*l-a_K=Oe~Di>68+K%*pi1wovBuiJ@+SyP5vx@2Kh ztWIWSS()r4Yk-q$(1u-TvF?~|%il1Jc(X@CuSfZsy!6Dmc*8k} z?`2+w*m4=C(T~gZ**0wvbrbW`%I*_`91*}iDcL-`<7Z^Z-c!Wcs1ous&YB5f6@U2e zkEghYsu^s|^I-rxU}ze9da|izNg-(`Rb}VwPC}qa0^DocTTq_R^w7(|DSF_u+=9ff ztQ+dwuD8&?;hH3k@agWznc~|1PcdEI31y)#A_*=*pM_GjobpBC9);fq+(d4uuI9ft>&=Dc(SxRB$=JqhGR2KcVX6PYoli{k z2}Zh*X*HbvTbq>kfXdQ`;e9PG{?WLVWo%?1NY|W=MwTs35xG2>o_pROG~DS0 z)1W{{T*IP_&4{hPjuttzb@};L^i5_f;fW)PQuw)QZv6FBrA@ux^i(*$2ty09#hfS9 z7Y!KNd z^*z6lm4=q_uVkx4KZ%0#HX5xP`A^m0fKN=MK4y?vRCB6^Gf6LI4q)jxD^DGrRF}fX z=%P4xL@9w<)Y`WViOhB;nL&0SN%~Lz_eq!-P0lUv5}djdo!f-cM}S>8%AZJz@wrUQ zST$Gkv=r8z+RTAQ5ya}{D+6ZNj>y*Q(gJb$cX3KIhlgiM>Tg^AEz?!6WT|Y(5YZg+ zw+UVt(>*u|BahbO=UBl4zX|tB%m1W9slc1#0ugZ!!HZc%($q}1g!!fquBfITc!?<- zUENsH8;w}cr4T1JrN8j9V$Ltb3(7Efgqx!iT&}tggv`#Sbp0c~#B}0k#urFk%(!=$ zuwptjTc|Y6HeZF1NAuQR?V?!bAa7ng`|`8?!Lk4RonEx3*le?2FXjUyPNHG6FYLlZ zo-v*%QIY(*rm8HlSqT+(OOGM(8+r3Q;GO}XzA&pNNY7z?7}oarx5V6KlLo6PjxgtQ z|3~-PtM~XS&e-kC6P2ST6tnJ>nPY0o|K+8JQ#~ZO1SR=~iI4-?diC;bM?Q52B|u=? z*ydd4*D4vxJo{kASfy?~{dSo4xfX`qRuqFR9nLdBL8f}efG+a}X(z!aQU}^1++pSt zAU#5zY#$2qH32S3I%8H2$&{rL(csdHYl=wsF{gb%y*0rw=>oN~wt{ zgIFmfF{N=GDM^YeLF2>6`A<1kE_%B#G+MP0Q}3$@cA7$)n<$Q ze?4bLwFKq4C)WOK|gC*)1yW%1#M{+9l!yICLDWxX1mytlb)JNh(Z$WbS6X=aHa1A!_} zdrk8Z4r0o$np*<;a_@@+8CTPY#wf&<}t(_SA?Rd{qn zxw~4fOXQ}#Pedx)-t;DbIoj9Y?S;84lXo!VkK`IP zL-w?Sw=G;`umAYfxi@%U4NPpfEU!U!w*?dr;39iURzO= zQrFgsjhgt>DFn->-^lP-?x`JP)OukzK6A6iV%#0wJr`D9t<$<2et~B6 z;;cYq(*NKl+hMXK-cb$U$5EqcA!*1$yc8c5HhN&jg15~Mj_t(qF(<(>rEo?R{ZuM#N7h z{6NLX@W#%%+n^yBKA@=xV$8o%- zhniJVp_hJiYTye2F3mG#GTV=;zn28sJ(GOC62K#+R9Z$05(pA~Ngz!|)%&f`S=ahf zvnyJpcCYs1YFilett|wq)wnnSOqGxXf){wt(!^TlC zQ@|?Ng5J#Q(21>T*6Y>X4e4M3LEuqMxiUqg|Gmts}9`Dmi{FTMV}%Ae}K zK#lbmq5&huA5)+&6Ho%M0%l=GdjlF8wgjKe@=$ltpFNwRRmUgrgtd1BE3=4RVp@y^H^`y3%mb7+9wOuYrsYC(B={fSb*Hv9k`!Psu^kWC2a-|Nq)S?x$ zXq-n1NUh-iVe72A;_A9?TS)NW?yiNqyL;j8?h>3}!GjgT5HZRde^1&f5o8DnigTA>`4Ib{n=rK{6QENIt~w($;QSJAirUPwOCIE ztYwSyiF!d^o$T%n zG0zO5U)l(mJ$hnU%sZxGF(4Em@86i98 zP3VwEtTkt3vAU8$O1N)#J?E}bi%vEzg2p>3PV7E&=gje#84##Mb389yVu>5G+8{=A z5WbN73zCFvxY5r0-0ymDTt>K_mm(duji9a!0(yWfyCx!9aB;PuIka064=tEz`XB7h zzJB7m1C*o` zG`+X``dunAl~1|yDSvb;=*;~G@I7seZTp+u>a#LmSntxLlgW($`pu^SUfv8{5Gs|1 zh>WMOJ96k~9KuTb(TDNUgN~JLX_G(I`8v<_zdDfCZIRP`|L(1&rLC=8rqar&Bn3%J zx5lup$>#666`Foo<-~I~7zbs5*UN}StH0#n`N8jub5pasIf&N!J(5d%9GC)U{~m*S z?JmL~{{C|5wZ&uAWyQhPIVFtXG&XYsrNliveBr?7apr9^%wUl%lKh8qD=N#fCt@nz ziO2-P999Fk3^?GSxun5qGGCTWhu6 zYj!5t`#Pk;%q1ivnNQu3zOmf6tt>r!&Q_x%G)2(6X%ptx^b?(2(WeG3CXOM4leS7p|Wj z_RfFaob=K0nanBJ&`c6{af?EEUq`Y9?f>DJSeWDWSlZcT?ya7{T zDDLp2(z6?6+>+?i_db@SRVu5_>V=>3!=tVi=|!e!JMCfaj4(bhn)d09jXd23HzjLp zdMmEGD2i|p*vNVHnKl9YHo7gAW%j5_(0EslBm>>3tl=+YTroy1xeFV!ai0lx6Ab4K zxohfB7&iCnPpu*o+{&Pk*TahiM$^3~$G7C0W+Wg1xWy^CD3Cfe# zx!V}R`SyB>Xw2=_S0g!8Ejva?gHo$TtK4Y2ZJp6WPRv1yJ|Wf&y5ALzemz~-Vzy(I zA`%lKH@4E)KlFgJg==DhR&<;_B5z)tlUUHXl5T67s$p7Lie-?qh~Xc%61g1*n;MZO zIpKn7pBY*Yp(bGE_aTF)&yKs9Bmw2)R%En8&yS9J}WIqwtqF~7>7&HI`ICJ%SsR0SYeW}Q?0$&+k9Y{ zlvn?2N3_3f6s=rUkqgug4LtL%>>a>TXj+{s66znPDSrAY_%}p+W;;0PC8WF=5&7@- z?sZ5DQIR{4blhRx#Ao+iXm~DiGn;t*k6m+y>CnVt!O-Qs@NWlaJzsm{sYzYif5Tyf z8fv8~5_+*z@r{2F+K*lSe00|H4rj>;iua9LXJ14z(0`$0;KnRTi%+U`_9_x}ow?U- z^V=m}RF5e#;fFE=$5XbamCt-cX=9;&;gsU9TFsP#6!KfTIc3&@A}VX>sx$o>FHV0G zZ7vZ*8~cf~s^k2B_)de$ltQqz4%L6~#d3Nz2+NmZ?O(aJ^|D;4q>O0s;1ih=75@`; za0b>B{YU_obdS?!w(?uz?~zgv4ztvF)$0SpvJ7^_tU24(3D`#T*Ru5N@ZNOP4fK@2 
zIO@!nLpos3GNmTx5;@{8ZNIIx`aXBo7{+cbwSiFG!6+1R5M&-8!ASTNHO|2hLjlyP zUM;g}P?OHdHkP*kfGFo8*lJ&^!`;}cWCMSV@r74Gqy0znl_1uck^bb<+|o*=wlce? z7r__DpCtrH^%uK}KollVqi-}*w0Bb{pGN(@X4D6q(JfEoAZyi7n_fxUz6yi6fC&;av?S}lW+7(>#9D>wWX;s z+hwevDYQe6C@BifZDFfd=d%y?ZVqt~wQSlW2`blZb;+5~ib?L@rN89-MX7N*C`uM0 zcuY195=4=S+V`8gD(4710w3(E)a8zJOGH2BXp`|_TfMt_E_U^o`^Qcxw1j^ALqD5x zCGHX^jzBX8^+dNACi~#P7?wE>j}2mAuUlt0_(?2vqVWDZ!H^>piZiStU=C1{R643+ zQT!XucKyfMt@TSeQKmR^?ZmK zVt!NX#DiW#R*%%EgCj>=VAc#_c5YV;D>M530a)a0Su=`}RST^RR`Ih7@u=GXC2++4 z*pzYq!cdM*lg|cz1Ej}MiL1nvK_<7~ZYenK^CW$ydvWS!{D6M4X>qiz=}KzNYcEH8 zyx3+dbglSJ$@-!d46Q=?S%R@9A6HmOyQhVwxS3LOfe@G+kaYJ_+c)Tnl9WkOHQ1GvVllTuFt7Yz{Z&gex43h*-v}mdAZ4N< z#6{TWR9fVI!k-PEOIS{LDx~7Q4OUlk% zHof67%?Aod5)|?RmVK7JtV+@Pzi|o_=ptUv4oE+2%@xlSO?5CxE6=7!k|!n6Y6bS9 z;3F1jlG=Ou5d%rQPO*Ml}4>C574l9rVpHeM-lzkSSI{SE#H2~}zG}Re=eRa$+{XsBfTWvjN0kO3n z%Z@YiVCU3|w4F;IqHE>f)grg*UjCc?ll8TSKWcYdFtEdyR5_JBEGoa3HNfXGTiost zK?yd>y;&J8K~`(ge&fj#7va!#Ezo()50S8V*c~XOzP~ICe910DVaa5Y#lepCx3#%} zg^h&|eW2&<7oO;JLwZbu+-lfPvF^~)QBeky?*Vp^mXb6F5JgN_i+URjr^`>y=5-H- zI)0S+T!|xhc$|wAQl(_cG_}ha@r-&J)>&r9AsfdO=t#`*BYl)PajKZ1S5%CAtHxnM zXi`D~rE`=CE`iM$QE2J1Qffpj%sz^0#PR4giqn`R$@Y|@JOz`GAiJi%efPXKaDPc4 z*U>8Vb6|r?Hw7=PInooPjCy%xnj~;#PF*B8&v-~!earx*qcTOwR=*Rb+YRo(n47dz z1;WymQ)OO->ckiy7MN(fKEAVGzwDmKR9c6JQ?_5Ngrw%sPYuTV9f&FY8TpTI!j|_I zNy@Z$rd#VeA>Wn9jXTlbFN==SF_X0tq8OQRJc-Reu$nm7-L5eb<%tJ9h+}cr!R6Ug zS=sR{=<)^Qz6rJ?@G`6|>~@r`YAkf6qv5^eR)lu*EXN z-={w@E5#|}qwkqQF%hnBBbS%|0pe0W6;2HK60yd$qhZVCPw_GpXV75nWu^C<3gl72 zDg+j}TUqkvsWk9v|0!jr!iFGOt_B0Qvj;p#NC3Z8yrNHXx`m$ee=mU=%kZK=o*AxD5|cs09q~zHw`GJt6WDQE2h;77|r~lc`~3Db|hn+NuQp zmTs#j{H1?*&6eT!0%6OSdlGvW1upiixr5W3lrSz;Mq6wpZ~#Y|ByG4uAVnliN4i)M zVyNRHv(geCuQ`MH2`7z{_t&sdXEek}kMC;rG0PeYhPb7(I4A~G=!H1Bm=jM}Wuimi z_olq5!x+@NOxrAveFVmL+~EgR|5Y5V9|Toaxt{+vdlft3@i!z$f6AL`=OThs$>! 
z^eh}Ta48$+TOWE@np{r(yKD8!o1&6*gcpn6$nLWe-V?g?B>ua!t!*_UO@z{}9N?pg z>yF_LGJe{7=f>1&dQ$U;&3G|E1I;@LpMY1kKar5Dvelp5#MRQ z%q1>;Rol2oMi>2-C%${RE%;ZhHzTt0NNNh1?^Etcl@Ei(CC&huG6+^BgAMr;^<&rT ziGAChdaI|~X&=%Jfv~*OaG*u%bE^!pKQyoJ_4v5r-7r?!Jc;B(Y4G?O?7MTUu8KQ| zvvXwdvkbU!xJjm-X8HYLxyoAcRlHp|xrj>i+cvC#xT{gv`tGRW-DXL?&OA8#)VeY2 zb8exTIApA#{JU0_l2RPl&Fi1f-FH18w$vPIgLBMIJw_+xGsX3GaGQ(9)lgc~@2`3U zYtxKVJ)G_BuOC*i`Xz9O?*1Po@BRl+w?k@HJgm{y^)FGMW&Js$FdVuz_h($PW8x{C z)RTf0&I0k4y|BHT7f-qR*ff>WO7JMcV(8=Q>P}yAd$O%mTF0}4-T5_cU)#$2b90l& zUi*jHVN;_K_*SNghBHStO8_h-TpsgsHB3OyaZVDpLwsXjS*}zbo2XG*S)=7Sp}`Bu z{j+_5KQ3VPY4rS)stq&49DowfFBBb>s}|!M+GfkIW5&+@al@HQ-c;(vtS`y4MRopWsv*@(i zNh$1Xe;s!vB?%@(ccRJu6ql0@bUn|*i(_v5(vIZzkJ4p9hSVt$#=FhlLKFeGmwZ2L zfQ##6d>!6rJ_=Q~#0>pRYLI6U@kVIZb~2k<(<}br!r;vmx)Wmpt~fgh>lVu5U|@ta z5%xPL`NIn$>OFrFLskqrinD~d^PcDJyX7Z~tbqRjym(ArS-r8odT&Y5MozWvwES*_ z8zU*AVb}}Qod^c{eq6~x;P(p{tD!5J1tAVNbJ4ubk?h#&YF+Z+OIy(8CIywqQUB%v<21_diDC5mi$9=g;hL2R!)w!A#3I#$E_4VoGRta1l z#RXcCm=$QkMaPTde^7?XMJpVK>qV|A?3AP+d3)0t5CuhnM4~7xMcYs!5=*49=p55p z>xd)nKL_U)o?~OU*7H~D7iZuzwrFeWZbu?|pH5tz9I{h3q1F8d7|B ztva=zMD&fByrC;+FlzExdDz;`UFT=XZ%b}Mj>QjX=7`Of_Ju7P`Q|a=vAg&BbaOdg zF)bYqmK()!?e5^jhmJ1-2L*S$Aic7LeS|1zPlY;dTob|kC zlwOSGE$taZPJ}~>sJTDei)u;YI=2LtMAQ0d<5>kVSv>}6oM^;^ z8GLBfL7Z%**dp#`{E!xhCJh;Ac6au{=%osH#*x=g4^#NUe~!B3$8V2;&wX zgVQD1K=X}uhmHyxC~*r4=B8I?mny5A)&8Bxo+DQ4g?rLbAP+&sn9)6Zp3aG2q`Tb- z#V{YK5X+bGV3dBs=acBh&yy`QK189gF0I}Ws`l~3 zG?`SPxoc$}2Qk`&^PPLGDYbET<>J(EM^UejO zBxt)R5isU?EuLcTweY%JUHa;x29&8$6*H!N)Xz-Ej4Bz%>AAKo3<;ITW$P)MsKX^( z2@6;RgwgY9=L>lejQlj%l8b({V-~gQa+wm2Z?|E7Qz?niFsg1T3dL(zmA;$;mkX*+kdmKAQY3?hxrsU{IAf;+I_e2 zWa^~uNEJd`tew-Txt_X?ZY%fgk?_?rFdVHHV;B`cS{LLawS8yB zJh^J&5C5gqfhvkWj`1LCf`Bb{2qc!TO|2$VtVyM&-=9z-tdB|IEsko-$Tcf2!T)5*oH{lj;aOMtIdza7mIsn z1B6clVJutG9F`jexYxRce=>evwoy;JL$@v!P1G>{ULxo6c93}UYv`ue2(P9N8-iKY zLQkiA<0yN7>huqmL2iA=@9mg4!wkc-t-HOX&aw3`KjUv}`QKaw1$+86HSo+kgU@h= z^7zANjb}&8Oc6{H$%@MU_iMr3=tqw*FZ4(S4GN)j{b*Mv31(gY@T-M?AJRni3)OT z%**9kA77jGFNO)AitsL0hAvG_Cd9W$l~kuEvjdW$j=xYQcge*?)Y?w2XF?2@wrzEL zq=IU70uMc1lc7}6?;Px%8!oO!b0nIlw$;LKnVbwllC&82h?DI(cfJWbIwRTwb-+!> zDI`PsA7xNR-X}%pDF#`%qSKo3!~mx!wfn6y3K9}0mbk45y5$S<+X>sI=ci4xbY~kQ zjvapmj;9>P1HUJcLPHK^=6Bl#cEoJ9Rv^PrK7%_@s!%P*`PG!i6l+6_|C`4pt92FG zF$s0O_P>QssXKI%O725(pw9{02>eOrjWSeGjG3DPi-@;6aPPbs@&-spOPK%1hxMcY zms&~_TUhuL0~!quaiJpo58!uo;q$BXp7>o*B|;m{Mlykw*0J=YBd@+E?^t9%vL6QRB57YaKqZ7j}z35Xt#Z{Msg!cfOVjw zRjIVBQu~W8Z}1qpg)0X+D?l+(?3mHDqT1u?Vho;_M)-ZeR<7k7gMm2u_$IvSznAB7 zu+2UcGK_AXB2K?x_j|R~!7Rwgf0CUiP{ok{1s{*CI9oBD$<{Q&#Ng3gLI_>=9E-WT zJ)I7=NFpUDjwX9!Kut68@K@tvgJ^tzTa|Nf!H;ZzoX5yd$DT&rT6^f)sS^YFb_zqx zPNm38X?i>ecC>}{-A>DL@doKyoWajV;a&ugKi3VEYj=mviB)#(^t_zmRq?YdpP#=7h@^@E!dcSU*|nDoDS`Rp>PERi+ES{6_6F4w-v;&7U%a)+HcRd-u`<`3+;IDg!-~oE9=fgidh7m5k6gcZ3w<9iKSQ95#+9>sBZX&57^sWFG^8-e?X=3#>=pa>P@0>oS-^|r#{4XlFLY%(0(Hw6Tx+*zvW}du zIAJOx6_6jRgJ>u3l~E^a zt1Zz%a7}$}k8uv96o`duI#^<25$Ti-iK5VQI)>O=(%%P&^|xdV1))x%U{h)(2zc-P z{&tg%SU7yni&PJ}OT7~Zt+b5(Kyj+eNZ7MJ< z&~nY`pO5~07qT-H5!z!ExcP(n-WRWUa#@z`iV4KY*Sm%1kafq|*>Hd@-!0;GLC)w< zE+eh}pP-3dg_&1fx6K~0W1d^$yhIUyI@0W(&UdoV$NvD0oy+VzQ3js5w61PNYsqmh zir?t$Ej(O0Li&tM4)70Y>}{Ck(}4XUQu36+{Sy<&^NxSO-g0WOev%H$O z{Ni6v=CiLq0?KLQ5#ij9DWi(MDTqhcMSR;O>D1iOH_niDqsYX+`k7KB@aRuu?r)7W zC@s<}&vfWh+_c}Dppeha$cW6qEvRTi|FF4G@@HY1gYBnZR1IK=nfdQaHg=6okt|Zq z1b0P>P=#hgc~e`^$;g7@tsk1{PK4W-G*NsXzFKP^z(-tJhjTiHVxl>V_AWmwS&+XS2?KVP0+Yn(A=#Aa*>Hmkf!lELB$&8?>nDNiT;{lp2(sInNzl;hI?{1!* zW{y9%@A!1^$43ztXJ7hI4zPY+*j*JCb&jOj)+vo|4QRCj0#NUL8%1ue|AoE0e(kOU z86ay2yB&v^_3hVzdhRmi*?w(D^k=T6S>Qh&7Wm%^d|V0MUA_kobZ5Kj%x(0W2^7V* 
z*j#Oo&&_MkSaRVvU4e_&1DadadEGS88V9V{J%y5()#1YyMEr0mrVaPINw!H$S@q`$ zl;fe>n`&@uk-dhRbvUTJ2|nWX86hWF<0*HGsJ_6)r~g+tB;yX(ssyOVB>!g=JpQY% zvhHPmaSrtx)-cukUP{(vLDpwwKIK^|#%Zu59W6|na1`vT)8uUe!?0-P+J2ClM?frC zQ=Z9@D;caZ@|#G`2!P~K<;21H`Q(i6Z%>D#3tE&Hov-r}Sde-Du!DVi*z#Z*OHP>@ z{@@Qvq59UY4%#0iJB7uOSI>Vv<5zN6lFW5qnoob`#KtliuKTt1czGUP6P-K1$$w!+ z6*hKtzOOM-CbJ_WU7ugIS0&cf)k{6vbW>Yt!m^PMQSWKOO~xhDvc!$cWu=oT`v(wL zSM7Q$L7WKW-`_m_!i}YnrVEKIxDWeM$zi7)s+PI3knuCXZ0)@blJ|Kg-CV%qbLNCb zjYIMj7*$tLR2qi4Qdvp9oM;er@~hwhN?p39ssQ?>7tSvL7)CgneMVy|g|uSq4Ows4 zZ;WR{{Mh047!a!SKYmUU_14#>^7@WF%B^?B9iEy)#zRr#x}Ix#@zx>SY$D=wJY#@wjfTnvwX8r>R)u26gelHiK3U~Uvh2}!zN6L8kQFzoqMeu-H(KTD@ zrIZ{AQyeJ|84Y;f&kF5F3$FHiccLWJX%9g#Vbo$$gNeUP%Bk;g&ykBH z!OU8dU>j`hYIF@Lx^>dy*amU}rQ0t|+;rwyrO2@}X3m@PEM7Fsf zito6yRh9&W9N#^*-h29@RV9X;2; z*2d8}fCvg?bT7>`@__27@aRWD7!5Na z9A9il{WMVg*bj&raUxGa&o&BafxrIxS`NKI8K09)UsEhx zkUiHg`q02DZIT(XO@_jpNY1j1BC2SMi0;i)lz8ZY2L2~0^Igy(_aC~n9RJ(EzlYTZ zT?E|HSa_H=`cK=q;b_9V@aC<4O(tHKQr=he9h`n&O6 zeD?3(h7B&;I`?5dT;=Z>8p#n8wV9E$zuyK9lDsw>E2EQ&D@QA2vL(9ED06X@p(CNS zx7)BY%~=J{@2qVQ;93Y2psFjg73W|<0}35L<%#(a0e~Ie2A;6 z#0?Vi_;~*x9a8G09g=g0i_Wu|Ztl)ET3=^Y!)CL53&mOvAeB_`b|vcr0E0nyTb^+E z@3(1F1~X-;*gKhQ=ITN=C0kXUWmW`BFm6k;bSmx&Z(3^@b9H698vc(oq8cUGG;f$m zPs{FX=4RdJ(E=R3CGLRt-JXBpkJ-Xq2OS+}Cj|m`)%!vQ@_zVSHgnk{HY>YwPCchM zcO`7jBP_PQE!+dGQmz$m-v7?cZQ1v$ezp|0B)DhL8F~C{pt4PohNSN`wo@JR(M#xs z>I}5idqIb}XMrvAuyrP%R8Bk7RmzySwPd8ial^-Al!n84FgLNudFur?6A>e{XG@jn zb>`CcZBURhP%~g`m=UE;EPOaEP2MECt+$xx^=MKiF2BD&%aU~^wxxzIT#~d9f(Z9) zap}Pd&;lAn3QzyH-=RTnZ#C%e+CA&go5OJDJbRR?E=i2WWZ}-wm9%;XGw&8^=N`pt zIq$sNB^U`fu3n3}xJx^DQ#`S1=TmJRpq4tl^`Q#4t~3MFe*i36mf=JuR(&Z&_cepYvxBkl@55LlFcYNG6DGADYYqY` zv*<+@Jm8uNCWJgzmMckQWM)*Y(&q38F_3f%;n5Y(Wk3%>u9&Q-K}ETet&&PBwlR>2 zWm>5gujg3Md4G_&;4DPjqqVv7ax!}ZcvL!IDfdmr#{XOyx1f6*U-Nc)^0o$jLsSm< zOH>L!KI+K`bM&+nq+91t2jF6I?17tqK;v&qEassz@eL9ln=_!d*#^%WcPwOt%3*b_ z_DvM_rHxx%8NFv`hn=mB6?eFUB09%mj%d-oQIx(z^NgLFgduiMKICaDbJ9dAQoTR` znpo1LnIG7LIti8=w-TUkBzP9w+G_4}%vyKnH#==4mrk1VtAR^yHZbLzjiBlN(IolF zdZ3e~IRwOsmTJ$HK261ExhLF75_T6?9nj@{l`hClexmX6vboS|Rg_Z2bBdXQEeIQl zokHOp5~B6L+jU10sr#2EDDid%MvE?;5%t^KMCENKNmdNK_S7IA0duEt(w4g4knjQn z$F#Nm7!dnsgGV1lhUp*ucW8Gm+I-rN)MFB!0$O{=Dv>Nn=QE#F@I)^k(b(;^PJ6PB zNK3UOKNO7`_KO^8`#Q9+Dr>+{nq2>&u-q=rC09$B0!kV}yyi4VeJm3tmWF65y%m!^HM%*zASP;5 zt!UStQIf<9o*+!z$30g>^#s8m?c^4b0#dm*;+~2+WtOg3$h^ zGD%d?e!+i-`;tIq+Tr+~4K&U%;-eKeZf*k0iK%CZsdqs#nE{o5L`lq|&&lTKRdW0l zGpP-&rxgAc0qB@Mn*$mfpXB)%hyuC674Z|2yt!)?iG7ZL$Cng=(Du8G>`NBd5Bg3T zNdT>`YW-ZAudU9C1u-2}J5R-HC}Dm>WM<|Ae14YU<>jiPRjtbVkqt^NtCI;OqPpix zl6q%(Lw~7jf9#O-bv>SSnY(Z0HmY#4d5P#Cjt52Hv@Kon)1=l|9_+SfQ{<2L#g%h@ zGNtHJ_ig-LPyNv-z+4|6%TddJl zO;&f8S9xV=kt=s8LARU^x}$3MpAVAD3ZfBCrWg~BLTc}ORtHTsP%#i_#E6ET2p7t` zBxdzNkw22KK}g#B?mKMBPgQ5>vZhDXUY1IbkOqOSU3J_(cEg3qmgBOX37I`@1C8O; z75#5*h<*;5Q$9}8$RN`;uK={xoqcF7m%j|CXtGuAE!IxTffz(! z!rL~}6&N@ke29#*oWWKoNzu*MT>%XEvJv_Aq2`@0EL%jy&T!c(11w{tY7VCc%*ln|z> z$yO~>9m|Z8DTr){#CEFTK_P5Ldov56wOqMm(uiJHJ!GfI^bny~2o-U>VfGvyOfJU! 
zLK6M?@YjtFt3f%vVSUt5sdnAh^OUJQKk*9qWWgKgiPm!H*gK>010P(B&-V7X8^U)w zZz9t!2A|-4dwcZPr2)V0pPX26_1tjmYjrb3&hQ(<$yg_KU{{CmlUpQ ztG1=pF)bL}JX>he0ncIrE7c)^tPn*{X@EDvCZgT){>j*T!;*(8nekc9J#hbyz6v|@ z_vDY?^UBzy!Y)g+QidsOQ9y&tNIHy)&5FTWKXOES($`S`yYU|?!;P5b?qB+LPham7 za2S=c0JPY9yC(8${}S_|3JKZ-%PCE$vJ&U@<^^4Sqt|O8$yE??SZ?nQI)Ex?Ta9fr z*oq{vqio0`Kh#fN&naF`dW9l8+JC#U#s-ub{=?nq23K*-q5A1b(W~tV4{?5C=#07| z>m}acDzA-fL{jL~0J=Q>TjeLP&0>QS#!{u{X0tp~gasMf5D<5}in{!v(e7ypadlbY zQ*!P%JsO*_rkgU3EDe!T_T=tSu%ZjSvZCo8i<%L0MlhOOQ z+GOh$DQspwq2H?Nr_3`J=j#10=Z7Fzez6pb-#Jxu-WEZhYknTAbTa&j8NUsYYDxAJ z+x!GD`dWAY`ZRWerZB^4?$_$ok!f^#jbEW(3*3rsNqMbImoNC5-0O)bCf0Cr=(nOt z2a5WF0BBqbuK$YVS~*)4uW30N{r>oG=k4J0M@YkefPiv9K6L>x!Opt{=^zDw^X~od ztJ!s7M@M^!B>!$^;l{uF7?h2{fvJyy&%e7O#@nh?V?}gAYm|%zDRK5ZgQ17Ds;-t| zR!`cK+oA94M#ArEDUh1YR`UA?`Hj|`3BIsJ4S|gxqGOd9GVQw(Y$D9NcrTesWyT(y zhcBhkvU2b>ZfbHmRKs$cAMd|)b{0+X033{TL1MmSbP(zt*il!u8 z3;-iU{Q4#M9!Sf6Tbe*|9TGKfNv)bCTqqYyw#*v#D&B$bn8)TURNnVMh z>`|;uh}o9sSacCdJ37Oi)`+J!*h78tucs#@WPd1zCUk`+x-14SE_=3&^&5EOy;N_L zvus^gE*o!flNGSKG!T95A0QXanBU|26Xkk849WVn#${k~_VdE3gn|OR4{5#z)4YA^ zZt1d@4Pl3%sqNfbnR0sW9Zj}i$L}!IW;;eqQu+Vur1hF2g`u1+Q%>A#$gqL&47s7Z zTv+q_%A60B<@0fy@4#m_$7$bH<~jePzDE~maagl8?^?YT)l^_m!05rQ@%NA3#+4yp zg5f&t=HL|IDYB@RLtw2Sg^awC0?tx%xSF+HU&qTCcHmIH^oH1KtS!+t{9^SvD!@u4zp4*6c>9W*hcR22McLI zn-hOq4%2Tb+nO!o<~q;`uscIPA=q%)ab&9uj;U*0~Od=;oU@+krK>; zjP0v<{+OtNGs8jp+2GIN+Rqhc9on|oF$eQ#c!EYsGd+q@_5JER+nhEo4jSQp&~mbJ z(2)ARc|J+5zh{yUG>)D$Q1ORA9r%Tdgwsv1ak1anC{f{pr(7O-bD_leb>D|CI_R}P z=~U&;smoC{WO>!%nzBYVqGdgjO6n>2M+s(kK+9nl$zaf!dDxF{t)G zfbeL6<{+G&{WQ&^g7p9Q$xD4G74Wsi>t0C1^A>^qOEY9;;uzClt8HdR z;4g$9*__x*Wp$EF3e1|c0Bkt0;!glFE%cxQp`=LwuoWo*08iu15&gT4n}Ic_-1w@F zT1(G5CoDy$#gQy)dK^>6lydtXgJKW5iYBhvlBoFBaBbMHnjv~*C6 zi*Wr4Ss}=`$7hhOinoFFY)~~_7HsCKAVu3eQ?@t>h*nocl@(jcy4}iL&)OxX7$C)$px)_dGw0uX64pgf@BqEI(>r{dqFinpSne;nI-glU+E)5)d-sW zw~$IUsN(>!cJQqFM0;6#+5yKdHm)))9YHg#SsL$?UtBoY$I5Sn<74Hf8;viRm<6}X z3sWP#X0%J)Qlk!rvS}8HM&inn)pfoCxdFMwAm-CWU#v|Oh5ifJ7mqp8RM*wdV-f&! zCGfDCUpv(Fi*1x)tzP?Q_Kfxzo*jEuU;gF!>-qGXt&8g5~be{PNVlW+Rhqf1CDG5Q`vJZc)sLbRt$Yuj# zFGVp06R+5CiKMI|7SPfr{+3Rigi8!j{nCK?5JI03Za9rywVaIGV(G;lG;(*;`5kvj ztrmxXRWd1-oaN3%tzT1ehfI3NnnOJS%v9d`UT0$QeYDLwAq;G$tG_)-i?TCWj{$-D zmM75mQMwHw?2IyLnRIQ4sT^@t-f@}gVBcdBN1*e`QTgjQLA<+Rg=gPnnGLu%We&rf z(q~+n-SdAD+Ce?4-Y5~6gM{4o@Wkd=Z!Z|45j=;6g2K)gLl%eCSSCd-a4`}a=y+mv zZu#-jE#W=+S9{IYfN7b(dBl<=h%dof7e#;CRFP=AxoVHAAh45MV8spfuL%aaZw-S*OGcrxa^CAQ=GUb1O_)V*AJQ6ltKn8Q>5vQsq^lhzl;52h&+H&C zI@lnBJ=jR=u|OJ#zC2Ng7&dic0ChzuBfiHV);tG`0c3-cyTLY(j@K|V=^gH(Z%M7v z7~feJyE$^D3A508v?i?Ca{OZ~UMsH`r{N5DG+3Y7}-?()%GZNU?D zKFj120;h0hy+NBX`og}A$sdfL3>e?%aA6#f+ z&#WQKsjTTLWE386dPHnRwvdMB=V63VLnhf^hp}8!NLRQc>~Y>lHvVe)GG>SV)X}!hrkq`O zrzRVqmFM=O$|bN?wqE7Z?ImxplgY0iF0xEV;4oGxU0J{u_fORgqF!X$Hzuk$UDyH0 zCi7EftJ`H89-#|bB(uBs;}J8GBswnJ{DM>b=TTR2s}C$u)^%~5Et-Z`J|M#C>D4rJ z4r8$HHM-`qu_Lf?*g|gj9cROV)?lHyj2!tOp@?q6$>%)#OuXtV^KaLA4 z37>Q8nxc0Z7k+ZVpHRI0E~mb^1CU3DBk#$le5sant#RWi@8pFn;U-@UtK*wivN6aT z`A!EKpEdq3#=&Re_|25Cg$f2{LiR+0R!rOq0J^{$&puf`?u(puPa?k7X{)O?Mt_1gqqR!R%`WXzx&>`Lc@C7Agg>J+V8N6UI-D32dU^A z2A2B)1aaB+$f(m7fSd0US3irx|2dO61sP`;!IJ19@Oo`&kvW1$rB_1xdz1znH`b4P zbe9{y0e*SC_6d;gD!zMGFKHvB31^FTJ+O^!r3t%B8vEu&NH)DGP$*$-wGc}k5R51a;5^e7d1H){=4ZVqmtJ0`! 
zd17 zOkO*jwD~&kP7e1NaQh2xuFMUy=r$loIe)tiUz8@7Z_+tJK)5oUJBWt1MMX=O-18AX z?^VlkRa(!aur+~Jc4M)BVrHwAeQTCzp}Ox%7^7*MeiKG3`ys0-@4=|7Wn9UDrn=F5 z7lL&kJ3z1-v7i=koj-Yf9SuxGEi*6__KaeoXrgbR6Ut^COLS_U3wZ0en7GAAd(KHF zItDY}uzFsV!qv5G*xmCc{CyYcMib-KnC7 zs8)>G;Z^WQ%8jzPBYR))d<5MFslKt@`|hiISv{!h-}r43^_J6%j4^o-1rT-f{SmGC zk|EQRkTd4NU!`UX?#f(A5GG1PL^QeHII+*;-z0;ddJnkA(Q@Mg7B|_6cGPcyOtG| zs0^Z^)4BVBUB+IOOjb1 zKP&EuFYwic5{*d6QSC-=+h z{%Len#T@4@(D6S&L#@@6Hli2?Z3o4|1mbI(G$_bCQIk{A60ob@a8CNw|1}D6!Y%3O z9ZCxSx>pI~fDCKsX|v4VF@ ztGbQkBRX@+$UOFjt_Y{y=6#k2fJuAV$j!EW49UOub?g<^8O;-ZawLKcI-En9p0BMh zoc=T)xn+U#6(im>-}=$yb5~5`j@Y^<=4}Xud_i1^knZ&1yK0_nW`+E;WuBEdb z;dfmdL!A?YFUd}fPl^?kkkF)b#BeQxc(kM!+b2OZ%y@ORw{5MQ14oCx7Y)>^ixqC~ zeYe8h>*9H`!%<9+S2Rdt@O4tmD4vk|z+OA4JSR^4wPSE5rjCa{zHLCyVSWV8o4FJz zZ?e}##jb%*r$n)eZ_|DVHOcRC3FJ{xlIe;X9)+D3+A3QR%SfCVezJ-#gU}EZm%%zi zK2bu;ty-9+QSew6o`(AkgvQ{Bh28b9#;+K+a4(M7?SRj$4VkCcM1BAV3e7RvW?Ib> z9x!HQTd5TII+HgyjmdjC0|H}SwNN#Gu85uy~N3ys|h8`j(0;z#~2I>7%Yo5j%6W{8msDLfjQ_k0oKIOzaTgGBLzI2#^j(tmG3>3BLtoz)mT=pPi@52 zc>|X)YyCv1?~bxwVJ5brxL2(*KuHgJ54uS}pyMJyx}kg^gmAKBuSTlX+o}0eq(s`? z)-HSEvDE@RKe;+7MhJ0_5;S#fK%>rnkUy%%EcBU8R5U=gjP5PSWn}P{TnB zr?%&O{qW#bPHyI+d@eBTk6506XWQv=!*G1lfJ}^$_H9R*8zoT&;yf3Gl$TYPgYrS( zWVz}82iia-zXsh$x4zNhyRer_bv{q0q-}fQYov7dUPBus(fZQN@T(zE4{LGHY60N~ zjtNuGf(g|c>K0rQ{{R8Mw%((pN$)Ow`C9s)z2n@s;kWR!vG!)!!Cg37@0hn%yq4|E zkuxJYrz;wTA3*28V9Yu9^QvVpF}d{e{l3~M)Uuj zZ%tR}R8j(;{y%f?z#I=BZ9+ggh>Iw{pY!$rXvy$xOgW8ISB*IEJ9rKJ&mZ>u{ArIS$jM5<04P22<_jI?S_>sWHU;!R;eS+cG3p%UyuO72yZ zIVLoV@h|Wd@fGm*l0np6wPFY%82ZbQs!hsPtlWl4rK?iqX(y|D}6NpGvLKNBRrDmHftxj&VxFj(u zB-w+rh}*~DThKff=-<1WeD4jfTPXW86#D0)Bo)g@OtltLKv^WQ+?=6}+dy|aYx;2E=`*L|*tK06RJvg*O)S}n z1&qTpyeV+bd$e2w(^rpK5WtY4Bx%ZuzSHe(eeFDm<@6O?65#%D3dou%<#6jAXNO);&VO;0N_Q7S~$B{OBH zxHA$QK2>G#u*v~9CN)RZzN^>y{H_-xkQH;#(q@jNklQFCp14TXDZ&`kh~^Bf>U(p+ z?&Df#hInTd;~Yv(JBas{W4;So=6Ig``E;<h=cQ{<=mX$>j65^0H(s%M(dbf?ev~pWNcd)Bc7J~LCClp3m;AHysFotjW0)(U4sFzZB0cWS)B9G{=xfn} zDp9gW1&-ERJ*uihf*5ioKg1bHUnA)Nak_YS1*oN^l{T?%umH#iV8QIp-+dazOHL_L z1gYy<3ak`v#DLmg%dBII2*e&(AG#2Kgp~t_C!Z&ux7*xnFdig^O^I(|YkE2JV)3Et-CciZCeLABWrbF%=b2`pRaR`JQ?pAmbHC&&2e>q?1=V3}sbY<{dyG4i}s z)1rL!cWIEauk{H^i|+f6SJ~p+yOOApw5=~FiqeA-??M1)F02c3y{bxi?IYWsLDl&* z|&7 z3CSp85fItkbNGD!J;v98@;u(6m)J>IhWUfZM7)Qs{vGVq1 zq?t^M**)?>43bCx08cM$VN%NX{{Rqga5a0Tq?sWgFU6TjP)Rmmba8RBznLu?--UPv zUkz}+Cr^n;rxg<zXYSQ1Yla~B`GUPH6_Vn7zPb{`d`j9&u0CeBNWD~u^RLw;nHeW zCCg^-ic7Ep;9bi%{{RvUYR5mlcUI^DbJ@$;Bw1Qzj>MqGvN0sEL{wgU`0>CT0mmLD zftKH2zl3!a1`&qgfQlM9G|m|01<2}DN%LllUz6w0zrX9oyBmkhLE0d)2lwM&Cp%$B3UMcB*o2B%GXDS(-_1n%w2M^9OPChLX(ek-&HO$dLOAcW zI+}_~`LpSO67wp>Kr2)EnXVh*9X_FTcW`wdC5p&RJllHyGQ5;#uT0NYu#u1Nq%niq z?0f)Z?*oC_2i2?Q1HcqiH2(nh!le{vOR;Nf&yePA9IeG%MTp`vWy{Q@cS~ge%ma(u z^xXdWD{t(~p7g`kx0o%AdQeGD#lH1>79)x?I@@^pl0vE_?qyUCLjM2{kadLd1~(^$ zaS3v1rRVP^gyaYy2Ub!V+iBMQV&{io6!@hOsi|0Gr!iH9#mU{g{t+E>Y$)ZgP=?HR zVXb7tUgTk+TLB0K+Bth<95-?h%A}R#eJ7o6j0i5f4X3II$f<}5Qb@Q5-L=!!FzV#f z%Bh)4O*LxDqJYHmzSb{e%icS@AWr@`a75&;5-kx)OGZu zCRvWG)**rGe!jaOS~__(5*D?pT7hGgZ*#rw{*AXdzwy0JwYBNb14tSYjVyhLuF_75 z%81f3mh`!E;aGQY2qbydCsDyE#O9#0N_J2t-5<}$G+(f~nTRV?auCwq?3!;d`QFz(Orx^f;`4qJoQf?Ytso?B9D6Le9ngMW<0}$`f{CBb5J(Vy>sg*TrQe|fU0476q2M?em^0Qy0 zd5ru^>2*gH)@mcDeNB(4k>0|LmJZ6TIOM@&?B2+t;pc*#T824bWRgy0i1L#smgMWQ zdmX02s4y_BKMO_~Ig*+E*Ljw8gDmG-!{25*Ge^=M#_RCvt{0}6MJ;kpRHbTPct-k80E)BH>3Hzr*&GGpkWNX*NP$JxlvrdeJ%keF)8+uY!j zz#naF&LiS{D~ITpiDGp~ks?V(b<8XVbtYO%{{ZBz29Fyr62)X;=}M-|2`VTFNm4;2 zO9`@dKcmKQ=%2wi#5Y^FS5P*t#@LoDWja_VpL(uN#Y{{%w1YQS0O=h08#N%iVQ$`l zrAm)InQIrd9Bo+|QH${xYCJK+I7FOoyAr2GC0vBgkfbau%?nO+ge4h?H)Crn^IQ=W 
zSg#UNVYFX}QiR161q2|B*A52ke8}>&cRnS)6uPtU#kgthUZP-KCdlD&d3sqqRBd+; zVmz$~FQr(o*K0Li{aEG^TdcDgV468mN!Qc{V91rrWc*r_Dt zP)nrcac#!&Yrwd=ix@~DLQ3~S<|8$U1nbX3zMOaB1eXK?cDBBb`$tf>BhSCv{eRQ_`^16@NdEw1 zyJ>xX{Gu{R);_`Dat}NJIPf_G?0NqH!Th}W)Co~ZF8q%F06w1xg8MUW@4e4MtbWBS zM&0P`8;@p2Dt)-)o`2W$)CKO=e}n$eQWB(;kO($n0Sry;_WQS)Qt)A%z3u4@7z8Fv*JJ*yVK)iq$cM;B`L%yC)K(>$eS+%{Q>4(TUyXAlqn}_@T zK9PTK-Rjk;*^wq6Ib~$8SwZ@tg@~+PZGYZsR2oC%g3rf@!`N&!TbFnbNBqUSTZom8G{pknhShl zIg;ed7nNg8pAUEhxHC;riI(d~7<_U6TB&uT7UNYDK zdcNV>l(ydO#97(7`lCN*v`w>ebaIpBClbMBs>gD)sf$&li@}goXlbJ$nvYerqANk+%mIc^6YMf)W&KjUl{517dfZ4mWtMhVl2P@sK z50|Nbh2Fm4_Fb%g%(0M*Eq4_{mP2x7X%>eYw;{-uix*PUT!yrk9(SvBNY$26U108? z(pOIFR|kJkyeq|X`<3Rx)iR*Sru4)(o-W3_WB47Jis zK?Evmr3ET*msuqWJ{Itj+qSW?iXRo!a+w<#ow=5UUlRps+T)np6KYE`2+|2`_a`c$ z33RI?O&XG2Pbf8v_R;-LaNlKT2tsDa#AjC2$xP{TSKs8NRMetkiIP+jthw@s3_vVW zZ#SFTi!^xj6Vv#7!Dho+L12ZQxvyhhnADm|{mi%V)%X-6F8yOeI-Am1e$Sufb ztqp9nN!(-avXMW_$gLYgXMIqr9cot1&F?&+&4Gaix|#Qw5S4- z2s&R^(YePcj*HAEFv8Xcgq4v?sQdP@9zNay^W&ekyAuv)?R;1vnkt^Wvq{3L zD3;yHtlN=PDC~{rF{dn1L1rcjdRY{b#Hx8ol!2g2{7EFAQxy1K4~Ag0!jlW4lSNJr zZthA*1oI@4qI}71qbPIFwx`9)8bTUE8bTUE8bTUE8bTUEKMCLyq#_agKzwU-x1<=X z)jC;Z&u-c;@}m(}8$lzIP8F>+$zq0#_3L+fl8W~vcL>qO8UCGV&N}U%gg8cv6-2B` zn-Rva_mr$Enr3Q*&rkt5$;m7tSx{WSCT7YA0IR}vuNQWk!`wAXT9H{QrxB7?lv2Whi{g%Ce>e0P*E};$z|#_UYZ(ZPyk~&5PNbgtng>J)0sk!kY210P#*J z(G`+9#Q|mSAhR@dCMv znKEfPNpvMF$w&l{7y)Kusd8@t_fF@@Zmgr_^3n+|v@t;5pY~vdBaS2zFb>6IL{QF4 zw;Zu7Pdl53xNet*s8K^kfTRl&p_r8ia>n8B=@=bNRQjpi{KPVkgcOZKFtD}y?9pvB zfB=6Z@9ua%X6M}d4o5zHxaU`eNm#m2NIKg?`iAoKv{ETENh=|(Z{&P_5kk3=uLPpA zO&ZM0+^SoiRQ~{Ji08NM{6r5U@;bOkro;DMvupKYoq8 zT6tWqDK5o~mnBKA-AS6Q2&}<9iwjv*q?sZ{auzWtB+J+dCysbvFVt8~M}ldVpp$x_@Z%%hwoHf-im%l`oO{cjfjLfY5hupwDWGxA9-=kp~&_Wb}H@y4=x zbQK^=xB;Hl+o$d{c&3mMIl{a-3)nXM+wShx5pP+{!(N>4E0`mS37xDJ?)0Mw{{X*e z)H_Jgd0?uK+!PMdPa40`QKf1~Nbd`RZEm9f03T?rrIFs&$U;zNrBh{v2iT}}U@x=$ zqeWd-_=f8C)7;%j*!yF5-rXBlHI|Z9&R3&w$yPa~OA=L~EvhhCrfPE4B}l|Dmq#&| zLauhc8-q}$o>b*ZNSIOLPPcBSR?u2Lg>a8&JRwzu;JA{gBqFF&N$(OA4t3-KN?0mY z$#QMXMbpx6i`>>TYSY@8J-fMgEt)e)U2>5%DJOdF%p*%JZtPMsK!7I-8~_MnPa3XP z2SGMuxr-_XKrGF(fp%^D^okB9>^~31@pWPpRT61Z78WHW6o*L%!V-c8ogU2*o2r{H zac-USp4^yCzmgct-MMlx31Wg16xQT7UU~l{Z~TLCk-k} z88|UYSx=S2l{b0iI`g}e>kxwZSke3XEgpx^eNdq#8+^FVzJ*{E|V zj1Ot4I5GNY@v4#Y$Bp}GUA;5aywk& zp;E_8o8MDLe#a+ImuZhlB1dYTR$$G<5Kp4##MN=V$_2$_lnJx z2g7^l2(|TXt-M95_ae3Gi!I-7L(i=s;xpb)dJ>?pb@ygv2g>loj~xL|OWjNE&C&*e zv;(cR{j`l+KfaifW=m|>B;4Ok;^o`jUENzx9hk&LXXa_v}IIWBZ}70%b6~cdi%vB1*IkPC=vrE>_Ft!V$l?;d|-6zbahs4 zoNZY0xq&k>V>b;r;gb<&TCxbQq~4^0>~TW!!RbvcWQEn(w4MgC+Fk;x6v&@A32Sij z&*l&LNduc5%QuOOMsr)()-i@~(8uc+E=Vk+Ax@dV6p{%_k^pu#2Q%s;HU9w4xe9VL zkXaFVEL6oDh*+N-q=?DK)pP|P{y zI(u?${G(DGZTMU2uKMb&jnccBx5W+Pn}u^7k=2JKx3PtxStj{=u~Wv^iWp3d95{!O zospVI`=9FD7-Y#fRv9Lal0s6HBPmcw1Sk@$$|v&@;UeVN4pG`17ut^t&~VBq=1G%> zR8Uh9EeJ^ha?+BdzEDb(r7$_{%=R(0kAqF0uznY6#|165O73Oxm&I-7Xw{cV%w=o#FC%ee1UIm~*-} zWej#ddL|WN$5qZrT+y9p$klU7A(V;=Q8kNk!`orvm5qsKlzN<(#QX)usKe>7YO0F5 zs!TupQajbEU_wKm2vJp>{9U6CtnDiu!~#Vb+|* zIX4#Ft+_7Yr^wvel``)wjD9@Zc_6m7;k6;BI;3`7(-SPgLZKam4N>ssCB^&&O*T~| z`SK;PD#_uq65)XL^U^J{c-Iu7s7j<&&qT$BVKsl6z`j*%cJy|INueio@p^!!#NYuf|E92Dg;;rzx=a4 ztXe$% zZTaZfGc#qc+LCsarDx>G$M_fj0BXiQ!}0)o@_n^1grpm}ec0RfzTRNg^0>q90Cs2f?U=r2@u>gFw({F@KLKIoU z`s?-kzc{<&v$po^(nW_vziKGJISJz?yGiu-`UxoY0aM83qELA8$l&T;R1=gC{HAdr^2Jz*SBcf$VwX>`2#&LO?9U zwZK0UZBMjI{7EHA2`B)zz-mw3`ra#>FQolO>Uz&-Y3=)YDv(IA*}3ahg1mA$f2Y39 zhSQY{k{m!;biG(_37b{ri082KF z5~F5(5@sx6b93)3loax^QeW6LX}h?sbFxz*$daC!Kw1vb1Iz6va34=G(}c{GKQg>U zEQcnE7xja!d)+kw!j0)Q@~Atq~p0+VI{xMO4Q)-d;6J|FA!gKyw+Eta=` 
zC6>KvqrvG$Dk4{k2M-&BSxd2*febj{56{gSXG#iOqP)`pcLKrOGY}8L{X}G4o_2^h z68iyYh)Zhbax zJ5N5^C1_KGs1i9JxAcvVCbp8IV&zDwlQv|(`;z6%N|ulOk_(Smlm7q+e-GHszi;~( zZ017cJT?}ZVz7^_``@QIDZaRO*D@F zW}s{?&#t3iC*Ckl(YVok9fl&NOrW{5)sYHNB`9~fItFOnFHXM@t`rD*5_iJIgHR&$TBKs z%3PTySQ&pXAd(BQ39)N!yl6_!+fj+&c#aufw5qOjrAwBnVJXz^JR-m|fZ*+{Zznaq z@q2G86}pt_&9<$#%uiq!d-7CvQXppYD25Qg7y||SDiAfrv+!I;8k+y6LE?B~z9qW^$a~7QOCo`d;lBwvOi7xVm=p^q#!+tpK&sIqI+T@~vZRk9f(tV<2JHd4 z@->X(JPVBCGeuSU$H;XDNV3uKy?m9bRN}Pvo*c$&o4B#f zYn?WX!GhjdY(2KhP{=#mYI9qHKK@17#dmtpOs~X5SSuU{zlCt!vY$+yU@773MpivT zP`bJFAA_lrF*9T-QkN^bT4p6u){yB~Qxq%^QlJZlaetx2acXJEsHR-iCTi0#y6gy0 zHYH!0)-DJnIcV7j!jHx;RIY6|#O};x3_Xl$&T;g!7TU#`snbV)LcG+awyh-T6MU+< zWmb6MW{pAZ`iH_F)sG8dGPrYwF9XDDUhD!?QzF&8ts}KN>z0%s>)TSbv)@vn=WEW$h1sIT2wV~ zq>x6M>lv0k+BONpw8@=UJS}r2nUbW-Wi4zo0L}m|*SEmNs=gP$9bT*M--=qb`m<{t zkLc#&w>3kJ`5MbxZdbK-4$E;`r(ERh3UD)|DrqBDO9P-%l;)x$Yzy{>Cpu@7uBliIL860p-pkGlPL}o zw314Yvd)$Xa?f%BPgu@fE$fd?I>BDt+aGaO%RLqgVgn7jR+Ud2N-AHkR?9+3?e&%; z7jW(F?&J)+hj^=pxU!7b@a8Q^i&6z-RO(5Yf>gk=l0sB0LApkRaE)dcX(h$-R(h$-R(h=9NrP0taQpi*v$dE@m7#$3Gg819$ zuI%Zq<;mO3Pb}1I8`p7Gta`FTax{6YJ$Sp7qYVUHJ4WoJ2KqaJ*U8S>yBgx&5aaw) zi79j9I6o3%LZnOzR%&%jQbfr)OMnoR=MUlW7Q2DL9jovLCxc?xjv}ekCR0{Tom7P= zsJITbOGvs#M29y7zK4Af?+wK!S0iRwnknTH%NUU(NPtM)lrL(iuhk9 zB&sYjl3rZ1jI`#n7dJ1X-L6llj&LcOiNSaP$cvk0O)L(0J<-V7WnaR`+u`I$%1NVyn95_-w zucVNyz05y|{zu==s^rO9P!hC;ZCD*QC5K0+$T-WAY!SPlQlZ z)M7NrlTK=snTh`Z8WQdtHIM7dlxVkX<@&2v;@W&qPx^sdDKwcobTN5c{hOEY@#5`B zt0iR_7$@|fvC68$12UdHK2Eo7WhEUp2C0IMS(;=VnI)q$sRVe70tKI&X?vdGorPeB zv(zgP!ILIzx;&*RlPxoQ;)l#i1E>ScxV^WH)n1&`SgK@$S)5mdSa`@0J-3ZSuBdqJ zh{y>-6dv9OX!h2$rkII=V4zD;aMp2uCgxe&ouh^^Ts#ZXg>Ui&)h9vhm2f@uLms2`g z48$T`g~c=fuA2 z##>Fd(@eD-lq4#yT9gn~2;83GA~fy__VL2TdFPYB<5ijavvrc4ZRw@m{`$q~Cl5sL z{lwMHm;V6x(n)97oUGn$7#FEOhrh(fN^rAgH&;q>c@IKh=H)mbnuI;4xXaq@9HcMt z5+L!%kIO{Vc*qLMorxE_>O-@iJtFf@Ps6oJWL8b4S<}5i0mHMaGJ`#bE5A5_yX&Aw z(UiU@Lo`buu@rF3h9_WM)s>=1fkMZS7=!Kh<5R^ze5Ghm=w0`|U_SKHD64R#(1@XY{T#0 zmgXoCDs%XfKwjnP;5hYY8l@xl9p_T!&@Lo!kdOsR=d z(4bO5W;P(|bogJ%;Zg#E(KRVjR0zs$;n;!*xI4e;9+AHWj-qudH4gUAbt`)AET2kf zQmeG6Qp#oS-AsN?e$zs1e70=7*s7H4P_qdb^#(rlj3;lE;t%R?1;T=8D}+>inhKW)p%^)kv33E=?rqpss#I_h146e5nB>-Q338dW*Io z+VEV@@>ILsBq==eeQg9O84WzHXMW&XFnnuE@E#*nq)Ni7Y1K4kd98rD3MK2nUX3y*U zEwLExY0X@k(owl4pnvr&3#GU+lC>c(?Lfw8pI^giT&DU zC?U&$2_-MD==wZs-+ulg8*c|+Jq{acW#zq$5?4pg#wkHO>dg#sFiC7TW6V6VBa*Be zkG8hn2id(2UiPI!z@X#9moWggzfH_^p9=o~Pm}7^B6U_HDRk<}vXnBEQtoWPJ8b&K zton`dchz0b)SA6V!{wiBU#D6+bg>w#63dUPQC!T`tOr`_kdA32l4J8Qp4OMJ0;mqQ zB)%BIrBfnGWXT~ZAz7uQxFEIeTYiP~aAT%$KFhdw48mq%E1;E8CS#K}WTXj&MIj|w zfl*5~#qD^PyHoKoxhC8cw@%!dGpkiAOOeM)l0z)YQ6A$&?OkG#z-IKLQixlHZU(W* zycLHUp*CE!p#+SL{pr*I9*K)Zl!?pzDB zKyIk@lc*g--}!8YFL3S&Uq0Wn>Lputp}S^_x~zEi5KY|BI7p0@`?42Ryc2`)#up-0MLdEj6FYB9>6?HDy#{0y>Evn=II)r)A7#A~nb60C0wWxMj>>SSxr zlCGib(~40v&<89K%AlUmN-$90SSa2X#;ud7X30{sO97bP#9zysx67W7{=P^A+)Y|YN{2T#)9dFPSxw(dYSNcCm$Mx@G?C@EVtG9{=-y-EV)od3`)%+1SA(h3e%=NhFtUdcMEJV1p#psDz1Gi8f$v8)5Ulg8Ss`M)b>6PRMUqiZzub z0&BOQJja*tWfXg2%Unm46p zcZHnRNa|MNH3naybsO}8PL<=Ra0r`s%uhKq|KOwwsEdk&=Ni4Z1!vT%wLHp zY3b?}EO)3&B4Gx^Sb#(NzsfvM^+$VVx|hFqPW#VFC(G_^y}Y&Qf-GwC*pQM|H2NbQ zp?-w{RP)C?ldq;YE*~!m!)Yibg{c(s5iBW0sY=S0btsZlNqdu|eI25a_-aWK48=?` z<)jrWQdl|E-=2|=f|5x5wR2bjh_&zhd&NVKf6?RfQsdZZ2<3tou`F=L3p|V!K=&l- zBquU}`)EXqz~WaEhDy>YK?(~NWKcPhb_B33$YxYL_HuoXf#+57&;S;&Hx5JF)+>-g zC9n88ansCoc#~5$M3!9Hh?kj6GnNHPIs?tU{mf>rfb==~*O<)XY-h#> z95n-aSsD{HiMwWkFG?hS6QAF5dz}dF{Iw+tm6=JIHAr3Zqz5E|0ekK57zZA4w7v?X zkydDpXW|J72~Y_DKZvpREOqzV=SwyYKc#y*hR(#TD3Mp#3>Dnb}lkJtmus( z>{dhRj`r>DU=$B}d4WqRmi`cIdYhLz`VUT=i^jZMzBR-}eA#B5GSik^pi3kM02XI! 
zYqVu7Wm@?=^=RC*^r~B(A+c^W4r|8bD@YWT=d>Tp6UW?Yil(sPl(VLOY=|ws#P4^1 zJw>k3fK5^t-XTKmXFhia*8P0mG8XRmYw7P$Fx2t;{-pU@*sE`9lSed^EFqO*21y>Q zfQ67oqp&mb0pxkspP$CFRX(IjaHY7oVhAKy>KfV|OikhLKF2sEQ>5W&?$c46DQaTC zh6R)kn|qKpqjj(0-)D7yW$n(C(tu%N#)!Y=>(psOC+w80q3kqqmWfKwUO9bING$Tn zF9t?#V4~xxxOQokiCWyVJCHTApO?lm4h`)?65+gRqZq51GN@G*s&u7Hm=dzLARVlx zBF$@TVg~g4;lHh444)AjGaa(B^IJ{LEZwv4tLsUAYc_xY%l0+^d zbztFmz1DFy4aB%)@rIay?k^{29vIQ)} z9uSTQ8pgPvv%kmF)Ktim-OWHz2}oxQTjKoBURP9cT#7}}QM<50CMca0=K7ltiM{ue zHhRgm`dNX=@2$(1{hfl_c}wh0Doj3|3-6IzER6meOHfw&==wP4tZsY<7MCqDW=on%W+g5`-1&9aOL^bT*81yJ zezf&3d+r){uHD98+?$IjjaxA#SBj17#fvl+^e{;*U$qTsV{4KIo0}fHdh&+vZ@2sr zPg8_&z6FKUESa(}>KRi;b1;R=rUQ*B%ceNbR2^Y zL9{#+vNP3^2`bMdGeTGEjo3P}k;e_-_~7z+{Jqb%VOVoh%|?MpPz`NIo};GzajYk) zQqop}QoRzONFdpbukFe*+=BSa%W({acpk85`~X(&l$RW0O$;GJ=%C4=Hqsn*Vq8 zRe~~ErB@$5MrhsK6Bli`>q!$t$n47`WcE9U>rd!{uOFe}+;az1ZqE3^FPvO}zLQzRANSLxoD_qpE2}=QFl!tOzO1D2d znTIl2l2`<->rW>>so?jtjQ!30MT;H^kJNu$R|%w=sWKd8BqZtiT(`jO>ITP<#XQ;Q zOvI#xOjCe$@u}}_XXEP<`{C!{Pp%&l5MweMb8pzGh{<1;kjq_3V8=$a;hsjjV$3mE zFwyUYYI@Hf{{WRNQ1*bq*KXi^BVAsZWir%KqTS@cQip88PMpLh*<68g01>;mFR_jw z;u$nmff;FPCNh??Qk0>TN|ZoXfyPj#F0}vv1>gwS81C_(%tcdeA(oSL6O`G54^0XCG1hcz71(N3RVmXpPx&OT zMj$#!E;KEFOBl)c`d9I|+-@@dX71$3R;Y_>HG8?P+bmdvAbBQcSVMXtnp4~(Jb~@4 zV_2Gbb!$RAH#WAa-S$6SBaFE2f}MOpSc*DxCRmzmkN{I_a5@dN&_-P7=fk&A{vo$Q z?OmCN%3F1g%=c{6hDNVWIIBCyHJO)>L0yDV$Kkn=(Lv>guCz33o>anPl#rE60YoS! z$}UBnoZH`52=HbjjpA~}JuzIVM?BT6C?PpUW>T`NR?5uoV{jh|pNA(`sP77Ryynfj zTMCgwUhY#rjU{f+5r{Ok;$Y~#s*bT-^?TkhpvJzT?aZhszXFL}(k9hatsqRDP>&3! z0Fn}Oc+$Zjy}vb`hf3fWKM@L~(aoeq9vdMcDN;cH04Yfh!IBw)IkhBd9*X)=@ME`j zRXXuOO0*3WDmacLU*51*u*8KQT#_$wJaT==`|Bf@B4g*9F-lsPsYH@Zyx*beqekD6 zN8KalK1hK-T>J z{&t9`*Y2A1dQ2Uvn}P6&UgUE`19~#aDuoKceI=Gb*klBdyq9F%6!Ops%`s_Cmo0JD-A0k;^Wu}> zTci2SubH1|Q;DOQEy)z{A5s$hKX?ey#OuQ;1xN%CPoK574dR?P8m5+P+p9{-y7mw3fWhN6Yj3p^@Wuzo2t{64-zgv25aHYfPFXjd3dH%EhOd z*#o6Z%%Us{5=l4T-XiTTnBcbuPp)q*owy;tlEBEH^CsB3s|G@)DQ%@lZ_@3Sb8oVw|Khy$0a4zbbVwKC>}1`IqUtmE(M(QNI!_3oZ;EEeje zU4Loq>q~01HlYt0lt$5>i+}YBw3*|D+ zh4*vQt{3SyUw02p_uf-`-1&_4oXm5aoK|i~l3lSUccnDe3QWw=7j8R{o(}+zeM3`E z-^)-3{EG9r&^dFzL$k(R#rzRT#MmUf9R)Orl*yGcS_4W$1lbM&&=zqM6MlR}?5?qN zD|9YX8+Q+v$IVsZrxe0k^4;}gdNN07`Y#|`J%(t^3KB8MX6_(b5k-=6JJhKysYSz= zU`v2TpLUHU!#$beyc5SbdZ{Ohx>|s+xug(NB}r0d1t-K3N^>`i-rXY2_z&t`9gVnm z=1Xy8l76T1VWoqVE0?~lZ564q$rI#aZ+=P%7+(e7qcRb?ff zIAem8NqAWfr5m`*5gy&Va&>7}4aqXpsp>AohfRSmvzz>WFR3t}>CTp&6RNS=*>Vz@ zWob$PQx*l6n4yRS>B+Q-_tUM{oX+h%y_DRkTOFRvQLog zJ`@J$$>0;M@mwDzSw%R^o`XYY4{-POW*pBp@uv;Q!m*0VOf_bvof8z4ts_*05Hu?I z*m=c+@d4HCgS7T$O9#{nlrz}Oq^XUc2PCmeBeSfcJF`@Ebp{nzRhN>;Wnerut~eJA z)-J9^Nkqh{69R<5{s8XpW)952Y3UhK`>wB{>$(B~Ywr)*h&5geR@<0v0NZJQTx+k`Jc(UE0>tDT;$2AOm*76Q@;k;*7!4#4t3%O)Z*OGY8w0wISo@r%G6cQ=t zWy-@xPX7P0_~b5gAo4Ja+#>fV|u;K{)%c?6Dr z+-j85qHNCfR#DZg9N(k-T>Fi0jd4E^V{+wFVioghB-4zt(y59eg@ZXe0p;x%$dvwm z!^a=sKTdx?Zy%<;n=iY%ZsTEZ*M0niV`fyvp8;T-9$!o9;9fh`=4Eie@Kld;&p)Tz z`@a1Bj~b9Eq*O~>x|D*2hAsiupF3aa6w2@@C?qJP?$;LRePb(A-JEsG-%Y)3x+K2+N_UE2?)&*O^^dE^-$uh=OIXOZ`?{@g@bA6)Ci6U)NEhx#8 zQLzJ`=;6mrE-k;*nXvY)*pjq%n#;lxcYfJv#T!T5DvrkNGb)vpm6wqtjt?hT#y7$; zr%NJI1A(9n>|Ke!*o!RHttd)Vgpw2iEX9FtTl`$T<11%6jkof3Yc?)xOL|D-`L>c3 zL`Z^%deO`!W?}0p$Vo)x^T_8|T#gl=DQZh82`1@CU`4E2+JH@>bcHQaS1w9bFF9qD z7T-nyeC_1XO{u@}-M~ui3h+6aL&QngygSP0k8T^m9C7EL9%j^V!jcq}Co+xL4RvAm zZdxXPYLzlrmn0R`D$J1NsNI7Xzp3RJ10T5X_c3v2E9Gd-jxu_GJq@IKq_-qQCHhBt z_VL&U+gQx(9=e>A_bbRBWqg^RUoS|6@ZKpPnQNw)l_+NkQWcnvVcNy@i^CUtg0?F; zMp9i_PoCkyRz@5jet7mg`*JmpS5vNI(^Sg~Li{R80NC3?FYE1hjDBp1)YXYos#Mu( z2aWCTr}buvQ&Z*4w58@=}z;>>D&45}HEW-dwt2T|M&t~4Ckx>#9==hO|)XasZj 
zjzdgGCdKto2JtSIs7nRuE8i?tREc#n;+QCPrjFPF`(K`yJ^Vi(ugZ59rAT1#9^Bqnn$aVy$ELTEv$;YH+QCgw7|g$g0VP{n z;{yL|30OudB*!gzZw9P|Ida@;i=F)lY5$IJ#k1q|@3C2>fr%EC9wEpq z|I23TEB*5NKY{^_abI88mxmsjTyA_MdY*Qgzjc@gc%Wwy;H9t=`FB# zKc%8OWM}D$Hoh+gf&ic>zNxiAQ@pJknR76+&s=|GLE_M1-O#39d+q~cC{s&GW$UFy zYk!k6lE?lh40f5p+qHONc}LPZfTo5}0SE7o9Z0UNpw?4_g=3_DmOj*BuS7eTb^ri- z-B3D^esud^7NP~7TNm{BlyLgx&7xu`sK=!yg>(0HmK>@%V-UVIuY1t)5&6{&^kLH=R~U;(QQ3Sf`2xP@g@``bFd)!$Y9iJHCW;U8G#qatP7 zqT}l9uz8m8S#}Ou=6Q6?M?}cp;I_hA-*U+q&0o4zcZ4caeOvK1_dbkhcfy8?<6oZu zQb#4Cxgv_m>}`mf7uN5e+Kg(=4-MpR8AY3*{6ldj20jnkCk$Oi=eSQKhOVCqaZ3Cu zIzW`*F3MJ#q7?#_WiZIAMR3u~xWH_Ugp~LKha>x@^jajefUt zUE|b%LHt4$Fx_u9N9Pat^!>4^5|abRPgDFLvrYCP4rb1`_acXZ&uuzjwz zL*4x3Z}Z;%a-#;iGW<%*l{x8V;pxxKP_ya}_~08Ha~&i4<9}+@zl;@)8JH#rNBLVb zb7=0=_q>P(uGmrbcv!u~Rle7b3rSvNMWkLSP<&MQ(W`8_SEp0%XVZ@IcJx=N;q`-A zUXwN3D(uLD-@YAkb8$`DKBvaK3RCr(EaAk$(x1-?Ntxa3FwAqW%*gT2N0Q=2^ca_$ ziX(v#bWMPfx+V7VoG10(&rCXAGJh%q7ZDrQ|x#g0l@xULd zXZKqfkCTVpgX)c8a5My*P>tpkl6NYTifw5~pQ0K4qPvxTKTAM#$(uV(%nOGa?HW1f z3yJ-@J>fAkX=Cl_1WeQyDiL_5ekmmaGoo>&40xo@11o3kFqcPjkc$7V*# z@qO+?{MaFS#Ur2vw#15z=fE^fB@50b{)}I$p3E34ebA)EdewB0J?8guq)%4ocVke8 zG=aHx`|ZQwh{G}HNc$U(3=g^9EZPW;oTSEn!`A-Bii>ggrJ~pJo`b>=irTaIX51qj z^QUL_4R%gQ!Iuo|h$U@c)6kD>XZvGzmHDFgQk?Yu>DU5n)KBb;98zMaunG+TZ_VC0vp^09U#Y#JBQd5O|=VF7bAIMp>=S*Wy``%o-_e z*=0D79~I|OU+>MYzC6^c?Q`s6Y(_uT$$yRMFMgTb z1?tt&h@aH{xkts9B!~XNwmc)t)4{*oe!NCx%C7mfGa-Cd96~EfFwzHQ;bz-`%ODKG zZ>10^Y)=}qr>(4(!l>yl>p^?!Hffj&im7o~=9MnC9^UR()RqRgLlWAqQm1rb_0JFf zOr#7rWkAQdbSh7C9R*3gKZ~DQ1V$)p-HFE%ci1h0mpyY7qLx}sV&F^xS4?tpCg)}q zPPB&JN3euvjx}BN*zB&(=xetgZt=$6olG9tyQu|BuoLeRig%}|(C^@)Bu23hMM%g) z3KjSQYJW$*8W!T~D|gH2d8tywnbbPTr`O8sBU6U8xjvy2I6;p+>jAFh$o%hd9U)8b> zSr;KYA_4l1d@;-bw|upyovqQmzlir^2jATfj6!^!O#BBRA3|);I?ToMvZb^gOnxIS zvOw5%R4J3S*K@JOF^2?FkadSFpIEMUR;Iu+xBFk+FVs0hN+n|r>N~FsnM;&-G&6t0 zn(?glSG-;JkZWy;Z$mr~?kyE3L!m-5n1SyLRPD8xU!u{-KL8=r(Ti`Ibk>lJroI>% zYwBHa!*H^u!z*lb4)0>zS=d1Y|2s)Id~9%TeJmo5*>8!fra%!c@=5Mddr6D&6Qw*E zOzYcFTlvHmb44|N&6$LR6P@OX!!cgX7m|!xZQ?w%fy`Td)Pi)!NFi53l(^PhD=`hS zbr3f_6;lXw=}#-@WGb_6-b=ROZ*Alfa$z!;E;JL+bY2d%{NQaf-_y{yaKc$DU!K$S zyZ@R5LvpAh*9C2K+ZH=V{BQ7=Pipl>!#PWtkAYqd9aW_H3k6^=f>n*;#dPZ$E;B_s zH%tD*NKU}Loo*hV_HFIjddOQ-8i9iBJZ9j7`LcuLl$Z7}S`xAf?#i>V#=%lf9bFe# zKQZF1{GcDOe#eucG)ktRNNHD{mS;EO4xjv@C7U_B(&-n9f#t<)A-h0Ry~0gmrUf5u=0zw5b4Y`3dy6R>v2mFK%xSsZADke{7WzHsQqq<0!t z)H@F_4Db*U51S(Dufiv9nIgY|EilZ9w3kY57dtQ-uPJpCN9WyI+$OT2R!K9zmJ>>n z7J>R(fY#u85eb%O@V|tP-XjR{~_g=#^4XTFDg_6)IZ7rrLcbh_^nK=x;e|da!42V%sf+} z*oo~+YR-fBDGSAWEGC- z1>7oUYuUxXfQ!bxQ!T$Gl)@a-mV#tqHt|o_UCeTA1jJl>TCPUdW3*2s8~DtGouz~o zPvlfeb@t}J|N23qUh|p3+Gj*kyhSs5p(EN(Nt?~J$6@tX<#;gS8_(0_>h`N@f~B0b ztja|iK^J<}+K+STi^o7Ce$haM~RiH;=* z7?}zg<^V4V4A&8?94>G9aQTki3-|vp*|fS+jMaUR!(91;B&o-c&DC!n2}#N5wF0Em zt0)iDNSg7c{vQ}Wd0Zsm@!f0GKhDJ8{g?nqRqf!&)(L2z8h}(BawZ+~TwGa&+**!~ z7%UXvT}m~53OH>oKc_M`RD@!k%oTNtecGsvE~>>FzcNeTEWR<-%-xtS=KFk?uGpSJ zp_(XFePtE^WN#w3txvPE0Q9qL80(+&V!0)QEI5 zMsHB};=lj5@^7%^uIp={%`U>0O8U!7ytkO literal 0 HcmV?d00001 diff --git a/src/aiunn/inference.py b/src/aiunn/inference.py index c86905b..09b1098 100644 --- a/src/aiunn/inference.py +++ b/src/aiunn/inference.py @@ -1,73 +1,136 @@ import torch +from albumentations import Compose, Normalize +from albumentations.pytorch import ToTensorV2 from PIL import Image -import torchvision.transforms as T -from torch.nn import functional as F -from aiia.model import AIIABase +import numpy as np +import io +from torch import nn +from aiia import AIIABase -class UpScaler: - def __init__(self, model_path="aiuNN-finetuned", device="cuda"): +class Upscaler(nn.Module): 
+ """ + Transforms the base model's final feature map using a transposed convolution. + The base model produces a feature map of size 512x512. + This layer upsamples by a factor of 2 (yielding 1024x1024) and maps the hidden features + to the output channels using a single ConvTranspose2d layer. + """ + def __init__(self, base_model: AIIABase): + super(Upscaler, self).__init__() + self.base_model = base_model + # Instead of adding separate upsampling and convolutional layers, we use a ConvTranspose2d layer. + self.last_transform = nn.ConvTranspose2d( + in_channels=base_model.config.hidden_size, + out_channels=base_model.config.num_channels, + kernel_size=base_model.config.kernel_size, + stride=2, + padding=1, + output_padding=1 + ) + + def forward(self, x): + features = self.base_model(x) + return self.last_transform(features) + +class ImageUpscaler: + def __init__(self, model_path: str, device: str = 'cuda' if torch.cuda.is_available() else 'cpu'): + """ + Initialize the ImageUpscaler with the trained model. + + Args: + model_path (str): Path to the trained model directory. + device (str): Device to run inference on ('cuda' or 'cpu'). + """ self.device = torch.device(device) - self.model = AIIABase.load(model_path).to(self.device) - self.model.eval() + self.model = self.load_model(model_path) + self.model.eval() # Set the model to evaluation mode - # Preprocessing transforms - self.preprocess = T.Compose([ - T.Lambda(lambda img: self._pad_to_square(img)), - T.Resize(512), - T.ToTensor(), - T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) + # Define preprocessing transformations + self.preprocess = Compose([ + Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), + ToTensorV2() ]) - - def _pad_to_square(self, pil_img): - """Pad image to square while maintaining aspect ratio""" - w, h = pil_img.size - max_side = max(w, h) - hp = (max_side - w) // 2 - vp = (max_side - h) // 2 - padding = (hp, vp, max_side - w - hp, max_side - h - vp) - return T.functional.pad(pil_img, padding, 0, 'constant') - def _remove_padding(self, tensor, original_size): - """Remove padding added during preprocessing""" - _, _, h, w = tensor.shape - orig_w, orig_h = original_size + def load_model(self, model_path: str): + """ + Load the trained model from the specified path. - # Calculate scale factor - scale = 512 / max(orig_w, orig_h) - new_w = int(orig_w * scale) - new_h = int(orig_h * scale) + Args: + model_path (str): Path to the saved model. - # Calculate padding offsets - pad_w = (512 - new_w) // 2 - pad_h = (512 - new_h) // 2 + Returns: + nn.Module: Loaded PyTorch model. + """ + # Load the base model and wrap it with Upscaler + base_model = AIIABase.load(model_path) + model = Upscaler(base_model) - # Remove padding - unpad = tensor[:, :, pad_h:pad_h+new_h, pad_w:pad_w+new_w] + # Move the model to the appropriate device + return model.to(self.device) + + def preprocess_image(self, image: Image.Image): + """ + Preprocess the input image for inference. - # Resize to target 2x resolution - return F.interpolate(unpad, size=(orig_h*2, orig_w*2), mode='bilinear', align_corners=False) - - def upscale(self, input_image): - # Preprocess - original_size = input_image.size - input_tensor = self.preprocess(input_image).unsqueeze(0).to(self.device) + Args: + image (PIL.Image.Image): Input image in PIL format. - # Inference + Returns: + torch.Tensor: Preprocessed image tensor. 
+ """ + # Convert PIL image to numpy array + image_array = np.array(image) + + # Apply preprocessing transformations + augmented = self.preprocess(image=image_array) + + # Add batch dimension and move to device + return augmented['image'].unsqueeze(0).to(self.device) + + def postprocess_image(self, output_tensor: torch.Tensor): + """ + Postprocess the output tensor to convert it back to an image. + + Args: + output_tensor (torch.Tensor): Model output tensor. + + Returns: + PIL.Image.Image: Upscaled image in PIL format. + """ + # Remove batch dimension and move to CPU + output_tensor = output_tensor.squeeze(0).cpu() + + # Denormalize and convert to numpy array + output_array = (output_tensor * 0.5 + 0.5).clamp(0, 1).numpy() + + # Convert from CHW (Channels-Height-Width) to HWC (Height-Width-Channels) format + output_array = (output_array.transpose(1, 2, 0) * 255).astype(np.uint8) + + # Convert numpy array back to PIL image + return Image.fromarray(output_array) + + def upscale_image(self, input_image_path: str): + """ + Perform upscaling on an input image. + + Args: + input_image_path (str): Path to the input low-resolution image. + + Returns: + PIL.Image.Image: Upscaled high-resolution image. + """ + # Load and preprocess the input image + input_image = Image.open(input_image_path).convert('RGB') + preprocessed_image = self.preprocess_image(input_image) + + # Perform inference with the model with torch.no_grad(): - features = self.model.cnn(input_tensor) - output = self.model.upsample(features) + with torch.cuda.amp.autocast(device_type="cuda"): + output_tensor = self.model(preprocessed_image) - # Postprocess - output = self._remove_padding(output, original_size) - - # Convert to PIL Image - output = output.squeeze(0).cpu().detach() - output = (output * 0.5 + 0.5).clamp(0, 1) - return T.functional.to_pil_image(output) + # Postprocess and return the upscaled image + return self.postprocess_image(output_tensor) -# Usage example -if __name__ == "__main__": - upscaler = UpScaler() - input_image = Image.open("input.jpg") - output_image = upscaler.upscale(input_image) - output_image.save("output_2x.jpg") +# Example usage: +# upscaler = ImageUpscaler(model_path="/path/to/best_model") +# upscaled_image = upscaler.upscale_image("/path/to/low_res_image.jpg") +# upscaled_image.save("/path/to/upscaled_image.jpg") From e28fd3d3304429759e49771d608b66eceef21ae6 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Mon, 17 Feb 2025 16:24:14 +0100 Subject: [PATCH 048/100] added paths --- src/aiunn/inference.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/aiunn/inference.py b/src/aiunn/inference.py index 09b1098..b364191 100644 --- a/src/aiunn/inference.py +++ b/src/aiunn/inference.py @@ -131,6 +131,6 @@ class ImageUpscaler: return self.postprocess_image(output_tensor) # Example usage: -# upscaler = ImageUpscaler(model_path="/path/to/best_model") -# upscaled_image = upscaler.upscale_image("/path/to/low_res_image.jpg") -# upscaled_image.save("/path/to/upscaled_image.jpg") +upscaler = ImageUpscaler(model_path="/root/vision/aiuNN/best_model") +upscaled_image = upscaler.upscale_image("/root/vision/aiuNN/input.jpg") +upscaled_image.save("upscaled_image.jpg") From bba5e2179dc5161734f33bda6069f40b3ce048f7 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Mon, 17 Feb 2025 16:26:54 +0100 Subject: [PATCH 049/100] setted up correct amp --- src/aiunn/inference.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiunn/inference.py b/src/aiunn/inference.py index 
b364191..182c8da 100644 --- a/src/aiunn/inference.py +++ b/src/aiunn/inference.py @@ -124,7 +124,7 @@ class ImageUpscaler: # Perform inference with the model with torch.no_grad(): - with torch.cuda.amp.autocast(device_type="cuda"): + with torch.amp.autocast(device_type="cuda"): output_tensor = self.model(preprocessed_image) # Postprocess and return the upscaled image From 40f1a650067b4aa093e28385acb9354c6a128a38 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Mon, 17 Feb 2025 16:31:12 +0100 Subject: [PATCH 050/100] fix --- src/aiunn/inference.py | 72 +++++++++--------------------------------- 1 file changed, 15 insertions(+), 57 deletions(-) diff --git a/src/aiunn/inference.py b/src/aiunn/inference.py index 182c8da..991b708 100644 --- a/src/aiunn/inference.py +++ b/src/aiunn/inference.py @@ -7,6 +7,7 @@ import io from torch import nn from aiia import AIIABase + class Upscaler(nn.Module): """ Transforms the base model's final feature map using a transposed convolution. @@ -30,19 +31,11 @@ class Upscaler(nn.Module): def forward(self, x): features = self.base_model(x) return self.last_transform(features) - class ImageUpscaler: def __init__(self, model_path: str, device: str = 'cuda' if torch.cuda.is_available() else 'cpu'): - """ - Initialize the ImageUpscaler with the trained model. - - Args: - model_path (str): Path to the trained model directory. - device (str): Device to run inference on ('cuda' or 'cpu'). - """ self.device = torch.device(device) self.model = self.load_model(model_path) - self.model.eval() # Set the model to evaluation mode + self.model.eval() # Set to evaluation mode # Define preprocessing transformations self.preprocess = Compose([ @@ -53,34 +46,20 @@ class ImageUpscaler: def load_model(self, model_path: str): """ Load the trained model from the specified path. - - Args: - model_path (str): Path to the saved model. - - Returns: - nn.Module: Loaded PyTorch model. """ - # Load the base model and wrap it with Upscaler - base_model = AIIABase.load(model_path) - model = Upscaler(base_model) - - # Move the model to the appropriate device + base_model = AIIABase.load(model_path) # Load base model + model = Upscaler(base_model) # Wrap with Upscaler return model.to(self.device) def preprocess_image(self, image: Image.Image): """ - Preprocess the input image for inference. - - Args: - image (PIL.Image.Image): Input image in PIL format. - - Returns: - torch.Tensor: Preprocessed image tensor. + Preprocess input image for inference. """ - # Convert PIL image to numpy array - image_array = np.array(image) + if not isinstance(image, Image.Image): + raise ValueError("Input must be a PIL.Image.Image object") - # Apply preprocessing transformations + # Convert to numpy array and apply preprocessing + image_array = np.array(image) augmented = self.preprocess(image=image_array) # Add batch dimension and move to device @@ -88,48 +67,27 @@ class ImageUpscaler: def postprocess_image(self, output_tensor: torch.Tensor): """ - Postprocess the output tensor to convert it back to an image. - - Args: - output_tensor (torch.Tensor): Model output tensor. - - Returns: - PIL.Image.Image: Upscaled image in PIL format. + Convert output tensor back to an image. 
""" - # Remove batch dimension and move to CPU - output_tensor = output_tensor.squeeze(0).cpu() - - # Denormalize and convert to numpy array - output_array = (output_tensor * 0.5 + 0.5).clamp(0, 1).numpy() - - # Convert from CHW (Channels-Height-Width) to HWC (Height-Width-Channels) format - output_array = (output_array.transpose(1, 2, 0) * 255).astype(np.uint8) - - # Convert numpy array back to PIL image + output_tensor = output_tensor.squeeze(0).cpu() # Remove batch dimension + output_array = (output_tensor * 0.5 + 0.5).clamp(0, 1).numpy() * 255 + output_array = output_array.transpose(1, 2, 0).astype(np.uint8) # CHW -> HWC return Image.fromarray(output_array) def upscale_image(self, input_image_path: str): """ Perform upscaling on an input image. - - Args: - input_image_path (str): Path to the input low-resolution image. - - Returns: - PIL.Image.Image: Upscaled high-resolution image. """ - # Load and preprocess the input image - input_image = Image.open(input_image_path).convert('RGB') + input_image = Image.open(input_image_path).convert('RGB') # Ensure RGB format preprocessed_image = self.preprocess_image(input_image) - # Perform inference with the model with torch.no_grad(): with torch.amp.autocast(device_type="cuda"): output_tensor = self.model(preprocessed_image) - # Postprocess and return the upscaled image return self.postprocess_image(output_tensor) + # Example usage: upscaler = ImageUpscaler(model_path="/root/vision/aiuNN/best_model") upscaled_image = upscaler.upscale_image("/root/vision/aiuNN/input.jpg") From 0c0372794e1e066dab0810ae29974f2f081ace48 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 21 Feb 2025 20:54:30 +0100 Subject: [PATCH 051/100] new alternative version --- src/aiunn/finetune.py | 251 +++++++++++------------------------------- 1 file changed, 62 insertions(+), 189 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index b0dd634..23211da 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -1,200 +1,73 @@ -import torch import pandas as pd -from albumentations import ( - Compose, Resize, Normalize, RandomBrightnessContrast, - HorizontalFlip, VerticalFlip, Rotate, GaussianBlur -) -from albumentations.pytorch import ToTensorV2 -from PIL import Image, ImageFile import io -import base64 -import numpy as np -from torch import nn -from torch.utils.data import random_split, DataLoader -from aiia import AIIA, AIIAConfig, AIIABase, AIIABaseShared, AIIAmoe, AIIAchunked, AIIArecursive -from torch.amp import autocast, GradScaler -from tqdm import tqdm -from torch.utils.checkpoint import checkpoint +from PIL import Image +from torch.utils.data import Dataset +from torchvision import transforms +from aiia import AIIA + +class UpscaleDataset(Dataset): + def __init__(self, parquet_file, transform=None): + self.df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(10000) + self.transform = transform -class aiuNNDataset(torch.utils.data.Dataset): - def __init__(self, parquet_path): - self.df = pd.read_parquet(parquet_path, columns=['image_512', 'image_1024']).head(10000) - self.augmentation = Compose([ - RandomBrightnessContrast(p=0.5), - HorizontalFlip(p=0.5), - VerticalFlip(p=0.5), - Rotate(limit=45, p=0.5), - GaussianBlur(blur_limit=(3, 7), p=0.5), - Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), - ToTensorV2() - ]) - def __len__(self): return len(self.df) - - def load_image(self, image_data): - try: - if isinstance(image_data, str): - image_data = base64.b64decode(image_data) - if not isinstance(image_data, 
bytes): - raise ValueError("Invalid image data format") - image_stream = io.BytesIO(image_data) - ImageFile.LOAD_TRUNCATED_IMAGES = True - image = Image.open(image_stream).convert('RGB') - image_array = np.array(image) - return image_array - except Exception as e: - raise RuntimeError(f"Error loading image: {str(e)}") - finally: - if 'image_stream' in locals(): - image_stream.close() - + def __getitem__(self, idx): row = self.df.iloc[idx] - low_res_image = self.load_image(row['image_512']) - high_res_image = self.load_image(row['image_1024']) - augmented_low = self.augmentation(image=low_res_image) - augmented_high = self.augmentation(image=high_res_image) - return { - 'low_res': augmented_low['image'], - 'high_res': augmented_high['image'] - } + # Decode the byte strings into images + low_res_bytes = row['image_512'] + high_res_bytes = row['image_1024'] + low_res_image = Image.open(io.BytesIO(low_res_bytes)).convert('RGB') + high_res_image = Image.open(io.BytesIO(high_res_bytes)).convert('RGB') + if self.transform: + low_res_image = self.transform(low_res_image) + high_res_image = self.transform(high_res_image) + return low_res_image, high_res_image -class Upscaler(nn.Module): - """ - Transforms the base model's final feature map using a transposed convolution. - The base model produces a feature map of size 512x512. - This layer upsamples by a factor of 2 (yielding 1024x1024) and maps the hidden features - to the output channels using a single ConvTranspose2d layer. - """ - def __init__(self, base_model: AIIABase): - super(Upscaler, self).__init__() - self.base_model = base_model - # Instead of adding separate upsampling and convolutional layers, we use a ConvTranspose2d layer. - self.last_transform = nn.ConvTranspose2d( - in_channels=base_model.config.hidden_size, - out_channels=base_model.config.num_channels, - kernel_size=base_model.config.kernel_size, - stride=2, - padding=1, - output_padding=1 - ) - - def forward(self, x): - features = self.base_model(x) - return self.last_transform(features) +# Example transform: converting PIL images to tensors +transform = transforms.Compose([ + transforms.ToTensor(), +]) + + +import torch + +# Replace with your actual pretrained model path +pretrained_model_path = "/root/vision/AIIA/AIIA-base-512" + +# Load the model using the AIIA.load class method (the implementation copied in your query) +model = AIIA.load(pretrained_model_path) +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +model = model.to(device) +from torch import nn, optim +from torch.utils.data import DataLoader + +# Create your dataset and dataloader +dataset = UpscaleDataset("/root/training_data/vision-dataset/image_upscaler.parquet", transform=transform) +data_loader = DataLoader(dataset, batch_size=16, shuffle=True) + +# Define a loss function and optimizer +criterion = nn.MSELoss() +optimizer = optim.Adam(model.parameters(), lr=1e-4) + +num_epochs = 10 +model.train() # Set model in training mode + +for epoch in range(num_epochs): + epoch_loss = 0.0 + for low_res, high_res in data_loader: + low_res = low_res.to(device) + high_res = high_res.to(device) -def finetune_model(model: nn.Module, datasets: list[str], batch_size=1, epochs=10, accumulation_steps=8, use_checkpoint=False): - # Load and concatenate datasets. 
- loaded_datasets = [aiuNNDataset(d) for d in datasets] - combined_dataset = torch.utils.data.ConcatDataset(loaded_datasets) - train_size = int(0.8 * len(combined_dataset)) - val_size = len(combined_dataset) - train_size - train_dataset, val_dataset = random_split(combined_dataset, [train_size, val_size]) - - train_loader = DataLoader( - train_dataset, - batch_size=batch_size, - shuffle=True, - num_workers=4, - pin_memory=True, - persistent_workers=True - ) - - val_loader = DataLoader( - val_dataset, - batch_size=batch_size, - shuffle=False, - num_workers=4, - pin_memory=True, - persistent_workers=True - ) - - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - if device.type == 'cuda': - current_device = torch.cuda.current_device() - torch.cuda.set_per_process_memory_fraction(0.95, device=current_device) - - model = model.to(device) - criterion = nn.MSELoss() - optimizer = torch.optim.Adam(model.parameters(), lr=model.base_model.config.learning_rate) - scaler = GradScaler() - best_val_loss = float('inf') - - for epoch in range(epochs): - model.train() - train_loss = 0.0 optimizer.zero_grad() - for i, batch in enumerate(tqdm(train_loader, desc=f"Epoch {epoch+1}/Training"), start=1): - if torch.cuda.is_available(): - torch.cuda.empty_cache() - low_res = batch['low_res'].to(device) - high_res = batch['high_res'].to(device) - with autocast(device_type="cuda"): - if use_checkpoint: - low_res = batch['low_res'].to(device).requires_grad_() - features = checkpoint(lambda x: model(x), low_res) - else: - features = model(low_res) - loss = criterion(features, high_res) / accumulation_steps - scaler.scale(loss).backward() - train_loss += loss.item() * accumulation_steps - if i % accumulation_steps == 0: - scaler.step(optimizer) - scaler.update() - optimizer.zero_grad() - if (i % accumulation_steps) != 0: - scaler.step(optimizer) - scaler.update() - optimizer.zero_grad() - - avg_train_loss = train_loss / len(train_loader) - print(f"Epoch {epoch+1}, Training Loss: {avg_train_loss:.4f}") - - model.eval() - val_loss = 0.0 - with torch.no_grad(): - for batch in tqdm(val_loader, desc="Validation"): - if torch.cuda.is_available(): - torch.cuda.empty_cache() - low_res = batch['low_res'].to(device) - high_res = batch['high_res'].to(device) - with autocast(device_type="cuda"): - outputs = model(low_res) - loss = criterion(outputs, high_res) - val_loss += loss.item() - avg_val_loss = val_loss / len(val_loader) - print(f"Epoch {epoch+1}, Validation Loss: {avg_val_loss:.4f}") - if avg_val_loss < best_val_loss: - best_val_loss = avg_val_loss - model.base_model.save("best_model") - return model - -def main(): - BATCH_SIZE = 1 - ACCUMULATION_STEPS = 8 - USE_CHECKPOINT = False - - # Load the base model using the provided configuration (e.g., hidden_size=512, num_channels=3, etc.) - base_model = AIIABase.load("/root/vision/AIIA/AIIA-base-512") + outputs = model(low_res) + loss = criterion(outputs, high_res) + loss.backward() + optimizer.step() + epoch_loss += loss.item() + print(f"Epoch {epoch + 1}, Loss: {epoch_loss / len(data_loader)}") - # Wrap the base model with our modified Upscaler that transforms its last layer. 
- model = Upscaler(base_model) - - print("Modified model architecture with transformed final layer:") - print(base_model.config) - - finetune_model( - model=model, - datasets=[ - "/root/training_data/vision-dataset/image_upscaler.parquet", - "/root/training_data/vision-dataset/image_vec_upscaler.parquet" - ], - batch_size=BATCH_SIZE, - epochs=10, - accumulation_steps=ACCUMULATION_STEPS, - use_checkpoint=USE_CHECKPOINT - ) - -if __name__ == '__main__': - main() +# Optionally, save the finetuned model to a new directory +finetuned_model_path = "aiuNN" +model.save(finetuned_model_path) From 36f9b3066683088f8c3976a76ef2a6e78987bfde Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 21 Feb 2025 20:55:46 +0100 Subject: [PATCH 052/100] new saving --- src/aiunn/finetune.py | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 23211da..b4584d8 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -4,6 +4,8 @@ from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from aiia import AIIA +import csv + class UpscaleDataset(Dataset): def __init__(self, parquet_file, transform=None): @@ -54,6 +56,16 @@ optimizer = optim.Adam(model.parameters(), lr=1e-4) num_epochs = 10 model.train() # Set model in training mode + +csv_file = 'losses.csv' + +# Create or open the CSV file and write the header if it doesn't exist +with open(csv_file, mode='a', newline='') as file: + writer = csv.writer(file) + # Write the header only if the file is empty + if file.tell() == 0: + writer.writerow(['Epoch', 'Train Loss']) + for epoch in range(num_epochs): epoch_loss = 0.0 for low_res, high_res in data_loader: @@ -66,7 +78,14 @@ for epoch in range(num_epochs): loss.backward() optimizer.step() epoch_loss += loss.item() - print(f"Epoch {epoch + 1}, Loss: {epoch_loss / len(data_loader)}") + + avg_epoch_loss = epoch_loss / len(data_loader) + print(f"Epoch {epoch + 1}, Loss: {avg_epoch_loss}") + + # Append the training loss to the CSV file + with open(csv_file, mode='a', newline='') as file: + writer = csv.writer(file) + writer.writerow([epoch + 1, avg_epoch_loss]) # Optionally, save the finetuned model to a new directory finetuned_model_path = "aiuNN" From eacef6af650e05f0785ca394800e5b3ffed77643 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 21 Feb 2025 20:57:11 +0100 Subject: [PATCH 053/100] added tqdm --- src/aiunn/finetune.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index b4584d8..f53ee73 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -5,7 +5,7 @@ from torch.utils.data import Dataset from torchvision import transforms from aiia import AIIA import csv - +from tqdm import tqdm class UpscaleDataset(Dataset): def __init__(self, parquet_file, transform=None): @@ -68,7 +68,10 @@ with open(csv_file, mode='a', newline='') as file: for epoch in range(num_epochs): epoch_loss = 0.0 - for low_res, high_res in data_loader: + # Wrap the data_loader with tqdm for progress tracking + data_loader_with_progress = tqdm(data_loader, desc=f"Epoch {epoch + 1}") + print(f"Epoche: {epoch}") + for low_res, high_res in data_loader_with_progress: low_res = low_res.to(device) high_res = high_res.to(device) From 6a9b4afd91f34328a606bba1d976d04417faf05b Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 21 Feb 2025 20:58:35 +0100 Subject: [PATCH 054/100] use correct model loading --- src/aiunn/finetune.py | 4 ++-- 
1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index f53ee73..80dad0b 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -3,7 +3,7 @@ import io from PIL import Image from torch.utils.data import Dataset from torchvision import transforms -from aiia import AIIA +from aiia import AIIABase import csv from tqdm import tqdm @@ -39,7 +39,7 @@ import torch pretrained_model_path = "/root/vision/AIIA/AIIA-base-512" # Load the model using the AIIA.load class method (the implementation copied in your query) -model = AIIA.load(pretrained_model_path) +model = AIIABase.load(pretrained_model_path) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = model.to(device) from torch import nn, optim From 2f12fcb863877c5e866abe17546d82b65cbd48a9 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 21 Feb 2025 21:03:37 +0100 Subject: [PATCH 055/100] updated loading --- src/aiunn/finetune.py | 41 ++++++++++++++++++++++++++++------------- 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 80dad0b..3e0441f 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -8,24 +8,39 @@ import csv from tqdm import tqdm class UpscaleDataset(Dataset): - def __init__(self, parquet_file, transform=None): - self.df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(10000) + def __init__(self, parquet_files: list, transform=None): + # Initialize an empty DataFrame to hold the combined data + combined_df = pd.DataFrame() + + # Iterate through each Parquet file in the list and load it into a DataFrame + for parquet_file in parquet_files: + df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(10000) + combined_df = pd.concat([combined_df, df], ignore_index=True) + self.transform = transform def __len__(self): return len(self.df) def __getitem__(self, idx): - row = self.df.iloc[idx] - # Decode the byte strings into images - low_res_bytes = row['image_512'] - high_res_bytes = row['image_1024'] - low_res_image = Image.open(io.BytesIO(low_res_bytes)).convert('RGB') - high_res_image = Image.open(io.BytesIO(high_res_bytes)).convert('RGB') - if self.transform: - low_res_image = self.transform(low_res_image) - high_res_image = self.transform(high_res_image) - return low_res_image, high_res_image + try: + row = self.df.iloc[idx] + # Convert string to bytes if necessary + low_res_bytes = row['image_512'].encode('latin-1') if isinstance(row['image_512'], str) else row['image_512'] + high_res_bytes = row['image_1024'].encode('latin-1') if isinstance(row['image_1024'], str) else row['image_1024'] + + # Decode the bytes into images + low_res_image = Image.open(io.BytesIO(low_res_bytes)).convert('RGB') + high_res_image = Image.open(io.BytesIO(high_res_bytes)).convert('RGB') + + if self.transform: + low_res_image = self.transform(low_res_image) + high_res_image = self.transform(high_res_image) + return low_res_image, high_res_image + except Exception as e: + print(f"Error processing index {idx}: {str(e)}") + # You might want to either skip this sample or return a default value + raise e # Example transform: converting PIL images to tensors transform = transforms.Compose([ @@ -46,7 +61,7 @@ from torch import nn, optim from torch.utils.data import DataLoader # Create your dataset and dataloader -dataset = UpscaleDataset("/root/training_data/vision-dataset/image_upscaler.parquet", transform=transform) +dataset = 
UpscaleDataset(["/root/training_data/vision-dataset/image_upscaler.parquet", "/root/training_data/vision-dataset/image_vec_upscaler.parquet"], transform=transform) data_loader = DataLoader(dataset, batch_size=16, shuffle=True) # Define a loss function and optimizer From 2583d2f01f32aaac53440b05ad11eb0a111ad002 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 21 Feb 2025 21:05:48 +0100 Subject: [PATCH 056/100] bugfix --- src/aiunn/finetune.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 3e0441f..e3bdf46 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -16,7 +16,7 @@ class UpscaleDataset(Dataset): for parquet_file in parquet_files: df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(10000) combined_df = pd.concat([combined_df, df], ignore_index=True) - + self.df = combined_df self.transform = transform def __len__(self): From fca74fb8d2cd6ebd76a4b4769091fd0908ea2bb3 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 21 Feb 2025 21:12:14 +0100 Subject: [PATCH 057/100] new loading --- src/aiunn/finetune.py | 68 +++++++++++++++++++++++++++++++------------ 1 file changed, 50 insertions(+), 18 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index e3bdf46..b47478c 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -1,46 +1,78 @@ import pandas as pd import io -from PIL import Image +from PIL import Image, ImageFile from torch.utils.data import Dataset from torchvision import transforms from aiia import AIIABase import csv from tqdm import tqdm +import base64 class UpscaleDataset(Dataset): def __init__(self, parquet_files: list, transform=None): - # Initialize an empty DataFrame to hold the combined data combined_df = pd.DataFrame() - - # Iterate through each Parquet file in the list and load it into a DataFrame for parquet_file in parquet_files: + # Load data with chunking for memory efficiency df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(10000) combined_df = pd.concat([combined_df, df], ignore_index=True) - self.df = combined_df + + # Validate data format + self.df = combined_df.apply(self._validate_row, axis=1) self.transform = transform + self.failed_indices = set() + + def _validate_row(self, row): + """Ensure both images exist and have correct dimensions""" + for col in ['image_512', 'image_1024']: + if not isinstance(row[col], (bytes, str)): + raise ValueError(f"Invalid data type in column {col}: {type(row[col])}") + return row + + def _decode_image(self, data): + """Universal decoder handling both base64 strings and bytes""" + try: + if isinstance(data, str): + # Handle base64 encoded strings + return base64.b64decode(data) + elif isinstance(data, bytes): + return data + raise ValueError(f"Unsupported data type: {type(data)}") + except Exception as e: + raise RuntimeError(f"Decoding failed: {str(e)}") def __len__(self): return len(self.df) def __getitem__(self, idx): + if idx in self.failed_indices: + return self[(idx + 1) % len(self)] # Skip failed indices + try: row = self.df.iloc[idx] - # Convert string to bytes if necessary - low_res_bytes = row['image_512'].encode('latin-1') if isinstance(row['image_512'], str) else row['image_512'] - high_res_bytes = row['image_1024'].encode('latin-1') if isinstance(row['image_1024'], str) else row['image_1024'] - - # Decode the bytes into images - low_res_image = Image.open(io.BytesIO(low_res_bytes)).convert('RGB') - high_res_image = 
Image.open(io.BytesIO(high_res_bytes)).convert('RGB') + # Decode both images + low_res_bytes = self._decode_image(row['image_512']) + high_res_bytes = self._decode_image(row['image_1024']) + + # Load images with truncation handling + ImageFile.LOAD_TRUNCATED_IMAGES = True + low_res = Image.open(io.BytesIO(low_res_bytes)).convert('RGB') + high_res = Image.open(io.BytesIO(high_res_bytes)).convert('RGB') + + # Validate image sizes + if low_res.size != (512, 512) or high_res.size != (1024, 1024): + raise ValueError(f"Size mismatch: LowRes={low_res.size}, HighRes={high_res.size}") + if self.transform: - low_res_image = self.transform(low_res_image) - high_res_image = self.transform(high_res_image) - return low_res_image, high_res_image + low_res = self.transform(low_res) + high_res = self.transform(high_res) + + return low_res, high_res + except Exception as e: - print(f"Error processing index {idx}: {str(e)}") - # You might want to either skip this sample or return a default value - raise e + print(f"\nError at index {idx}: {str(e)}") + self.failed_indices.add(idx) + return self[(idx + 1) % len(self)] # Return next valid sample # Example transform: converting PIL images to tensors transform = transforms.Compose([ From d66b167b857542130b8bc46f660f42f9db3b1c58 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 21 Feb 2025 21:17:40 +0100 Subject: [PATCH 058/100] downsized abtchsize to 8 --- src/aiunn/finetune.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index b47478c..b1ee2e5 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -94,7 +94,7 @@ from torch.utils.data import DataLoader # Create your dataset and dataloader dataset = UpscaleDataset(["/root/training_data/vision-dataset/image_upscaler.parquet", "/root/training_data/vision-dataset/image_vec_upscaler.parquet"], transform=transform) -data_loader = DataLoader(dataset, batch_size=16, shuffle=True) +data_loader = DataLoader(dataset, batch_size=8, shuffle=True) # Define a loss function and optimizer criterion = nn.MSELoss() From 6a31e8fa7f3e5c8357cd241326f12f5526515c1d Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 21 Feb 2025 21:43:01 +0100 Subject: [PATCH 059/100] downsized to 2 --- src/aiunn/finetune.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index b1ee2e5..e29d36d 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -94,7 +94,7 @@ from torch.utils.data import DataLoader # Create your dataset and dataloader dataset = UpscaleDataset(["/root/training_data/vision-dataset/image_upscaler.parquet", "/root/training_data/vision-dataset/image_vec_upscaler.parquet"], transform=transform) -data_loader = DataLoader(dataset, batch_size=8, shuffle=True) +data_loader = DataLoader(dataset, batch_size=2, shuffle=True) # Define a loss function and optimizer criterion = nn.MSELoss() From b06dad23affeaa2495f694c99d7b5849b3292d8c Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 21 Feb 2025 21:47:06 +0100 Subject: [PATCH 060/100] downsized samples to max of 10k --- src/aiunn/finetune.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index e29d36d..10cedba 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -13,7 +13,7 @@ class UpscaleDataset(Dataset): combined_df = pd.DataFrame() for parquet_file in parquet_files: # Load data with chunking for memory efficiency - df = pd.read_parquet(parquet_file, 
columns=['image_512', 'image_1024']).head(10000) + df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(5000) combined_df = pd.concat([combined_df, df], ignore_index=True) # Validate data format From 0166d8feeb92292744e089321314b9e84b6fa74b Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 21 Feb 2025 21:49:16 +0100 Subject: [PATCH 061/100] single batch --- src/aiunn/finetune.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 10cedba..46faf1a 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -94,7 +94,7 @@ from torch.utils.data import DataLoader # Create your dataset and dataloader dataset = UpscaleDataset(["/root/training_data/vision-dataset/image_upscaler.parquet", "/root/training_data/vision-dataset/image_vec_upscaler.parquet"], transform=transform) -data_loader = DataLoader(dataset, batch_size=2, shuffle=True) +data_loader = DataLoader(dataset, batch_size=1, shuffle=True) # Define a loss function and optimizer criterion = nn.MSELoss() From 992bc0eb7fa33c7a696d90157a3ed4336738eadc Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 21 Feb 2025 21:51:48 +0100 Subject: [PATCH 062/100] downsized two 8k samples --- src/aiunn/finetune.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 46faf1a..6300266 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -13,7 +13,7 @@ class UpscaleDataset(Dataset): combined_df = pd.DataFrame() for parquet_file in parquet_files: # Load data with chunking for memory efficiency - df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(5000) + df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(4000) combined_df = pd.concat([combined_df, df], ignore_index=True) # Validate data format From 4af12873f27acb2c9b57569e5a9acbcacbe4fe62 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 21 Feb 2025 21:54:33 +0100 Subject: [PATCH 063/100] cpu training --- src/aiunn/finetune.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 6300266..d675057 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -13,7 +13,7 @@ class UpscaleDataset(Dataset): combined_df = pd.DataFrame() for parquet_file in parquet_files: # Load data with chunking for memory efficiency - df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(4000) + df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(10000) combined_df = pd.concat([combined_df, df], ignore_index=True) # Validate data format @@ -87,14 +87,14 @@ pretrained_model_path = "/root/vision/AIIA/AIIA-base-512" # Load the model using the AIIA.load class method (the implementation copied in your query) model = AIIABase.load(pretrained_model_path) -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +device = 'cpu' #torch.device("cuda" if torch.cuda.is_available() else "cpu") model = model.to(device) from torch import nn, optim from torch.utils.data import DataLoader # Create your dataset and dataloader dataset = UpscaleDataset(["/root/training_data/vision-dataset/image_upscaler.parquet", "/root/training_data/vision-dataset/image_vec_upscaler.parquet"], transform=transform) -data_loader = DataLoader(dataset, batch_size=1, shuffle=True) +data_loader = DataLoader(dataset, batch_size=4, shuffle=True) # Define a loss function and optimizer criterion = nn.MSELoss() 
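Patches 058 through 063 above trade throughput for memory: the batch size drops from 8 to 2 to 1 and back up to 4, each Parquet file is capped at a few thousand rows, and training briefly falls back to the CPU. The earlier finetune_model implementation removed at the start of this series handled the same memory pressure differently, by accumulating gradients over several small batches before each optimizer step. The following is a minimal sketch of that pattern under mixed precision, reusing the model, criterion, optimizer, data_loader and device already set up in the script above; accumulation_steps is an illustrative value, not a setting from the repository.

import torch
from torch.amp import autocast, GradScaler

scaler = GradScaler()
accumulation_steps = 8  # effective batch = loader batch size * accumulation_steps

optimizer.zero_grad()
for i, (low_res, high_res) in enumerate(data_loader, start=1):
    low_res = low_res.to(device)
    high_res = high_res.to(device)
    with autocast(device_type="cuda"):
        outputs = model(low_res)
        # Divide the loss so the accumulated gradients match one large batch.
        loss = criterion(outputs, high_res) / accumulation_steps
    scaler.scale(loss).backward()
    if i % accumulation_steps == 0:
        scaler.step(optimizer)
        scaler.update()
        optimizer.zero_grad()

# Flush any remaining accumulated gradients at the end of the epoch.
if i % accumulation_steps != 0:
    scaler.step(optimizer)
    scaler.update()
    optimizer.zero_grad()

With this approach the per-step memory footprint stays that of the small batch while the gradient statistics approximate the larger one, which is the trade-off the batch-size reductions in these commits are working around.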
From 742a3e6f7040b32bddc7a7266919e953db180789 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 21 Feb 2025 21:59:16 +0100 Subject: [PATCH 064/100] fiexd script --- src/aiunn/finetune.py | 37 +++++++++++++++++++++---------------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index d675057..6ea2bdb 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -7,13 +7,15 @@ from aiia import AIIABase import csv from tqdm import tqdm import base64 +from torch.amp import autocast, GradScaler + class UpscaleDataset(Dataset): def __init__(self, parquet_files: list, transform=None): combined_df = pd.DataFrame() for parquet_file in parquet_files: # Load data with chunking for memory efficiency - df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(10000) + df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(5000) combined_df = pd.concat([combined_df, df], ignore_index=True) # Validate data format @@ -80,8 +82,6 @@ transform = transforms.Compose([ ]) -import torch - # Replace with your actual pretrained model path pretrained_model_path = "/root/vision/AIIA/AIIA-base-512" @@ -94,7 +94,7 @@ from torch.utils.data import DataLoader # Create your dataset and dataloader dataset = UpscaleDataset(["/root/training_data/vision-dataset/image_upscaler.parquet", "/root/training_data/vision-dataset/image_vec_upscaler.parquet"], transform=transform) -data_loader = DataLoader(dataset, batch_size=4, shuffle=True) +data_loader = DataLoader(dataset, batch_size=2, shuffle=True) # Define a loss function and optimizer criterion = nn.MSELoss() @@ -113,29 +113,34 @@ with open(csv_file, mode='a', newline='') as file: if file.tell() == 0: writer.writerow(['Epoch', 'Train Loss']) +# Create a gradient scaler (for scaling gradients when using AMP) +scaler = GradScaler() + for epoch in range(num_epochs): epoch_loss = 0.0 - # Wrap the data_loader with tqdm for progress tracking data_loader_with_progress = tqdm(data_loader, desc=f"Epoch {epoch + 1}") - print(f"Epoche: {epoch}") for low_res, high_res in data_loader_with_progress: - low_res = low_res.to(device) - high_res = high_res.to(device) + low_res = low_res.to(device, non_blocking=True) + high_res = high_res.to(device, non_blocking=True) optimizer.zero_grad() - outputs = model(low_res) - loss = criterion(outputs, high_res) - loss.backward() - optimizer.step() - epoch_loss += loss.item() + + # Use automatic mixed precision context + with autocast(): + outputs = model(low_res) + loss = criterion(outputs, high_res) + + scaler.scale(loss).backward() + scaler.step(optimizer) + scaler.update() - avg_epoch_loss = epoch_loss / len(data_loader) - print(f"Epoch {epoch + 1}, Loss: {avg_epoch_loss}") + epoch_loss += loss.item() + print(f"Epoch {epoch + 1}, Loss: {epoch_loss}") # Append the training loss to the CSV file with open(csv_file, mode='a', newline='') as file: writer = csv.writer(file) - writer.writerow([epoch + 1, avg_epoch_loss]) + writer.writerow([epoch + 1, epoch_loss]) # Optionally, save the finetuned model to a new directory finetuned_model_path = "aiuNN" From 81ceddff3b4470284d62981fb98a1055f3a3a6d1 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Fri, 21 Feb 2025 22:01:54 +0100 Subject: [PATCH 065/100] addded device gpu and fixed autocast --- src/aiunn/finetune.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 6ea2bdb..1f18e4d 100644 --- a/src/aiunn/finetune.py +++ 
b/src/aiunn/finetune.py @@ -8,7 +8,7 @@ import csv from tqdm import tqdm import base64 from torch.amp import autocast, GradScaler - +import torch class UpscaleDataset(Dataset): def __init__(self, parquet_files: list, transform=None): @@ -87,7 +87,7 @@ pretrained_model_path = "/root/vision/AIIA/AIIA-base-512" # Load the model using the AIIA.load class method (the implementation copied in your query) model = AIIABase.load(pretrained_model_path) -device = 'cpu' #torch.device("cuda" if torch.cuda.is_available() else "cpu") +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = model.to(device) from torch import nn, optim from torch.utils.data import DataLoader @@ -126,7 +126,7 @@ for epoch in range(num_epochs): optimizer.zero_grad() # Use automatic mixed precision context - with autocast(): + with autocast(device_type=device): outputs = model(low_res) loss = criterion(outputs, high_res) From f576a0d789cf49a15eb2d15647fd7381729a3b32 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sat, 22 Feb 2025 10:43:10 +0100 Subject: [PATCH 066/100] fixed autocast --- src/aiunn/finetune.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 1f18e4d..13cef3e 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -15,7 +15,7 @@ class UpscaleDataset(Dataset): combined_df = pd.DataFrame() for parquet_file in parquet_files: # Load data with chunking for memory efficiency - df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(5000) + df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(2500) combined_df = pd.concat([combined_df, df], ignore_index=True) # Validate data format @@ -126,7 +126,7 @@ for epoch in range(num_epochs): optimizer.zero_grad() # Use automatic mixed precision context - with autocast(device_type=device): + with autocast(device_type="cuda"): outputs = model(low_res) loss = criterion(outputs, high_res) From 90bcdd346a2f300655cf81fa0e2a7d8285608e6b Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sat, 22 Feb 2025 10:45:30 +0100 Subject: [PATCH 067/100] even more downsized --- src/aiunn/finetune.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 13cef3e..8ef0fa1 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -15,7 +15,7 @@ class UpscaleDataset(Dataset): combined_df = pd.DataFrame() for parquet_file in parquet_files: # Load data with chunking for memory efficiency - df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(2500) + df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(1250) combined_df = pd.concat([combined_df, df], ignore_index=True) # Validate data format @@ -94,7 +94,7 @@ from torch.utils.data import DataLoader # Create your dataset and dataloader dataset = UpscaleDataset(["/root/training_data/vision-dataset/image_upscaler.parquet", "/root/training_data/vision-dataset/image_vec_upscaler.parquet"], transform=transform) -data_loader = DataLoader(dataset, batch_size=2, shuffle=True) +data_loader = DataLoader(dataset, batch_size=1, shuffle=True) # Define a loss function and optimizer criterion = nn.MSELoss() From 933238a5302ccd8647aea2631f30513a2a9ba66c Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sat, 22 Feb 2025 17:24:02 +0100 Subject: [PATCH 068/100] added extra psampling layer [not working] --- src/aiunn/Upsampler.py | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) 
create mode 100644 src/aiunn/Upsampler.py diff --git a/src/aiunn/Upsampler.py b/src/aiunn/Upsampler.py new file mode 100644 index 0000000..cf95c8e --- /dev/null +++ b/src/aiunn/Upsampler.py @@ -0,0 +1,40 @@ +import torch.nn as nn +from aiia import AIIA + +class Upsampler(AIIA): + def __init__(self, base_model: AIIA): + super().__init__(base_model.config) + self.base_model = base_model + + # Upsample to double the spatial dimensions using bilinear interpolation + self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) + + # Update the base model's configuration to include the upsample layer details + print(self.base_model.config) + if hasattr(self.base_model, 'config'): + # Check if layers attribute exists, if not create it + if not hasattr(self.base_model.config, 'layers'): + setattr(self.base_model.config, 'layers', []) + + # Add the upsample layer configuration + current_layers = getattr(self.base_model.config, 'layers', []) + current_layers.append({ + 'name': 'Upsample', + 'type': 'nn.Upsample', + 'scale_factor': 2, + 'mode': 'bilinear', + 'align_corners': False + }) + setattr(self.base_model.config, 'layers', current_layers) + self.config = self.base_model.config + else: + self.config = {} + + def forward(self, x): + x = self.base_model(x) + x = self.upsample(x) + return x + +if __name__ == "__main__": + upsampler = Upsampler.load("test2") + print("Updated configuration:", upsampler.config.__dict__) From 2aba93caea15eaf006f5db5cace652518273181c Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sat, 22 Feb 2025 17:52:37 +0100 Subject: [PATCH 069/100] updated upsampler --- src/aiunn/Upsampler.py | 79 +++++++++++++++++++++++++++++------------- 1 file changed, 54 insertions(+), 25 deletions(-) diff --git a/src/aiunn/Upsampler.py b/src/aiunn/Upsampler.py index cf95c8e..af13c82 100644 --- a/src/aiunn/Upsampler.py +++ b/src/aiunn/Upsampler.py @@ -1,40 +1,69 @@ +import torch import torch.nn as nn -from aiia import AIIA +from aiia import AIIA, AIIAConfig, AIIABase class Upsampler(AIIA): - def __init__(self, base_model: AIIA): - super().__init__(base_model.config) + def init(self, base_model: AIIA): + # base_model must be a fully instantiated model (with a .config attribute) + super().init(base_model.config) self.base_model = base_model # Upsample to double the spatial dimensions using bilinear interpolation self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) # Update the base model's configuration to include the upsample layer details - print(self.base_model.config) - if hasattr(self.base_model, 'config'): - # Check if layers attribute exists, if not create it - if not hasattr(self.base_model.config, 'layers'): - setattr(self.base_model.config, 'layers', []) - - # Add the upsample layer configuration - current_layers = getattr(self.base_model.config, 'layers', []) - current_layers.append({ - 'name': 'Upsample', - 'type': 'nn.Upsample', - 'scale_factor': 2, - 'mode': 'bilinear', - 'align_corners': False - }) - setattr(self.base_model.config, 'layers', current_layers) - self.config = self.base_model.config - else: - self.config = {} + if not hasattr(self.base_model.config, 'layers'): + self.base_model.config.layers = [] + + self.base_model.config.layers.append({ + 'name': 'Upsample', + 'type': 'nn.Upsample', + 'scale_factor': 2, + 'mode': 'bilinear', + 'align_corners': False + }) + self.config = self.base_model.config def forward(self, x): x = self.base_model(x) x = self.upsample(x) return x -if __name__ == "__main__": - upsampler = 
Upsampler.load("test2") - print("Updated configuration:", upsampler.config.__dict__) + @classmethod + def load(cls, path: str): + """ + Override the default load method: + - First, load the base model (which includes its configuration and state_dict) + - Then instantiate the Upsampler with that base model + - Finally, load the Upsampler-specific state dictionary + """ + # Load the full base model from the given path. + # (Assuming AIIABase.load is implemented to load the base model correctly.) + base_model = AIIABase.load(path) + + # Create a new instance of Upsampler using the loaded base model. + instance = cls(base_model) + + # Choose your device mapping (cuda if available, otherwise cpu) + device = 'cuda' if torch.cuda.is_available() else 'cpu' + + # Load the saved state dictionary that contains weights for both the base model and upsample layer. + state_dict = torch.load(f"{path}/model.pth", map_location=device) + instance.load_state_dict(state_dict) + + return instance + +if __name__ == "main": + from aiia import AIIABase, AIIAConfig + # Create a configuration and build a base model. + config = AIIAConfig() + base_model = AIIABase("test2") + # Instantiate Upsampler from the base model (works correctly). + upsampler = Upsampler(base_model) + + # Save the model (both configuration and weights). + upsampler.save("test2") + + # Now load using the overridden load method; this will load the complete model. + upsampler_loaded = Upsampler.load("test2") + print("Updated configuration:", upsampler_loaded.config.__dict__) From 736886021c7858ed5d1cd7d013321a2640eba03b Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sat, 22 Feb 2025 17:53:06 +0100 Subject: [PATCH 070/100] updated finetuning script to work with Upsamler and added Early Stopping --- src/aiunn/finetune.py | 121 +++++++++++++++++++++++++----------------- 1 file changed, 71 insertions(+), 50 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 8ef0fa1..d785030 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -1,40 +1,63 @@ +import torch +import torch.nn as nn +import torch.optim as optim import pandas as pd import io -from PIL import Image, ImageFile -from torch.utils.data import Dataset -from torchvision import transforms -from aiia import AIIABase import csv -from tqdm import tqdm import base64 +from PIL import Image, ImageFile from torch.amp import autocast, GradScaler -import torch +from torch.utils.data import Dataset, DataLoader +from torchvision import transforms +from tqdm import tqdm +from aiia import AIIABase +from upsampler import Upsampler + +# Define a simple EarlyStopping class to monitor the epoch loss. +class EarlyStopping: + def __init__(self, patience=3, min_delta=0.001): + self.patience = patience # Number of epochs with no significant improvement before stopping. + self.min_delta = min_delta # Minimum change in loss required to count as an improvement. + self.best_loss = float('inf') + self.counter = 0 + self.early_stop = False + + def __call__(self, epoch_loss): + # If current loss is lower than the best loss minus min_delta, update best loss and reset counter. + if epoch_loss < self.best_loss - self.min_delta: + self.best_loss = epoch_loss + self.counter = 0 + else: + # No significant improvement: increment counter. + self.counter += 1 + if self.counter >= self.patience: + self.early_stop = True + return self.early_stop + +# UpscaleDataset to load and preprocess your data. 
class UpscaleDataset(Dataset): def __init__(self, parquet_files: list, transform=None): combined_df = pd.DataFrame() for parquet_file in parquet_files: - # Load data with chunking for memory efficiency + # Load data with head() to limit rows for memory efficiency. df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(1250) combined_df = pd.concat([combined_df, df], ignore_index=True) - # Validate data format + # Validate that each row has proper image formats. self.df = combined_df.apply(self._validate_row, axis=1) self.transform = transform self.failed_indices = set() def _validate_row(self, row): - """Ensure both images exist and have correct dimensions""" for col in ['image_512', 'image_1024']: if not isinstance(row[col], (bytes, str)): raise ValueError(f"Invalid data type in column {col}: {type(row[col])}") return row def _decode_image(self, data): - """Universal decoder handling both base64 strings and bytes""" try: if isinstance(data, str): - # Handle base64 encoded strings return base64.b64decode(data) elif isinstance(data, bytes): return data @@ -46,102 +69,100 @@ class UpscaleDataset(Dataset): return len(self.df) def __getitem__(self, idx): + # Skip indices that have previously failed. if idx in self.failed_indices: - return self[(idx + 1) % len(self)] # Skip failed indices - + return self[(idx + 1) % len(self)] try: row = self.df.iloc[idx] - - # Decode both images low_res_bytes = self._decode_image(row['image_512']) high_res_bytes = self._decode_image(row['image_1024']) - - # Load images with truncation handling ImageFile.LOAD_TRUNCATED_IMAGES = True low_res = Image.open(io.BytesIO(low_res_bytes)).convert('RGB') high_res = Image.open(io.BytesIO(high_res_bytes)).convert('RGB') - - # Validate image sizes + # Validate expected sizes if low_res.size != (512, 512) or high_res.size != (1024, 1024): raise ValueError(f"Size mismatch: LowRes={low_res.size}, HighRes={high_res.size}") - if self.transform: low_res = self.transform(low_res) high_res = self.transform(high_res) - return low_res, high_res - except Exception as e: print(f"\nError at index {idx}: {str(e)}") self.failed_indices.add(idx) - return self[(idx + 1) % len(self)] # Return next valid sample + return self[(idx + 1) % len(self)] -# Example transform: converting PIL images to tensors +# Define any transformations you require (e.g., converting PIL images to tensors) transform = transforms.Compose([ transforms.ToTensor(), ]) - -# Replace with your actual pretrained model path -pretrained_model_path = "/root/vision/AIIA/AIIA-base-512" - -# Load the model using the AIIA.load class method (the implementation copied in your query) -model = AIIABase.load(pretrained_model_path) +# Load the base AIIABase model and wrap it with the Upsampler. +pretrained_model_path = "/root/vision/AIIA/AIIA-base-512" +base_model = AIIABase.load(pretrained_model_path) +model = Upsampler(base_model) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = model.to(device) -from torch import nn, optim -from torch.utils.data import DataLoader -# Create your dataset and dataloader -dataset = UpscaleDataset(["/root/training_data/vision-dataset/image_upscaler.parquet", "/root/training_data/vision-dataset/image_vec_upscaler.parquet"], transform=transform) +# Create the dataset and dataloader. 
+dataset = UpscaleDataset([ + "/root/training_data/vision-dataset/image_upscaler.parquet", + "/root/training_data/vision-dataset/image_vec_upscaler.parquet" +], transform=transform) data_loader = DataLoader(dataset, batch_size=1, shuffle=True) -# Define a loss function and optimizer +# Define loss function and optimizer. criterion = nn.MSELoss() optimizer = optim.Adam(model.parameters(), lr=1e-4) num_epochs = 10 -model.train() # Set model in training mode - +model.train() +# Prepare a CSV file for logging training loss. csv_file = 'losses.csv' - -# Create or open the CSV file and write the header if it doesn't exist with open(csv_file, mode='a', newline='') as file: writer = csv.writer(file) - # Write the header only if the file is empty if file.tell() == 0: writer.writerow(['Epoch', 'Train Loss']) -# Create a gradient scaler (for scaling gradients when using AMP) +# Initialize automatic mixed precision scaler and EarlyStopping. scaler = GradScaler() +early_stopping = EarlyStopping(patience=3, min_delta=0.001) +# Training loop with early stopping. for epoch in range(num_epochs): epoch_loss = 0.0 - data_loader_with_progress = tqdm(data_loader, desc=f"Epoch {epoch + 1}") - for low_res, high_res in data_loader_with_progress: + progress_bar = tqdm(data_loader, desc=f"Epoch {epoch + 1}") + print(f"Epoch: {epoch}") + for low_res, high_res in progress_bar: low_res = low_res.to(device, non_blocking=True) high_res = high_res.to(device, non_blocking=True) - + optimizer.zero_grad() - # Use automatic mixed precision context - with autocast(device_type="cuda"): + # Use automatic mixed precision to speed up training on supported hardware. + with autocast(device_type=device.type): outputs = model(low_res) loss = criterion(outputs, high_res) scaler.scale(loss).backward() scaler.step(optimizer) scaler.update() - + epoch_loss += loss.item() + progress_bar.set_postfix({'loss': loss.item()}) + print(f"Epoch {epoch + 1}, Loss: {epoch_loss}") - - # Append the training loss to the CSV file + + # Record the loss in the CSV log. with open(csv_file, mode='a', newline='') as file: writer = csv.writer(file) writer.writerow([epoch + 1, epoch_loss]) -# Optionally, save the finetuned model to a new directory + # Check early stopping criteria. + if early_stopping(epoch_loss): + print(f"Early stopping triggered at epoch {epoch + 1} with loss {epoch_loss}") + break + +# Optionally, save the finetuned model using your library's save method. finetuned_model_path = "aiuNN" model.save(finetuned_model_path) From 2883e67e1ca209585db3478351d2ebbea79f5273 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sun, 23 Feb 2025 19:26:36 +0100 Subject: [PATCH 071/100] temporary commit for lowercase fix --- src/aiunn/finetune.py | 2 +- src/aiunn/{Upsampler.py => upsample.py} | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) rename src/aiunn/{Upsampler.py => upsample.py} (99%) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index d785030..9729208 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -12,7 +12,7 @@ from torchvision import transforms from tqdm import tqdm from aiia import AIIABase -from upsampler import Upsampler +from aiunn.upsample import Upsampler # Define a simple EarlyStopping class to monitor the epoch loss. 
class EarlyStopping: diff --git a/src/aiunn/Upsampler.py b/src/aiunn/upsample.py similarity index 99% rename from src/aiunn/Upsampler.py rename to src/aiunn/upsample.py index af13c82..d2473c3 100644 --- a/src/aiunn/Upsampler.py +++ b/src/aiunn/upsample.py @@ -2,6 +2,7 @@ import torch import torch.nn as nn from aiia import AIIA, AIIAConfig, AIIABase + class Upsampler(AIIA): def init(self, base_model: AIIA): # base_model must be a fully instantiated model (with a .config attribute) From e52a2a4b81d214df86495da54b0258460129818d Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sun, 23 Feb 2025 19:26:55 +0100 Subject: [PATCH 072/100] fixed spelling --- src/aiunn/finetune.py | 2 +- src/aiunn/{upsample.py => upsampler.py} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename src/aiunn/{upsample.py => upsampler.py} (100%) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 9729208..3f7b941 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -12,7 +12,7 @@ from torchvision import transforms from tqdm import tqdm from aiia import AIIABase -from aiunn.upsample import Upsampler +from aiunn.upsampler import Upsampler # Define a simple EarlyStopping class to monitor the epoch loss. class EarlyStopping: diff --git a/src/aiunn/upsample.py b/src/aiunn/upsampler.py similarity index 100% rename from src/aiunn/upsample.py rename to src/aiunn/upsampler.py From b74081975756bc9294abf45923986a13c1524f5b Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sun, 23 Feb 2025 19:27:34 +0100 Subject: [PATCH 073/100] fixed paths --- src/aiunn/finetune.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 3f7b941..d785030 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -12,7 +12,7 @@ from torchvision import transforms from tqdm import tqdm from aiia import AIIABase -from aiunn.upsampler import Upsampler +from upsampler import Upsampler # Define a simple EarlyStopping class to monitor the epoch loss. class EarlyStopping: From 048a8d98616fafa55263e7cf0c5c3889f06a1ae6 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sun, 23 Feb 2025 19:56:17 +0100 Subject: [PATCH 074/100] added extra config --- src/aiunn/config.py | 36 +++++++++++++++++++++++++++++++++ src/aiunn/upsampler.py | 45 ++++++++++++++---------------------------- 2 files changed, 51 insertions(+), 30 deletions(-) create mode 100644 src/aiunn/config.py diff --git a/src/aiunn/config.py b/src/aiunn/config.py new file mode 100644 index 0000000..1a7d3a5 --- /dev/null +++ b/src/aiunn/config.py @@ -0,0 +1,36 @@ +from aiia import AIIAConfig + + +class UpsamplerConfig(AIIAConfig): + def __init__( + self, + upsample_scale: int = 2, + upsample_mode: str = 'bilinear', + upsample_align_corners: bool = False, + layers=None, + **kwargs + ): + # Initialize base configuration. + super().__init__(**kwargs) + self.layers = layers if layers is not None else [] + + # Upsampler-specific parameters. + self.upsample_scale = upsample_scale + self.upsample_mode = upsample_mode + self.upsample_align_corners = upsample_align_corners + + # Automatically add the upsample layer details. + self.add_upsample_layer() + + def add_upsample_layer(self): + upsample_layer = { + 'name': 'Upsample', + 'type': 'nn.Upsample', + 'scale_factor': self.upsample_scale, + 'mode': self.upsample_mode, + 'align_corners': self.upsample_align_corners + } + # Add the upsample layer only if not already present. 
+ if not any(layer.get('name') == 'Upsample' for layer in self.layers): + self.layers.append(upsample_layer) + diff --git a/src/aiunn/upsampler.py b/src/aiunn/upsampler.py index d2473c3..fa3b595 100644 --- a/src/aiunn/upsampler.py +++ b/src/aiunn/upsampler.py @@ -3,27 +3,19 @@ import torch.nn as nn from aiia import AIIA, AIIAConfig, AIIABase +# Upsampler model that uses the configuration from the base model. class Upsampler(AIIA): - def init(self, base_model: AIIA): - # base_model must be a fully instantiated model (with a .config attribute) - super().init(base_model.config) + def __init__(self, base_model: AIIABase): + # Assume that base_model.config is an instance of UpsamplerConfig. + super().__init__(base_model.config) self.base_model = base_model - - # Upsample to double the spatial dimensions using bilinear interpolation - self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) - # Update the base model's configuration to include the upsample layer details - if not hasattr(self.base_model.config, 'layers'): - self.base_model.config.layers = [] - - self.base_model.config.layers.append({ - 'name': 'Upsample', - 'type': 'nn.Upsample', - 'scale_factor': 2, - 'mode': 'bilinear', - 'align_corners': False - }) - self.config = self.base_model.config + # Create the upsample layer using values from the configuration. + self.upsample = nn.Upsample( + scale_factor=self.config.upsample_scale, + mode=self.config.upsample_mode, + align_corners=self.config.upsample_align_corners + ) def forward(self, x): x = self.base_model(x) @@ -33,27 +25,20 @@ class Upsampler(AIIA): @classmethod def load(cls, path: str): """ - Override the default load method: - - First, load the base model (which includes its configuration and state_dict) - - Then instantiate the Upsampler with that base model - - Finally, load the Upsampler-specific state dictionary + Load the model: + - First, load the base model (including its configuration and state_dict). + - Then, wrap it with the Upsampler class. + - Finally, load the combined state dictionary. """ - # Load the full base model from the given path. - # (Assuming AIIABase.load is implemented to load the base model correctly.) base_model = AIIABase.load(path) - - # Create a new instance of Upsampler using the loaded base model. instance = cls(base_model) - # Choose your device mapping (cuda if available, otherwise cpu) device = 'cuda' if torch.cuda.is_available() else 'cpu' - - # Load the saved state dictionary that contains weights for both the base model and upsample layer. state_dict = torch.load(f"{path}/model.pth", map_location=device) instance.load_state_dict(state_dict) - return instance + if __name__ == "main": from aiia import AIIABase, AIIAConfig # Create a configuration and build a base model. From 86bb74835d36f5d41e705c487f154e2c74ffa3ca Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sun, 23 Feb 2025 19:59:18 +0100 Subject: [PATCH 075/100] updated Upsampler config --- src/aiunn/upsampler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/aiunn/upsampler.py b/src/aiunn/upsampler.py index fa3b595..efc5c6f 100644 --- a/src/aiunn/upsampler.py +++ b/src/aiunn/upsampler.py @@ -1,7 +1,7 @@ import torch import torch.nn as nn from aiia import AIIA, AIIAConfig, AIIABase - +from config import UpsamplerConfig # Upsampler model that uses the configuration from the base model. class Upsampler(AIIA): @@ -9,7 +9,7 @@ class Upsampler(AIIA): # Assume that base_model.config is an instance of UpsamplerConfig. 
super().__init__(base_model.config) self.base_model = base_model - + self.config = UpsamplerConfig(self.base_model.config) # Create the upsample layer using values from the configuration. self.upsample = nn.Upsample( scale_factor=self.config.upsample_scale, From 74c4adbff061cf9d97059b67abbfd3fd26c5b0f3 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sun, 23 Feb 2025 20:02:31 +0100 Subject: [PATCH 076/100] debug print --- src/aiunn/upsampler.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/aiunn/upsampler.py b/src/aiunn/upsampler.py index efc5c6f..bc48e97 100644 --- a/src/aiunn/upsampler.py +++ b/src/aiunn/upsampler.py @@ -11,6 +11,7 @@ class Upsampler(AIIA): self.base_model = base_model self.config = UpsamplerConfig(self.base_model.config) # Create the upsample layer using values from the configuration. + print(self.config.upsample_scale) self.upsample = nn.Upsample( scale_factor=self.config.upsample_scale, mode=self.config.upsample_mode, From ad27ea2fa2296be8e3f75a8db0e517704122c260 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sun, 23 Feb 2025 20:04:44 +0100 Subject: [PATCH 077/100] fixed to load kwargs --- src/aiunn/upsampler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiunn/upsampler.py b/src/aiunn/upsampler.py index bc48e97..d14133a 100644 --- a/src/aiunn/upsampler.py +++ b/src/aiunn/upsampler.py @@ -9,7 +9,7 @@ class Upsampler(AIIA): # Assume that base_model.config is an instance of UpsamplerConfig. super().__init__(base_model.config) self.base_model = base_model - self.config = UpsamplerConfig(self.base_model.config) + self.config = UpsamplerConfig(kwargs=self.base_model.config) # Create the upsample layer using values from the configuration. print(self.config.upsample_scale) self.upsample = nn.Upsample( From 7603ce8851fda41a8efb48925e3e60fd5e681616 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sun, 23 Feb 2025 20:09:28 +0100 Subject: [PATCH 078/100] massivly downsized data --- src/aiunn/finetune.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index d785030..9d83ba9 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -41,7 +41,7 @@ class UpscaleDataset(Dataset): combined_df = pd.DataFrame() for parquet_file in parquet_files: # Load data with head() to limit rows for memory efficiency. - df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(1250) + df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(500) combined_df = pd.concat([combined_df, df], ignore_index=True) # Validate that each row has proper image formats. From 86664b10a6f870950b8489d347f0137f6ebb83d1 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sun, 23 Feb 2025 22:26:48 +0100 Subject: [PATCH 079/100] improved vram usage? 
--- src/aiunn/finetune.py | 42 ++++++++++++++++++++++++++---------------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 9d83ba9..f0bb3e8 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -10,6 +10,8 @@ from torch.amp import autocast, GradScaler from torch.utils.data import Dataset, DataLoader from torchvision import transforms from tqdm import tqdm +from torch.utils.checkpoint import checkpoint +import gc from aiia import AIIABase from upsampler import Upsampler @@ -24,12 +26,10 @@ class EarlyStopping: self.early_stop = False def __call__(self, epoch_loss): - # If current loss is lower than the best loss minus min_delta, update best loss and reset counter. if epoch_loss < self.best_loss - self.min_delta: self.best_loss = epoch_loss self.counter = 0 else: - # No significant improvement: increment counter. self.counter += 1 if self.counter >= self.patience: self.early_stop = True @@ -40,11 +40,9 @@ class UpscaleDataset(Dataset): def __init__(self, parquet_files: list, transform=None): combined_df = pd.DataFrame() for parquet_file in parquet_files: - # Load data with head() to limit rows for memory efficiency. df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(500) combined_df = pd.concat([combined_df, df], ignore_index=True) - # Validate that each row has proper image formats. self.df = combined_df.apply(self._validate_row, axis=1) self.transform = transform self.failed_indices = set() @@ -69,7 +67,6 @@ class UpscaleDataset(Dataset): return len(self.df) def __getitem__(self, idx): - # Skip indices that have previously failed. if idx in self.failed_indices: return self[(idx + 1) % len(self)] try: @@ -79,7 +76,6 @@ class UpscaleDataset(Dataset): ImageFile.LOAD_TRUNCATED_IMAGES = True low_res = Image.open(io.BytesIO(low_res_bytes)).convert('RGB') high_res = Image.open(io.BytesIO(high_res_bytes)).convert('RGB') - # Validate expected sizes if low_res.size != (512, 512) or high_res.size != (1024, 1024): raise ValueError(f"Size mismatch: LowRes={low_res.size}, HighRes={high_res.size}") if self.transform: @@ -91,7 +87,7 @@ class UpscaleDataset(Dataset): self.failed_indices.add(idx) return self[(idx + 1) % len(self)] -# Define any transformations you require (e.g., converting PIL images to tensors) +# Define any transformations you require. transform = transforms.Compose([ transforms.ToTensor(), ]) @@ -100,15 +96,20 @@ transform = transforms.Compose([ pretrained_model_path = "/root/vision/AIIA/AIIA-base-512" base_model = AIIABase.load(pretrained_model_path) model = Upsampler(base_model) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -model = model.to(device) +# Move model to device using channels_last memory format. +model = model.to(device, memory_format=torch.channels_last) + +# Optional: flag to enable gradient checkpointing. +use_checkpointing = True # Create the dataset and dataloader. dataset = UpscaleDataset([ "/root/training_data/vision-dataset/image_upscaler.parquet", "/root/training_data/vision-dataset/image_vec_upscaler.parquet" ], transform=transform) -data_loader = DataLoader(dataset, batch_size=1, shuffle=True) +data_loader = DataLoader(dataset, batch_size=1, shuffle=True) # Consider adjusting num_workers if needed. # Define loss function and optimizer. 
criterion = nn.MSELoss() @@ -124,7 +125,6 @@ with open(csv_file, mode='a', newline='') as file: if file.tell() == 0: writer.writerow(['Epoch', 'Train Loss']) -# Initialize automatic mixed precision scaler and EarlyStopping. scaler = GradScaler() early_stopping = EarlyStopping(patience=3, min_delta=0.001) @@ -132,16 +132,20 @@ early_stopping = EarlyStopping(patience=3, min_delta=0.001) for epoch in range(num_epochs): epoch_loss = 0.0 progress_bar = tqdm(data_loader, desc=f"Epoch {epoch + 1}") - print(f"Epoch: {epoch}") + print(f"Epoch: {epoch + 1}") for low_res, high_res in progress_bar: - low_res = low_res.to(device, non_blocking=True) + # Move data to GPU with channels_last format where possible. + low_res = low_res.to(device, non_blocking=True).to(memory_format=torch.channels_last) high_res = high_res.to(device, non_blocking=True) optimizer.zero_grad() - # Use automatic mixed precision to speed up training on supported hardware. with autocast(device_type=device.type): - outputs = model(low_res) + if use_checkpointing: + # Wrap the forward pass with checkpointing to trade compute for memory. + outputs = checkpoint(lambda x: model(x), low_res) + else: + outputs = model(low_res) loss = criterion(outputs, high_res) scaler.scale(loss).backward() @@ -150,6 +154,13 @@ for epoch in range(num_epochs): epoch_loss += loss.item() progress_bar.set_postfix({'loss': loss.item()}) + + # Optionally delete variables to free memory. + del low_res, high_res, outputs, loss + + # Perform garbage collection and clear GPU cache after each epoch. + gc.collect() + torch.cuda.empty_cache() print(f"Epoch {epoch + 1}, Loss: {epoch_loss}") @@ -158,11 +169,10 @@ for epoch in range(num_epochs): writer = csv.writer(file) writer.writerow([epoch + 1, epoch_loss]) - # Check early stopping criteria. if early_stopping(epoch_loss): print(f"Early stopping triggered at epoch {epoch + 1} with loss {epoch_loss}") break -# Optionally, save the finetuned model using your library's save method. +# Optionally save the fine-tuned model. finetuned_model_path = "aiuNN" model.save(finetuned_model_path) From c88961aee7d5c3e92de4f7b78c814c860ae3773d Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sun, 23 Feb 2025 22:42:36 +0100 Subject: [PATCH 080/100] convert back to images --- src/aiunn/upsampler.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/aiunn/upsampler.py b/src/aiunn/upsampler.py index d14133a..e5f17ce 100644 --- a/src/aiunn/upsampler.py +++ b/src/aiunn/upsampler.py @@ -6,23 +6,24 @@ from config import UpsamplerConfig # Upsampler model that uses the configuration from the base model. class Upsampler(AIIA): def __init__(self, base_model: AIIABase): - # Assume that base_model.config is an instance of UpsamplerConfig. super().__init__(base_model.config) self.base_model = base_model self.config = UpsamplerConfig(kwargs=self.base_model.config) - # Create the upsample layer using values from the configuration. - print(self.config.upsample_scale) self.upsample = nn.Upsample( scale_factor=self.config.upsample_scale, mode=self.config.upsample_mode, align_corners=self.config.upsample_align_corners ) + # Add a final conversion layer to change channels from 512 to 3. + self.to_rgb = nn.Conv2d(in_channels=512, out_channels=3, kernel_size=1) def forward(self, x): x = self.base_model(x) x = self.upsample(x) + x = self.to_rgb(x) # Convert feature map to RGB image. 
return x + @classmethod def load(cls, path: str): """ From a51300c77ca7d78b979bae882c4aacc6a7aa4267 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sun, 23 Feb 2025 22:50:17 +0100 Subject: [PATCH 081/100] base model fix --- src/aiunn/upsampler.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/aiunn/upsampler.py b/src/aiunn/upsampler.py index e5f17ce..d2eeb82 100644 --- a/src/aiunn/upsampler.py +++ b/src/aiunn/upsampler.py @@ -14,16 +14,15 @@ class Upsampler(AIIA): mode=self.config.upsample_mode, align_corners=self.config.upsample_align_corners ) - # Add a final conversion layer to change channels from 512 to 3. - self.to_rgb = nn.Conv2d(in_channels=512, out_channels=3, kernel_size=1) + # Conversion layer: change from 512 channels to 3 channels. + self.to_rgb = nn.Conv2d(in_channels=self.base_model.config.hidden_size, out_channels=3, kernel_size=1) def forward(self, x): x = self.base_model(x) x = self.upsample(x) - x = self.to_rgb(x) # Convert feature map to RGB image. + x = self.to_rgb(x) # Ensures output has 3 channels. return x - - + @classmethod def load(cls, path: str): """ From fde8bdcb6f4aa72258736b1073a9ad1777b01cac Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sun, 23 Feb 2025 23:13:27 +0100 Subject: [PATCH 082/100] test --- src/aiunn/finetune.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index f0bb3e8..6a2f9cc 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -142,11 +142,13 @@ for epoch in range(num_epochs): with autocast(device_type=device.type): if use_checkpointing: - # Wrap the forward pass with checkpointing to trade compute for memory. - outputs = checkpoint(lambda x: model(x), low_res) + # Ensure the input tensor requires gradient so that checkpointing records the computation graph. + low_res.requires_grad_() + outputs = checkpoint(model, low_res) else: outputs = model(low_res) loss = criterion(outputs, high_res) + scaler.scale(loss).backward() scaler.step(optimizer) From e114023cbc2820e6ac519294499b5c0544440e9f Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Sun, 23 Feb 2025 23:21:31 +0100 Subject: [PATCH 083/100] go to cpu --- src/aiunn/finetune.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 6a2f9cc..80088bb 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -97,7 +97,7 @@ pretrained_model_path = "/root/vision/AIIA/AIIA-base-512" base_model = AIIABase.load(pretrained_model_path) model = Upsampler(base_model) -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +device = torch.device("cpu")#torch.device("cuda" if torch.cuda.is_available() else "cpu") # Move model to device using channels_last memory format. model = model.to(device, memory_format=torch.channels_last) From 8935fa5e13a746a20051e67b88fb3d281fc1d8f1 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Mon, 24 Feb 2025 14:57:55 +0100 Subject: [PATCH 084/100] doubled config --- src/aiunn/config.py | 3 ++- src/aiunn/upsampler.py | 60 +++++++++++++++++++++++++++++------------- 2 files changed, 43 insertions(+), 20 deletions(-) diff --git a/src/aiunn/config.py b/src/aiunn/config.py index 1a7d3a5..2d5926e 100644 --- a/src/aiunn/config.py +++ b/src/aiunn/config.py @@ -1,4 +1,6 @@ from aiia import AIIAConfig +import os +import json class UpsamplerConfig(AIIAConfig): @@ -33,4 +35,3 @@ class UpsamplerConfig(AIIAConfig): # Add the upsample layer only if not already present. 
if not any(layer.get('name') == 'Upsample' for layer in self.layers): self.layers.append(upsample_layer) - diff --git a/src/aiunn/upsampler.py b/src/aiunn/upsampler.py index d2eeb82..d2ae0f0 100644 --- a/src/aiunn/upsampler.py +++ b/src/aiunn/upsampler.py @@ -1,9 +1,12 @@ +import os import torch import torch.nn as nn +import warnings from aiia import AIIA, AIIAConfig, AIIABase from config import UpsamplerConfig +import warnings + -# Upsampler model that uses the configuration from the base model. class Upsampler(AIIA): def __init__(self, base_model: AIIABase): super().__init__(base_model.config) @@ -14,43 +17,62 @@ class Upsampler(AIIA): mode=self.config.upsample_mode, align_corners=self.config.upsample_align_corners ) - # Conversion layer: change from 512 channels to 3 channels. - self.to_rgb = nn.Conv2d(in_channels=self.base_model.config.hidden_size, out_channels=3, kernel_size=1) + # Conversion layer: change from hidden size channels to 3 channels. + self.to_rgb = nn.Conv2d( + in_channels=self.base_model.config.hidden_size, + out_channels=3, + kernel_size=1 + ) def forward(self, x): x = self.base_model(x) x = self.upsample(x) x = self.to_rgb(x) # Ensures output has 3 channels. return x - + @classmethod - def load(cls, path: str): - """ - Load the model: - - First, load the base model (including its configuration and state_dict). - - Then, wrap it with the Upsampler class. - - Finally, load the combined state dictionary. - """ - base_model = AIIABase.load(path) - instance = cls(base_model) + def load(cls, path, precision: str = None): + # Load the configuration from disk. + config = AIIAConfig.load(path) + # Reconstruct the base model from the loaded configuration. + base_model = AIIABase(config) + # Instantiate the Upsampler using the proper base model. + upsampler = cls(base_model) + # Load state dict and handle precision conversion if needed. device = 'cuda' if torch.cuda.is_available() else 'cpu' state_dict = torch.load(f"{path}/model.pth", map_location=device) - instance.load_state_dict(state_dict) - return instance + if precision is not None: + if precision.lower() == 'fp16': + dtype = torch.float16 + elif precision.lower() == 'bf16': + if device == 'cuda' and not torch.cuda.is_bf16_supported(): + warnings.warn("BF16 is not supported on this GPU. Falling back to FP16.") + dtype = torch.float16 + else: + dtype = torch.bfloat16 + else: + raise ValueError("Unsupported precision. Use 'fp16', 'bf16', or leave as None.") + + for key, param in state_dict.items(): + if torch.is_tensor(param): + state_dict[key] = param.to(dtype) + upsampler.load_state_dict(state_dict) + return upsampler -if __name__ == "main": + +if __name__ == "__main__": from aiia import AIIABase, AIIAConfig # Create a configuration and build a base model. config = AIIAConfig() - base_model = AIIABase("test2") + base_model = AIIABase(config) # Instantiate Upsampler from the base model (works correctly). upsampler = Upsampler(base_model) # Save the model (both configuration and weights). - upsampler.save("test2") + upsampler.save("hehe") # Now load using the overridden load method; this will load the complete model. 
- upsampler_loaded = Upsampler.load("test2") + upsampler_loaded = Upsampler.load("hehe", precision="bf16") print("Updated configuration:", upsampler_loaded.config.__dict__) From 33a3626b74cba879f0cf63d7b2ffe0687c082668 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Mon, 24 Feb 2025 15:16:05 +0100 Subject: [PATCH 085/100] fixed config --- src/aiunn/config.py | 27 ++++++++++++++++++++------- src/aiunn/upsampler.py | 6 +++++- 2 files changed, 25 insertions(+), 8 deletions(-) diff --git a/src/aiunn/config.py b/src/aiunn/config.py index 2d5926e..6fc72f1 100644 --- a/src/aiunn/config.py +++ b/src/aiunn/config.py @@ -1,27 +1,40 @@ from aiia import AIIAConfig -import os -import json class UpsamplerConfig(AIIAConfig): def __init__( self, + base_config=None, upsample_scale: int = 2, upsample_mode: str = 'bilinear', upsample_align_corners: bool = False, layers=None, **kwargs ): - # Initialize base configuration. - super().__init__(**kwargs) - self.layers = layers if layers is not None else [] + # Start with a single configuration dictionary. + config_data = {} + if base_config is not None: + # If base_config is an object with a to_dict method, use it. + if hasattr(base_config, "to_dict"): + config_data.update(base_config.to_dict()) + elif isinstance(base_config, dict): + config_data.update(base_config) + + # Update with any additional keyword arguments (if needed). + config_data.update(kwargs) + + # Initialize base AIIAConfig with a single merged configuration. + super().__init__(**config_data) # Upsampler-specific parameters. self.upsample_scale = upsample_scale self.upsample_mode = upsample_mode self.upsample_align_corners = upsample_align_corners - # Automatically add the upsample layer details. + # Use layers from the argument or initialize an empty list. + self.layers = layers if layers is not None else [] + + # Add the upsample layer details only once. self.add_upsample_layer() def add_upsample_layer(self): @@ -32,6 +45,6 @@ class UpsamplerConfig(AIIAConfig): 'mode': self.upsample_mode, 'align_corners': self.upsample_align_corners } - # Add the upsample layer only if not already present. + # Append the layer only if it isn’t already present. if not any(layer.get('name') == 'Upsample' for layer in self.layers): self.layers.append(upsample_layer) diff --git a/src/aiunn/upsampler.py b/src/aiunn/upsampler.py index d2ae0f0..657e6b3 100644 --- a/src/aiunn/upsampler.py +++ b/src/aiunn/upsampler.py @@ -11,7 +11,10 @@ class Upsampler(AIIA): def __init__(self, base_model: AIIABase): super().__init__(base_model.config) self.base_model = base_model - self.config = UpsamplerConfig(kwargs=self.base_model.config) + + # Pass the unified base configuration using the new parameter. + self.config = UpsamplerConfig(base_config=base_model.config) + self.upsample = nn.Upsample( scale_factor=self.config.upsample_scale, mode=self.config.upsample_mode, @@ -24,6 +27,7 @@ class Upsampler(AIIA): kernel_size=1 ) + def forward(self, x): x = self.base_model(x) x = self.upsample(x) From b89670ad6c1089c4fda8d7332a20fa8f34956a58 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Mon, 24 Feb 2025 15:21:08 +0100 Subject: [PATCH 086/100] loaded with bf16 --- src/aiunn/finetune.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 80088bb..19a2aaa 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -94,7 +94,7 @@ transform = transforms.Compose([ # Load the base AIIABase model and wrap it with the Upsampler. 
pretrained_model_path = "/root/vision/AIIA/AIIA-base-512" -base_model = AIIABase.load(pretrained_model_path) +base_model = AIIABase.load(pretrained_model_path, precision="bf16") model = Upsampler(base_model) device = torch.device("cpu")#torch.device("cuda" if torch.cuda.is_available() else "cpu") From 0fa14c4a6f120a94f7fdf6846a210c4fa1115e1f Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Mon, 24 Feb 2025 15:22:41 +0100 Subject: [PATCH 087/100] added gpu support --- src/aiunn/finetune.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 19a2aaa..ec18832 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -97,7 +97,7 @@ pretrained_model_path = "/root/vision/AIIA/AIIA-base-512" base_model = AIIABase.load(pretrained_model_path, precision="bf16") model = Upsampler(base_model) -device = torch.device("cpu")#torch.device("cuda" if torch.cuda.is_available() else "cpu") +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Move model to device using channels_last memory format. model = model.to(device, memory_format=torch.channels_last) From 443b9f5589d25575fc6a44b1a50aa04742ea968f Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Mon, 24 Feb 2025 15:44:28 +0100 Subject: [PATCH 088/100] use cpu --- src/aiunn/finetune.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index ec18832..59e3bf9 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -40,7 +40,7 @@ class UpscaleDataset(Dataset): def __init__(self, parquet_files: list, transform=None): combined_df = pd.DataFrame() for parquet_file in parquet_files: - df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(500) + df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(2500) combined_df = pd.concat([combined_df, df], ignore_index=True) self.df = combined_df.apply(self._validate_row, axis=1) @@ -97,7 +97,7 @@ pretrained_model_path = "/root/vision/AIIA/AIIA-base-512" base_model = AIIABase.load(pretrained_model_path, precision="bf16") model = Upsampler(base_model) -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +device = torch.device("cpu") # Move model to device using channels_last memory format. model = model.to(device, memory_format=torch.channels_last) @@ -109,7 +109,7 @@ dataset = UpscaleDataset([ "/root/training_data/vision-dataset/image_upscaler.parquet", "/root/training_data/vision-dataset/image_vec_upscaler.parquet" ], transform=transform) -data_loader = DataLoader(dataset, batch_size=1, shuffle=True) # Consider adjusting num_workers if needed. +data_loader = DataLoader(dataset, batch_size=2, shuffle=True) # Consider adjusting num_workers if needed. # Define loss function and optimizer. 
criterion = nn.MSELoss() From 2360e23cc776084521009b9161a7f46bf8443166 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Mon, 24 Feb 2025 16:20:45 +0100 Subject: [PATCH 089/100] downsized imageset --- src/aiunn/finetune.py | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 59e3bf9..64a4ac1 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -40,9 +40,11 @@ class UpscaleDataset(Dataset): def __init__(self, parquet_files: list, transform=None): combined_df = pd.DataFrame() for parquet_file in parquet_files: + # Load a subset (head(2500)) from each parquet file df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(2500) combined_df = pd.concat([combined_df, df], ignore_index=True) - + + # Validate rows (ensuring each value is bytes or str) self.df = combined_df.apply(self._validate_row, axis=1) self.transform = transform self.failed_indices = set() @@ -67,6 +69,7 @@ class UpscaleDataset(Dataset): return len(self.df) def __getitem__(self, idx): + # If previous call failed for this index, use a different index. if idx in self.failed_indices: return self[(idx + 1) % len(self)] try: @@ -74,10 +77,17 @@ class UpscaleDataset(Dataset): low_res_bytes = self._decode_image(row['image_512']) high_res_bytes = self._decode_image(row['image_1024']) ImageFile.LOAD_TRUNCATED_IMAGES = True - low_res = Image.open(io.BytesIO(low_res_bytes)).convert('RGB') - high_res = Image.open(io.BytesIO(high_res_bytes)).convert('RGB') - if low_res.size != (512, 512) or high_res.size != (1024, 1024): - raise ValueError(f"Size mismatch: LowRes={low_res.size}, HighRes={high_res.size}") + + # Open image bytes with Pillow and convert to RGBA + low_res = Image.open(io.BytesIO(low_res_bytes)).convert('RGBA') + high_res = Image.open(io.BytesIO(high_res_bytes)).convert('RGBA') + + # Resize the images to reduce VRAM usage. + # Using Image.ANTIALIAS which is equivalent to LANCZOS in current Pillow versions. + low_res = low_res.resize((384, 384), Image.ANTIALIAS) + high_res = high_res.resize((768, 768), Image.ANTIALIAS) + + # If a transform is provided (e.g. conversion to Tensor), apply it. if self.transform: low_res = self.transform(low_res) high_res = self.transform(high_res) @@ -97,7 +107,7 @@ pretrained_model_path = "/root/vision/AIIA/AIIA-base-512" base_model = AIIABase.load(pretrained_model_path, precision="bf16") model = Upsampler(base_model) -device = torch.device("cpu") +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Move model to device using channels_last memory format. model = model.to(device, memory_format=torch.channels_last) From 64fb7a199ded5e73289118592e8157465eccee68 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Mon, 24 Feb 2025 16:23:57 +0100 Subject: [PATCH 090/100] antialisaing not available --- src/aiunn/finetune.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 64a4ac1..e03960b 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -84,8 +84,8 @@ class UpscaleDataset(Dataset): # Resize the images to reduce VRAM usage. # Using Image.ANTIALIAS which is equivalent to LANCZOS in current Pillow versions. - low_res = low_res.resize((384, 384), Image.ANTIALIAS) - high_res = high_res.resize((768, 768), Image.ANTIALIAS) + low_res = low_res.resize((384, 384), Image.LANCZOS) + high_res = high_res.resize((768, 768), Image.LANCZOS) # If a transform is provided (e.g. 
conversion to Tensor), apply it. if self.transform: From ecb2694415b0a3bc33bd7bca200e37606aa57a6b Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Mon, 24 Feb 2025 16:28:18 +0100 Subject: [PATCH 091/100] update color channels --- src/aiunn/finetune.py | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index e03960b..03efb8f 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -77,16 +77,25 @@ class UpscaleDataset(Dataset): low_res_bytes = self._decode_image(row['image_512']) high_res_bytes = self._decode_image(row['image_1024']) ImageFile.LOAD_TRUNCATED_IMAGES = True - - # Open image bytes with Pillow and convert to RGBA - low_res = Image.open(io.BytesIO(low_res_bytes)).convert('RGBA') - high_res = Image.open(io.BytesIO(high_res_bytes)).convert('RGBA') - + # Open image bytes with Pillow and convert to RGBA first + low_res_rgba = Image.open(io.BytesIO(low_res_bytes)).convert('RGBA') + high_res_rgba = Image.open(io.BytesIO(high_res_bytes)).convert('RGBA') + + # Create a new RGB image with black background + low_res_rgb = Image.new("RGB", low_res_rgba.size, (0, 0, 0)) + high_res_rgb = Image.new("RGB", high_res_rgba.size, (0, 0, 0)) + + # Composite the original image over the black background + low_res_rgb.paste(low_res_rgba, mask=low_res_rgba.split()[3]) + high_res_rgb.paste(high_res_rgba, mask=high_res_rgba.split()[3]) + + # Now we have true 3-channel RGB images with transparent areas converted to black + low_res = low_res_rgb + high_res = high_res_rgb + # Resize the images to reduce VRAM usage. - # Using Image.ANTIALIAS which is equivalent to LANCZOS in current Pillow versions. low_res = low_res.resize((384, 384), Image.LANCZOS) high_res = high_res.resize((768, 768), Image.LANCZOS) - # If a transform is provided (e.g. conversion to Tensor), apply it. if self.transform: low_res = self.transform(low_res) @@ -96,7 +105,7 @@ class UpscaleDataset(Dataset): print(f"\nError at index {idx}: {str(e)}") self.failed_indices.add(idx) return self[(idx + 1) % len(self)] - + # Define any transformations you require. 
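# The paste-with-mask flattening a few lines above can also be expressed with
# Pillow's Image.alpha_composite. A minimal sketch; flatten_alpha is a
# hypothetical helper and is not referenced elsewhere in this script.
def flatten_alpha(img_rgba: Image.Image) -> Image.Image:
    # Composite the RGBA image over an opaque black background, then drop alpha.
    background = Image.new("RGBA", img_rgba.size, (0, 0, 0, 255))
    return Image.alpha_composite(background, img_rgba).convert("RGB")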
transform = transforms.Compose([ transforms.ToTensor(), From 153ead15b7cc7e33bdc4a714c2f71e10f4390036 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Mon, 24 Feb 2025 16:32:23 +0100 Subject: [PATCH 092/100] downsized image amount --- src/aiunn/finetune.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 03efb8f..0013ec9 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -41,7 +41,7 @@ class UpscaleDataset(Dataset): combined_df = pd.DataFrame() for parquet_file in parquet_files: # Load a subset (head(2500)) from each parquet file - df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(2500) + df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(1250) combined_df = pd.concat([combined_df, df], ignore_index=True) # Validate rows (ensuring each value is bytes or str) From 79e0aeb269277ec956313f11183c966d35cb7743 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Mon, 24 Feb 2025 16:39:25 +0100 Subject: [PATCH 093/100] downsized batchsize increased images --- src/aiunn/finetune.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py index 0013ec9..508a4e8 100644 --- a/src/aiunn/finetune.py +++ b/src/aiunn/finetune.py @@ -41,7 +41,7 @@ class UpscaleDataset(Dataset): combined_df = pd.DataFrame() for parquet_file in parquet_files: # Load a subset (head(2500)) from each parquet file - df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(1250) + df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(2500) combined_df = pd.concat([combined_df, df], ignore_index=True) # Validate rows (ensuring each value is bytes or str) @@ -128,7 +128,7 @@ dataset = UpscaleDataset([ "/root/training_data/vision-dataset/image_upscaler.parquet", "/root/training_data/vision-dataset/image_vec_upscaler.parquet" ], transform=transform) -data_loader = DataLoader(dataset, batch_size=2, shuffle=True) # Consider adjusting num_workers if needed. +data_loader = DataLoader(dataset, batch_size=1, shuffle=True) # Consider adjusting num_workers if needed. # Define loss function and optimizer. 
criterion = nn.MSELoss() From 0611a26564390c512d87b5d6c9f7a9db45939fe5 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Tue, 25 Feb 2025 15:04:38 +0100 Subject: [PATCH 094/100] updated project info --- pyproject.toml | 13 ++++++++----- setup.py | 17 +++-------------- 2 files changed, 11 insertions(+), 19 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 4c8acdb..b0e5e10 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,11 +4,14 @@ build-backend = "setuptools.build_meta" [project] name = "aiunn" -version = "0.1.0" -description = "A brief description of your package" +version = "0.1.1" +description = "Finetuner for image upscaling using AIIA" readme = "README.md" -requires-python = ">=3.7" +requires-python = ">=3.10" license = {file = "LICENSE"} authors = [ - {name = "Your Name", email = "your.email@example.com"}, -] \ No newline at end of file + {name = "Falko Habel", email = "falko.habel@gmx.de"}, +] + +[project.urls] +"Homepage" = "https://gitea.fabelous.app/Machine-Learning/aiuNN" \ No newline at end of file diff --git a/setup.py b/setup.py index aa53ea8..4a4a835 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ from setuptools import setup, find_packages setup( name="aiunn", - version="0.1.0", + version="0.1.1", packages=find_packages(where="src"), package_dir={"": "src"}, install_requires=[ @@ -10,16 +10,5 @@ setup( for line in open("requirements.txt") if line.strip() and not line.startswith("#") ], - author="Falko Habel", - author_email="falko.habel@gmx.de", - description="Finetuner for image upscaling using AIIA", - long_description=open("README.md").read(), - long_description_content_type="text/markdown", - url="https://github.com/yourusername/aiunn", - classifiers=[ - "Programming Language :: Python :: 3", - "License :: OSI Approved :: MIT License", - "Operating System :: OS Independent", - ], - python_requires=">=3.7", -) + python_requires=">=3.10", +) \ No newline at end of file From 09f196294c528a0f103a2dd99cbb75e57031aeeb Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Tue, 25 Feb 2025 15:47:10 +0100 Subject: [PATCH 095/100] new upsampler scripts --- example.py | 106 ++++++++++++++++++++++++++++++++++++++++++++ src/aiunn/config.py | 2 +- 2 files changed, 107 insertions(+), 1 deletion(-) create mode 100644 example.py diff --git a/example.py b/example.py new file mode 100644 index 0000000..8492d51 --- /dev/null +++ b/example.py @@ -0,0 +1,106 @@ +from aiia import AIIABase +from aiunn import aiuNN +from aiunn import aiuNNTrainer +import pandas as pd +import io +import base64 +from PIL import Image, ImageFile +from torch.utils.data import Dataset +from torchvision import transforms + + + +class UpscaleDataset(Dataset): + def __init__(self, parquet_files: list, transform=None, samples_per_file=2500): + combined_df = pd.DataFrame() + for parquet_file in parquet_files: + # Load a subset from each parquet file + df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(samples_per_file) + combined_df = pd.concat([combined_df, df], ignore_index=True) + + # Validate rows (ensuring each value is bytes or str) + self.df = combined_df.apply(self._validate_row, axis=1) + self.transform = transform + self.failed_indices = set() + + def _validate_row(self, row): + for col in ['image_512', 'image_1024']: + if not isinstance(row[col], (bytes, str)): + raise ValueError(f"Invalid data type in column {col}: {type(row[col])}") + return row + + def _decode_image(self, data): + try: + if isinstance(data, str): + return base64.b64decode(data) + elif 
isinstance(data, bytes): + return data + raise ValueError(f"Unsupported data type: {type(data)}") + except Exception as e: + raise RuntimeError(f"Decoding failed: {str(e)}") + + def __len__(self): + return len(self.df) + + def __getitem__(self, idx): + # If previous call failed for this index, use a different index + if idx in self.failed_indices: + return self[(idx + 1) % len(self)] + try: + row = self.df.iloc[idx] + low_res_bytes = self._decode_image(row['image_512']) + high_res_bytes = self._decode_image(row['image_1024']) + ImageFile.LOAD_TRUNCATED_IMAGES = True + # Open image bytes with Pillow and convert to RGBA first + low_res_rgba = Image.open(io.BytesIO(low_res_bytes)).convert('RGBA') + high_res_rgba = Image.open(io.BytesIO(high_res_bytes)).convert('RGBA') + + # Create a new RGB image with black background + low_res_rgb = Image.new("RGB", low_res_rgba.size, (0, 0, 0)) + high_res_rgb = Image.new("RGB", high_res_rgba.size, (0, 0, 0)) + + # Composite the original image over the black background + low_res_rgb.paste(low_res_rgba, mask=low_res_rgba.split()[3]) + high_res_rgb.paste(high_res_rgba, mask=high_res_rgba.split()[3]) + + # Now we have true 3-channel RGB images with transparent areas converted to black + low_res = low_res_rgb + high_res = high_res_rgb + + # Resize the images to reduce VRAM usage + low_res = low_res.resize((384, 384), Image.LANCZOS) + high_res = high_res.resize((768, 768), Image.LANCZOS) + + # If a transform is provided (e.g. conversion to Tensor), apply it + if self.transform: + low_res = self.transform(low_res) + high_res = self.transform(high_res) + return low_res, high_res + except Exception as e: + print(f"\nError at index {idx}: {str(e)}") + self.failed_indices.add(idx) + return self[(idx + 1) % len(self)] + + +if __name__ =="__main__": + # Load your base model and upscaler + pretrained_model_path = "/root/vision/AIIA/AIIA-base-512" + base_model = AIIABase.load(pretrained_model_path, precision="bf16") + upscaler = aiuNN(base_model) + + # Create trainer with your dataset class + trainer = aiuNNTrainer(upscaler, dataset_class=UpscaleDataset) + + # Load data using parameters for your dataset + dataset_params = { + 'parquet_files': [ + "/root/training_data/vision-dataset/image_upscaler.parquet", + "/root/training_data/vision-dataset/image_vec_upscaler.parquet" + ], + 'transform': transforms.Compose([transforms.ToTensor()]), + 'samples_per_file': 2500 + } + trainer.load_data(dataset_params=dataset_params, batch_size=1) + + # Fine-tune the model + trainer.finetune(output_path="trained_models") \ No newline at end of file diff --git a/src/aiunn/config.py b/src/aiunn/config.py index 6fc72f1..b56699b 100644 --- a/src/aiunn/config.py +++ b/src/aiunn/config.py @@ -1,7 +1,7 @@ from aiia import AIIAConfig -class UpsamplerConfig(AIIAConfig): +class aiuNNConfig(AIIAConfig): def __init__( self, base_config=None, From fcebc103b8a25eb1e0cc68c73ce81edf9d60e7a4 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Tue, 25 Feb 2025 15:47:21 +0100 Subject: [PATCH 096/100] fixed paths --- src/aiunn/__init__.py | 9 +- src/aiunn/finetune.py | 199 ------------ src/aiunn/finetune/__init__.py | 3 + src/aiunn/finetune/trainer.py | 289 ++++++++++++++++++ src/aiunn/inference/__init__.py | 0 src/aiunn/{ => inference}/inference.py | 2 + src/aiunn/upsampler/__init__.py | 5 + .../{upsampler.py => upsampler/aiunn.py} | 10 +- src/aiunn/{ => upsampler}/config.py | 0 9 files changed, 308 insertions(+), 209 deletions(-) delete mode 100644 src/aiunn/finetune.py create mode 100644 
src/aiunn/finetune/__init__.py create mode 100644 src/aiunn/finetune/trainer.py create mode 100644 src/aiunn/inference/__init__.py rename src/aiunn/{ => inference}/inference.py (99%) create mode 100644 src/aiunn/upsampler/__init__.py rename src/aiunn/{upsampler.py => upsampler/aiunn.py} (92%) rename src/aiunn/{ => upsampler}/config.py (100%) diff --git a/src/aiunn/__init__.py b/src/aiunn/__init__.py index a8013f3..2e6f021 100644 --- a/src/aiunn/__init__.py +++ b/src/aiunn/__init__.py @@ -1,6 +1,5 @@ +from .finetune.trainer import aiuNNTrainer +from .upsampler.aiunn import aiuNN +from .upsampler.config import aiuNNConfig -from .finetune import * -from .inference import UpScaler - -__version__ = "0.1.0" - +__version__ = "0.1.1" \ No newline at end of file diff --git a/src/aiunn/finetune.py b/src/aiunn/finetune.py deleted file mode 100644 index 508a4e8..0000000 --- a/src/aiunn/finetune.py +++ /dev/null @@ -1,199 +0,0 @@ -import torch -import torch.nn as nn -import torch.optim as optim -import pandas as pd -import io -import csv -import base64 -from PIL import Image, ImageFile -from torch.amp import autocast, GradScaler -from torch.utils.data import Dataset, DataLoader -from torchvision import transforms -from tqdm import tqdm -from torch.utils.checkpoint import checkpoint -import gc - -from aiia import AIIABase -from upsampler import Upsampler - -# Define a simple EarlyStopping class to monitor the epoch loss. -class EarlyStopping: - def __init__(self, patience=3, min_delta=0.001): - self.patience = patience # Number of epochs with no significant improvement before stopping. - self.min_delta = min_delta # Minimum change in loss required to count as an improvement. - self.best_loss = float('inf') - self.counter = 0 - self.early_stop = False - - def __call__(self, epoch_loss): - if epoch_loss < self.best_loss - self.min_delta: - self.best_loss = epoch_loss - self.counter = 0 - else: - self.counter += 1 - if self.counter >= self.patience: - self.early_stop = True - return self.early_stop - -# UpscaleDataset to load and preprocess your data. -class UpscaleDataset(Dataset): - def __init__(self, parquet_files: list, transform=None): - combined_df = pd.DataFrame() - for parquet_file in parquet_files: - # Load a subset (head(2500)) from each parquet file - df = pd.read_parquet(parquet_file, columns=['image_512', 'image_1024']).head(2500) - combined_df = pd.concat([combined_df, df], ignore_index=True) - - # Validate rows (ensuring each value is bytes or str) - self.df = combined_df.apply(self._validate_row, axis=1) - self.transform = transform - self.failed_indices = set() - - def _validate_row(self, row): - for col in ['image_512', 'image_1024']: - if not isinstance(row[col], (bytes, str)): - raise ValueError(f"Invalid data type in column {col}: {type(row[col])}") - return row - - def _decode_image(self, data): - try: - if isinstance(data, str): - return base64.b64decode(data) - elif isinstance(data, bytes): - return data - raise ValueError(f"Unsupported data type: {type(data)}") - except Exception as e: - raise RuntimeError(f"Decoding failed: {str(e)}") - - def __len__(self): - return len(self.df) - - def __getitem__(self, idx): - # If previous call failed for this index, use a different index. 
- if idx in self.failed_indices: - return self[(idx + 1) % len(self)] - try: - row = self.df.iloc[idx] - low_res_bytes = self._decode_image(row['image_512']) - high_res_bytes = self._decode_image(row['image_1024']) - ImageFile.LOAD_TRUNCATED_IMAGES = True - # Open image bytes with Pillow and convert to RGBA first - low_res_rgba = Image.open(io.BytesIO(low_res_bytes)).convert('RGBA') - high_res_rgba = Image.open(io.BytesIO(high_res_bytes)).convert('RGBA') - - # Create a new RGB image with black background - low_res_rgb = Image.new("RGB", low_res_rgba.size, (0, 0, 0)) - high_res_rgb = Image.new("RGB", high_res_rgba.size, (0, 0, 0)) - - # Composite the original image over the black background - low_res_rgb.paste(low_res_rgba, mask=low_res_rgba.split()[3]) - high_res_rgb.paste(high_res_rgba, mask=high_res_rgba.split()[3]) - - # Now we have true 3-channel RGB images with transparent areas converted to black - low_res = low_res_rgb - high_res = high_res_rgb - - # Resize the images to reduce VRAM usage. - low_res = low_res.resize((384, 384), Image.LANCZOS) - high_res = high_res.resize((768, 768), Image.LANCZOS) - # If a transform is provided (e.g. conversion to Tensor), apply it. - if self.transform: - low_res = self.transform(low_res) - high_res = self.transform(high_res) - return low_res, high_res - except Exception as e: - print(f"\nError at index {idx}: {str(e)}") - self.failed_indices.add(idx) - return self[(idx + 1) % len(self)] - -# Define any transformations you require. -transform = transforms.Compose([ - transforms.ToTensor(), -]) - -# Load the base AIIABase model and wrap it with the Upsampler. -pretrained_model_path = "/root/vision/AIIA/AIIA-base-512" -base_model = AIIABase.load(pretrained_model_path, precision="bf16") -model = Upsampler(base_model) - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -# Move model to device using channels_last memory format. -model = model.to(device, memory_format=torch.channels_last) - -# Optional: flag to enable gradient checkpointing. -use_checkpointing = True - -# Create the dataset and dataloader. -dataset = UpscaleDataset([ - "/root/training_data/vision-dataset/image_upscaler.parquet", - "/root/training_data/vision-dataset/image_vec_upscaler.parquet" -], transform=transform) -data_loader = DataLoader(dataset, batch_size=1, shuffle=True) # Consider adjusting num_workers if needed. - -# Define loss function and optimizer. -criterion = nn.MSELoss() -optimizer = optim.Adam(model.parameters(), lr=1e-4) - -num_epochs = 10 -model.train() - -# Prepare a CSV file for logging training loss. -csv_file = 'losses.csv' -with open(csv_file, mode='a', newline='') as file: - writer = csv.writer(file) - if file.tell() == 0: - writer.writerow(['Epoch', 'Train Loss']) - -scaler = GradScaler() -early_stopping = EarlyStopping(patience=3, min_delta=0.001) - -# Training loop with early stopping. -for epoch in range(num_epochs): - epoch_loss = 0.0 - progress_bar = tqdm(data_loader, desc=f"Epoch {epoch + 1}") - print(f"Epoch: {epoch + 1}") - for low_res, high_res in progress_bar: - # Move data to GPU with channels_last format where possible. - low_res = low_res.to(device, non_blocking=True).to(memory_format=torch.channels_last) - high_res = high_res.to(device, non_blocking=True) - - optimizer.zero_grad() - - with autocast(device_type=device.type): - if use_checkpointing: - # Ensure the input tensor requires gradient so that checkpointing records the computation graph. 
- low_res.requires_grad_() - outputs = checkpoint(model, low_res) - else: - outputs = model(low_res) - loss = criterion(outputs, high_res) - - - scaler.scale(loss).backward() - scaler.step(optimizer) - scaler.update() - - epoch_loss += loss.item() - progress_bar.set_postfix({'loss': loss.item()}) - - # Optionally delete variables to free memory. - del low_res, high_res, outputs, loss - - # Perform garbage collection and clear GPU cache after each epoch. - gc.collect() - torch.cuda.empty_cache() - - print(f"Epoch {epoch + 1}, Loss: {epoch_loss}") - - # Record the loss in the CSV log. - with open(csv_file, mode='a', newline='') as file: - writer = csv.writer(file) - writer.writerow([epoch + 1, epoch_loss]) - - if early_stopping(epoch_loss): - print(f"Early stopping triggered at epoch {epoch + 1} with loss {epoch_loss}") - break - -# Optionally save the fine-tuned model. -finetuned_model_path = "aiuNN" -model.save(finetuned_model_path) diff --git a/src/aiunn/finetune/__init__.py b/src/aiunn/finetune/__init__.py new file mode 100644 index 0000000..33239b1 --- /dev/null +++ b/src/aiunn/finetune/__init__.py @@ -0,0 +1,3 @@ +from .trainer import aiuNNTrainer + +__all__ = ["aiuNNTrainer" ] \ No newline at end of file diff --git a/src/aiunn/finetune/trainer.py b/src/aiunn/finetune/trainer.py new file mode 100644 index 0000000..01047b9 --- /dev/null +++ b/src/aiunn/finetune/trainer.py @@ -0,0 +1,289 @@ +import torch +import torch.nn as nn +import torch.optim as optim +import os +import csv +from torch.amp import autocast, GradScaler +from torch.utils.data import DataLoader +from tqdm import tqdm +from torch.utils.checkpoint import checkpoint +import gc +import time +import shutil + + +class EarlyStopping: + def __init__(self, patience=3, min_delta=0.001): + # Number of epochs with no significant improvement before stopping + # Minimum change in loss required to count as an improvement + self.patience = patience + self.min_delta = min_delta + self.best_loss = float('inf') + self.counter = 0 + self.early_stop = False + + def __call__(self, epoch_loss): + if epoch_loss < self.best_loss - self.min_delta: + self.best_loss = epoch_loss + self.counter = 0 + return True # Improved + else: + self.counter += 1 + if self.counter >= self.patience: + self.early_stop = True + return False # Not improved + +class aiuNNTrainer: + def __init__(self, upscaler_model, dataset_class=None): + """ + Initialize the upscaler trainer + + Args: + upscaler_model: The model to fine-tune + dataset_class: The dataset class to use for loading data (optional) + """ + self.model = upscaler_model + self.dataset_class = dataset_class + self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + self.model = self.model.to(self.device, memory_format=torch.channels_last) + self.criterion = nn.MSELoss() + self.optimizer = None + self.scaler = GradScaler() + self.best_loss = float('inf') + self.use_checkpointing = True + self.data_loader = None + self.validation_loader = None + self.log_dir = None + + def load_data(self, dataset_params=None, batch_size=1, validation_split=0.2, custom_train_dataset=None, custom_val_dataset=None): + """ + Load data using either a custom dataset instance or the dataset class provided at initialization + + Args: + dataset_params (dict/list): Parameters to pass to the dataset class constructor + batch_size (int): Batch size for training + validation_split (float): Proportion of data to use for validation + custom_train_dataset: A pre-instantiated dataset to use for training (optional) + 
custom_val_dataset: A pre-instantiated dataset to use for validation (optional) + """ + # If custom datasets are provided directly, use them + if custom_train_dataset is not None: + train_dataset = custom_train_dataset + val_dataset = custom_val_dataset if custom_val_dataset is not None else None + else: + # Otherwise instantiate dataset using the class and parameters + if self.dataset_class is None: + raise ValueError("No dataset class provided. Either provide a dataset class at initialization or custom datasets.") + + # Create dataset instance + dataset = self.dataset_class(**dataset_params if isinstance(dataset_params, dict) else {'parquet_files': dataset_params}) + + # Split into train and validation sets + dataset_size = len(dataset) + val_size = int(validation_split * dataset_size) + train_size = dataset_size - val_size + + train_dataset, val_dataset = torch.utils.data.random_split( + dataset, [train_size, val_size] + ) + + # Create data loaders + self.data_loader = DataLoader( + train_dataset, + batch_size=batch_size, + shuffle=True, + pin_memory=True + ) + + if val_dataset is not None: + self.validation_loader = DataLoader( + val_dataset, + batch_size=batch_size, + shuffle=False, + pin_memory=True + ) + print(f"Loaded {len(train_dataset)} training samples and {len(val_dataset)} validation samples") + else: + self.validation_loader = None + print(f"Loaded {len(train_dataset)} training samples (no validation set)") + + return self.data_loader, self.validation_loader + + def _setup_logging(self, output_path): + """Set up directory structure for logging and model checkpoints""" + timestamp = time.strftime("%Y%m%d-%H%M%S") + self.log_dir = os.path.join(output_path, f"training_run_{timestamp}") + os.makedirs(self.log_dir, exist_ok=True) + + # Create checkpoint directory + self.checkpoint_dir = os.path.join(self.log_dir, "checkpoints") + os.makedirs(self.checkpoint_dir, exist_ok=True) + + # Set up CSV logging + self.csv_path = os.path.join(self.log_dir, 'training_log.csv') + with open(self.csv_path, mode='w', newline='') as file: + writer = csv.writer(file) + if self.validation_loader: + writer.writerow(['Epoch', 'Train Loss', 'Validation Loss', 'Improved']) + else: + writer.writerow(['Epoch', 'Train Loss', 'Improved']) + + def _evaluate(self): + """Evaluate the model on validation data""" + if self.validation_loader is None: + return 0.0 + + self.model.eval() + val_loss = 0.0 + + with torch.no_grad(): + for low_res, high_res in tqdm(self.validation_loader, desc="Validating"): + low_res = low_res.to(self.device, non_blocking=True).to(memory_format=torch.channels_last) + high_res = high_res.to(self.device, non_blocking=True) + + with autocast(device_type=self.device.type): + outputs = self.model(low_res) + loss = self.criterion(outputs, high_res) + + val_loss += loss.item() + + del low_res, high_res, outputs, loss + + self.model.train() + return val_loss + + def _save_checkpoint(self, epoch, is_best=False): + """Save model checkpoint""" + checkpoint_path = os.path.join(self.checkpoint_dir, f"epoch_{epoch}.pt") + best_model_path = os.path.join(self.log_dir, "best_model") + + # Save the model checkpoint + self.model.save(checkpoint_path) + + # If this is the best model so far, copy it to best_model + if is_best: + if os.path.exists(best_model_path): + shutil.rmtree(best_model_path) + self.model.save(best_model_path) + print(f"Saved new best model with loss: {self.best_loss:.6f}") + + def finetune(self, output_path, epochs=10, lr=1e-4, patience=3, min_delta=0.001): + """ + Finetune the 
upscaler model + + Args: + output_path (str): Directory to save models and logs + epochs (int): Maximum number of training epochs + lr (float): Learning rate + patience (int): Early stopping patience + min_delta (float): Minimum improvement for early stopping + """ + # Check if data is loaded + if self.data_loader is None: + raise ValueError("Data not loaded. Call load_data first.") + + # Setup optimizer + self.optimizer = optim.Adam(self.model.parameters(), lr=lr) + + # Set up logging + self._setup_logging(output_path) + + # Setup early stopping + early_stopping = EarlyStopping(patience=patience, min_delta=min_delta) + + # Training loop + self.model.train() + + for epoch in range(epochs): + # Training phase + epoch_loss = 0.0 + progress_bar = tqdm(self.data_loader, desc=f"Epoch {epoch + 1}/{epochs}") + + for low_res, high_res in progress_bar: + # Move data to GPU with channels_last format where possible + low_res = low_res.to(self.device, non_blocking=True).to(memory_format=torch.channels_last) + high_res = high_res.to(self.device, non_blocking=True) + + self.optimizer.zero_grad() + + with autocast(device_type=self.device.type): + if self.use_checkpointing: + # Ensure the input tensor requires gradient so that checkpointing records the computation graph + low_res.requires_grad_() + outputs = checkpoint(self.model, low_res) + else: + outputs = self.model(low_res) + loss = self.criterion(outputs, high_res) + + self.scaler.scale(loss).backward() + self.scaler.step(self.optimizer) + self.scaler.update() + + epoch_loss += loss.item() + progress_bar.set_postfix({'loss': loss.item()}) + + # Optionally delete variables to free memory + del low_res, high_res, outputs, loss + + # Calculate average epoch loss + avg_train_loss = epoch_loss / len(self.data_loader) + + # Validation phase (if validation loader exists) + if self.validation_loader: + val_loss = self._evaluate() / len(self.validation_loader) + is_improved = val_loss < self.best_loss + if is_improved: + self.best_loss = val_loss + + # Log results + print(f"Epoch {epoch + 1}/{epochs}, Train Loss: {avg_train_loss:.6f}, Val Loss: {val_loss:.6f}") + with open(self.csv_path, mode='a', newline='') as file: + writer = csv.writer(file) + writer.writerow([epoch + 1, avg_train_loss, val_loss, "Yes" if is_improved else "No"]) + else: + # If no validation, use training loss for improvement tracking + is_improved = avg_train_loss < self.best_loss + if is_improved: + self.best_loss = avg_train_loss + + # Log results + print(f"Epoch {epoch + 1}/{epochs}, Train Loss: {avg_train_loss:.6f}") + with open(self.csv_path, mode='a', newline='') as file: + writer = csv.writer(file) + writer.writerow([epoch + 1, avg_train_loss, "Yes" if is_improved else "No"]) + + # Save checkpoint + self._save_checkpoint(epoch + 1, is_best=is_improved) + + # Perform garbage collection and clear GPU cache after each epoch + gc.collect() + torch.cuda.empty_cache() + + # Check early stopping + if early_stopping(val_loss if self.validation_loader else avg_train_loss): + print(f"Early stopping triggered at epoch {epoch + 1}") + break + + return self.best_loss + + def save(self, output_path=None): + """ + Save the best model to the specified path + + Args: + output_path (str, optional): Path to save the model. If None, uses the best model from training. 
+ """ + if output_path is None and self.log_dir is not None: + best_model_path = os.path.join(self.log_dir, "best_model") + if os.path.exists(best_model_path): + print(f"Best model already saved at {best_model_path}") + return best_model_path + else: + output_path = os.path.join(self.log_dir, "final_model") + + if output_path is None: + raise ValueError("No output path specified and no training has been done yet.") + + self.model.save(output_path) + print(f"Model saved to {output_path}") + return output_path \ No newline at end of file diff --git a/src/aiunn/inference/__init__.py b/src/aiunn/inference/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/aiunn/inference.py b/src/aiunn/inference/inference.py similarity index 99% rename from src/aiunn/inference.py rename to src/aiunn/inference/inference.py index 991b708..6ed5b0d 100644 --- a/src/aiunn/inference.py +++ b/src/aiunn/inference/inference.py @@ -31,6 +31,8 @@ class Upscaler(nn.Module): def forward(self, x): features = self.base_model(x) return self.last_transform(features) + + class ImageUpscaler: def __init__(self, model_path: str, device: str = 'cuda' if torch.cuda.is_available() else 'cpu'): self.device = torch.device(device) diff --git a/src/aiunn/upsampler/__init__.py b/src/aiunn/upsampler/__init__.py new file mode 100644 index 0000000..e179503 --- /dev/null +++ b/src/aiunn/upsampler/__init__.py @@ -0,0 +1,5 @@ +from .aiunn import aiuNN +from .config import aiuNNConfig + + +__all__ = ["aiuNN", "aiuNNConfig"] \ No newline at end of file diff --git a/src/aiunn/upsampler.py b/src/aiunn/upsampler/aiunn.py similarity index 92% rename from src/aiunn/upsampler.py rename to src/aiunn/upsampler/aiunn.py index 657e6b3..ca7ba2b 100644 --- a/src/aiunn/upsampler.py +++ b/src/aiunn/upsampler/aiunn.py @@ -3,17 +3,17 @@ import torch import torch.nn as nn import warnings from aiia import AIIA, AIIAConfig, AIIABase -from config import UpsamplerConfig +from .config import aiuNNConfig import warnings -class Upsampler(AIIA): +class aiuNN(AIIA): def __init__(self, base_model: AIIABase): super().__init__(base_model.config) self.base_model = base_model # Pass the unified base configuration using the new parameter. - self.config = UpsamplerConfig(base_config=base_model.config) + self.config = aiuNNConfig(base_config=base_model.config) self.upsample = nn.Upsample( scale_factor=self.config.upsample_scale, @@ -72,11 +72,11 @@ if __name__ == "__main__": config = AIIAConfig() base_model = AIIABase(config) # Instantiate Upsampler from the base model (works correctly). - upsampler = Upsampler(base_model) + upsampler = aiuNN(base_model) # Save the model (both configuration and weights). upsampler.save("hehe") # Now load using the overridden load method; this will load the complete model. 
- upsampler_loaded = Upsampler.load("hehe", precision="bf16") + upsampler_loaded = aiuNN.load("hehe", precision="bf16") print("Updated configuration:", upsampler_loaded.config.__dict__) diff --git a/src/aiunn/config.py b/src/aiunn/upsampler/config.py similarity index 100% rename from src/aiunn/config.py rename to src/aiunn/upsampler/config.py From 6aad1a38f5bc0435bef752a627d6624d87583d72 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Tue, 25 Feb 2025 15:47:50 +0100 Subject: [PATCH 097/100] increased trainingset amount --- example.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/example.py b/example.py index 8492d51..b2c26c8 100644 --- a/example.py +++ b/example.py @@ -98,7 +98,7 @@ if __name__ =="__main__": "/root/training_data/vision-dataset/image_vec_upscaler.parquet" ], 'transform': transforms.Compose([transforms.ToTensor()]), - 'samples_per_file': 2500 + 'samples_per_file': 5000 } trainer.load_data(dataset_params=dataset_params, batch_size=1) From 2e1556d30651f5fdb1e96f9b69ecaa1b97497b55 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Tue, 25 Feb 2025 19:36:58 +0100 Subject: [PATCH 098/100] added aiunn inference --- src/aiunn/__init__.py | 1 + src/aiunn/inference/__init__.py | 3 + src/aiunn/inference/inference.py | 274 +++++++++++++++++++++++-------- 3 files changed, 206 insertions(+), 72 deletions(-) diff --git a/src/aiunn/__init__.py b/src/aiunn/__init__.py index 2e6f021..a097c72 100644 --- a/src/aiunn/__init__.py +++ b/src/aiunn/__init__.py @@ -1,5 +1,6 @@ from .finetune.trainer import aiuNNTrainer from .upsampler.aiunn import aiuNN from .upsampler.config import aiuNNConfig +from .inference.inference import aiuNNInference __version__ = "0.1.1" \ No newline at end of file diff --git a/src/aiunn/inference/__init__.py b/src/aiunn/inference/__init__.py index e69de29..798de24 100644 --- a/src/aiunn/inference/__init__.py +++ b/src/aiunn/inference/__init__.py @@ -0,0 +1,3 @@ +from .inference import aiuNNInference + +__all__ = ["aiuNNInference"] \ No newline at end of file diff --git a/src/aiunn/inference/inference.py b/src/aiunn/inference/inference.py index 6ed5b0d..d288931 100644 --- a/src/aiunn/inference/inference.py +++ b/src/aiunn/inference/inference.py @@ -1,96 +1,226 @@ +import os import torch -from albumentations import Compose, Normalize -from albumentations.pytorch import ToTensorV2 -from PIL import Image import numpy as np +from PIL import Image import io -from torch import nn -from aiia import AIIABase +from typing import Union, Optional, Tuple, List +from ..upsampler.aiunn import aiuNN -class Upscaler(nn.Module): +class aiuNNInference: """ - Transforms the base model's final feature map using a transposed convolution. - The base model produces a feature map of size 512x512. - This layer upsamples by a factor of 2 (yielding 1024x1024) and maps the hidden features - to the output channels using a single ConvTranspose2d layer. + Inference class for aiuNN upsampling model. + Handles model loading, image upscaling, and output processing. """ - def __init__(self, base_model: AIIABase): - super(Upscaler, self).__init__() - self.base_model = base_model - # Instead of adding separate upsampling and convolutional layers, we use a ConvTranspose2d layer. 
- self.last_transform = nn.ConvTranspose2d( - in_channels=base_model.config.hidden_size, - out_channels=base_model.config.num_channels, - kernel_size=base_model.config.kernel_size, - stride=2, - padding=1, - output_padding=1 - ) + def __init__(self, model_path: str, precision: Optional[str] = None, device: Optional[str] = None): + """ + Initialize the inference class by loading the aiuNN model. - def forward(self, x): - features = self.base_model(x) - return self.last_transform(features) + Args: + model_path: Path to the saved model directory + precision: Optional precision setting ('fp16', 'bf16', or None for default) + device: Optional device specification ('cuda', 'cpu', or None for auto-detection) + """ + + + # Set device + if device is None: + self.device = 'cuda' if torch.cuda.is_available() else 'cpu' + else: + self.device = device + + # Load the model with specified precision + self.model = aiuNN.load(model_path, precision=precision) + self.model.to(self.device) + self.model.eval() + + # Store configuration for reference + self.config = self.model.config + + def preprocess_image(self, image: Union[str, Image.Image, np.ndarray, torch.Tensor]) -> torch.Tensor: + """ + Preprocess the input image to match model requirements. + + Args: + image: Input image as file path, PIL Image, numpy array, or torch tensor + + Returns: + Preprocessed tensor ready for model input + """ + # Handle different input types + if isinstance(image, str): + # Load from file path + image = Image.open(image).convert('RGB') + + if isinstance(image, Image.Image): + # Convert PIL Image to tensor + image = np.array(image) + image = image.transpose(2, 0, 1) # HWC to CHW + image = torch.from_numpy(image).float() + + if isinstance(image, np.ndarray): + # Convert numpy array to tensor + if image.shape[0] == 3: + # Already in CHW format + pass + elif image.shape[-1] == 3: + # HWC to CHW format + image = image.transpose(2, 0, 1) + image = torch.from_numpy(image).float() + + # Normalize to [0, 1] range if needed + if image.max() > 1.0: + image = image / 255.0 + + # Add batch dimension if not present + if len(image.shape) == 3: + image = image.unsqueeze(0) + + # Move to device + image = image.to(self.device) + + return image - -class ImageUpscaler: - def __init__(self, model_path: str, device: str = 'cuda' if torch.cuda.is_available() else 'cpu'): - self.device = torch.device(device) - self.model = self.load_model(model_path) - self.model.eval() # Set to evaluation mode + def postprocess_tensor(self, tensor: torch.Tensor) -> Image.Image: + """ + Convert output tensor to PIL Image. - # Define preprocessing transformations - self.preprocess = Compose([ - Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), - ToTensorV2() - ]) + Args: + tensor: Output tensor from model + + Returns: + Processed PIL Image + """ + # Move to CPU and convert to numpy + output = tensor.detach().cpu().squeeze(0).numpy() + + # Ensure proper range [0, 255] + output = np.clip(output * 255, 0, 255).astype(np.uint8) + + # Convert from CHW to HWC for PIL + output = output.transpose(1, 2, 0) + + # Create PIL Image + return Image.fromarray(output) - def load_model(self, model_path: str): + @torch.no_grad() + def upscale(self, image: Union[str, Image.Image, np.ndarray, torch.Tensor]) -> Image.Image: """ - Load the trained model from the specified path. + Upscale an image using the aiuNN model. 
+ + Args: + image: Input image to upscale + + Returns: + Upscaled image as PIL Image """ - base_model = AIIABase.load(model_path) # Load base model - model = Upscaler(base_model) # Wrap with Upscaler - return model.to(self.device) + # Preprocess input + input_tensor = self.preprocess_image(image) + + # Run inference + output_tensor = self.model(input_tensor) + + # Postprocess output + upscaled_image = self.postprocess_tensor(output_tensor) + + return upscaled_image - def preprocess_image(self, image: Image.Image): + def save(self, image: Image.Image, output_path: str, format: Optional[str] = None) -> None: """ - Preprocess input image for inference. + Save the upscaled image to a file. + + Args: + image: PIL Image to save + output_path: Path where the image should be saved + format: Optional format override (e.g., 'PNG', 'JPEG') """ - if not isinstance(image, Image.Image): - raise ValueError("Input must be a PIL.Image.Image object") + # Create directory if it doesn't exist + os.makedirs(os.path.dirname(os.path.abspath(output_path)), exist_ok=True) - # Convert to numpy array and apply preprocessing - image_array = np.array(image) - augmented = self.preprocess(image=image_array) + # Save the image + image.save(output_path, format=format) - # Add batch dimension and move to device - return augmented['image'].unsqueeze(0).to(self.device) + def convert_to_binary(self, image: Image.Image, format: str = 'PNG') -> bytes: + """ + Convert the image to binary data. + + Args: + image: PIL Image to convert + format: Image format to use for binary conversion + + Returns: + Binary representation of the image + """ + # Use BytesIO to convert to binary + binary_output = io.BytesIO() + image.save(binary_output, format=format) + + # Get the binary data + binary_data = binary_output.getvalue() + + return binary_data - def postprocess_image(self, output_tensor: torch.Tensor): + def process_batch(self, + images: List[Union[str, Image.Image]], + output_dir: Optional[str] = None, + save_format: str = 'PNG', + return_binary: bool = False) -> Union[List[Image.Image], List[bytes], None]: """ - Convert output tensor back to an image. - """ - output_tensor = output_tensor.squeeze(0).cpu() # Remove batch dimension - output_array = (output_tensor * 0.5 + 0.5).clamp(0, 1).numpy() * 255 - output_array = output_array.transpose(1, 2, 0).astype(np.uint8) # CHW -> HWC - return Image.fromarray(output_array) - - def upscale_image(self, input_image_path: str): - """ - Perform upscaling on an input image. - """ - input_image = Image.open(input_image_path).convert('RGB') # Ensure RGB format - preprocessed_image = self.preprocess_image(input_image) + Process multiple images in batch. 
- with torch.no_grad(): - with torch.amp.autocast(device_type="cuda"): - output_tensor = self.model(preprocessed_image) + Args: + images: List of input images (paths or PIL Images) + output_dir: Optional directory to save results + save_format: Format to use when saving images + return_binary: Whether to return binary data instead of PIL Images + + Returns: + List of processed images or binary data, or None if only saving + """ + results = [] - return self.postprocess_image(output_tensor) + for i, img in enumerate(images): + # Upscale the image + upscaled = self.upscale(img) + + # Save if output directory is provided + if output_dir: + # Extract filename if input is a path + if isinstance(img, str): + filename = os.path.basename(img) + base, _ = os.path.splitext(filename) + else: + base = f"upscaled_{i}" + + output_path = os.path.join(output_dir, f"{base}.{save_format.lower()}") + self.save(upscaled, output_path, format=save_format) + + # Add to results based on return type + if return_binary: + results.append(self.convert_to_binary(upscaled, format=save_format)) + else: + results.append(upscaled) + + return results if (not output_dir or return_binary or not save_format) else None -# Example usage: -upscaler = ImageUpscaler(model_path="/root/vision/aiuNN/best_model") -upscaled_image = upscaler.upscale_image("/root/vision/aiuNN/input.jpg") -upscaled_image.save("upscaled_image.jpg") +# Example usage (can be removed) +if __name__ == "__main__": + # Initialize inference with a model path + inferencer = aiuNNInference("path/to/model", precision="bf16") + + # Upscale a single image + upscaled_image = inferencer.upscale("input_image.jpg") + + # Save the result + inferencer.save(upscaled_image, "output_image.png") + + # Convert to binary + binary_data = inferencer.convert_to_binary(upscaled_image) + + # Process a batch of images + inferencer.process_batch( + ["image1.jpg", "image2.jpg"], + output_dir="output_folder", + save_format="PNG" + ) \ No newline at end of file From 7fd2af6f1242c53af86ca8a61cbc5a1ea43382a3 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Tue, 25 Feb 2025 19:47:18 +0100 Subject: [PATCH 099/100] improved early stoppping --- example.py | 2 +- src/aiunn/finetune/trainer.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/example.py b/example.py index b2c26c8..d03e931 100644 --- a/example.py +++ b/example.py @@ -11,7 +11,7 @@ from torchvision import transforms class UpscaleDataset(Dataset): - def __init__(self, parquet_files: list, transform=None, samples_per_file=2500): + def __init__(self, parquet_files: list, transform=None, samples_per_file=5000): combined_df = pd.DataFrame() for parquet_file in parquet_files: # Load a subset from each parquet file diff --git a/src/aiunn/finetune/trainer.py b/src/aiunn/finetune/trainer.py index 01047b9..b94d57d 100644 --- a/src/aiunn/finetune/trainer.py +++ b/src/aiunn/finetune/trainer.py @@ -260,7 +260,8 @@ class aiuNNTrainer: torch.cuda.empty_cache() # Check early stopping - if early_stopping(val_loss if self.validation_loader else avg_train_loss): + early_stopping(val_loss if self.validation_loader else avg_train_loss) + if early_stopping.early_stop: print(f"Early stopping triggered at epoch {epoch + 1}") break From ed80b0b06852e3450fe3d44c2afe57a9cabd66c5 Mon Sep 17 00:00:00 2001 From: Falko Habel Date: Wed, 26 Feb 2025 12:33:25 +0100 Subject: [PATCH 100/100] improved quality --- example.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/example.py b/example.py index d03e931..58470b0 100644 
--- a/example.py +++ b/example.py @@ -11,7 +11,7 @@ from torchvision import transforms class UpscaleDataset(Dataset): - def __init__(self, parquet_files: list, transform=None, samples_per_file=5000): + def __init__(self, parquet_files: list, transform=None, samples_per_file=10_000): combined_df = pd.DataFrame() for parquet_file in parquet_files: # Load a subset from each parquet file @@ -68,8 +68,8 @@ class UpscaleDataset(Dataset): high_res = high_res_rgb # Resize the images to reduce VRAM usage - low_res = low_res.resize((384, 384), Image.LANCZOS) - high_res = high_res.resize((768, 768), Image.LANCZOS) + low_res = low_res.resize((410, 410), Image.LANCZOS) + high_res = high_res.resize((820, 820), Image.LANCZOS) # If a transform is provided (e.g. conversion to Tensor), apply it if self.transform: