Updated README to feature TF support
Gitea Actions For AIIA / Explore-Gitea-Actions (push) Successful in 39s
Details
Gitea Actions For AIIA / Explore-Gitea-Actions (push) Successful in 39s
Details
This commit is contained in:
parent
38530d5d44
commit
f3e59a6586
23
README.md
23
README.md
|
@ -26,15 +26,22 @@ pip install git+https://gitea.fabelous.app/Machine-Learning/aiuNN.git
|
||||||
Here's a basic example of how to use `aiuNN` for image upscaling:
|
Here's a basic example of how to use `aiuNN` for image upscaling:
|
||||||
|
|
||||||
```python src/main.py
|
```python src/main.py
|
||||||
from aiia import AIIABase
|
from aiia import AIIABase, AIIAConfig
|
||||||
from aiunn import aiuNN, aiuNNTrainer
|
from aiunn import aiuNN, aiuNNTrainer
|
||||||
import pandas as pd
|
import pandas as pd
|
||||||
from torchvision import transforms
|
from torchvision import transforms
|
||||||
|
|
||||||
|
# Create a configuration and build a base model.
|
||||||
|
config = AIIAConfig()
|
||||||
|
ai_config = aiuNNConfig()
|
||||||
|
|
||||||
|
base_model = AIIABase(config)
|
||||||
|
upscaler = aiuNN(config=ai_config)
|
||||||
|
|
||||||
# Load your base model and upscaler
|
# Load your base model and upscaler
|
||||||
pretrained_model_path = "path/to/aiia/model"
|
pretrained_model_path = "path/to/aiia/model"
|
||||||
base_model = AIIABase.load(pretrained_model_path, precision="bf16")
|
base_model = AIIABase.from_pretrained(pretrained_model_path)
|
||||||
upscaler = aiuNN(base_model)
|
upscaler.load_base_model(base_model)
|
||||||
|
|
||||||
# Create trainer with your dataset class
|
# Create trainer with your dataset class
|
||||||
trainer = aiuNNTrainer(upscaler, dataset_class=UpscaleDataset)
|
trainer = aiuNNTrainer(upscaler, dataset_class=UpscaleDataset)
|
||||||
|
@ -105,19 +112,19 @@ class UpscaleDataset(Dataset):
|
||||||
# Open image bytes with Pillow and convert to RGBA first
|
# Open image bytes with Pillow and convert to RGBA first
|
||||||
low_res_rgba = Image.open(io.BytesIO(low_res_bytes)).convert('RGBA')
|
low_res_rgba = Image.open(io.BytesIO(low_res_bytes)).convert('RGBA')
|
||||||
high_res_rgba = Image.open(io.BytesIO(high_res_bytes)).convert('RGBA')
|
high_res_rgba = Image.open(io.BytesIO(high_res_bytes)).convert('RGBA')
|
||||||
|
|
||||||
# Create a new RGB image with black background
|
# Create a new RGB image with black background
|
||||||
low_res_rgb = Image.new("RGB", low_res_rgba.size, (0, 0, 0))
|
low_res_rgb = Image.new("RGB", low_res_rgba.size, (0, 0, 0))
|
||||||
high_res_rgb = Image.new("RGB", high_res_rgba.size, (0, 0, 0))
|
high_res_rgb = Image.new("RGB", high_res_rgba.size, (0, 0, 0))
|
||||||
|
|
||||||
# Composite the original image over the black background
|
# Composite the original image over the black background
|
||||||
low_res_rgb.paste(low_res_rgba, mask=low_res_rgba.split()[3])
|
low_res_rgb.paste(low_res_rgba, mask=low_res_rgba.split()[3])
|
||||||
high_res_rgb.paste(high_res_rgba, mask=high_res_rgba.split()[3])
|
high_res_rgb.paste(high_res_rgba, mask=high_res_rgba.split()[3])
|
||||||
|
|
||||||
# Now we have true 3-channel RGB images with transparent areas converted to black
|
# Now we have true 3-channel RGB images with transparent areas converted to black
|
||||||
low_res = low_res_rgb
|
low_res = low_res_rgb
|
||||||
high_res = high_res_rgb
|
high_res = high_res_rgb
|
||||||
|
|
||||||
# If a transform is provided (e.g. conversion to Tensor), apply it
|
# If a transform is provided (e.g. conversion to Tensor), apply it
|
||||||
if self.transform:
|
if self.transform:
|
||||||
low_res = self.transform(low_res)
|
low_res = self.transform(low_res)
|
||||||
|
@ -127,4 +134,4 @@ class UpscaleDataset(Dataset):
|
||||||
print(f"\nError at index {idx}: {str(e)}")
|
print(f"\nError at index {idx}: {str(e)}")
|
||||||
self.failed_indices.add(idx)
|
self.failed_indices.add(idx)
|
||||||
return self[(idx + 1) % len(self)]
|
return self[(idx + 1) % len(self)]
|
||||||
```
|
```
|
Loading…
Reference in New Issue