dropped batch processing and fp16 loading
Gitea Actions For AIIA / Explore-Gitea-Actions (push): cancelled

Falko Victor Habel 2025-04-19 22:53:13 +02:00
parent ced7e8a214
commit ac3fabd55f
1 changed file with 5 additions and 54 deletions


@@ -12,13 +12,13 @@ class aiuNNInference:
     Inference class for aiuNN upsampling model.
     Handles model loading, image upscaling, and output processing.
     """
-    def __init__(self, model_path: str, precision: Optional[str] = None, device: Optional[str] = None):
+    def __init__(self, model_path: str, device: Optional[str] = None):
         """
         Initialize the inference class by loading the aiuNN model.
         Args:
             model_path: Path to the saved model directory
-            precision: Optional precision setting ('fp16', 'bf16', or None for default)
             device: Optional device specification ('cuda', 'cpu', or None for auto-detection)
         """
@@ -30,7 +30,7 @@ class aiuNNInference:
         self.device = device
         # Load the model with specified precision
-        self.model = aiuNN.load(model_path, precision=precision)
+        self.model = aiuNN.from_pretrained(model_path)
         self.model.to(self.device)
         self.model.eval()
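With the precision argument dropped from loading, a caller that still wants fp16 or bf16 inference can cast the model after from_pretrained. A minimal sketch of that pattern, assuming the model is a standard PyTorch module; the torch calls and the import path below are illustrative, not part of this commit:

import torch
from aiunn import aiuNN  # import path assumed; adjust to the actual package layout

model = aiuNN.from_pretrained("path/to/model")
model.to("cuda")
# Explicit cast replaces the removed precision='bf16' option; use model.half() for fp16.
model = model.to(dtype=torch.bfloat16)
model.eval()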
@@ -160,54 +160,11 @@ class aiuNNInference:
         return binary_data
-    def process_batch(self,
-                      images: List[Union[str, Image.Image]],
-                      output_dir: Optional[str] = None,
-                      save_format: str = 'PNG',
-                      return_binary: bool = False) -> Union[List[Image.Image], List[bytes], None]:
-        """
-        Process multiple images in batch.
-        Args:
-            images: List of input images (paths or PIL Images)
-            output_dir: Optional directory to save results
-            save_format: Format to use when saving images
-            return_binary: Whether to return binary data instead of PIL Images
-        Returns:
-            List of processed images or binary data, or None if only saving
-        """
-        results = []
-        for i, img in enumerate(images):
-            # Upscale the image
-            upscaled = self.upscale(img)
-            # Save if output directory is provided
-            if output_dir:
-                # Extract filename if input is a path
-                if isinstance(img, str):
-                    filename = os.path.basename(img)
-                    base, _ = os.path.splitext(filename)
-                else:
-                    base = f"upscaled_{i}"
-                output_path = os.path.join(output_dir, f"{base}.{save_format.lower()}")
-                self.save(upscaled, output_path, format=save_format)
-            # Add to results based on return type
-            if return_binary:
-                results.append(self.convert_to_binary(upscaled, format=save_format))
-            else:
-                results.append(upscaled)
-        return results if (not output_dir or return_binary or not save_format) else None
 # Example usage (can be removed)
 if __name__ == "__main__":
     # Initialize inference with a model path
-    inferencer = aiuNNInference("path/to/model", precision="bf16")
+    inferencer = aiuNNInference("path/to/model")
     # Upscale a single image
     upscaled_image = inferencer.upscale("input_image.jpg")
@@ -218,9 +175,3 @@ if __name__ == "__main__":
     # Convert to binary
     binary_data = inferencer.convert_to_binary(upscaled_image)
-    # Process a batch of images
-    inferencer.process_batch(
-        ["image1.jpg", "image2.jpg"],
-        output_dir="output_folder",
-        save_format="PNG"
-    )
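With process_batch removed, iterating over several images becomes the caller's responsibility. A minimal sketch of an equivalent loop built only from the methods this class keeps (upscale and save); the helper name and file paths are made up for illustration:

import os

def upscale_all(inferencer, paths, output_dir, save_format="PNG"):
    # Hypothetical caller-side replacement for the removed process_batch.
    os.makedirs(output_dir, exist_ok=True)
    for path in paths:
        upscaled = inferencer.upscale(path)
        base, _ = os.path.splitext(os.path.basename(path))
        out_path = os.path.join(output_dir, f"{base}.{save_format.lower()}")
        inferencer.save(upscaled, out_path, format=save_format)

upscale_all(inferencer, ["image1.jpg", "image2.jpg"], "output_folder")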