develop #27

Merged
Fabel merged 2 commits from develop into main 2025-03-11 21:26:42 +00:00
4 changed files with 5 additions and 8 deletions

View File

@@ -10,7 +10,7 @@ include = '\.pyi?$'
[project]
name = "aiia"
version = "0.1.5"
version = "0.1.6"
description = "AIIA Deep Learning Model Implementation"
readme = "README.md"
authors = [

View File

@@ -1,6 +1,6 @@
[metadata]
name = aiia
version = 0.1.5
version = 0.1.6
author = Falko Habel
author_email = falko.habel@gmx.de
description = AIIA deep learning model implementation

View File

@@ -4,4 +4,4 @@ from .data.DataLoader import DataLoader
from .pretrain.pretrainer import Pretrainer, ProjectionHead
__version__ = "0.1.5"
__version__ = "0.1.6"

View File

@@ -23,9 +23,9 @@ class AIIA(nn.Module):
self.config.save(path)
@classmethod
def load(cls, path, precision: str = None):
def load(cls, path, precision: str = None, **kwargs):
config = AIIAConfig.load(path)
model = cls(config)
model = cls(config, **kwargs) # Pass kwargs here!
device = 'cuda' if torch.cuda.is_available() else 'cpu'
dtype = None
@@ -41,10 +41,7 @@
else:
raise ValueError("Unsupported precision. Use 'fp16', 'bf16', or leave as None.")
# Load the state dictionary normally (without dtype argument)
model_dict = torch.load(f"{path}/model.pth", map_location=device)
# If a precision conversion is requested, cast each tensor in the state dict to the target dtype.
if dtype is not None:
for key, param in model_dict.items():
if torch.is_tensor(param):