Compare commits


No commits in common. "7f55ede5871c626021f47589a8ea6e335dac5fc7" and "2eee8732479ac4fcf8e42b00bfa34c45cd97f298" have entirely different histories.

4 changed files with 8 additions and 5 deletions

View File

@@ -10,7 +10,7 @@ include = '\.pyi?$'
[project]
name = "aiia"
-version = "0.1.6"
+version = "0.1.5"
description = "AIIA Deep Learning Model Implementation"
readme = "README.md"
authors = [

View File

@@ -1,6 +1,6 @@
[metadata]
name = aiia
-version = 0.1.6
+version = 0.1.5
author = Falko Habel
author_email = falko.habel@gmx.de
description = AIIA deep learning model implementation

View File

@@ -4,4 +4,4 @@ from .data.DataLoader import DataLoader
from .pretrain.pretrainer import Pretrainer, ProjectionHead
-__version__ = "0.1.6"
+__version__ = "0.1.5"
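The same version string is duplicated across what appear to be pyproject.toml (the [project] table), setup.cfg (the [metadata] section), and the package __init__, so a quick consistency check can catch a missed bump. A minimal sketch, assuming the aiia package from this repository is installed in the current environment:

from importlib.metadata import version

import aiia  # assumes the package is installed, e.g. via `pip install -e .`

# The distribution metadata (taken from pyproject.toml / setup.cfg at build time)
# and the module-level __version__ should report the same string, e.g. "0.1.5".
assert version("aiia") == aiia.__version__, "package metadata and __version__ are out of sync"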

View File

@@ -23,9 +23,9 @@ class AIIA(nn.Module):
        self.config.save(path)

    @classmethod
-    def load(cls, path, precision: str = None, **kwargs):
+    def load(cls, path, precision: str = None):
        config = AIIAConfig.load(path)
-        model = cls(config, **kwargs)  # Pass kwargs here!
+        model = cls(config)
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        dtype = None
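The change above drops the **kwargs passthrough, so extra constructor arguments can no longer be forwarded through load(). A minimal usage sketch of the difference, assuming AIIA is importable from the installed package (the import path, checkpoint path, and the extra keyword are illustrative, not taken from this diff):

from aiia import AIIA  # assumed import path; the diff does not show where AIIA is exported

# Removed signature (7f55ede): extra keyword arguments were forwarded to cls(config, **kwargs).
# model = AIIA.load("checkpoints/aiia", precision="fp16", num_hidden_layers=12)  # hypothetical extra kwarg

# New signature (2eee873): only the checkpoint path and an optional precision string are accepted.
model = AIIA.load("checkpoints/aiia", precision="bf16")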
@@ -41,7 +41,10 @@
        else:
            raise ValueError("Unsupported precision. Use 'fp16', 'bf16', or leave as None.")
        # Load the state dictionary normally (without dtype argument)
        model_dict = torch.load(f"{path}/model.pth", map_location=device)
        # If a precision conversion is requested, cast each tensor in the state dict to the target dtype.
        if dtype is not None:
            for key, param in model_dict.items():
                if torch.is_tensor(param):
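The hunk is cut off here, so the body of the casting loop is not visible. As a self-contained illustration of the pattern the visible lines describe, a minimal sketch (the helper name, the fp16/bf16-to-dtype mapping, and the final .to(dtype) cast are inferred from the visible lines and the error message, not copied from the repository):

import torch

def load_state_dict_with_precision(path: str, precision: str = None) -> dict:
    # Hypothetical helper mirroring the precision handling shown in the hunk above.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    # Map the precision string to a torch dtype, matching the fp16/bf16 options
    # named in the ValueError message.
    if precision is None:
        dtype = None
    elif precision.lower() == 'fp16':
        dtype = torch.float16
    elif precision.lower() == 'bf16':
        dtype = torch.bfloat16
    else:
        raise ValueError("Unsupported precision. Use 'fp16', 'bf16', or leave as None.")

    # Load the state dictionary normally (without dtype argument).
    model_dict = torch.load(f"{path}/model.pth", map_location=device)

    # If a precision conversion is requested, cast each tensor in the state dict
    # to the target dtype before it is passed to load_state_dict().
    if dtype is not None:
        for key, param in model_dict.items():
            if torch.is_tensor(param):
                model_dict[key] = param.to(dtype)  # assumed completion of the truncated loop
    return model_dict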