Module neurop.base

Classes

class NeuralOperator (readin: torch.nn.modules.module.Module,
kernel_integral: torch.nn.modules.module.Module,
readout: torch.nn.modules.module.Module,
optimizer: torch.optim.optimizer.Optimizer | None,
activation_function: Callable[[torch.Tensor], torch.Tensor] = torch.relu)
Expand source code
class NeuralOperator(torch.nn.Module, ABC):
    """
    Abstract base class for Neural Operators.

    Concrete subclasses implement the forward pass, loss, train/evaluate
    steps, checkpointing, and metric computation.

    Instance Attributes: 

        readin: (torch.nn.Module) 
            Reads in input data and projects to higher dimensional space 

        kernel_integral: (torch.nn.Module) 
            Performs the kernel operator on the data

        readout: (torch.nn.Module)
            Reads out data to lower dimensional space

        optimizer: (torch.optim.Optimizer) 
            Optimization algorithm to choose. Defaults to Adam(lr=1e-3)
        
        activation_function: (Callable[[Tensor], Tensor])
            Activation to introduce nonlinearity between kernel operations
    """

    def __init__(
        self,
        readin: torch.nn.Module,
        kernel_integral: torch.nn.Module,
        readout: torch.nn.Module,
        optimizer: Optional[torch.optim.Optimizer],
        activation_function: Callable[[Tensor], Tensor] = torch.relu,
    ) -> None:
        # NOTE(fix): this class was previously a @dataclass. The generated
        # __init__ assigned the nn.Module fields *before* __post_init__ could
        # run torch.nn.Module.__init__(), so every instantiation raised
        # "cannot assign module before Module.__init__() call". An explicit
        # __init__ with the same signature calls super().__init__() first.
        super().__init__()

        # Assigning nn.Module values registers them as sub-modules, so
        # self.parameters() below already yields their parameters.
        self.readin = readin
        self.kernel_integral = kernel_integral
        self.readout = readout
        self.activation_function = activation_function

        # Default optimizer over all registered parameters. Use an explicit
        # `is None` test rather than truthiness so an unusual falsy optimizer
        # object would still be respected.
        if optimizer is None:
            optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
        self.optimizer = optimizer

    @abstractmethod
    def forward(self, x: Tensor) -> Tensor:
        """
        Forward pass to be implemented by subclasses.
        """

    @abstractmethod
    def loss(self, prediction: Tensor, target: Tensor) -> Tensor:
        """
        Loss function specific to the problem/operator.
        """

    @abstractmethod
    def train_step(self, x: Tensor, y: Tensor) -> Tensor:
        """
        One training step: forward + loss + backward + optimizer step.
        """

    @abstractmethod
    def evaluate(self, x: Tensor, y: Tensor) -> float:
        """
        Evaluate model performance on validation/test data.
        """

    @abstractmethod
    def save(self, path: Path): 
        """
        Write model parameters to a file  
        """

    @abstractmethod
    def load(self, path: Path):
        """
        Load model parameters from a file 
        """

    def to_device(self, device: torch.device): 
        """
        Send data to a Torch device 

        Moves the three sub-modules in place. NOTE(review): optimizer state
        is not moved here — confirm callers re-create or move the optimizer
        when switching devices after training has started.
        """
        self.readin.to(device)
        self.kernel_integral.to(device)
        self.readout.to(device)

    @abstractmethod
    def calculate_metrics(self, ground_truth: Tensor, predicted: Tensor): 
        """
        Compute the desired metrics and output a TypedDict 
        """

Abstract class for Neural Operators.

Instance Attributes:

readin: (torch.nn.Module) 
    Reads in input data and projects to higher dimensional space

kernel_integral: (torch.nn.Module) 
    Performs the kernel operator on the data

readout: (torch.nn.Module)
    Reads out data to lower dimensional space

optimizer: (torch.optim.Optimizer) 
    Optimization algorithm to choose. Defaults to Adam(lr=1e-3)

parameters(): (method inherited from torch.nn.Module)
    Returns an iterator over the neural operator's parameters (used to construct the default optimizer)

activation_function: (Callable[[Tensor], Tensor])
    Activation to introduce nonlinearity between kernel operations

Ancestors

  • torch.nn.modules.module.Module
  • abc.ABC

Subclasses

Instance variables

var kernel_integral : torch.nn.modules.module.Module
var optimizer : torch.optim.optimizer.Optimizer | None
var readin : torch.nn.modules.module.Module
var readout : torch.nn.modules.module.Module

Methods

def activation_function(...) ‑> Callable[[torch.Tensor], torch.Tensor]
def calculate_metrics(self, ground_truth: torch.Tensor, predicted: torch.Tensor)
Expand source code
@abstractmethod
def calculate_metrics(self, ground_truth: Tensor, predicted: Tensor): 
    """
    Compute the desired metrics and output a TypedDict 
    """
    pass

Compute the desired metrics and output a TypedDict

def evaluate(self, x: torch.Tensor, y: torch.Tensor) ‑> float
Expand source code
@abstractmethod
def evaluate(self, x: Tensor, y: Tensor) -> float:
    """
    Evaluate model performance on validation/test data.
    """
    pass

Evaluate model performance on validation/test data.

def forward(self, x: torch.Tensor) ‑> torch.Tensor
Expand source code
@abstractmethod
def forward(self, x: Tensor) -> Tensor:
    """
    Forward pass to be implemented by subclasses.
    """
    pass

Forward pass to be implemented by subclasses.

def load(self, path: pathlib.Path)
Expand source code
@abstractmethod
def load(self, path: Path):
    """
    Load model parameters from a file 
    """
    pass

Load model parameters from a file

def loss(self, prediction: torch.Tensor, target: torch.Tensor) ‑> torch.Tensor
Expand source code
@abstractmethod
def loss(self, prediction: Tensor, target: Tensor) -> Tensor:
    """
    Loss function specific to the problem/operator.
    """
    pass

Loss function specific to the problem/operator.

def save(self, path: pathlib.Path)
Expand source code
@abstractmethod
def save(self, path: Path): 
    """
    Write model parameters to a file  
    """
    pass

Write model parameters to a file

def to_device(self, device: torch.device)
Expand source code
def to_device(self, device: torch.device): 
    """
    Send data to a Torch device 
    """
    self.readin.to(device)
    self.kernel_integral.to(device)
    self.readout.to(device)

Send data to a Torch device

def train_step(self, x: torch.Tensor, y: torch.Tensor) ‑> torch.Tensor
Expand source code
@abstractmethod
def train_step(self, x: Tensor, y: Tensor) -> Tensor:
    """
    One training step: forward + loss + backward + optimizer step.
    """
    pass

One training step: forward + loss + backward + optimizer step.