diff --git a/docs/getting_started.rst b/docs/getting_started.rst index 982f621b..1a706031 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -177,7 +177,7 @@ Here is an example of a training routine definition: interval = "epoch" # Interval for learning rate updates (per epoch) [training.loss_parameter] - loss_property = ['per_system_energy', 'per_atom_force'] # Properties to include in the loss function + loss_components = ['per_system_energy', 'per_atom_force'] # Properties to include in the loss function [training.loss_parameter.weight] per_system_energy = 0.999 # Weight for per molecule energy in the loss calculation diff --git a/docs/training.rst b/docs/training.rst index 43ab6451..dbb756d6 100644 --- a/docs/training.rst +++ b/docs/training.rst @@ -37,9 +37,9 @@ Loss function ^^^^^^^^^^^^^^^^^^^^^^^^ The loss function quantifies the discrepancy between the model's predictions and the target properties, providing a scalar value that guides the optimizer in updating the model's parameters. This function is configured in the `[training.loss]` section of the training TOML file. -Depending on the specified `loss_property`` section, the loss function can combine various individual loss functions. *Modelforge* always includes the mean squared error (MSE) for energy prediction, and may also incorporate MSE for force prediction, dipole moment prediction, and partial charge prediction. +Depending on the specified `loss_components` section, the loss function can combine various individual loss functions. *Modelforge* always includes the mean squared error (MSE) for energy prediction, and may also incorporate MSE for force prediction, dipole moment prediction, and partial charge prediction. -The design of the loss function is intrinsically linked to the structure of the energy function. For instance, if the energy function aggregates atomic energies, then loss_property should include `per_system_energy` and optionally, `per_atom_force`. 
+The design of the loss function is intrinsically linked to the structure of the energy function. For instance, if the energy function aggregates atomic energies, then loss_components should include `per_system_energy` and optionally, `per_atom_force`. Predicting Short-Range Atomic Energies diff --git a/modelforge/dataset/dataset.py b/modelforge/dataset/dataset.py index 301e2845..23aa267b 100644 --- a/modelforge/dataset/dataset.py +++ b/modelforge/dataset/dataset.py @@ -970,6 +970,10 @@ def __init__( ) self.lock_file = f"{self.cache_processed_dataset_filename}.lockfile" + def transfer_batch_to_device(self, batch, device, dataloader_idx): + # move all tensors to the device + return batch.to_device(device) + @lock_with_attribute("lock_file") def prepare_data( self, diff --git a/modelforge/potential/ani.py b/modelforge/potential/ani.py index 2d1d2c9a..141da7ef 100644 --- a/modelforge/potential/ani.py +++ b/modelforge/potential/ani.py @@ -7,10 +7,7 @@ using a neural network model. """ -from __future__ import annotations - -from typing import TYPE_CHECKING, Dict, Tuple - +from typing import Dict, Tuple, List import torch from loguru import logger as log from torch import nn diff --git a/modelforge/potential/painn.py b/modelforge/potential/painn.py index c2e846fa..23022275 100644 --- a/modelforge/potential/painn.py +++ b/modelforge/potential/painn.py @@ -298,10 +298,8 @@ def forward( # featurize pairwise distances using radial basis functions (RBF) f_ij = self.radial_symmetry_function_module(d_ij) - f_ij_cut = self.cutoff_module(d_ij) # Apply the filter network and cutoff function - filters = torch.mul(self.filter_net(f_ij), f_ij_cut) - + filters = torch.mul(self.filter_net(f_ij), self.cutoff_module(d_ij)) # depending on whether we share filters or not filters have different # shape at dim=1 (dim=0 is always the number of atom pairs) if we share # filters, we copy the filters and use the same filters for all blocks diff --git a/modelforge/potential/physnet.py 
b/modelforge/potential/physnet.py index e3fd3cc3..12ee27e0 100644 --- a/modelforge/potential/physnet.py +++ b/modelforge/potential/physnet.py @@ -220,14 +220,20 @@ def forward(self, data: Dict[str, torch.Tensor]) -> torch.Tensor: # first term in equation 6 in the PhysNet paper embedding_atom_i = self.activation_function( self.interaction_i(data["atomic_embedding"]) - ) + ) # shape (nr_of_atoms_in_batch, atomic_embedding_dim) # second term in equation 6 in the PhysNet paper # apply attention mask G to radial basis functions f_ij - g = self.attention_mask(data["f_ij"]) + g = self.attention_mask( + data["f_ij"] + ) # shape (nr_of_atom_pairs_in_batch, atomic_embedding_dim) # calculate the updated embedding for atom j + # NOTE: this changes the 2nd dimension from number_of_radial_basis_functions to atomic_embedding_dim embedding_atom_j = self.activation_function( - self.interaction_j(data["atomic_embedding"][idx_j]) + self.interaction_j(data["atomic_embedding"])[ + idx_j + ] # NOTE this is the same as the embedding_atom_i, but then we are selecting the embedding of atom j + # shape (nr_of_atom_pairs_in_batch, atomic_embedding_dim) ) updated_embedding_atom_j = torch.mul( g, embedding_atom_j diff --git a/modelforge/potential/potential.py b/modelforge/potential/potential.py index aeff3505..670aaa0a 100644 --- a/modelforge/potential/potential.py +++ b/modelforge/potential/potential.py @@ -233,7 +233,6 @@ def __init__( super().__init__() - self.eval() self.core_network = torch.jit.script(core_network) if jit else core_network self.neighborlist = ( torch.jit.script(neighborlist) if jit_neighborlist else neighborlist @@ -430,7 +429,6 @@ def load_state_dict( strict=strict, assign=assign, ) - self.eval() # Set the model to evaluation mode def setup_potential( @@ -507,7 +505,6 @@ def setup_potential( jit=jit, jit_neighborlist=False if use_training_mode_neighborlist else True, ) - model.eval() return model @@ -611,6 +608,12 @@ def generate_potential( 
neighborlist_strategy=inference_neighborlist_strategy, verlet_neighborlist_skin=verlet_neighborlist_skin, ) + # Disable gradients for model parameters + for param in potential.parameters(): + param.requires_grad = False + # Set model to eval + potential.eval() + if simulation_environment == "JAX": # register nnp_input as pytree from modelforge.utils.io import import_ diff --git a/modelforge/potential/representation.py b/modelforge/potential/representation.py index 07e00d2e..63aaf7a9 100644 --- a/modelforge/potential/representation.py +++ b/modelforge/potential/representation.py @@ -151,37 +151,72 @@ def forward(self, r_ij: torch.Tensor) -> torch.Tensor: return sub_aev def compute_angular_sub_aev(self, vectors12: torch.Tensor) -> torch.Tensor: - """Compute the angular subAEV terms of the center atom given neighbor pairs. + """ + Compute the angular subAEV terms of the center atom given neighbor + pairs. This correspond to equation (4) in the ANI paper. This function just compute the terms. The sum in the equation is not computed. - The input tensor have shape (conformations, atoms, N), where N - is the number of neighbor atom pairs within the cutoff radius and - output tensor should have shape - (conformations, atoms, ``self.angular_sublength()``) + + Parameters + ---------- + vectors12: torch.Tensor + Pairwise distance vectors. Shape: [2, n_pairs, 3] + Returns + ------- + torch.Tensor + Angular subAEV terms. Shape: [n_pairs, ShfZ_size * ShfA_size] """ - vectors12 = vectors12.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1) - distances12 = vectors12.norm(2, dim=-5) + # vectors12 has shape: (2, n_pairs, 3) + distances12 = vectors12.norm(p=2, dim=-1) # Shape: (2, n_pairs) + distances_sum = distances12.sum(dim=0) / 2 # Shape: (n_pairs,) + fcj12 = self.cosine_cutoff(distances12) # Shape: (2, n_pairs) + fcj12_prod = fcj12.prod(dim=0) # Shape: (n_pairs,) + + # cos_angles: (n_pairs,) - # 0.95 is multiplied to the cos values to prevent acos from - # returning NaN. 
cos_angles = 0.95 * torch.nn.functional.cosine_similarity( - vectors12[0], vectors12[1], dim=-5 + vectors12[0], vectors12[1], dim=-1 ) - angles = torch.acos(cos_angles) - fcj12 = self.cosine_cutoff(distances12) - factor1 = ((1 + torch.cos(angles - self.ShfZ)) / 2) ** self.Zeta + + angles = torch.acos(cos_angles) # Shape: (n_pairs,) + + # Prepare shifts for broadcasting + angles = angles.unsqueeze(-1) # Shape: (n_pairs, 1) + distances_sum = distances_sum.unsqueeze(-1) # Shape: (n_pairs, 1) + + # Compute factor1 + delta_angles = angles - self.ShfZ.view(1, -1) # Shape: (n_pairs, ShfZ_size) + factor1 = ( + (1 + torch.cos(delta_angles)) / 2 + ) ** self.Zeta # Shape: (n_pairs, ShfZ_size) + + # Compute factor2 + delta_distances = distances_sum - self.ShfA.view( + 1, -1 + ) # Shape: (n_pairs, ShfA_size) factor2 = torch.exp( - -self.EtaA * (distances12.sum(0) / 2 - self.ShfA) ** 2 - ).unsqueeze(-1) - factor2 = factor2.squeeze(4).squeeze(3) - ret = 2 * factor1 * factor2 * fcj12.prod(0) - # At this point, ret now have shape - # (conformations, atoms, N, ?, ?, ?, ?) where ? depend on constants. 
- # We then should flat the last 4 dimensions to view the subAEV as one - # dimension vector - return ret.flatten(start_dim=-4) + -self.EtaA * delta_distances**2 + ) # Shape: (n_pairs, ShfA_size) + + # Compute the outer product of factor1 and factor2 efficiently + # fcj12_prod: (n_pairs, 1, 1) + fcj12_prod = fcj12_prod.unsqueeze(-1).unsqueeze(-1) # Shape: (n_pairs, 1, 1) + + # factor1: (n_pairs, ShfZ_size, 1) + factor1 = factor1.unsqueeze(-1) + # factor2: (n_pairs, 1, ShfA_size) + factor2 = factor2.unsqueeze(-2) + + # Compute ret: (n_pairs, ShfZ_size, ShfA_size) + ret = 2 * fcj12_prod * factor1 * factor2 + + # Flatten the last two dimensions to get the final subAEV + # ret: (n_pairs, ShfZ_size * ShfA_size) + ret = ret.reshape(distances12.size(dim=1), -1) + + return ret import math diff --git a/modelforge/potential/schnet.py b/modelforge/potential/schnet.py index b532e228..fd22d3ae 100644 --- a/modelforge/potential/schnet.py +++ b/modelforge/potential/schnet.py @@ -143,16 +143,17 @@ def compute_properties( # Compute the atomic representation representation = self.schnet_representation_module(data, pairlist_output) atomic_embedding = representation["atomic_embedding"] + f_ij = representation["f_ij"] + f_cutoff = representation["f_cutoff"] # Apply interaction modules to update the atomic embedding for interaction in self.interaction_modules: - v = interaction( + atomic_embedding = atomic_embedding + interaction( atomic_embedding, pairlist_output, - representation["f_ij"], - representation["f_cutoff"], + f_ij, + f_cutoff, ) - atomic_embedding = atomic_embedding + v # Update atomic features return { "per_atom_scalar_representation": atomic_embedding, @@ -293,14 +294,13 @@ def forward( # Generate interaction filters based on radial basis functions W_ij = self.filter_network(f_ij.squeeze(1)) - W_ij = W_ij * f_ij_cutoff + W_ij = W_ij * f_ij_cutoff # Shape: [n_pairs, number_of_filters] # Perform continuous-filter convolution x_j = atomic_embedding[idx_j] x_ij = x_j * W_ij # 
Element-wise multiplication - out = torch.zeros_like(atomic_embedding) - out.scatter_add_( + out = torch.zeros_like(atomic_embedding).scatter_add_( 0, idx_i.unsqueeze(-1).expand_as(x_ij), x_ij ) # Aggregate per-atom pair to per-atom diff --git a/modelforge/tests/data/config.toml b/modelforge/tests/data/config.toml index ec0a2149..69d81acc 100644 --- a/modelforge/tests/data/config.toml +++ b/modelforge/tests/data/config.toml @@ -60,7 +60,7 @@ threshold_mode = "abs" interval = "epoch" [training.loss_parameter] -loss_property = ['per_system_energy', 'per_atom_force'] # use +loss_components = ['per_system_energy', 'per_atom_force'] # use [training.loss_parameter.weight] per_system_energy = 0.999 #NOTE: reciprocal units diff --git a/modelforge/tests/data/training_defaults/default.toml b/modelforge/tests/data/training_defaults/default.toml index 62f930dd..6a9a886a 100644 --- a/modelforge/tests/data/training_defaults/default.toml +++ b/modelforge/tests/data/training_defaults/default.toml @@ -32,7 +32,7 @@ threshold_mode = "abs" interval = "epoch" # ------------------------------------------------------------ # [training.loss_parameter] -loss_property = ['per_system_energy'] #, 'per_atom_force'] # use +loss_components = ['per_system_energy'] #, 'per_atom_force'] # use # ------------------------------------------------------------ # [training.loss_parameter.weight] per_system_energy = 1.0 #NOTE: reciprocal units diff --git a/modelforge/tests/test_parameter_models.py b/modelforge/tests/test_parameter_models.py index b4bc5277..f0d681ac 100644 --- a/modelforge/tests/test_parameter_models.py +++ b/modelforge/tests/test_parameter_models.py @@ -142,9 +142,9 @@ def test_training_parameter_model(): with pytest.raises(ValidationError): training_parameters.splitting_strategy.dataset_split = [0.7, 0.1, 0.1, 0.1] - # this will throw an error because the datafile has 1 entries for the loss_property dictionary + # this will throw an error because the datafile has 1 entries for the 
loss_components dictionary with pytest.raises(ValidationError): - training_parameters.loss_parameter.loss_property = [ + training_parameters.loss_parameter.loss_components = [ "per_system_energy", "per_atom_force", ] diff --git a/modelforge/tests/test_training.py b/modelforge/tests/test_training.py index bcc02b0d..0602de10 100644 --- a/modelforge/tests/test_training.py +++ b/modelforge/tests/test_training.py @@ -87,7 +87,7 @@ def get_trainer(config): def add_force_to_loss_parameter(config): """ [training.loss_parameter] - loss_property = ['per_system_energy', 'per_atom_force'] + loss_components = ['per_system_energy', 'per_atom_force'] # ------------------------------------------------------------ # [training.loss_parameter.weight] per_system_energy = 0.999 #NOTE: reciprocal units @@ -95,14 +95,14 @@ def add_force_to_loss_parameter(config): """ t_config = config["training"] - t_config.loss_parameter.loss_property.append("per_atom_force") + t_config.loss_parameter.loss_components.append("per_atom_force") t_config.loss_parameter.weight["per_atom_force"] = 0.001 def add_dipole_moment_to_loss_parameter(config): """ [training.loss_parameter] - loss_property = [ + loss_components = [ "per_system_energy", "per_atom_force", "per_system_dipole_moment", @@ -116,8 +116,8 @@ def add_dipole_moment_to_loss_parameter(config): """ t_config = config["training"] - t_config.loss_parameter.loss_property.append("per_system_dipole_moment") - t_config.loss_parameter.loss_property.append("per_system_total_charge") + t_config.loss_parameter.loss_components.append("per_system_dipole_moment") + t_config.loss_parameter.loss_components.append("per_system_total_charge") t_config.loss_parameter.weight["per_system_dipole_moment"] = 0.01 t_config.loss_parameter.weight["per_system_total_charge"] = 0.01 @@ -130,8 +130,8 @@ def add_dipole_moment_to_loss_parameter(config): def replace_per_system_with_per_atom_loss(config): t_config = config["training"] - 
t_config.loss_parameter.loss_property.remove("per_system_energy") - t_config.loss_parameter.loss_property.append("per_atom_energy") + t_config.loss_parameter.loss_components.remove("per_system_energy") + t_config.loss_parameter.loss_components.append("per_atom_energy") t_config.loss_parameter.weight.pop("per_system_energy") t_config.loss_parameter.weight["per_atom_energy"] = 0.999 diff --git a/modelforge/train/losses.py b/modelforge/train/losses.py index 78b1f560..dfceb75d 100644 --- a/modelforge/train/losses.py +++ b/modelforge/train/losses.py @@ -297,13 +297,13 @@ class Loss(nn.Module): "per_system_dipole_moment", ] - def __init__(self, loss_property: List[str], weights: Dict[str, float]): + def __init__(self, loss_components: List[str], weights: Dict[str, float]): """ Calculates the combined loss for energy and force predictions. Parameters ---------- - loss_property : List[str] + loss_components : List[str] List of properties to include in the loss calculation. weights : Dict[str, float] Dictionary containing the weights for each property in the loss calculation. 
@@ -316,11 +316,11 @@ def __init__(self, loss_property: List[str], weights: Dict[str, float]): super().__init__() from torch.nn import ModuleDict - self.loss_property = loss_property + self.loss_components = loss_components self.weights = weights self.loss_functions = ModuleDict() - for prop in loss_property: + for prop in loss_components: if prop not in self._SUPPORTED_PROPERTIES: raise NotImplementedError(f"Loss type {prop} not implemented.") log.info(f"Using loss function for {prop}") @@ -330,7 +330,7 @@ def __init__(self, loss_property: List[str], weights: Dict[str, float]): scale_by_number_of_atoms=True ) elif prop == "per_atom_energy": - log.info("Creating per atom energy loss with weight: {weights[prop]}") + log.info(f"Creating per atom energy loss with weight: {weights[prop]}") self.loss_functions[prop] = EnergySquaredError( scale_by_number_of_atoms=True @@ -383,7 +383,7 @@ def forward( total_loss = torch.zeros_like(batch.metadata.per_system_energy) # Iterate over loss properties - for prop in self.loss_property: + for prop in self.loss_components: loss_fn = self.loss_functions[prop] prop_ = _exchange_per_atom_energy_for_per_system_energy(prop) @@ -415,13 +415,13 @@ class LossFactory: """ @staticmethod - def create_loss(loss_property: List[str], weight: Dict[str, float]) -> Loss: + def create_loss(loss_components: List[str], weight: Dict[str, float]) -> Loss: """ Creates an instance of the specified loss type. Parameters ---------- - loss_property : List[str] + loss_components : List[str] List of properties to include in the loss calculation. weight : Dict[str, float] Dictionary containing the weights for each property in the loss calculation. @@ -431,7 +431,7 @@ def create_loss(loss_property: List[str], weight: Dict[str, float]) -> Loss: Loss An instance of the specified loss function. 
""" - return Loss(loss_property, weight) + return Loss(loss_components, weight) from torch.nn import ModuleDict diff --git a/modelforge/train/parameters.py b/modelforge/train/parameters.py index 71076eaf..5a065e3a 100644 --- a/modelforge/train/parameters.py +++ b/modelforge/train/parameters.py @@ -164,23 +164,23 @@ class LossParameter(ParametersBase): class to hold the loss properties args: - loss_property (List[str]): The loss property. + loss_components (List[str]): The loss property. The length of this list must match the length of loss_weight weight (Dict[str,float]): The loss weight. - The keys must correspond to entries in the loss_property list. + The keys must correspond to entries in the loss_components list. """ - loss_property: List + loss_components: List weight: Dict[str, float] @model_validator(mode="after") def ensure_length_match(self) -> "LossParameter": - loss_property = self.loss_property + loss_components = self.loss_components loss_weight = self.weight - if len(loss_property) != len(loss_weight): + if len(loss_components) != len(loss_weight): raise ValueError( - f"The length of loss_property ({len(loss_property)}) and weight ({len(loss_weight)}) must match." + f"The length of loss_components ({len(loss_components)}) and weight ({len(loss_weight)}) must match." 
) return self @@ -280,7 +280,7 @@ def ensure_logger_configuration(self) -> "ExperimentLogger": @model_validator(mode="after") def validate_dipole_and_shift_com(self): - if "dipole_moment" in self.loss_parameter.loss_property: + if "dipole_moment" in self.loss_parameter.loss_components: if not self.shift_center_of_mass_to_origin: raise ValueError( "Use of dipole_moment in the loss requires shift_center_of_mass_to_origin to be True" diff --git a/modelforge/train/training.py b/modelforge/train/training.py index 802eb541..2698d551 100644 --- a/modelforge/train/training.py +++ b/modelforge/train/training.py @@ -384,11 +384,11 @@ def __init__( ) self.include_force = ( - "per_atom_force" in training_parameter.loss_parameter.loss_property + "per_atom_force" in training_parameter.loss_parameter.loss_components ) self.calculate_predictions = CalculateProperties( - training_parameter.loss_parameter.loss_property + training_parameter.loss_parameter.loss_components ) self.optimizer_class = optimizer_class self.learning_rate = training_parameter.lr @@ -409,17 +409,17 @@ def __init__( # Initialize performance metrics self.test_metrics = create_error_metrics( - training_parameter.loss_parameter.loss_property + training_parameter.loss_parameter.loss_components ) self.val_metrics = create_error_metrics( - training_parameter.loss_parameter.loss_property + training_parameter.loss_parameter.loss_components ) self.train_metrics = create_error_metrics( - training_parameter.loss_parameter.loss_property + training_parameter.loss_parameter.loss_components ) self.loss_metrics = create_error_metrics( - training_parameter.loss_parameter.loss_property, is_loss=True + training_parameter.loss_parameter.loss_components, is_loss=True ) def forward(self, batch: BatchData) -> Dict[str, torch.Tensor]: @@ -942,7 +942,7 @@ def setup_trainer(self) -> Trainer: benchmark=True, inference_mode=False, num_sanity_val_steps=2, - gradient_clip_val=10.0, # FIXME: hardcoded for now + gradient_clip_val=1.0, # 
FIXME: hardcoded for now log_every_n_steps=self.runtime_parameter.log_every_n_steps, enable_model_summary=True, enable_progress_bar=self.runtime_parameter.verbose, # if true will show progress bar @@ -1029,7 +1029,7 @@ def _generate_tags(self, tags: List[str]) -> List[str]: str(modelforge.__version__), self.dataset_parameter.dataset_name, self.potential_parameter.potential_name, - f"loss-{'-'.join(self.training_parameter.loss_parameter.loss_property)}", + f"loss-{'-'.join(self.training_parameter.loss_parameter.loss_components)}", ] ) return tags diff --git a/modelforge/utils/prop.py b/modelforge/utils/prop.py index 6021503f..2a0e64be 100644 --- a/modelforge/utils/prop.py +++ b/modelforge/utils/prop.py @@ -3,7 +3,6 @@ """ from dataclasses import dataclass -import jax.numpy as jnp import torch from typing import NamedTuple, Optional from loguru import logger as log @@ -61,7 +60,6 @@ def __init__( self.box_vectors = box_vectors self.is_periodic = is_periodic - self.__post_init__() # Validate inputs self._validate_inputs() @@ -97,25 +95,6 @@ def _validate_inputs(self): "The size of atomic_subsystem_indices and the first dimension of positions must match" ) - def __post_init__(self): - - # Set all integer tensors to int32 or JAX int32 - if isinstance(self.atomic_numbers, torch.Tensor): - self.atomic_numbers = self.atomic_numbers.to(torch.int32) - self.atomic_subsystem_indices = self.atomic_subsystem_indices.to( - torch.int32 - ) - self.per_system_total_charge = self.per_system_total_charge.to(torch.int32) - elif isinstance(self.atomic_numbers, jnp.ndarray): - self.atomic_numbers = self.atomic_numbers.astype(jnp.int32) - self.atomic_subsystem_indices = self.atomic_subsystem_indices.astype( - jnp.int32 - ) - self.per_system_total_charge = self.per_system_total_charge.astype( - jnp.int32 - ) - else: - raise TypeError("Unsupported array type in NNPInput") def to_device(self, device: torch.device): """Move all tensors in this instance to the specified device.""" @@ -212,6 
+191,15 @@ def to_dtype(self, dtype: torch.dtype): class BatchData: nnp_input: NNPInput metadata: Metadata + + def to( + self, + device: torch.device, + ): # NOTE: this is required to move the data to device + """Move all data in this batch to the specified device and dtype.""" + self.nnp_input = self.nnp_input.to_device(device=device) + self.metadata = self.metadata.to_device(device=device) + return self def to_device( self, diff --git a/scripts/config.toml b/scripts/config.toml index 1048808c..64bc45c1 100644 --- a/scripts/config.toml +++ b/scripts/config.toml @@ -58,7 +58,7 @@ monitor = "val/per_system_energy/rmse" interval = "epoch" [training.loss_parameter] -loss_property = ['per_system_energy', 'per_atom_force'] # use +loss_components = ['per_system_energy', 'per_atom_force'] # use [training.loss_parameter.weight] per_system_energy = 0.999 #NOTE: reciprocal units