The fedbiomed.common.models module includes model abstraction classes that can be used with plain framework-specific models.
Please visit the Declearn repository for the `TorchVector` and `NumpyVector` classes used in this module.
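For orientation, here is a minimal sketch of how these wrappers are typically instantiated (SkLearnModel and TorchModel are documented below; the scikit-learn and PyTorch classes are standard):
```python
import torch
from sklearn.linear_model import SGDClassifier

from fedbiomed.common.models import SkLearnModel, TorchModel

# Scikit-learn: pass the (non-initialized) estimator class to the builder.
sk_model = SkLearnModel(SGDClassifier)

# PyTorch: pass an instantiated torch.nn.Module to the wrapper.
torch_model = TorchModel(torch.nn.Linear(in_features=4, out_features=2))
```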
Classes
BaseSkLearnModel
BaseSkLearnModel(model)
Bases: Model
Wrapper for scikit-learn models.
This class implements all abstract methods from the Model
API, but adds some scikit-learn-specific ones that its children need to implement.
Attributes:
Name | Type | Description |
---|---|---|
model | BaseEstimator | Wrapped model |
param_list | List[str] | List that contains layer attributes. Should be set when calling `set_init_params`. |
Class attributes:
Name | Type | Description |
---|---|---|
is_classification | bool | Boolean flag indicating whether the wrapped model is designed for classification or for regression supervised-learning tasks. |
Parameters:
Name | Type | Description | Default |
---|---|---|---|
model | BaseEstimator | Model object as an instance of BaseEstimator | required |
Raises:
Type | Description |
---|---|
FedbiomedModelError | if model is not a scikit-learn BaseEstimator object |
Source code in fedbiomed/common/models/_sklearn.py
def __init__(
self,
model: BaseEstimator,
) -> None:
"""Instantiate the wrapper over a scikit-learn BaseEstimator.
Args:
model: Model object as an instance of [BaseEstimator][sklearn.base.BaseEstimator]
Raises:
FedbiomedModelError: if model is not a scikit-learn [BaseEstimator][sklearn.base.BaseEstimator] object
"""
super().__init__(model)
self._gradients: Dict[str, np.ndarray] = {}
self.param_list: List[str] = []
self._optim_params: Dict[str, Any] = {}
Attributes
is_classification class-attribute
is_classification
model instance-attribute
model
param_list instance-attribute
param_list = []
Functions
apply_updates
apply_updates(updates)
Apply incoming updates to the wrapped model's parameters.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
updates | Dict[str, ndarray] | Model parameters' updates to add (apply) to existing parameters' values. | required |
Source code in fedbiomed/common/models/_sklearn.py
def apply_updates(
self,
updates: Dict[str, np.ndarray],
) -> None:
"""Apply incoming updates to the wrapped model's parameters.
Args:
updates: Model parameters' updates to add (apply) to existing
parameters' values.
"""
self._assert_dict_inputs(updates)
for key, val in updates.items():
weights = getattr(self.model, key)
setattr(self.model, key, weights + val)
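As an illustration, a minimal sketch of applying additive updates to a wrapped SGDClassifier, assuming (as the Usage example below suggests) that the SkLearnModel builder delegates these calls to the wrapped instance; the model_args keys (n_classes, n_features) follow the set_init_params implementations documented below:
```python
import numpy as np
from sklearn.linear_model import SGDClassifier

from fedbiomed.common.models import SkLearnModel

model = SkLearnModel(SGDClassifier)
# Zero-initialize 'coef_' and 'intercept_' so they can receive updates.
model.set_init_params({"n_classes": 2, "n_features": 4})

# Each update is added to the parameter of the same name: w <- w + update.
updates = {
    "coef_": np.full((1, 4), 0.1),
    "intercept_": np.array([0.05]),
}
model.apply_updates(updates)
```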
disable_internal_optimizer
disable_internal_optimizer()
Disable the scikit-learn internal optimizer.
Calling this method alters the wrapped model so that raw gradients are computed and attached to it (rather than relying on scikit-learn to apply a learning rate that may be scheduled to vary over time).
!!! warning "Call it only if using an external optimizer"
Source code in fedbiomed/common/models/_sklearn.py
def disable_internal_optimizer(self) -> None:
"""Disable the scikit-learn internal optimizer.
Calling this method alters the wrapped model so that raw gradients are
computed and attached to it (rather than relying on scikit-learn to
apply a learning rate that may be scheduled to vary over time).
!!! warning "Call it only if using an external optimizer"
"""
# Record initial params, then override optimizer ones.
self._optim_params = self.get_params()
self.set_params(**self._null_optim_params)
# Warn about overridden values.
changed_params: List[str] = []
for key, val in self._null_optim_params.items():
param = self._optim_params.get(key)
if param is not None and param != val:
changed_params.append(key)
if changed_params:
changed = ",\n\t".join(changed_params)
logger.warning(
"The following non-default model parameters were overridden "
f"due to the disabling of the scikit-learn internal optimizer:\n\t{changed}",
broadcast=True
)
enable_internal_optimizer
enable_internal_optimizer()
Enable the scikit-learn internal optimizer.
Calling this method restores any model parameter previously overridden due to calling the counterpart disable_internal_optimizer
method.
Source code in fedbiomed/common/models/_sklearn.py
def enable_internal_optimizer(self) -> None:
"""Enable the scikit-learn internal optimizer.
Calling this method restores any model parameter previously overridden
due to calling the counterpart `disable_internal_optimizer` method.
"""
if self._optim_params:
self.set_params(**self._optim_params)
logger.debug("Internal Optimizer restored")
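A sketch of the intended toggle pattern, assuming an SGD-based wrapper (these subclasses define the _null_optim_params that disable_internal_optimizer applies):
```python
from sklearn.linear_model import SGDClassifier

from fedbiomed.common.models import SkLearnModel

model = SkLearnModel(SGDClassifier)
model.set_init_params({"n_classes": 2, "n_features": 4})

# Force a constant, unit learning rate so `train` records raw gradients.
model.disable_internal_optimizer()
# ... run `model.train(...)` and feed `model.get_gradients()` to an
# external optimizer here ...
model.enable_internal_optimizer()  # restore the user-defined parameters
```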
export
export(filename)
Export the wrapped model to a dump file.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
filename | str | path to the file where the model will be saved. | required |
!!! info "Notes": This method is designed to save the model to a local dump file for easy re-use by the same user, possibly outside of Fed-BioMed. It is not designed to produce trustworthy data dumps and is not used to exchange models and their weights as part of the federated learning process.
!!! warning "Warning": This method uses joblib.dump
, which relies on pickle and is therefore hard to trust by third-party loading methods.
Source code in fedbiomed/common/models/_sklearn.py
def export(self, filename: str) -> None:
"""Export the wrapped model to a dump file.
Args:
filename: path to the file where the model will be saved.
!!! info "Notes":
This method is designed to save the model to a local dump
file for easy re-use by the same user, possibly outside of
Fed-BioMed. It is not designed to produce trustworthy data
dumps and is not used to exchange models and their weights
as part of the federated learning process.
!!! warning "Warning":
This method uses `joblib.dump`, which relies on pickle and
is therefore hard to trust by third-party loading methods.
"""
with open(filename, "wb") as file:
joblib.dump(self.model, file)
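A minimal usage sketch (the file name is arbitrary); reload, documented below, is the counterpart import:
```python
from sklearn.linear_model import SGDRegressor

from fedbiomed.common.models import SkLearnModel

model = SkLearnModel(SGDRegressor)
model.export("model.joblib")  # joblib/pickle dump, for local re-use only
model.reload("model.joblib")  # replaces the wrapped estimator in-place
```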
flatten
flatten(only_trainable=False, exclude_buffers=True)
Gets weights as a flattened vector.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
only_trainable | bool | Unused for scikit-learn models. (Whether to ignore non-trainable model parameters.) | False |
exclude_buffers | bool | Unused for scikit-learn models. (Whether to ignore buffers.) | True |
Returns:
Type | Description |
---|---|
List[float] | Model weights, flattened into a single list of float values. |
Source code in fedbiomed/common/models/_sklearn.py
def flatten(self,
only_trainable: bool = False,
exclude_buffers: bool = True) -> List[float]:
"""Gets weights as flatten vector
Args:
only_trainable: Unused for scikit-learn models. (Whether to ignore
non-trainable model parameters.)
exclude_buffers: Unused for scikit-learn models. (Whether to ignore
buffers.)
Returns:
Model weights, flattened into a single list of float values.
"""
weights = self.get_weights()
flatten = []
for _, w in weights.items():
w_: List[float] = list(w.flatten().astype(float))
flatten.extend(w_)
return flatten
get_gradients
get_gradients()
Return computed gradients attached to the model.
Raises:
Type | Description |
---|---|
FedbiomedModelError | If no gradients have been computed yet (i.e. the model has not been trained). |
Returns:
Type | Description |
---|---|
Dict[str, ndarray] | Gradients, as a dict mapping parameters' names to their gradient's numpy array. |
Source code in fedbiomed/common/models/_sklearn.py
def get_gradients(
self,
) -> Dict[str, np.ndarray]:
"""Return computed gradients attached to the model.
Raises:
FedbiomedModelError: If no gradients have been computed yet
(i.e. the model has not been trained).
Returns:
Gradients, as a dict mapping parameters' names to their
gradient's numpy array.
"""
if not self._gradients:
raise FedbiomedModelError(
f"{ErrorNumbers.FB622.value}. Cannot get gradients if the "
"model has not been trained beforehand."
)
gradients = self._gradients
return gradients
get_learning_rate abstractmethod
get_learning_rate()
Retrieves the learning rate of the model. The implementation depends on the attributes the wrapped model uses to store its learning rate.
Returns:
Type | Description |
---|---|
List[float] | Initial learning rate value(s); a single value if only one learning rate has been used, or a list of several learning rates, one for each layer of the model. |
Source code in fedbiomed/common/models/_sklearn.py
@abstractmethod
def get_learning_rate(self) -> List[float]:
"""Retrieves learning rate of the model. Method implementation will
depend on the attribute used to set up these arbitrary arguments
Returns:
Initial learning rate value(s); a single value if only on learning rate has been used, and
a list of several learning rates, one for each layer of the model.
"""
get_params
get_params(value=None)
Return the wrapped scikit-learn model's hyperparameters.
Please refer to the `get_params` method in the [BaseEstimator documentation](https://scikit-learn.org/stable/modules/generated/sklearn.base.BaseEstimator.html) for further details.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
value | Any | if specified, returns a specific hyperparameter, otherwise, returns a dictionary with all the hyperparameters. Defaults to None. | None |
Returns:
Type | Description |
---|---|
Dict[str, Any] | Dictionary mapping model hyperparameter names to their values |
Source code in fedbiomed/common/models/_sklearn.py
def get_params(self, value: Any = None) -> Dict[str, Any]:
"""Return the wrapped scikit-learn model's hyperparameters.
Please refer to the `get_params` method in the
[BaseEstimator documentation](https://scikit-learn.org/stable/modules/generated/sklearn.base.BaseEstimator.html)
for further details.
Args:
value: if specified, returns a specific hyperparameter, otherwise, returns a dictionary
with all the hyperparameters. Defaults to None.
Returns:
Dictionary mapping model hyperparameter names to their values
"""
if value is not None:
return self.model.get_params().get(value)
return self.model.get_params()
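For illustration (alpha and eta0 are standard SGDClassifier hyperparameters):
```python
from sklearn.linear_model import SGDClassifier

from fedbiomed.common.models import SkLearnModel

model = SkLearnModel(SGDClassifier)
print(model.get_params("alpha"))         # a single hyperparameter value
model.set_params(alpha=1e-4, eta0=0.05)  # forwarded to BaseEstimator.set_params
print(model.get_params()["eta0"])        # 0.05
```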
get_weights
get_weights(only_trainable=False, exclude_buffers=True)
Return a copy of the model's trainable weights.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
only_trainable | bool | Unused for scikit-learn models. (Whether to ignore non-trainable model parameters.) | False |
exclude_buffers | bool | Unused for scikit-learn models. (Whether to ignore buffers.) | True |
Raises:
Type | Description |
---|---|
FedbiomedModelError | If the model parameters are not initialized. |
Returns:
Type | Description |
---|---|
Dict[str, ndarray] | Model weights, as a dictionary mapping parameters' names to their numpy array, or as a declearn NumpyVector wrapping such a dict. |
Source code in fedbiomed/common/models/_sklearn.py
def get_weights(
self,
only_trainable: bool = False,
exclude_buffers: bool = True
) -> Dict[str, np.ndarray]:
"""Return a copy of the model's trainable weights.
Args:
only_trainable: Unused for scikit-learn models. (Whether to ignore
non-trainable model parameters.)
exclude_buffers: Unused for scikit-learn models. (Whether to ignore
buffers.)
Raises:
FedbiomedModelError: If the model parameters are not initialized.
Returns:
Model weights, as a dictionary mapping parameters' names to their
numpy array, or as a declearn NumpyVector wrapping such a dict.
"""
if not self.param_list:
raise FedbiomedModelError(
f"{ErrorNumbers.FB622.value}. Attribute `param_list` is empty. You should "
f"have initialized the model beforehand (try calling `set_init_params`)"
)
# Gather copies of the model weights.
weights = {} # type: Dict[str, np.ndarray]
try:
for key in self.param_list:
val = getattr(self.model, key)
if not isinstance(val, np.ndarray):
raise FedbiomedModelError(
f"{ErrorNumbers.FB622.value}: SklearnModel parameter is not a numpy array."
)
weights[key] = val.copy()
except AttributeError as err:
raise FedbiomedModelError(
f"{ErrorNumbers.FB622.value}. Unable to access weights of BaseEstimator "
f"model {self.model} (details {err})"
) from err
return weights
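A short sketch of the initialization requirement described above:
```python
from sklearn.linear_model import SGDClassifier

from fedbiomed.common.models import SkLearnModel

model = SkLearnModel(SGDClassifier)
# Calling get_weights here would raise FedbiomedModelError: param_list is
# only populated once the model has been initialized.
model.set_init_params({"n_classes": 2, "n_features": 4})
weights = model.get_weights()  # copies: {'intercept_': ..., 'coef_': ...}
```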
init_training
init_training()
Initialises the training by setting up attributes.
Raises:
Type | Description |
---|---|
FedbiomedModelError | raised if `param_list` has not been defined |
Source code in fedbiomed/common/models/_sklearn.py
def init_training(self):
"""Initialises the training by setting up attributes.
Raises:
FedbiomedModelError: raised if `param_list` has not been defined
"""
if not self.param_list:
raise FedbiomedModelError(
f"{ErrorNumbers.FB622.value}. Attribute `param_list` is empty. You should "
f"have initialized the model beforehand (try calling `set_init_params`)"
)
predict
predict(inputs)
Computes prediction given input data.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
inputs | ndarray | input data | required |
Returns:
Type | Description |
---|---|
ndarray | Model predictions |
Source code in fedbiomed/common/models/_sklearn.py
def predict(
self,
inputs: np.ndarray,
) -> np.ndarray:
"""Computes prediction given input data.
Args:
inputs: input data
Returns:
Model predictions
"""
return self.model.predict(inputs)
reload
reload(filename)
Import and replace the wrapped model from a dump file.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
filename | str | path to the file where the model has been exported. | required |
!!! info "Notes": This method is designed to load the model from a local dump file, that might not be in a trustworthy format. It should therefore only be used to re-load data exported locally and not received from someone else, including other FL peers.
Raises:
Type | Description |
---|---|
FedbiomedModelError | if the reloaded instance is of improper type. |
Source code in fedbiomed/common/models/_sklearn.py
def reload(self, filename: str) -> None:
"""Import and replace the wrapped model from a dump file.
Args:
filename: path to the file where the model has been exported.
!!! info "Notes":
This method is designed to load the model from a local dump
file, that might not be in a trustworthy format. It should
therefore only be used to re-load data exported locally and
not received from someone else, including other FL peers.
Raises:
FedbiomedModelError: if the reloaded instance is of improper type.
"""
model = self._reload(filename)
if not isinstance(model, self._model_type):
err_msg = (
f"{ErrorNumbers.FB622.value}: unproper type for imported model"
f": expected '{self._model_type}', but 'got {type(model)}'."
)
logger.critical(err_msg)
raise FedbiomedModelError(err_msg)
self.model = model
set_gradients
set_gradients(gradients)
Set the gradients attached to the model, replacing any previously recorded values.
Source code in fedbiomed/common/models/_sklearn.py
def set_gradients(self, gradients: Dict[str, np.ndarray]) -> None:
# TODO: either document or remove this (useless) method
self._gradients = gradients
set_init_params abstractmethod
set_init_params(model_args)
Zeroes the scikit-learn model parameters.
Should be used before any training, as it sets the scikit-learn model parameters and makes them accessible through attributes. Model parameter attribute names depend on the wrapped scikit-learn model.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
model_args | Dict | dictionary that contains specifications for setting initial model parameters | required
Source code in fedbiomed/common/models/_sklearn.py
@abstractmethod
def set_init_params(self, model_args: Dict) -> None:
"""Zeroes scikit learn model parameters.
Should be used before any training, as it sets the scikit learn model parameters
and makes them accessible through the use of attributes. Model parameter attribute names
will depend on the scikit learn model wrapped.
Args:
model_args: dictionary that contains specifications for setting initial model
"""
set_params
set_params(**params)
Assign some hyperparameters to the wrapped scikit-learn model.
Please refer to the `set_params` method in the [BaseEstimator documentation](https://scikit-learn.org/stable/modules/generated/sklearn.base.BaseEstimator.html) for further details.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
params | Any | new hyperparameters to assign to the model. | {} |
Returns:
Type | Description |
---|---|
Dict[str, Any] | Dictionary containing the new hyperparameter values. |
Source code in fedbiomed/common/models/_sklearn.py
def set_params(self, **params: Any) -> Dict[str, Any]:
"""Assign some hyperparameters to the wrapped scikit-learn model.
Please refer to the `set_params` method in the
[BaseEstimator documentation](https://scikit-learn.org/stable/modules/generated/sklearn.base.BaseEstimator.html)
for further details.
Args:
params: new hyperparameters to assign to the model.
Returns:
Dict[str, Any]: dictionary containing new hyperparameter values.
"""
self.model.set_params(**params)
return params
set_weights
set_weights(weights)
Assign new values to the model's trainable weights.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
weights | Dict[str, ndarray] | Model weights, as a dict mapping parameters' names to their numpy array. | required |
Source code in fedbiomed/common/models/_sklearn.py
def set_weights(
self,
weights: Dict[str, np.ndarray],
) -> None:
"""Assign new values to the model's trainable weights.
Args:
weights: Model weights, as a dict mapping parameters' names
to their numpy array.
"""
self._assert_dict_inputs(weights)
for key, val in weights.items():
setattr(self.model, key, val.copy())
train
train(inputs, targets, stdout=None, **kwargs)
Run a training step, and record associated gradients.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
inputs | ndarray | inputs data. | required |
targets | ndarray | targets, to be fit with inputs data. | required |
stdout | Optional[List[List[str]]] | list of console outputs that have been collected during training, which contain loss values. Used to plot model losses. Defaults to None. | None
Raises:
Type | Description |
---|---|
FedbiomedModelError | if training has not been initialized. |
Source code in fedbiomed/common/models/_sklearn.py
def train(
self,
inputs: np.ndarray,
targets: np.ndarray,
stdout: Optional[List[List[str]]] = None,
**kwargs,
) -> None:
"""Run a training step, and record associated gradients.
Args:
inputs: inputs data.
targets: targets, to be fit with inputs data.
stdout: list of console outputs that have been collected
during training, that contains losses values.
Used to plot model losses. Defaults to None.
Raises:
FedbiomedModelError: if training has not been initialized.
"""
batch_size = inputs.shape[0]
w_init = self.get_weights()
w_updt = {key: np.zeros_like(val) for key, val in w_init.items()}
# Iterate over the batch; accumulate sample-wise gradients (and loss).
for idx in range(batch_size):
# Compute updated weights based on the sample. Capture loss prints.
with capture_stdout() as console:
self.model.partial_fit(inputs[idx : idx + 1], targets[idx])
if stdout is not None:
stdout.append(console)
# Accumulate updated weights (weights + sum of gradients).
# Reset the model's weights and iteration counter.
for key in self.param_list:
w_updt[key] += getattr(self.model, key)
setattr(self.model, key, w_init[key].copy())
self.model.n_iter_ -= 1
# Compute the batch-averaged, learning-rate-scaled gradients.
# Note: w_init: {w_t}, w_updt: {w_t - eta_t * sum_{s=1}^B(grad_s)}
# hence eta_t * avg(grad_s) = w_init - (w_updt / B)
self._gradients = {
key: w_init[key] - (w_updt[key] / batch_size)
for key in self.param_list
}
# ------------------------------ WARNINGS ----------------------------------
#
# Warning 1: if `disable_internal_optimizer` has not been called before, gradients won't be scaled
# (you will get un-scaled gradients, that need to be scaled back by dividing gradients by the learning rate)
# here is a way to do so (with `lrate` as the learning rate):
# ```python
# for key, val in self._gradients.items():
# val /= lrate
# ```
# Warning 2: `_gradients` has different meanings, when using `disable_internal_optimizer`
# if it is not called (ie when using native sklearn optimizer), it is not plain gradients,
# but rather the quantity `lr * grads`
# Finally, increment the model's iteration counter.
self.model.n_iter_ += 1
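A sketch of the scaling caveat from Warning 1, assuming (as the SGD wrappers suggest) that get_learning_rate returns the model's eta0 value: with the internal optimizer left enabled, the recorded quantity is lr * grads and must be divided by the learning rate to recover raw gradients.
```python
import numpy as np
from sklearn.linear_model import SGDRegressor

from fedbiomed.common.models import SkLearnModel

model = SkLearnModel(SGDRegressor)
model.set_init_params({"n_features": 2})
model.init_training()

inputs = np.array([[0.0, 1.0], [1.0, 0.0]])
targets = np.array([[0.5], [1.5]])
model.train(inputs, targets)

# Internal optimizer still enabled: recorded gradients are scaled by the
# learning rate, so divide them back to obtain raw gradients.
lrate = model.get_learning_rate()[0]
raw_grads = {key: val / lrate for key, val in model.get_gradients().items()}
```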
unflatten
unflatten(weights_vector, only_trainable=False, exclude_buffers=True)
Unflatten vectorized model weights
Parameters:
Name | Type | Description | Default |
---|---|---|---|
weights_vector | List[float] | Vectorized model weights to convert into a dict | required
only_trainable | bool | Unused for scikit-learn models. (Whether to ignore non-trainable model parameters.) | False |
exclude_buffers | bool | Unused for scikit-learn models. (Whether to ignore buffers.) | True |
Returns:
Type | Description |
---|---|
Dict[str, ndarray] | Model dictionary |
Source code in fedbiomed/common/models/_sklearn.py
def unflatten(
self,
weights_vector: List[float],
only_trainable: bool = False,
exclude_buffers: bool = True
) -> Dict[str, np.ndarray]:
"""Unflatten vectorized model weights
Args:
weights_vector: Vectorized model weights to convert into a dict
only_trainable: Unused for scikit-learn models. (Whether to ignore
non-trainable model parameters.)
exclude_buffers: Unused for scikit-learn models. (Whether to ignore
buffers.)
Returns:
Model dictionary
"""
super().unflatten(weights_vector, only_trainable, exclude_buffers)
weights_vector = np.array(weights_vector)
weights = self.get_weights()
pointer = 0
params = {}
for key, w in weights.items():
num_param = w.size
params[key] = weights_vector[pointer: pointer + num_param].reshape(w.shape)
pointer += num_param
return params
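A round-trip sketch combining flatten and unflatten on an initialized wrapper:
```python
import numpy as np
from sklearn.linear_model import SGDClassifier

from fedbiomed.common.models import SkLearnModel

model = SkLearnModel(SGDClassifier)
model.set_init_params({"n_classes": 2, "n_features": 3})

flat = model.flatten()            # List[float], parameters concatenated
restored = model.unflatten(flat)  # back to {'intercept_': ..., 'coef_': ...}
assert all(
    np.array_equal(restored[key], value)
    for key, value in model.get_weights().items()
)
```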
MLPSklearnModel
MLPSklearnModel(model)
Bases: BaseSkLearnModel
BaseSkLearnModel abstract subclass for multi-layer perceptron models.
Source code in fedbiomed/common/models/_sklearn.py
def __init__(self, model: BaseEstimator) -> None:
self._null_optim_params: Dict[str, Any] = {
"learning_rate_init": 1.0,
"learning_rate": "constant",
}
super().__init__(model)
Attributes
model instance-attribute
model
Functions
get_learning_rate
get_learning_rate()
Source code in fedbiomed/common/models/_sklearn.py
def get_learning_rate(self) -> List[float]:
return [self.model.learning_rate_init]
Model
Model(model)
Bases: Generic[_MT, DT]
Model abstraction that wraps and handles native models written in frameworks supported by Fed-BioMed.
Attributes:
Name | Type | Description |
---|---|---|
model | Any | native model, written in a framework supported by Fed-BioMed. |
Parameters:
Name | Type | Description | Default |
---|---|---|---|
model | _MT | native model wrapped, of child-class-specific type. | required |
Source code in fedbiomed/common/models/_model.py
def __init__(self, model: _MT):
"""Constructor of Model abstract class
Args:
model: native model wrapped, of child-class-specific type.
"""
self._validate_model_type(model)
self.model: Any = model
Attributes
model instance-attribute
model = model
Functions
apply_updates abstractmethod
apply_updates(updates)
Apply incoming updates to the wrapped model's parameters.
export abstractmethod
export(filename)
Export the wrapped model to a dump file.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
filename | str | path to the file where the model will be saved. | required |
!!! info "Notes": This method is designed to save the model to a local dump file for easy re-use by the same user, possibly outside of Fed-BioMed. It is not designed to produce trustworthy data dumps and is not used to exchange models and their weights as part of the federated learning process.
Source code in fedbiomed/common/models/_model.py
@abstractmethod
def export(self, filename: str) -> None:
"""Export the wrapped model to a dump file.
Args:
filename: path to the file where the model will be saved.
!!! info "Notes":
This method is designed to save the model to a local dump
file for easy re-use by the same user, possibly outside of
Fed-BioMed. It is not designed to produce trustworthy data
dumps and is not used to exchange models and their weights
as part of the federated learning process.
"""
flatten abstractmethod
flatten(only_trainable=False, exclude_buffers=True)
Flattens model weights
Parameters:
Name | Type | Description | Default |
---|---|---|---|
only_trainable | bool | Whether to ignore non-trainable model parameters from outputs (e.g. frozen neural network layers' parameters), or include all model parameters (the default). | False |
exclude_buffers | bool | Whether to ignore buffers (the default), or include them. | True |
Returns:
Type | Description |
---|---|
List[float] | List of model weights as float. |
Source code in fedbiomed/common/models/_model.py
@abstractmethod
def flatten(self,
only_trainable: bool = False,
exclude_buffers: bool = True) -> List[float]:
"""Flattens model weights
Args:
only_trainable: Whether to ignore non-trainable model parameters
from outputs (e.g. frozen neural network layers' parameters),
or include all model parameters (the default).
exclude_buffers: Whether to ignore buffers (the default), or
include them.
Returns:
List of model weights as float.
"""
get_gradients abstractmethod
get_gradients()
Return computed gradients attached to the model.
Returns:
Type | Description |
---|---|
Dict[str, DT] | Gradients, as a dict mapping parameters' names to their gradient's value. |
Source code in fedbiomed/common/models/_model.py
@abstractmethod
def get_gradients(self) -> Dict[str, DT]:
"""Return computed gradients attached to the model.
Returns:
Gradients, as a dict mapping parameters' names to their
gradient's value.
"""
get_weights abstractmethod
get_weights(only_trainable=False, exclude_buffers=True)
Return a copy of the model's trainable weights.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
only_trainable | bool | Whether to ignore non-trainable model parameters from outputs (e.g. frozen neural network layers' parameters), or include all model parameters (the default). | False |
exclude_buffers | bool | Whether to ignore buffers (the default), or include them. | True |
Returns:
Type | Description |
---|---|
Dict[str, DT] | Model weights, as a dict mapping parameters' names to their value. |
Source code in fedbiomed/common/models/_model.py
@abstractmethod
def get_weights(self, only_trainable: bool = False, exclude_buffers: bool = True) -> Dict[str, DT]:
"""Return a copy of the model's trainable weights.
Args:
only_trainable: Whether to ignore non-trainable model parameters
from outputs (e.g. frozen neural network layers' parameters),
or include all model parameters (the default).
exclude_buffers: Whether to ignore buffers (the default), or
include them.
Returns:
Model weights, as a dict mapping parameters' names to their value.
"""
init_training abstractmethod
init_training()
Initialize parameters before model training.
Source code in fedbiomed/common/models/_model.py
@abstractmethod
def init_training(self):
"""Initialize parameters before model training."""
predict abstractmethod
predict(inputs)
Return model predictions given input values.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
inputs | Any | input values. | required |
Returns:
Name | Type | Description |
---|---|---|
Any | Any | predictions. |
Source code in fedbiomed/common/models/_model.py
@abstractmethod
def predict(self, inputs: Any) -> Any:
"""Return model predictions given input values.
Args:
inputs: input values.
Returns:
Any: predictions.
"""
reload abstractmethod
reload(filename)
Import and replace the wrapped model from a dump file.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
filename | str | path to the file where the model has been exported. | required |
!!! info "Notes": This method is designed to load the model from a local dump file, that might not be in a trustworthy format. It should therefore only be used to re-load data exported locally and not received from someone else, including other FL peers.
Raises:
Type | Description |
---|---|
FedbiomedModelError | if the reloaded instance is of improper type. |
Source code in fedbiomed/common/models/_model.py
@abstractmethod
def reload(self, filename: str) -> None:
"""Import and replace the wrapped model from a dump file.
Args:
filename: path to the file where the model has been exported.
!!! info "Notes":
This method is designed to load the model from a local dump
file, that might not be in a trustworthy format. It should
therefore only be used to re-load data exported locally and
not received from someone else, including other FL peers.
Raises:
FedbiomedModelError: if the reloaded instance is of improper type.
"""
set_model
set_model(model)
Replace the wrapped model with a new one.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
model | _MT | New model instance that needs assignment as the `model` attribute. | required
Source code in fedbiomed/common/models/_model.py
def set_model(self, model: _MT) -> None:
"""Replace the wrapped model with a new one.
Args:
model: New model instance that needs assignment as the `model`
attribute.
"""
self._validate_model_type(model)
self.model = model
set_weights abstractmethod
set_weights(weights)
Assign new values to the model's trainable weights.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
weights | Dict[str, DT] | Model weights, as a dict mapping parameters' names to their value. | required |
Source code in fedbiomed/common/models/_model.py
@abstractmethod
def set_weights(self, weights: Dict[str, DT]) -> None:
"""Assign new values to the model's trainable weights.
Args:
weights: Model weights, as a dict mapping parameters' names
to their value.
"""
train abstractmethod
train(inputs, targets, **kwargs)
Perform a training step given inputs and targets data.
Warning
Please run the init_training method before running the train method, so as to initialize the parameters needed for model training.
Warning
This function usually does not update weights. You need to call apply_updates
to ensure updates are applied to the model.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
inputs | Any | input (training) data. | required |
targets | Any | target values. | required |
Source code in fedbiomed/common/models/_model.py
@abstractmethod
def train(self, inputs: Any, targets: Any, **kwargs) -> None:
"""Perform a training step given inputs and targets data.
!!! warning "Warning"
Please run the `init_training` method before running the `train` method,
so as to initialize the parameters needed for model training.
!!! warning "Warning"
This function usually does not update weights. You need to call
`apply_updates` to ensure updates are applied to the model.
Args:
inputs: input (training) data.
targets: target values.
"""
unflatten abstractmethod
unflatten(weights_vector, only_trainable=False, exclude_buffers=True)
Revert flattened model weights back to model-dict form.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
weights_vector | List[float] | Vectorized model weights to convert into a dict | required
only_trainable | bool | Whether to ignore non-trainable model parameters from outputs (e.g. frozen neural network layers' parameters), or include all model parameters (the default). | False |
exclude_buffers | bool | Whether to ignore buffers (the default), or include them. | True |
Returns:
Type | Description |
---|---|
None | Model dictionary (the base implementation only validates the input; subclasses return the unflattened weights). |
Source code in fedbiomed/common/models/_model.py
@abstractmethod
def unflatten(
self,
weights_vector: List[float],
only_trainable: bool = False,
exclude_buffers: bool = True
) -> None:
"""Revert flatten model weights back model-dict form.
Args:
weights_vector: Vectorized model weights to convert dict
only_trainable: Whether to ignore non-trainable model parameters
from outputs (e.g. frozen neural network layers' parameters),
or include all model parameters (the default).
exclude_buffers: Whether to ignore buffers (the default), or
include them.
Returns:
Model dictionary
"""
if not isinstance(weights_vector, list) or not all(isinstance(w, float) for w in weights_vector):
raise FedbiomedModelError(
f"{ErrorNumbers.FB622} `weights_vector` should be a 1D list of float containing flattened model parameters"
)
SGDClassifierSKLearnModel
SGDClassifierSKLearnModel(model)
Bases: SGDSkLearnModel
BaseSkLearnModel subclass for SGDClassifier models.
Source code in fedbiomed/common/models/_sklearn.py
def __init__(self, model: BaseEstimator) -> None:
super().__init__(model)
self._null_optim_params: Dict[str, Any] = {
'eta0': 1.0,
'learning_rate': "constant",
}
Attributes
is_classification class-attribute
instance-attribute
is_classification = True
model instance-attribute
model
Functions
set_init_params
set_init_params(model_args)
Initialize the model's trainable parameters.
Source code in fedbiomed/common/models/_sklearn.py
def set_init_params(self, model_args: Dict[str, Any]) -> None:
"""Initialize the model's trainable parameters."""
# Set up zero-valued start weights, for binary or multiclass classification.
n_classes = model_args["n_classes"]
if n_classes == 2:
init_params = {
"intercept_": np.zeros((1,)),
"coef_": np.zeros((1, model_args["n_features"])),
}
else:
init_params = {
"intercept_": np.zeros((n_classes,)),
"coef_": np.zeros((n_classes, model_args["n_features"])),
}
# Assign these initialization parameters and retain their names.
self.param_list = list(init_params)
for key, val in init_params.items():
setattr(self.model, key, val)
# Also initialize the "classes_" slot with unique predictable labels.
# FIXME: this assumes target values are integers in range(n_classes).
setattr(self.model, "classes_", np.arange(n_classes))
SGDRegressorSKLearnModel
SGDRegressorSKLearnModel(model)
Bases: SGDSkLearnModel
BaseSkLearnModel subclass for SGDRegressor models.
Source code in fedbiomed/common/models/_sklearn.py
def __init__(self, model: BaseEstimator) -> None:
super().__init__(model)
self._null_optim_params: Dict[str, Any] = {
'eta0': 1.0,
'learning_rate': "constant",
}
Attributes
is_classification class-attribute
instance-attribute
is_classification = False
model instance-attribute
model
Functions
set_init_params
set_init_params(model_args)
Initialize the model's trainable parameters.
Source code in fedbiomed/common/models/_sklearn.py
def set_init_params(self, model_args: Dict[str, Any]):
"""Initialize the model's trainable parameters."""
init_params = {
"intercept_": np.array([0.0]),
"coef_": np.array([0.0] * model_args["n_features"]),
}
self.param_list = list(init_params)
for key, val in init_params.items():
setattr(self.model, key, val)
SkLearnModel
SkLearnModel(model)
Sklearn model builder.
It wraps one of the Fed-BioMed BaseSkLearnModel
children, built by passing a non-initialized [BaseEstimator](https://scikit-learn.org/stable/modules/generated/sklearn.base.BaseEstimator.html) class to the constructor, as shown below.
Usage
from sklearn.linear_model import SGDClassifier
model = SkLearnModel(SGDClassifier)
model.set_weights(some_weights)
type(model.model)
# Output: <class 'sklearn.linear_model._stochastic_gradient.SGDClassifier'>
Attributes:
Name | Type | Description |
---|---|---|
_instance | BaseSkLearnModel | instance of BaseSkLearnModel |
Parameters:
Name | Type | Description | Default |
---|---|---|---|
model | Type[BaseEstimator] | non-initialized BaseEstimator class | required
Raises:
Type | Description |
---|---|
FedbiomedModelError | raised if model does not belong to the implemented models. |
FedbiomedModelError | raised if the `__name__` attribute does not belong to the object, e.g. when passing an instantiated object instead of the class. |
Source code in fedbiomed/common/models/_sklearn.py
def __init__(self, model: Type[BaseEstimator]):
"""Constructor of the model builder.
Args:
model: non-initialized [BaseEstimator][sklearn.base.BaseEstimator] object
Raises:
FedbiomedModelError: raised if model does not belong to the implemented models.
FedbiomedModelError: raised if `__name__` attribute does not belong to object. This may happen
when passing an instantiated object instead of the class object (e.g. instance of
SGDClassifier() instead of SGDClassifier object)
"""
if not isinstance(model, type):
raise FedbiomedModelError(
f"{ErrorNumbers.FB622.value}: 'SkLearnModel' received a '{type(model)}' instance as 'model' "
"input while it was expecting a scikit-learn BaseEstimator subclass constructor."
)
if not issubclass(model, BaseEstimator):
raise FedbiomedModelError(
f"{ErrorNumbers.FB622.value}: 'SkLearnModel' received a 'model' class that is not "
f"a scikit-learn BaseEstimator subclass: '{model}'."
)
if model.__name__ not in SKLEARN_MODELS:
raise FedbiomedModelError(
f"{ErrorNumbers.FB622.value}: 'SkLearnModel' received '{model}' as 'model' class, "
f"support for which has not yet been implemented in Fed-BioMed."
)
self._instance: BaseSkLearnModel = SKLEARN_MODELS[model.__name__](model())
TorchModel
TorchModel(model)
Bases: Model
PyTorch model wrapper that eases the handling of a PyTorch model.
Attributes:
Name | Type | Description |
---|---|---|
model | Module | torch.nn.Module. Pytorch model wrapped. |
init_params | Dict[str, Tensor] | OrderedDict. Model initial parameters. Set when calling `init_training`. |
Source code in fedbiomed/common/models/_torch.py
def __init__(self, model: torch.nn.Module) -> None:
"""Instantiates the wrapper over a torch Module instance."""
super().__init__(model)
self.init_params: Dict[str, torch.Tensor] = {}
Attributes
init_params instance-attribute
init_params = {}
model instance-attribute
model
Functions
add_corrections_to_gradients
add_corrections_to_gradients(corrections)
Add values to the gradients currently attached to the model.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
corrections | Dict[str, Tensor] | corrections to be added to the model's gradients. | required |
Source code in fedbiomed/common/models/_torch.py
def add_corrections_to_gradients(
self,
corrections: Dict[str, torch.Tensor],
) -> None:
"""Add values to the gradients currently attached to the model.
Args:
corrections: corrections to be added to the model's gradients.
"""
self._assert_dict_inputs(corrections)
for name, update in corrections.items():
param = self.model.get_parameter(name)
if param.grad is not None:
param.grad.add_(update.to(param.grad.device))
apply_updates
apply_updates(updates)
Apply incoming updates to the wrapped model's parameters.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
updates | Dict[str, Tensor] | model updates to be added to the model. | required |
Source code in fedbiomed/common/models/_torch.py
def apply_updates(
self,
updates: Dict[str, torch.Tensor],
) -> None:
"""Apply incoming updates to the wrapped model's parameters.
Args:
updates: model updates to be added to the model.
"""
self._assert_dict_inputs(updates)
with torch.no_grad():
for name, update in updates.items():
param = self.model.get_parameter(name)
param.add_(update.to(param.device))
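A minimal sketch; update keys must match the names yielded by named_parameters():
```python
import torch

from fedbiomed.common.models import TorchModel

model = TorchModel(torch.nn.Linear(4, 2))
updates = {
    "weight": 0.1 * torch.ones(2, 4),
    "bias": torch.zeros(2),
}
model.apply_updates(updates)  # in-place addition: param <- param + update
```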
export
export(filename)
Export the wrapped model to a dump file.
For PyTorch, only the model weights are exported.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
filename | str | path to the file where the model will be saved. | required |
!!! info "Notes": This method is designed to save the model to a local dump file for easy re-use by the same user, possibly outside of Fed-BioMed. It is not designed to produce trustworthy data dumps and is not used to exchange models and their weights as part of the federated learning process.
!!! warning "Warning": This method uses torch.save
, which relies on pickle and is therefore hard to trust by third-party loading methods.
Source code in fedbiomed/common/models/_torch.py
def export(self, filename: str) -> None:
"""Export the wrapped model to a dump file.
For PyTorch, only the model weights are exported.
Args:
filename: path to the file where the model will be saved.
!!! info "Notes":
This method is designed to save the model to a local dump
file for easy re-use by the same user, possibly outside of
Fed-BioMed. It is not designed to produce trustworthy data
dumps and is not used to exchange models and their weights
as part of the federated learning process.
!!! warning "Warning":
This method uses `torch.save`, which relies on pickle and
is therefore hard to trust by third-party loading methods.
"""
torch.save(self.model.state_dict(), filename)
flatten
flatten(only_trainable=False, exclude_buffers=True)
Gets weights as a flattened vector.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
only_trainable | bool | Whether to ignore non-trainable model parameters from outputs (e.g. frozen neural network layers' parameters), or include all model parameters (the default). | False |
exclude_buffers | bool | Whether to ignore buffers (the default), or include them. | True |
Returns:
Type | Description |
---|---|
List[float] | Model weights, flattened into a single list of float values. |
Source code in fedbiomed/common/models/_torch.py
def flatten(self,
only_trainable: bool = False,
exclude_buffers: bool = True) -> List[float]:
"""Gets weights as flatten vector
Args:
only_trainable: Whether to ignore non-trainable model parameters
from outputs (e.g. frozen neural network layers' parameters),
or include all model parameters (the default).
exclude_buffers: Whether to ignore buffers (the default), or
include them.
Returns:
Model weights, flattened into a single list of float values.
"""
params: List[float] = torch.nn.utils.parameters_to_vector(
self.get_weights(only_trainable=only_trainable, exclude_buffers=exclude_buffers).values()
).tolist()
return params
get_gradients
get_gradients()
Return the gradients attached to the model.
Returns:
Type | Description |
---|---|
Dict[str, Tensor] | Gradients, as a dict mapping parameters' names to their gradient's torch tensor. |
Source code in fedbiomed/common/models/_torch.py
def get_gradients(
self,
) -> Dict[str, torch.Tensor]:
"""Return the gradients attached to the model.
Returns:
Gradients, as a dict mapping parameters' names to their gradient's
torch tensor.
"""
gradients = {
name: param.grad.detach().clone()
for name, param in self.model.named_parameters()
if (param.requires_grad and param.grad is not None)
}
if len(gradients) < len(list(self.model.named_parameters())):
# FIXME: this will be triggered when having some frozen weights
# even if training was properly conducted
logger.warning(
"Warning: can not retrieve all gradients from the model. "
"Are you sure you have trained the model beforehand?"
)
return gradients
get_weights
get_weights(only_trainable=False, exclude_buffers=True)
Return the model's parameters.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
only_trainable | bool | Whether to ignore non-trainable model parameters from outputs (e.g. frozen neural network layers' parameters), or include all model parameters (the default). | False |
exclude_buffers | bool | Whether to ignore buffers (the default), or include them. | True |
Returns:
Type | Description |
---|---|
Dict[str, Tensor] | Model weights, as a dictionary mapping parameters' names to their torch tensor. |
Source code in fedbiomed/common/models/_torch.py
def get_weights(
self,
only_trainable: bool = False,
exclude_buffers: bool = True
) -> Dict[str, torch.Tensor]:
"""Return the model's parameters.
Args:
only_trainable: Whether to ignore non-trainable model parameters
from outputs (e.g. frozen neural network layers' parameters),
or include all model parameters (the default).
exclude_buffers: Whether to ignore buffers (the default), or
include them.
Returns:
Model weights, as a dictionary mapping parameters' names to their
torch tensor.
"""
param_iterator = self.model.named_parameters() if exclude_buffers else self.model.state_dict().items()
parameters = {
name: param.detach().clone()
for name, param in param_iterator
if param.requires_grad or not only_trainable
}
return parameters
init_training
init_training()
Initializes and sets attributes before the training.
Initializes init_params
as a copy of the initial parameters of the model
Source code in fedbiomed/common/models/_torch.py
def init_training(self) -> None:
"""Initializes and sets attributes before the training.
Initializes `init_params` as a copy of the initial parameters of the model
"""
# initial aggregated model parameters
self.init_params = {
key: param.data.detach().clone()
for key, param in self.model.named_parameters()
}
self.model.train() # pytorch switch for training
self.model.zero_grad()
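A sketch of the intended workflow: TorchModel itself does not backpropagate (see train below), so the loss and backward pass come from the surrounding training code. The loss function here is an arbitrary example:
```python
import torch

from fedbiomed.common.models import TorchModel

model = TorchModel(torch.nn.Linear(4, 1))
model.init_training()  # snapshot init_params, set train mode, zero grads

inputs, targets = torch.randn(8, 4), torch.randn(8, 1)
loss = torch.nn.functional.mse_loss(model.model(inputs), targets)
loss.backward()

grads = model.get_gradients()  # {'weight': Tensor, 'bias': Tensor}
```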
predict
predict(inputs)
Computes prediction given input data.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
inputs | Tensor | input data | required |
Returns:
Type | Description |
---|---|
ndarray | Model predictions returned as a numpy array |
Source code in fedbiomed/common/models/_torch.py
def predict(
self,
inputs: torch.Tensor,
) -> np.ndarray:
"""Computes prediction given input data.
Args:
inputs: input data
Returns:
Model predictions returned as a numpy array
"""
self.model.eval() # pytorch switch for model inference-mode
with torch.no_grad():
pred = self.model(inputs)
return pred.cpu().numpy()
reload
reload(filename)
Import and replace the wrapped model from a dump file.
For PyTorch, only import the model weights.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
filename | str | path to the file where the model has been exported. | required |
!!! info "Notes": This method is designed to load the model from a local dump file, that might not be in a trustworthy format. It should therefore only be used to re-load data exported locally and not received from someone else, including other FL peers.
Raises:
Type | Description |
---|---|
FedbiomedModelError | if the reloaded instance is of improper type. |
Source code in fedbiomed/common/models/_torch.py
def reload(self, filename: str) -> None:
"""Import and replace the wrapped model from a dump file.
For PyTorch, only import the model weights.
Args:
filename: path to the file where the model has been exported.
!!! info "Notes":
This method is designed to load the model from a local dump
file, that might not be in a trustworthy format. It should
therefore only be used to re-load data exported locally and
not received from someone else, including other FL peers.
Raises:
FedbiomedModelError: if the reloaded instance is of improper type.
"""
weights = torch.load(filename)
# check format of weights and apply them to the model
self.set_weights(weights)
send_to_device
send_to_device(device)
Sends model to device
Parameters:
Name | Type | Description | Default |
---|---|---|---|
device | device | device set for using GPU or CPU. | required |
Source code in fedbiomed/common/models/_torch.py
def send_to_device(
self,
device: torch.device,
) -> None:
"""Sends model to device
Args:
device: device set for using GPU or CPU.
"""
self.model.to(device)
set_weights
set_weights(weights)
Sets model weights.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
weights | Dict[str, Tensor] | Model weights, as a dict mapping parameters' names to their torch tensor. | required |
Source code in fedbiomed/common/models/_torch.py
def set_weights(
self,
weights: Dict[str, torch.Tensor],
) -> None:
"""Sets model weights.
Args:
weights: Model weights, as a dict mapping parameters' names
to their torch tensor.
"""
self._assert_dict_inputs(weights)
incompatible = self.model.load_state_dict(weights, strict=False)
# Warn about (probably-)missing trainable weights.
# Note: state_dict may include values that do not belong to the model's
# parameters, and/or input weights may exclude non-trainable weights,
# without requiring a warning.
if incompatible.missing_keys:
params = {key for key, prm in self.model.named_parameters() if prm.requires_grad}
missing = params.intersection(incompatible.missing_keys)
if missing:
logger.warning(
"'TorchModel.set_weights' received inputs that did not cover all"
"trainable model parameters; missing weights: %s",
missing
)
# Warn about invalid (hence, unused) inputs.
if incompatible.unexpected_keys:
logger.warning(
"'TorchModel.set_weights' received inputs with unexpected names: %s",
incompatible.unexpected_keys
)
train
train(inputs, targets, **kwargs)
Source code in fedbiomed/common/models/_torch.py
def train(
self,
inputs: torch.Tensor,
targets: torch.Tensor,
**kwargs,
) -> None:
# TODO: should we pass loss function here? and do the backward prop?
if not self.init_params:
raise FedbiomedModelError(
f"{ErrorNumbers.FB622.value}. Training has not been initialized, please initialize it beforehand"
)
unflatten
unflatten(weights_vector, only_trainable=False, exclude_buffers=True)
Unflatten vectorized model weights using vector_to_parameters
This method does not manipulate the current model weights nor modify the model parameters.
Parameters:
Name | Type | Description | Default |
---|---|---|---|
weights_vector | List[float] | Vectorized model weights to convert into a dict | required
only_trainable | bool | Whether to ignore non-trainable model parameters from outputs (e.g. frozen neural network layers' parameters), or include all model parameters (the default). | False |
exclude_buffers | bool | Whether to ignore buffers (the default), or include them. | True |
Returns:
Type | Description |
---|---|
Dict[str, Tensor] | Model dictionary |
Source code in fedbiomed/common/models/_torch.py
def unflatten(
self,
weights_vector: List[float],
only_trainable: bool = False,
exclude_buffers: bool = True
) -> Dict[str, torch.Tensor]:
"""Unflatten vectorized model weights using [`vector_to_parameters`][torch.nn.utils.vector_to_parameters]
This method does not manipulate the current model weights nor modify the model parameters.
Args:
weights_vector: Vectorized model weights to convert into a dict
only_trainable: Whether to ignore non-trainable model parameters
from outputs (e.g. frozen neural network layers' parameters),
or include all model parameters (the default).
exclude_buffers: Whether to ignore buffers (the default), or
include them.
Returns:
Model dictionary
"""
super().unflatten(weights_vector, only_trainable, exclude_buffers)
# Copy model to make sure global model parameters won't be overwritten
model = copy.deepcopy(self)
vector = torch.as_tensor(weights_vector).type(torch.DoubleTensor)
weights = model.get_weights(only_trainable=only_trainable, exclude_buffers=exclude_buffers)
# Following operation updates model parameters of copied model object
try:
torch.nn.utils.vector_to_parameters(vector, weights.values())
except TypeError as e:
raise FedbiomedModelError(
f"{ErrorNumbers.FB622.value} Can not unflatten model parameters. {e}"
) from e
return weights
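A round-trip sketch; note that unflatten works on a deep copy, so the wrapped model's own parameters are left untouched:
```python
import torch

from fedbiomed.common.models import TorchModel

model = TorchModel(torch.nn.Linear(3, 2))
flat = model.flatten()           # List[float]
weights = model.unflatten(flat)  # {'weight': Tensor, 'bias': Tensor}
# unflatten returns double-precision tensors, hence the cast before comparing.
assert torch.allclose(weights["bias"].float(), model.get_weights()["bias"])
```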