DecisionTreeRegressor

Bases: Regressor, _DecisionTreeBase

Decision tree regression.

Parameters:

max_depth: int | None | Choice[int | None] (default: None)
    The maximum depth of the tree. If None, the depth is not limited. Has to be greater than 0.

min_sample_count_in_leaves: int | Choice[int] (default: 5)
    The minimum number of samples that must remain in the leaves of the tree. Has to be greater than 0.

Raises:

OutOfBoundsError
    If max_depth is less than 1.
OutOfBoundsError
    If min_sample_count_in_leaves is less than 1.
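
Example (an illustrative construction sketch; the class import path follows the source location below, while the import path of Choice and its variadic constructor are assumptions):

from safeds.ml.classical.regression import DecisionTreeRegressor
from safeds.ml.hyperparameters import Choice  # assumed import path

# Fixed hyperparameters: this model can be fitted with fit().
model = DecisionTreeRegressor(max_depth=5, min_sample_count_in_leaves=10)

# Hyperparameter choices: this model can only be fitted with fit_by_exhaustive_search().
searchable_model = DecisionTreeRegressor(
    max_depth=Choice(3, 5, None),  # Choice is assumed to take its candidate values as positional arguments
    min_sample_count_in_leaves=Choice(1, 5, 20),
)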

Source code in src/safeds/ml/classical/regression/_decision_tree_regressor.py
class DecisionTreeRegressor(Regressor, _DecisionTreeBase):
    """
    Decision tree regression.

    Parameters
    ----------
    max_depth:
        The maximum depth of the tree. If None, the depth is not limited. Has to be greater than 0.
    min_sample_count_in_leaves:
        The minimum number of samples that must remain in the leaves of the tree. Has to be greater than 0.

    Raises
    ------
    OutOfBoundsError
        If `max_depth` is less than 1.
    OutOfBoundsError
        If `min_sample_count_in_leaves` is less than 1.
    """

    # ------------------------------------------------------------------------------------------------------------------
    # Dunder methods
    # ------------------------------------------------------------------------------------------------------------------

    def __init__(
        self,
        *,
        max_depth: int | None | Choice[int | None] = None,
        min_sample_count_in_leaves: int | Choice[int] = 5,
    ) -> None:
        # Initialize superclasses
        Regressor.__init__(self)
        _DecisionTreeBase.__init__(
            self,
            max_depth=max_depth,
            min_sample_count_in_leaves=min_sample_count_in_leaves,
        )

    def __hash__(self) -> int:
        return _structural_hash(
            Regressor.__hash__(self),
            _DecisionTreeBase.__hash__(self),
        )

    # ------------------------------------------------------------------------------------------------------------------
    # Template methods
    # ------------------------------------------------------------------------------------------------------------------

    def _clone(self) -> DecisionTreeRegressor:
        return DecisionTreeRegressor(
            max_depth=self._max_depth,
            min_sample_count_in_leaves=self._min_sample_count_in_leaves,
        )

    def _get_sklearn_model(self) -> RegressorMixin:
        from sklearn.tree import DecisionTreeRegressor as SklearnDecisionTreeRegressor

        return SklearnDecisionTreeRegressor(
            max_depth=self._max_depth,
            min_samples_leaf=self._min_sample_count_in_leaves,
        )

    def _check_additional_fit_preconditions(self) -> None:
        if isinstance(self._max_depth, Choice) or isinstance(self._min_sample_count_in_leaves, Choice):
            raise FittingWithChoiceError

    def _check_additional_fit_by_exhaustive_search_preconditions(self) -> None:
        if not isinstance(self._max_depth, Choice) and not isinstance(self._min_sample_count_in_leaves, Choice):
            raise FittingWithoutChoiceError

    def _get_models_for_all_choices(self) -> list[DecisionTreeRegressor]:
        max_depth_choices = self._max_depth if isinstance(self._max_depth, Choice) else [self._max_depth]
        min_sample_count_choices = (
            self._min_sample_count_in_leaves
            if isinstance(self._min_sample_count_in_leaves, Choice)
            else [self._min_sample_count_in_leaves]
        )

        models = []
        for md in max_depth_choices:
            for msc in min_sample_count_choices:
                models.append(DecisionTreeRegressor(max_depth=md, min_sample_count_in_leaves=msc))
        return models

    # ------------------------------------------------------------------------------------------------------------------
    # Plot
    # ------------------------------------------------------------------------------------------------------------------

    def plot(self) -> Image:
        """
        Get the image of the decision tree.

        Returns
        -------
        plot:
            The decision tree figure as an image.

        Raises
        ------
        ModelNotFittedError
            If the model is not fitted.
        """
        if not self.is_fitted:
            raise ModelNotFittedError

        from io import BytesIO

        import matplotlib.pyplot as plt
        from sklearn.tree import plot_tree

        plot_tree(self._wrapped_model)

        # Save the figure to an in-memory buffer
        with BytesIO() as buffer:
            plt.savefig(buffer)
            image = buffer.getvalue()

        # Close the figure so matplotlib does not display it
        plt.close()

        return Image.from_bytes(image)

is_fitted: bool

Whether the model is fitted.

max_depth: int | None | Choice[int | None]

The maximum depth of the tree.

min_sample_count_in_leaves: int | Choice[int]

The minimum number of samples that must remain in the leaves of the tree.

coefficient_of_determination

Compute the coefficient of determination (R²) of the regressor on the given data.

The coefficient of determination compares the regressor's predictions to another model that always predicts the mean of the target values. It is a measure of how well the regressor explains the variance in the target values.

The higher the coefficient of determination, the better the regressor. Results range from negative infinity to 1.0. You can interpret the coefficient of determination as follows:

| R²         | Interpretation                                                                              |
| ---------- | -------------------------------------------------------------------------------------------- |
| 1.0        | The model perfectly predicts the target values. Did you overfit?                             |
| (0.0, 1.0) | The model is better than predicting the mean of the target values. You should be here.       |
| 0.0        | The model is as good as predicting the mean of the target values. Try something else.        |
| (-∞, 0.0)  | The model is worse than predicting the mean of the target values. Something is very wrong.   |

Notes:

  • The model must be fitted.
  • Some other libraries call this metric r2_score.

Parameters:

validation_or_test_set: Table | TabularDataset (required)
    The validation or test set.

Returns:

coefficient_of_determination: float
    The coefficient of determination of the regressor.

Raises:

ModelNotFittedError
    If the regressor has not been fitted yet.

Source code in src/safeds/ml/classical/regression/_regressor.py
def coefficient_of_determination(self, validation_or_test_set: Table | TabularDataset) -> float:
    """
    Compute the coefficient of determination (R²) of the regressor on the given data.

    The coefficient of determination compares the regressor's predictions to another model that always predicts the
    mean of the target values. It is a measure of how well the regressor explains the variance in the target values.

    The **higher** the coefficient of determination, the better the regressor. Results range from negative infinity
    to 1.0. You can interpret the coefficient of determination as follows:

    | R²         | Interpretation                                                                             |
    | ---------- | ------------------------------------------------------------------------------------------ |
    | 1.0        | The model perfectly predicts the target values. Did you overfit?                           |
    | (0.0, 1.0) | The model is better than predicting the mean of the target values. You should be here.     |
    | 0.0        | The model is as good as predicting the mean of the target values. Try something else.      |
    | (-∞, 0.0)  | The model is worse than predicting the mean of the target values. Something is very wrong. |

    **Notes:**

    - The model must be fitted.
    - Some other libraries call this metric `r2_score`.

    Parameters
    ----------
    validation_or_test_set:
        The validation or test set.

    Returns
    -------
    coefficient_of_determination:
        The coefficient of determination of the regressor.

    Raises
    ------
    ModelNotFittedError
        If the regressor has not been fitted yet.
    """
    if not self.is_fitted:
        raise ModelNotFittedError

    validation_or_test_set = _extract_table(validation_or_test_set)

    return RegressionMetrics.coefficient_of_determination(
        self.predict(validation_or_test_set),
        validation_or_test_set.get_column(self.get_target_name()),
    )
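
Example (an illustrative usage sketch; the data is made up, and Table is assumed to accept a mapping of column names to values):

from safeds.data.tabular.containers import Table
from safeds.ml.classical.regression import DecisionTreeRegressor

full_data = Table({"x": [1, 2, 3, 4, 5, 6, 7, 8], "y": [2, 4, 6, 8, 10, 12, 14, 16]})
train_table, test_table = full_data.split_rows(0.75)

fitted = DecisionTreeRegressor().fit(train_table.to_tabular_dataset(target_name="y"))
r_squared = fitted.coefficient_of_determination(test_table)  # accepts a Table or a TabularDataset
print(r_squared)  # 1.0 is perfect; values below 0.0 are worse than predicting the mean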

fit

Create a copy of this model and fit it with the given training data.

Note: This model is not modified.

Parameters:

training_set: TabularDataset (required)
    The training data containing the features and target.

Returns:

fitted_model: Self
    The fitted model.

Raises:

PlainTableError
    If a table is passed instead of a TabularDataset.
DatasetMissesDataError
    If the given training set contains no data.
FittingWithChoiceError
    When trying to call this method on a model with hyperparameter choices.
LearningError
    If the training data contains invalid values or if the training failed.

Source code in src/safeds/ml/classical/_supervised_model.py
def fit(self, training_set: TabularDataset) -> Self:
    """
    Create a copy of this model and fit it with the given training data.

    **Note:** This model is not modified.

    Parameters
    ----------
    training_set:
        The training data containing the features and target.

    Returns
    -------
    fitted_model:
        The fitted model.

    Raises
    ------
    PlainTableError
        If a table is passed instead of a TabularDataset.
    DatasetMissesDataError
        If the given training set contains no data.
    FittingWithChoiceError
        When trying to call this method on a model with hyperparameter choices.
    LearningError
        If the training data contains invalid values or if the training failed.
    """
    if not isinstance(training_set, TabularDataset) and isinstance(training_set, Table):
        raise PlainTableError
    if training_set.to_table().row_count == 0:
        raise DatasetMissesDataError

    self._check_additional_fit_preconditions()
    self._check_more_additional_fit_preconditions(training_set)

    wrapped_model = self._get_sklearn_model()
    _fit_sklearn_model_in_place(wrapped_model, training_set)

    result = self._clone()
    result._feature_schema = training_set.features.schema
    result._target_name = training_set.target.name
    result._target_type = training_set.target.type
    result._wrapped_model = wrapped_model

    return result
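
Example (an illustrative sketch; the data is made up, and Table is assumed to accept a mapping of column names to values):

from safeds.data.tabular.containers import Table
from safeds.ml.classical.regression import DecisionTreeRegressor

training_table = Table({"feature": [1, 2, 3, 4], "target": [2, 4, 6, 8]})
training_set = training_table.to_tabular_dataset(target_name="target")

model = DecisionTreeRegressor(max_depth=3)
fitted_model = model.fit(training_set)  # returns a fitted copy; the original model is not modified

assert not model.is_fitted
assert fitted_model.is_fitted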

fit_by_exhaustive_search

Use the hyperparameter choices to create multiple models and fit them.

Note: This model is not modified.

Parameters:

training_set: TabularDataset (required)
    The training data containing the features and target.
optimization_metric: RegressorMetric (required)
    The metric that should be used for determining the performance of a model.

Returns:

best_model: Self
    The model that performed the best out of all possible models given the Choices of hyperparameters.

Raises:

PlainTableError
    If a table is passed instead of a TabularDataset.
DatasetMissesDataError
    If the given training set contains no data.
FittingWithoutChoiceError
    When trying to call this method on a model without hyperparameter choices.
LearningError
    If the training data contains invalid values or if the training failed.

Source code in src/safeds/ml/classical/regression/_regressor.py
def fit_by_exhaustive_search(self, training_set: TabularDataset, optimization_metric: RegressorMetric) -> Self:
    """
    Use the hyperparameter choices to create multiple models and fit them.

    **Note:** This model is not modified.

    Parameters
    ----------
    training_set:
        The training data containing the features and target.
    optimization_metric:
        The metric that should be used for determining the performance of a model.

    Returns
    -------
    best_model:
        The model that performed the best out of all possible models given the Choices of hyperparameters.

    Raises
    ------
    PlainTableError
        If a table is passed instead of a TabularDataset.
    DatasetMissesDataError
        If the given training set contains no data.
    FittingWithoutChoiceError
        When trying to call this method on a model without hyperparameter choices.
    LearningError
        If the training data contains invalid values or if the training failed.
    """
    if training_set.to_table().row_count == 0:
        raise DatasetMissesDataError

    self._check_additional_fit_by_exhaustive_search_preconditions()

    [train_split, test_split] = training_set.to_table().split_rows(0.75)
    train_data = train_split.to_tabular_dataset(
        target_name=training_set.target.name,
        extra_names=training_set.extras.column_names,
    )
    test_data = test_split.to_tabular_dataset(
        target_name=training_set.target.name,
        extra_names=training_set.extras.column_names,
    )

    list_of_models = self._get_models_for_all_choices()
    list_of_fitted_models = []

    with ProcessPoolExecutor(max_workers=len(list_of_models), mp_context=mp.get_context("spawn")) as executor:
        futures = []
        for model in list_of_models:
            futures.append(executor.submit(model.fit, train_data))
        [done, _] = wait(futures, return_when=ALL_COMPLETED)
        for future in done:
            list_of_fitted_models.append(future.result())
    executor.shutdown()

    best_model = None
    best_metric_value = None
    for fitted_model in list_of_fitted_models:
        if best_model is None:
            best_model = fitted_model
            match optimization_metric.value:
                case "mean_squared_error":
                    best_metric_value = fitted_model.mean_squared_error(test_data)
                case "mean_absolute_error":
                    best_metric_value = fitted_model.mean_absolute_error(test_data)
                case "median_absolute_deviation":
                    best_metric_value = fitted_model.median_absolute_deviation(test_data)
                case "coefficient_of_determination":
                    best_metric_value = fitted_model.coefficient_of_determination(test_data)
        else:
            match optimization_metric.value:
                case "mean_squared_error":
                    error_of_fitted_model = fitted_model.mean_squared_error(test_data)
                    if error_of_fitted_model < best_metric_value:
                        best_model = fitted_model  # pragma: no cover
                        best_metric_value = error_of_fitted_model  # pragma: no cover
                case "mean_absolute_error":
                    error_of_fitted_model = fitted_model.mean_absolute_error(test_data)
                    if error_of_fitted_model < best_metric_value:
                        best_model = fitted_model  # pragma: no cover
                        best_metric_value = error_of_fitted_model  # pragma: no cover
                case "median_absolute_deviation":
                    error_of_fitted_model = fitted_model.median_absolute_deviation(test_data)
                    if error_of_fitted_model < best_metric_value:
                        best_model = fitted_model  # pragma: no cover
                        best_metric_value = error_of_fitted_model  # pragma: no cover
                case "coefficient_of_determination":
                    error_of_fitted_model = fitted_model.coefficient_of_determination(test_data)
                    if error_of_fitted_model > best_metric_value:
                        best_model = fitted_model  # pragma: no cover
                        best_metric_value = error_of_fitted_model  # pragma: no cover
    assert best_model is not None
    return best_model
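
Example (an illustrative sketch; the import paths of Choice and RegressorMetric, the enum member name, and the variadic Choice constructor are assumptions):

from safeds.data.tabular.containers import Table
from safeds.ml.classical.regression import DecisionTreeRegressor
from safeds.ml.hyperparameters import Choice   # assumed import path
from safeds.ml.metrics import RegressorMetric  # assumed import path; member name below is also assumed

if __name__ == "__main__":  # the search fits models in a spawned process pool, so guard script entry
    training_set = Table(
        {"x": [1, 2, 3, 4, 5, 6, 7, 8], "y": [1, 4, 9, 16, 25, 36, 49, 64]},
    ).to_tabular_dataset(target_name="y")

    searchable = DecisionTreeRegressor(
        max_depth=Choice(2, 4, None),
        min_sample_count_in_leaves=Choice(1, 5),
    )
    best_model = searchable.fit_by_exhaustive_search(
        training_set,
        optimization_metric=RegressorMetric.MEAN_SQUARED_ERROR,  # assumed enum member
    )
    print(best_model.max_depth, best_model.min_sample_count_in_leaves)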

get_feature_names

Return the names of the feature columns.

Note: The model must be fitted.

Returns:

feature_names: list[str]
    The names of the feature columns.

Raises:

ModelNotFittedError
    If the model has not been fitted yet.

Source code in src/safeds/ml/classical/_supervised_model.py
def get_feature_names(self) -> list[str]:
    """
    Return the names of the feature columns.

    **Note:** The model must be fitted.

    Returns
    -------
    feature_names:
        The names of the feature columns.

    Raises
    ------
    ModelNotFittedError
        If the model has not been fitted yet.
    """
    # Used in favor of is_fitted, so the type checker is happy
    if self._feature_schema is None:
        raise ModelNotFittedError

    return self._feature_schema.column_names

get_features_schema

Return the schema of the feature columns.

Note: The model must be fitted.

Returns:

feature_schema: Schema
    The schema of the feature columns.

Raises:

ModelNotFittedError
    If the model has not been fitted yet.

Source code in src/safeds/ml/classical/_supervised_model.py
def get_features_schema(self) -> Schema:
    """
    Return the schema of the feature columns.

    **Note:** The model must be fitted.

    Returns
    -------
    feature_schema:
        The schema of the feature columns.

    Raises
    ------
    ModelNotFittedError
        If the model has not been fitted yet.
    """
    # Used in favor of is_fitted, so the type checker is happy
    if self._feature_schema is None:
        raise ModelNotFittedError

    return self._feature_schema

get_target_name

Return the name of the target column.

Note: The model must be fitted.

Returns:

target_name: str
    The name of the target column.

Raises:

ModelNotFittedError
    If the model has not been fitted yet.

Source code in src/safeds/ml/classical/_supervised_model.py
def get_target_name(self) -> str:
    """
    Return the name of the target column.

    **Note:** The model must be fitted.

    Returns
    -------
    target_name:
        The name of the target column.

    Raises
    ------
    ModelNotFittedError
        If the model has not been fitted yet.
    """
    # Used in favor of is_fitted, so the type checker is happy
    if self._target_name is None:
        raise ModelNotFittedError

    return self._target_name

get_target_type

Return the type of the target column.

Note: The model must be fitted.

Returns:

target_type: DataType
    The type of the target column.

Raises:

ModelNotFittedError
    If the model has not been fitted yet.

Source code in src/safeds/ml/classical/_supervised_model.py
def get_target_type(self) -> DataType:
    """
    Return the type of the target column.

    **Note:** The model must be fitted.

    Returns
    -------
    target_type:
        The type of the target column.

    Raises
    ------
    ModelNotFittedError
        If the model has not been fitted yet.
    """
    # Used in favor of is_fitted, so the type checker is happy
    if self._target_type is None:
        raise ModelNotFittedError

    return self._target_type
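
Example (an illustrative sketch of the four getters above; the data is made up):

from safeds.data.tabular.containers import Table
from safeds.ml.classical.regression import DecisionTreeRegressor

dataset = Table({"a": [1, 2, 3, 4], "b": [2, 4, 6, 8]}).to_tabular_dataset(target_name="b")
fitted = DecisionTreeRegressor().fit(dataset)

print(fitted.get_feature_names())    # ['a']
print(fitted.get_target_name())      # 'b'
print(fitted.get_features_schema())  # Schema of the feature columns
print(fitted.get_target_type())      # DataType of the target column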

mean_absolute_error

Compute the mean absolute error (MAE) of the regressor on the given data.

The mean absolute error is the average of the absolute differences between the predicted and expected target values. The lower the mean absolute error, the better the regressor. Results range from 0.0 to positive infinity.

Note: The model must be fitted.

Parameters:

validation_or_test_set: Table | TabularDataset (required)
    The validation or test set.

Returns:

mean_absolute_error: float
    The mean absolute error of the regressor.

Raises:

ModelNotFittedError
    If the regressor has not been fitted yet.

Source code in src/safeds/ml/classical/regression/_regressor.py
def mean_absolute_error(self, validation_or_test_set: Table | TabularDataset) -> float:
    """
    Compute the mean absolute error (MAE) of the regressor on the given data.

    The mean absolute error is the average of the absolute differences between the predicted and expected target
    values. The **lower** the mean absolute error, the better the regressor. Results range from 0.0 to positive
    infinity.

    **Note:** The model must be fitted.

    Parameters
    ----------
    validation_or_test_set:
        The validation or test set.

    Returns
    -------
    mean_absolute_error:
        The mean absolute error of the regressor.

    Raises
    ------
    ModelNotFittedError
        If the regressor has not been fitted yet.
    """
    if not self.is_fitted:
        raise ModelNotFittedError

    validation_or_test_set = _extract_table(validation_or_test_set)

    return RegressionMetrics.mean_absolute_error(
        self.predict(validation_or_test_set),
        validation_or_test_set.get_column(self.get_target_name()),
    )
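
To illustrate the definition above, a plain-Python sketch of the metric itself (not the library implementation):

predicted = [2.5, 0.0, 2.0, 8.0]
expected = [3.0, -0.5, 2.0, 7.0]

# Average of the absolute differences between predictions and expected values
mae = sum(abs(p - e) for p, e in zip(predicted, expected)) / len(expected)
print(mae)  # 0.5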

mean_directional_accuracy

Compute the mean directional accuracy (MDA) of the regressor on the given data.

This metric compares two consecutive target values and checks if the predicted direction (down/unchanged/up) matches the expected direction. The mean directional accuracy is the proportion of correctly predicted directions. The higher the mean directional accuracy, the better the regressor. Results range from 0.0 to 1.0.

This metric is useful for time series data, where the order of the target values has a meaning. It is not useful for other types of data. Because of this, it is not included in the summarize_metrics method.

Note: The model must be fitted.

Parameters:

validation_or_test_set: Table | TabularDataset (required)
    The validation or test set.

Returns:

mean_directional_accuracy: float
    The mean directional accuracy of the regressor.

Raises:

ModelNotFittedError
    If the regressor has not been fitted yet.

Source code in src/safeds/ml/classical/regression/_regressor.py
def mean_directional_accuracy(self, validation_or_test_set: Table | TabularDataset) -> float:
    """
    Compute the mean directional accuracy (MDA) of the regressor on the given data.

    This metric compares two consecutive target values and checks if the predicted direction (down/unchanged/up)
    matches the expected direction. The mean directional accuracy is the proportion of correctly predicted
    directions. The **higher** the mean directional accuracy, the better the regressor. Results range from 0.0 to
    1.0.

    This metric is useful for time series data, where the order of the target values has a meaning. It is not useful
    for other types of data. Because of this, it is not included in the `summarize_metrics` method.

    **Note:** The model must be fitted.

    Parameters
    ----------
    validation_or_test_set:
        The validation or test set.

    Returns
    -------
    mean_directional_accuracy:
        The mean directional accuracy of the regressor.

    Raises
    ------
    ModelNotFittedError
        If the regressor has not been fitted yet.
    """
    if not self.is_fitted:
        raise ModelNotFittedError

    validation_or_test_set = _extract_table(validation_or_test_set)

    return RegressionMetrics.mean_directional_accuracy(
        self.predict(validation_or_test_set),
        validation_or_test_set.get_column(self.get_target_name()),
    )
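
To illustrate the direction comparison described above, a plain-Python sketch (not the library implementation):

predicted = [1.0, 2.0, 1.5, 1.4]
expected = [1.0, 3.0, 2.0, 2.5]

def sign(x: float) -> int:
    return (x > 0) - (x < 0)

# Compare the direction (down/unchanged/up) of each consecutive step.
matches = [
    sign(predicted[i + 1] - predicted[i]) == sign(expected[i + 1] - expected[i])
    for i in range(len(expected) - 1)
]
print(sum(matches) / len(matches))  # 2 of 3 directions match -> ~0.667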

mean_squared_error

Compute the mean squared error (MSE) of the regressor on the given data.

The mean squared error is the average of the squared differences between the predicted and expected target values. The lower the mean squared error, the better the regressor. Results range from 0.0 to positive infinity.

Notes:

  • The model must be fitted.
  • To get the root mean squared error (RMSE), take the square root of the result.

Parameters:

validation_or_test_set: Table | TabularDataset (required)
    The validation or test set.

Returns:

mean_squared_error: float
    The mean squared error of the regressor.

Raises:

ModelNotFittedError
    If the regressor has not been fitted yet.

Source code in src/safeds/ml/classical/regression/_regressor.py
def mean_squared_error(self, validation_or_test_set: Table | TabularDataset) -> float:
    """
    Compute the mean squared error (MSE) of the regressor on the given data.

    The mean squared error is the average of the squared differences between the predicted and expected target
    values. The **lower** the mean squared error, the better the regressor. Results range from 0.0 to positive
    infinity.

    **Notes:**

    - The model must be fitted.
    - To get the root mean squared error (RMSE), take the square root of the result.

    Parameters
    ----------
    validation_or_test_set:
        The validation or test set.

    Returns
    -------
    mean_squared_error:
        The mean squared error of the regressor.

    Raises
    ------
    ModelNotFittedError
        If the regressor has not been fitted yet.
    """
    if not self.is_fitted:
        raise ModelNotFittedError

    validation_or_test_set = _extract_table(validation_or_test_set)

    return RegressionMetrics.mean_squared_error(
        self.predict(validation_or_test_set),
        validation_or_test_set.get_column(self.get_target_name()),
    )
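
To illustrate the definition and the RMSE note above, a plain-Python sketch (not the library implementation):

import math

predicted = [3.0, 5.0, 2.5]
expected = [2.0, 5.0, 4.5]

# Average of the squared differences; the square root gives the RMSE
mse = sum((p - e) ** 2 for p, e in zip(predicted, expected)) / len(expected)
rmse = math.sqrt(mse)
print(mse, rmse)  # 1.666..., ~1.291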

median_absolute_deviation

Compute the median absolute deviation (MAD) of the regressor on the given data.

The median absolute deviation is the median of the absolute differences between the predicted and expected target values. The lower the median absolute deviation, the better the regressor. Results range from 0.0 to positive infinity.

Note: The model must be fitted.

Parameters:

validation_or_test_set: Table | TabularDataset (required)
    The validation or test set.

Returns:

median_absolute_deviation: float
    The median absolute deviation of the regressor.

Raises:

ModelNotFittedError
    If the regressor has not been fitted yet.

Source code in src/safeds/ml/classical/regression/_regressor.py
def median_absolute_deviation(self, validation_or_test_set: Table | TabularDataset) -> float:
    """
    Compute the median absolute deviation (MAD) of the regressor on the given data.

    The median absolute deviation is the median of the absolute differences between the predicted and expected
    target values. The **lower** the median absolute deviation, the better the regressor. Results range from 0.0 to
    positive infinity.

    **Note:** The model must be fitted.

    Parameters
    ----------
    validation_or_test_set:
        The validation or test set.

    Returns
    -------
    median_absolute_deviation:
        The median absolute deviation of the regressor.

    Raises
    ------
    ModelNotFittedError
        If the regressor has not been fitted yet.
    """
    if not self.is_fitted:
        raise ModelNotFittedError

    validation_or_test_set = _extract_table(validation_or_test_set)

    return RegressionMetrics.median_absolute_deviation(
        self.predict(validation_or_test_set),
        validation_or_test_set.get_column(self.get_target_name()),
    )
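
To illustrate the definition above, a plain-Python sketch (not the library implementation):

from statistics import median

predicted = [3.0, 5.0, 2.5, 7.0]
expected = [2.0, 5.0, 4.5, 7.5]

# Median of the absolute differences between predictions and expected values
mad = median(abs(p - e) for p, e in zip(predicted, expected))
print(mad)  # 0.75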

plot

Get the image of the decision tree.

Returns:

plot: Image
    The decision tree figure as an image.

Raises:

ModelNotFittedError
    If the model is not fitted.

Source code in src/safeds/ml/classical/regression/_decision_tree_regressor.py
def plot(self) -> Image:
    """
    Get the image of the decision tree.

    Returns
    -------
    plot:
        The decision tree figure as an image.

    Raises
    ------
    ModelNotFittedError
        If the model is not fitted.
    """
    if not self.is_fitted:
        raise ModelNotFittedError

    from io import BytesIO

    import matplotlib.pyplot as plt
    from sklearn.tree import plot_tree

    plot_tree(self._wrapped_model)

    # Save the figure to an in-memory buffer
    with BytesIO() as buffer:
        plt.savefig(buffer)
        image = buffer.getvalue()

    # Close the figure so matplotlib does not display it
    plt.close()

    return Image.from_bytes(image)
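
Example (an illustrative sketch; the data is made up, and matplotlib must be installed for plotting):

from safeds.data.tabular.containers import Table
from safeds.ml.classical.regression import DecisionTreeRegressor

dataset = Table({"x": [1, 2, 3, 4, 5], "y": [1, 4, 9, 16, 25]}).to_tabular_dataset(target_name="y")
fitted = DecisionTreeRegressor(max_depth=2).fit(dataset)

image = fitted.plot()  # a safeds Image; calling plot() on an unfitted model raises ModelNotFittedError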

predict

Predict the target values on the given dataset.

Note: The model must be fitted.

Parameters:

dataset: Table | TabularDataset (required)
    The dataset containing at least the features.

Returns:

prediction: TabularDataset
    The given dataset with an additional column for the predicted target values.

Raises:

ModelNotFittedError
    If the model has not been fitted yet.
DatasetMissesFeaturesError
    If the dataset misses feature columns.
PredictionError
    If predicting with the given dataset failed.

Source code in src/safeds/ml/classical/_supervised_model.py
def predict(
    self,
    dataset: Table | TabularDataset,
) -> TabularDataset:
    """
    Predict the target values on the given dataset.

    **Note:** The model must be fitted.

    Parameters
    ----------
    dataset:
        The dataset containing at least the features.

    Returns
    -------
    prediction:
        The given dataset with an additional column for the predicted target values.

    Raises
    ------
    ModelNotFittedError
        If the model has not been fitted yet.
    DatasetMissesFeaturesError
        If the dataset misses feature columns.
    PredictionError
        If predicting with the given dataset failed.
    """
    self._check_additional_predict_preconditions(dataset)

    return _predict_with_sklearn_model(
        self._wrapped_model,
        dataset,
        self.get_feature_names(),
        self.get_target_name(),
    )
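
Example (an illustrative sketch; the data is made up):

from safeds.data.tabular.containers import Table
from safeds.ml.classical.regression import DecisionTreeRegressor

train = Table({"x": [1, 2, 3, 4], "y": [2, 4, 6, 8]}).to_tabular_dataset(target_name="y")
fitted = DecisionTreeRegressor().fit(train)

# The dataset only needs the feature columns; the prediction adds the target column.
new_data = Table({"x": [5, 6]})
prediction = fitted.predict(new_data)
print(prediction.to_table())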

summarize_metrics

Summarize the regressor's metrics on the given data.

Note: The model must be fitted.

Parameters:

validation_or_test_set: Table | TabularDataset (required)
    The validation or test set.

Returns:

metrics: Table
    A table containing the regressor's metrics.

Raises:

ModelNotFittedError
    If the regressor has not been fitted yet.

Source code in src/safeds/ml/classical/regression/_regressor.py
def summarize_metrics(self, validation_or_test_set: Table | TabularDataset) -> Table:
    """
    Summarize the regressor's metrics on the given data.

    **Note:** The model must be fitted.

    Parameters
    ----------
    validation_or_test_set:
        The validation or test set.

    Returns
    -------
    metrics:
        A table containing the regressor's metrics.

    Raises
    ------
    ModelNotFittedError
        If the regressor has not been fitted yet.
    """
    if not self.is_fitted:
        raise ModelNotFittedError

    validation_or_test_set = _extract_table(validation_or_test_set)

    return RegressionMetrics.summarize(
        self.predict(validation_or_test_set),
        validation_or_test_set.get_column(self.get_target_name()),
    )
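
Example (an illustrative sketch; the data is made up):

from safeds.data.tabular.containers import Table
from safeds.ml.classical.regression import DecisionTreeRegressor

data = Table({"x": [1, 2, 3, 4, 5, 6, 7, 8], "y": [3, 6, 9, 12, 15, 18, 21, 24]})
train_table, test_table = data.split_rows(0.75)

fitted = DecisionTreeRegressor().fit(train_table.to_tabular_dataset(target_name="y"))
metrics = fitted.summarize_metrics(test_table)  # a Table of the regressor's metrics on the test set
print(metrics)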