diff --git a/models/__init__.py b/models/__init__.py
index 63e7476566a3bededd86feaa540e5bf721594cf5..91b13a59336a53b18cf26ba1e58d2700616e34cb 100644
--- a/models/__init__.py
+++ b/models/__init__.py
@@ -6,12 +6,14 @@ from .dayabay_v0 import model_dayabay_v0
 from .dayabay_v0b import model_dayabay_v0b
 from .dayabay_v0c import model_dayabay_v0c
 from .dayabay_v0d import model_dayabay_v0d
+from .dayabay_v0e import model_dayabay_v0e
 
 _dayabay_models = {
     "v0": model_dayabay_v0,
     "v0b": model_dayabay_v0b,
     "v0c": model_dayabay_v0c,
     "v0d": model_dayabay_v0d,
+    "v0e": model_dayabay_v0e,
 }
 
 _available_sources = ("tsv", "hdf5", "root", "npz")
diff --git a/models/dayabay_v0e.py b/models/dayabay_v0e.py
new file mode 100644
index 0000000000000000000000000000000000000000..23b975d87603ebe38977e68c3d2c79eead1e5b2e
--- /dev/null
+++ b/models/dayabay_v0e.py
@@ -0,0 +1,2614 @@
+from __future__ import annotations
+
+from collections.abc import Collection, Mapping, Sequence
+from itertools import product
+from os.path import relpath
+from pathlib import Path
+from typing import TYPE_CHECKING, Literal, get_args
+
+from numpy import ndarray
+from numpy.random import Generator
+from pandas import DataFrame
+
+from dagflow.bundles.file_reader import FileReader
+from dagflow.bundles.load_array import load_array
+from dagflow.bundles.load_graph import load_graph, load_graph_data
+from dagflow.bundles.load_parameters import load_parameters
+from dagflow.core import Graph, NodeStorage
+from dagflow.tools.logger import logger
+from dagflow.tools.schema import LoadYaml
+from multikeydict.nestedmkdict import NestedMKDict
+
+if TYPE_CHECKING:
+    from dagflow.core.meta_node import MetaNode
+
+FutureType = Literal[
+    "reactor-28days",  # merge reactor data, each 4 weeks
+    "reactor-35days",  # merge reactor data, each 5 weeks
+]
+_future_redundant = ["reactor-35days"]
+_future_included = {}
+
+# Define a dictionary of groups of nuisance parameters in a format `name: path`,
+# where path denotes the location of the parameters in the storage.
+_SYSTEMATIC_UNCERTAINTIES_GROUPS = {
+    "oscprob": "oscprob",
+    "eres": "detector.eres",
+    "lsnl": "detector.lsnl_scale_a",
+    "iav": "detector.iav_offdiag_scale_factor",
+    "detector_relative": "detector.detector_relative",
+    "energy_per_fission": "reactor.energy_per_fission",
+    "nominal_thermal_power": "reactor.nominal_thermal_power",
+    "snf": "reactor.snf_scale",
+    "neq": "reactor.nonequilibrium_scale",
+    "fission_fraction": "reactor.fission_fraction_scale",
+    "bkg_rate": "bkg",
+    "hm_corr": "reactor_anue.spectrum_uncertainty.corr",
+    "hm_uncorr": "reactor_anue.spectrum_uncertainty.uncorr",
+}
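+# For example, the entry "eres": "detector.eres" means the energy resolution
+# nuisance parameters are expected under "parameters.normalized.detector.eres"
+# in the model storage.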
+
+
+class model_dayabay_v0e:
+    """The Daya Bay analysis implementation version v0e.
+
+    Purpose:
+        - a copy of model v0d with the old options removed
+
+    Attributes
+    ----------
+    storage : NodeStorage
+        nested dictionary with model elements: nodes, parameters, etc.
+
+    graph : Graph
+        graph instance
+
+    index : dict[str, tuple[str, ...]]
+        dictionary with all possible names for replicated items, e.g.
+        "detector": ("AD11", "AD12", ...); reactor: ("DB1", ...); ...
+        the index is set up within the model
+
+    combinations : dict[str, tuple[tuple[str, ...], ...]]
+        lists of all combinations of values of 1 and more indices,
+        e.g. detector, detector/period, reactor/isotope, reactor/isotope/period, etc.
+
+    spectrum_correction_mode : str, default="exponential"
+        mode of how the parameters of the free spectrum model
+        are treated:
+            - "exponential": p岬�=0 by default, S(E岬�) is
+              multiplied by exp(p岬�) the correction is always
+              positive, but nonlinear
+            - "linear": p岬�=0 by default, S(E岬�) is multiplied by
+              1+p岬� the correction may be negative, but is always
+              linear
+
+    concatenation_mode : str, default="detector_period"
+        chooses the observation to be analyzed:
+            - "detector_period" - concatenation of observations at
+              each detector at each period
+            - "detector" - concatenation of observations at each
+              detector (combined for all periods)
+
+    monte_carlo_mode : str, default="asimov"
+        the Monte-Carlo mode for pseudo-data:
+            - "asimov" - Asimov, no fluctuations
+            - "normal-stats" - normal fluctuations with statistical
+              errors
+            - "poisson" - Poisson fluctuations
+
+    path_data : Path
+        path to the data
+
+    source_type : str, default="npz"
+        type of the data to read ("tsv", "hdf5", "root" or "npz")
+
+    Technical attributes
+    --------------------
+    _strict : bool, default=True
+        strict mode. Stop execution if:
+            - the model is not complete
+            - any labels were not applied
+
+    _close : bool, default=True
+        if True, the graph is closed and memory is allocated;
+        False may be used to debug a corrupt model
+
+    _random_generator : Generator
+        numpy random generator to be used for ToyMC
+
+    _covariance_matrix : MetaNode
+        covariance matrix, computed on this model
+
+    _frozen_nodes : dict[str, tuple]
+        storage with nodes that are fixed at their values and
+        require manual intervention in order to be recalculated
+    """
+
+    __slots__ = (
+        "storage",
+        "graph",
+        "index",
+        "combinations",
+        "path_data",
+        "spectrum_correction_mode",
+        "concatenation_mode",
+        "monte_carlo_mode",
+        "source_type",
+        "_strict",
+        "_close",
+        "_future",
+        "_covariance_matrix",
+        "_frozen_nodes",
+        "_random_generator",
+    )
+
+    storage: NodeStorage
+    graph: Graph
+    index: dict[str, tuple[str, ...]]
+    combinations: dict[str, tuple[tuple[str, ...], ...]]
+    path_data: Path
+    spectrum_correction_mode: Literal["linear", "exponential"]
+    concatenation_mode: Literal["detector", "detector_period"]
+    monte_carlo_mode: Literal["asimov", "normal-stats", "poisson"]
+    source_type: Literal["tsv", "hdf5", "root", "npz"]
+    _strict: bool
+    _close: bool
+    _random_generator: Generator
+    _future: set[FutureType]
+    _covariance_matrix: MetaNode
+    _frozen_nodes: dict[str, tuple]
+
+    def __init__(
+        self,
+        *,
+        source_type: Literal["tsv", "hdf5", "root", "npz"] = "npz",
+        strict: bool = True,
+        close: bool = True,
+        override_indices: Mapping[str, Sequence[str]] = {},
+        spectrum_correction_mode: Literal["linear", "exponential"] = "exponential",
+        seed: int = 0,
+        monte_carlo_mode: Literal["asimov", "normal-stats", "poisson"] = "asimov",
+        concatenation_mode: Literal["detector", "detector_period"] = "detector_period",
+        parameter_values: dict[str, float | str] = {},
+        future: Collection[FutureType] = set(),
+    ):
+        """Model initialization.
+
+        Parameters
+        ----------
+        seed : int
+              random seed to be passed to the random generator for ToyMC
+        override_indices : dict[str, Sequence[str]]
+                           dictionary with indices to override self.index.
+                           may be used to reduce the number of detectors or reactors in the
+                           model
+
+        For the description of other parameters, see the description of the class.
+        """
+        self._strict = strict
+        self._close = close
+
+        self.storage = NodeStorage()
+        self.path_data = Path("data/dayabay-v0d")  # TODO: update
+        self.source_type = source_type
+        self.spectrum_correction_mode = spectrum_correction_mode
+        self.concatenation_mode = concatenation_mode
+        self.monte_carlo_mode = monte_carlo_mode
+        self._random_generator = self._create_random_generator(seed)
+
+        self._future = set(future)
+        future_variants = set(get_args(FutureType))
+        if "all" in self._future:
+            self._future = future_variants - set(_future_redundant)
+            for ft in tuple(self._future):
+                if extra := _future_included.get(ft):
+                    self._future.update(extra)  # pyright: ignore [reportArgumentType]
+        assert all(f in future_variants for f in self._future)
+        if self._future:
+            logger.info(f"Future options: {', '.join(self._future)}")
+        self._frozen_nodes = {}
+
+        self.combinations = {}
+
+        override_indices = {k: tuple(v) for k, v in override_indices.items()}
+        self.build(override_indices)
+
+        if parameter_values:
+            self.set_parameters(parameter_values)
+
+    def build(self, override_indices: dict[str, tuple[str, ...]] = {}):
+        """Actually build the model.
+
+        Steps:
+            - initialize indices to describe repeated components
+            - read parameters
+            - block by block initialize the nodes of the model and connect them
+                - read the data whenever necessary
+
+        Parameters
+        ----------
+        override_indices : dict[str, tuple[str, ...]]
+                           dictionary with indices to override self.index.
+                           may be used to reduce the number of detectors or reactors in the
+                           model
+        """
+        #
+        # Import necessary nodes and loaders
+        #
+        from numpy import arange, concatenate, linspace, ones
+
+        from dagflow.bundles.load_hist import load_hist
+        from dagflow.bundles.load_record import load_record_data
+        from dagflow.bundles.make_y_parameters_for_x import make_y_parameters_for_x
+        from dagflow.lib.arithmetic import Division, Product, ProductShiftedScaled, Sum
+        from dagflow.lib.common import Array, Concatenation, Proxy, View
+        from dagflow.lib.exponential import Exp
+        from dagflow.lib.integration import Integrator
+        from dagflow.lib.interpolation import Interpolator
+        from dagflow.lib.linalg import Cholesky, VectorMatrixProduct
+        from dagflow.lib.normalization import RenormalizeDiag
+        from dagflow.lib.parameters import ParArrayInput
+        from dagflow.lib.statistics import CovarianceMatrixGroup, LogProdDiag
+        from dagflow.lib.summation import ArraySum, SumMatOrDiag
+        from dagflow.tools.schema import LoadPy
+        from dgf_detector import (
+            AxisDistortionMatrix,
+            EnergyResolution,
+            Monotonize,
+            Rebin,
+        )
+        from dgf_detector.bundles.refine_lsnl_data import refine_lsnl_data
+        from dgf_reactoranueosc import (
+            IBDXsecVBO1Group,
+            InverseSquareLaw,
+            NueSurvivalProbability,
+        )
+        from dgf_statistics import Chi2, CNPStat, MonteCarlo
+        from models.bundles.refine_detector_data import refine_detector_data
+        from models.bundles.refine_reactor_data import refine_reactor_data
+        from models.bundles.sync_reactor_detector_data import sync_reactor_detector_data
+        from multikeydict.tools import remap_items
+
+        # Initialize the storage and paths
+        storage = self.storage
+        path_data = self.path_data
+
+        path_parameters = path_data / "parameters"
+        path_arrays = path_data / self.source_type
+
+        # Read Eν edges for the parametrization of the free antineutrino spectrum
+        # model. Loads the python file and returns the variable "edges", which
+        # should be defined in the file and have type `ndarray`.
+        antineutrino_model_edges = LoadPy(
+            path_parameters / "reactor_antineutrino_spectrum_edges.py",
+            variable="edges",
+            type=ndarray,
+        )
+
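+        # A minimal sketch of what such a file could contain (hypothetical
+        # values; the actual edges are provided with the data):
+        # ```python
+        # # reactor_antineutrino_spectrum_edges.py
+        # from numpy import arange, concatenate
+        # edges = concatenate((arange(1.8, 8.0, 0.25), [9.0, 12.0]))
+        # ```
+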
+        # Provide some convenience substitutions for labels
+        index_names = {
+            "U235": "虏鲁鈦礥",
+            "U238": "虏鲁鈦窾",
+            "Pu239": "虏鲁鈦筆u",
+            "Pu241": "虏鈦绰筆u",
+        }
+        site_arrangement = {
+            "EH1": ("AD11", "AD12"),
+            "EH2": ("AD21", "AD22"),
+            "EH3": ("AD31", "AD32", "AD33", "AD34"),
+        }
+
+        #
+        # Provide indices, names and lists of values in order to work with repeated
+        # items
+        #
+        index = self.index = {
+            # Data acquisition period
+            "period": ("6AD", "8AD", "7AD"),
+            # Detector names
+            "detector": (
+                "AD11",
+                "AD12",
+                "AD21",
+                "AD22",
+                "AD31",
+                "AD32",
+                "AD33",
+                "AD34",
+            ),
+            # Source of background events:
+            #     - acc: accidental coincidences
+            #     - lihe: ⁹Li and ⁸He related events
+            #     - fastn: fast neutrons and muon-x background
+            #     - amc: ²⁴¹Am¹³C calibration source related background
+            #     - alphan: ¹³C(α,n)¹⁶O background
+            "bkg": ("acc", "lihe", "fastn", "amc", "alphan"),
+            "bkg_stable": ("lihe", "fastn", "amc", "alphan"),  # TODO: doc
+            "bkg_site_correlated": ("lihe", "fastn"),  # TODO: doc
+            "bkg_not_site_correlated": ("acc", "amc", "alphan"),  # TODO: doc
+            "bkg_not_correlated": ("acc", "alphan"),  # TODO: doc
+            # Experimental sites
+            "site": ("EH1", "EH2", "EH3"),
+            # Fissile isotopes
+            "isotope": ("U235", "U238", "Pu239", "Pu241"),
+            # Fissile isotopes whose spectrum requires the Non-Equilibrium correction
+            # to be applied
+            "isotope_neq": ("U235", "Pu239", "Pu241"),
+            # Nuclear reactors
+            "reactor": ("DB1", "DB2", "LA1", "LA2", "LA3", "LA4"),
+            # Sources of antineutrinos:
+            #     - "nu_main": for antineutrinos from reactor cores with no
+            #                  Non-Equilibrium correction applied
+            #     - "nu_neq": antineutrinos from Non-Equilibrium correction
+            #     - "nu_snf": antineutrinos from Spent Nuclear Fuel
+            "anue_source": ("nu_main", "nu_neq", "nu_snf"),
+            # Model related antineutrino spectrum correction type:
+            #     - uncorrelated
+            #     - correlated
+            "anue_unc": ("uncorr", "corr"),
+            # Part of the Liquid scintillator non-linearity (LSNL) parametrization
+            "lsnl": ("nominal", "pull0", "pull1", "pull2", "pull3"),
+            # Nuisance related part of the Liquid scintillator non-linearity (LSNL)
+            # parametrization
+            "lsnl_nuisance": ("pull0", "pull1", "pull2", "pull3"),
+            # Free antineutrino spectrum parameter names: one parameter for each edge
+            # from `antineutrino_model_edges`
+            "spec": tuple(
+                f"spec_scale_{i:02d}" for i in range(len(antineutrino_model_edges))
+            ),
+        }
+
+        # NOTE: keep it this way as dataset B may not have it separated
+        index["bkg"] = index["bkg"] + ("muonx",)
+        index["bkg_stable"] = index["bkg_stable"] + ("muonx",)
+        index["bkg_site_correlated"] = index["bkg_site_correlated"] + ("muonx",)
+
+        # Define isotope names in lower case
+        index["isotope_lower"] = tuple(isotope.lower() for isotope in index["isotope"])
+
+        # Optionally override (reduce) indices
+        index.update(override_indices)
+
+        # Check there are no overlaps
+        index_all = (
+            index["isotope"] + index["detector"] + index["reactor"] + index["period"]
+        )
+        set_all = set(index_all)
+        if len(index_all) != len(set_all):
+            raise RuntimeError("Repeated indices")
+
+        # Collect combinations of 2 and more indices. Ensure some combinations,
+        # e.g. detectors not present at certain periods, are excluded.
+        # For example, combinations["reactor.detector"] contains:
+        # (("DB1", "AD11"), ("DB1", "AD12"), ..., ("DB2", "AD11"), ...)
+        #
+        # The dictionary combinations is one of the main elements to loop over and match
+        # parts of the computational graph
+        inactive_detectors = ({"6AD", "AD22"}, {"6AD", "AD34"}, {"7AD", "AD11"})
+        inactive_backgrounds = (
+            {"6AD", "muonx"},
+            {"8AD", "muonx"},
+            {"AD11", "muonx"},
+        )  # TODO: doc
+        inactive_combinations = inactive_detectors + inactive_backgrounds
+        required_combinations = tuple(index.keys()) + (
+            "reactor.detector",
+            "reactor.isotope",
+            "reactor.isotope_neq",
+            "reactor.period",
+            "reactor.isotope.period",
+            "reactor.isotope.detector",
+            "reactor.isotope_neq.detector",
+            "reactor.isotope.detector.period",
+            "reactor.isotope_neq.detector.period",
+            "reactor.detector.period",
+            "detector.period",
+            "site.period",
+            "period.detector",
+            "anue_unc.isotope",
+            "bkg.detector",
+            "bkg_stable.detector",
+            "bkg.detector.period",
+            "bkg.period.detector",
+            "bkg_stable.detector.period",
+            "bkg_site_correlated.detector.period",
+            "bkg_not_site_correlated.detector.period",
+            "bkg_not_correlated.detector.period",
+        )
+        combinations = self.combinations
+        for combname in required_combinations:
+            combitems = combname.split(".")
+            items = []
+            for it in product(*(index[item] for item in combitems)):
+                if any(inact.issubset(it) for inact in inactive_combinations):
+                    continue
+                items.append(it)
+            combinations[combname] = tuple(items)
+
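+        # As a result, combinations["detector.period"] contains pairs like
+        # ("AD11", "6AD") and ("AD11", "8AD"), while ("AD11", "7AD") is excluded
+        # since AD11 was not active during the 7AD period.
+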
+        # Special treatment is needed for combinations of anue_source and isotope as
+        # nu_neq is related to only a fraction of the isotopes, while nu_snf is not
+        # indexed by isotope at all
+        combinations["anue_source.reactor.isotope.detector"] = (
+            tuple(
+                ("nu_main",) + cmb for cmb in combinations["reactor.isotope.detector"]
+            )
+            + tuple(
+                ("nu_neq",) + cmb
+                for cmb in combinations["reactor.isotope_neq.detector"]
+            )
+            + tuple(("nu_snf",) + cmb for cmb in combinations["reactor.detector"])
+        )
+
+        # Start building the computational graph within a dedicated context, which
+        # includes:
+        # - graph - the graph instance.
+        #     + All the nodes are added to the graph while graph is open.
+        #     + On the exit from the context the graph closes itself, which triggers
+        #       allocation of memory for the calculations.
+        # - storage - nested dictionary, which is used to store all the created
+        #   elements: nodes, outputs, parameters, data items, etc.
+        # - filereader - manages reading the files
+        #     + ensures, that the input files are opened only once
+        #     + closes the files upon the exit of the context
+        self.graph = Graph(close_on_exit=self._close, strict=self._strict)
+
+        with self.graph, storage, FileReader:
+            # Load all the parameters, necessary for the model. The parameters are
+            # divided into three lists:
+            # - constant - parameters are not expected to be modified during the
+            #   analysis and thus are not passed to the minimizer.
+            # - free - parameters that should be minimized and have no constraints
+            # - constrained - parameters that should be minimized and have constraints.
+            #   The constraints are defined by:
+            #   + central values and uncertainties
+            #   + central vectors and covariance matrices
+            #
+            # additionally the following lists are provided
+            # - all - all the parameters, including fixed, free and constrained
+            # - variable - free and constrained parameters
+            # - normalized - a shadow definition of the constrained parameters. Each
+            #   normalized parameter has value=0 when the constrained parameter is at
+            #   its central value and +1 when it is offset by +1σ. The correlations,
+            #   defined by the covariance matrices, are properly treated. The conversion
+            #   works both ways: when a normalized parameter is modified, the related
+            #   constrained parameters are changed as well and vice versa. The
+            #   parameters from this list are used to build the nuisance part of the χ²
+            #   function.
+            #
+            # All the parameters are collected in the storage - a nested dictionary,
+            # which can handle path-like keys, with "folders" split by periods:
+            # - storage["parameters.all"] - storage with all the parameters
+            # - storage["parameters", "all"] - the same storage with all the parameters
+            # - storage["parameters.all.oscprob.SinSq2Theta12"] - neutrino oscillation
+            #   parameter sin²2θ₁₂
+            # - storage["parameters.constrained.oscprob.SinSq2Theta12"] - same neutrino
+            #   oscillation parameter sin²2θ₁₂ in the list of constrained parameters.
+            # - storage["parameters.normalized.oscprob.SinSq2Theta12"] - shadow
+            #   (nuisance) parameter for sin²2θ₁₂.
+            #
+            # The constrained parameter has fields `value`, `normvalue`, `central`, and
+            # `sigma`, which could be read to get the current value of the parameter,
+            # normalized value, central value, and uncertainty. The assignment to the
+            # fields changes the values. Additionally fields `sigma_relative` and
+            # `sigma_percent` may be used to get and set the relative uncertainty.
+            # ```python
+            # p = storage["parameters.all.oscprob.SinSq2Theta12"]
+            # print(p)        # print the description
+            # print(p.value)  # print the current value
+            # p.value = 0.8   # set the value to 0.8 - affects the model
+            # p.central = 0.7 # set the central value to 0.7 - affects the nuisance term
+            # p.normvalue = 1 # set the value to central+1sigma
+            # ```
+            #
+            # The non-constrained parameter lacks `central`, `sigma`, `normvalue`, etc
+            # fields and is controlled only by `value`. The normalized parameter does
+            # have `central` and `sigma` fields, but they are read only. The effect of
+            # changing `value` field of the normalized parameter is the same as changing
+            # `normvalue` field of its corresponding parameter.
+            #
+            # ```python
+            # np = storage["parameters.normalized.oscprob.SinSq2Theta12"]
+            # print(np)        # print the description
+            # print(np.value)  # print the current value -> 0
+            # np.value = 1     # set the value to central+1sigma
+            # np.normvalue = 1 # set the value to central+1sigma
+            # # p is also affected
+            # ```
+            #
+            # Load oscillation parameters from 3 configuration files:
+            # - Free sin²2θ₁₃ and Δm²₃₂
+            # - Constrained solar parameters sin²2θ₁₂ and Δm²₂₁
+            # - Fixed: Neutrino Mass Ordering
+            load_parameters(path="oscprob", load=path_parameters / "oscprob.yaml")
+            load_parameters(
+                path="oscprob",
+                load=path_parameters / "oscprob_solar.yaml",
+                joint_nuisance=True,
+            )
+            load_parameters(
+                path="oscprob", load=path_parameters / "oscprob_constants.yaml"
+            )
+            # The parameters are located in "parameters.oscprob" folder as defined by
+            # the `path` argument.
+            # The annotated table with values may be then printed for any storage as
+            # ```python
+            # print(storage["parameters.all.oscprob"].to_table())
+            # print(storage.get_dict("parameters.all.oscprob").to_table())
+            # ```
+            # the second line does the same, but ensures that the object, obtained from
+            # a storage is another nested dictionary, not a parameter.
+            #
+            # The `joint_nuisance` option instructs the loader to provide a combined
+            # nuisance term for both parameters, rather than two separate ones. The
+            # nuisance terms for created constrained parameters are located in
+            # "outputs.statistic.nuisance.parts" and may be printed with:
+            # ```python
+            # print(storage["outputs.statistic.nuisance"].to_table())
+            # ```
+            # The outputs are typically read-only. They are affected when the parameters
+            # are modified and the relevant values are calculated upon request. In this
+            # case, when the table is printed.
+
+            # Load fixed parameters for Inverse Beta Decay (IBD) cross section:
+            # - particle masses and lifetimes
+            # - constants for Vogel-Beacom IBD cross section
+            load_parameters(path="ibd", load=path_parameters / "pdg2024.yaml")
+            load_parameters(path="ibd.csc", load=path_parameters / "ibd_constants.yaml")
+
+            # Load the conversion constants from metric to natural units:
+            # - reactor thermal power
+            # - the argument of the oscillation probability
+            # `scipy.constants` are used to provide the numbers.
+            # There are no constants, except maybe 1, 1/3 and π, defined within the
+            # code. All the numbers are read from the configuration files.
+            load_parameters(
+                path="conversion", load=path_parameters / "conversion_thermal_power.py"
+            )
+            load_parameters(
+                path="conversion",
+                load=path_parameters / "conversion_oscprob_argument.py",
+            )
+
+            # Load reactor-detector baselines
+            load_parameters(load=path_parameters / "baselines.yaml")
+
+            # IBD and detector normalization parameters:
+            # - free global IBD normalization factor
+            # - fixed detector efficiency (variation is managed by uncorrelated
+            #   "detector_relative.efficiency_factor")
+            # - fixed correction to the number of protons in each detector
+            load_parameters(
+                path="detector", load=path_parameters / "detector_normalization.yaml"
+            )
+            load_parameters(
+                path="detector", load=path_parameters / "detector_efficiency.yaml"
+            )
+            load_parameters(
+                path="detector",
+                load=path_parameters / "detector_nprotons_correction.yaml",
+            )
+
+            # Detector energy scale parameters:
+            # - constrained correlated between detectors energy resolution parameters
+            # - constrained correlated between detectors Liquid Scintillator
+            #   Non-Linearity (LSNL) parameters
+            # - constrained uncorrelated between detectors energy distortion related to
+            #   Inner Acrylic Vessel
+            load_parameters(
+                path="detector", load=path_parameters / "detector_eres.yaml"
+            )
+            load_parameters(
+                path="detector",
+                load=path_parameters / "detector_lsnl.yaml",
+                replicate=index["lsnl_nuisance"],
+            )
+            load_parameters(
+                path="detector",
+                load=path_parameters / "detector_iav_offdiag_scale.yaml",
+                replicate=index["detector"],
+            )
+            # Here we use `replicate` argument and pass a list of values. The parameters
+            # are replicated for each index value: 4 parameters are created for LSNL
+            # and 8 for IAV. The index values are used to
+            # construct the path to parameter. See:
+            # ```python
+            # print(storage["outputs.statistic.nuisance.parts"].to_table())
+            # ```
+            # which contains parameters "AD11", "AD12", etc.
+
+            # Relative uncorrelated between detectors parameters:
+            # - relative efficiency factor (constrained)
+            # - relative energy scale factor (constrained)
+            # the two parameters of each detector are correlated with each other.
+            load_parameters(
+                path="detector",
+                load=path_parameters / "detector_relative.yaml",
+                replicate=index["detector"],
+                keys_order=(
+                    ("pargroup", "par", "detector"),
+                    ("pargroup", "detector", "par"),
+                ),
+            )
+            # By default extra index is appended at the end of the key (path). A
+            # `keys_order` argument is used to change the order of the keys from
+            # group.par.detector to group.detector.par so it is easier to access both
+            # the parameters of a single detector.
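+            # For example, the relative efficiency factor of AD11 ends up at
+            # "detector.detector_relative.AD11.efficiency_factor" rather than
+            # "detector.detector_relative.efficiency_factor.AD11" (assuming the
+            # parameter is named "efficiency_factor" in the configuration file).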
+
+            # Load reactor related parameters:
+            # - constrained nominal thermal power
+            # - constrained mean energy release per fission
+            # - constrained Non-EQuilibrium (NEQ) correction scale
+            # - constrained Spent Nuclear Fuel (SNF) scale
+            # - fixed values of the fission fractions for the SNF calculation
+            load_parameters(
+                path="reactor",
+                load=path_parameters / "reactor_thermal_power_nominal.yaml",
+                replicate=index["reactor"],
+            )
+            load_parameters(
+                path="reactor", load=path_parameters / "reactor_energy_per_fission.yaml"
+            )
+            load_parameters(
+                path="reactor",
+                load=path_parameters / "reactor_snf.yaml",
+                replicate=index["reactor"],
+            )
+            load_parameters(
+                path="reactor",
+                load=path_parameters / "reactor_nonequilibrium_correction.yaml",
+                replicate=combinations["reactor.isotope_neq"],
+            )
+            load_parameters(
+                path="reactor",
+                load=path_parameters / "reactor_snf_fission_fractions.yaml",
+            )
+            # The nominal thermal power is replicated for each reactor, making its
+            # uncertainty uncorrelated. Energy per fission (and fission fraction) has
+            # distinct value (and uncertainties) for each isotope, therefore the
+            # configuration files have an entry for each index and `replicate` argument
+            # is not required. SNF and NEQ corrections are made uncorrelated between the
+            # reactors. As only a fraction of the isotopes is affected by NEQ, a
+            # dedicated index `isotope_neq` is used for it.
+
+            # Read the constrained and correlated fission fractions. The fission
+            # fractions are partially correlated within each reactor. Therefore the
+            # configuration file provides the uncertainties and correlations for
+            # isotopes. The parameters are then replicated for each reactor and the
+            # index is modified to have `isotope` as the innermost part.
+            load_parameters(
+                path="reactor",
+                load=path_parameters / "reactor_fission_fraction_scale.yaml",
+                replicate=index["reactor"],
+                keys_order=(
+                    ("par", "isotope", "reactor"),
+                    ("par", "reactor", "isotope"),
+                ),
+            )
+
+            # Finally the constrained background rates are loaded. They include the
+            # rates and uncertainties for 5 sources of background events for 6-8
+            # detectors during 3 periods of data taking.
+            load_parameters(
+                path="bkg.rate_scale",
+                load=path_parameters / "bkg_rate_scale_acc.yaml",
+                replicate=combinations["period.detector"],
+            )
+            load_parameters(
+                path="bkg.rate",
+                load=path_parameters / "bkg_rates_uncorrelated_dataset_a.yaml",
+            )
+            load_parameters(
+                path="bkg.rate",
+                load=path_parameters / "bkg_rates_correlated_dataset_a.yaml",
+                sigma_visible=True,
+            )
+            load_parameters(
+                path="bkg.uncertainty_scale",
+                load=path_parameters / "bkg_rate_uncertainty_scale_amc.yaml",
+            )
+            load_parameters(
+                path="bkg.uncertainty_scale_by_site",
+                load=path_parameters / "bkg_rate_uncertainty_scale_site.yaml",
+                replicate=combinations["site.period"],
+                ignore_keys=inactive_backgrounds,
+            )
+
+            # Additionally a few constants are provided.
+            # A constant to convert seconds to days for the background estimation
+            load_parameters(
+                format="value",
+                state="fixed",
+                parameters={
+                    "conversion": {
+                        "seconds_in_day": (60 * 60 * 24),
+                        "seconds_in_day_inverse": 1 / (60 * 60 * 24),
+                    }
+                },
+                labels={
+                    "conversion": {
+                        "seconds_in_day": "Number of seconds in a day",
+                        "seconds_in_day_inverse": "Fraction of a day in a second",
+                    }
+                },
+            )
+
+            # 1/3 and 2/3 are needed to construct the Combined Neyman-Pearson χ²
+            load_parameters(
+                format="value",
+                state="fixed",
+                parameters={
+                    "stats": {
+                        "pearson": 2 / 3,
+                        "neyman": 1 / 3,
+                    }
+                },
+                labels={
+                    "stats": {
+                        "pearson": "Coefficient for Pearson's part of CNP 蠂虏",
+                        "neyman": "Coefficient for Neyman's part of CNP 蠂虏",
+                    }
+                },
+            )
+
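+            # For reference, the CNP statistic combines the Neyman (data, nᵢ) and
+            # Pearson (model, μᵢ) variance estimates in each bin i as
+            #   1/σ²_CNP = (1/3)(1/nᵢ) + (2/3)(1/μᵢ),
+            # which is where the two coefficients above enter.
+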
+            # Provide a few variables for handy read/write access of the model objects,
+            # including:
+            # - `nodes` - nested dictionary with nodes. Node is an instantiated function
+            #   and is a main building block of the model. Nodes have inputs (function
+            #   arguments) and outputs (return values). The model is built by connecting
+            #   the outputs of the nodes to inputs of the following nodes.
+            # - `inputs` - storage for not yet connected inputs. The inputs are removed
+            #   from the storage after connection and the storage is expected to be
+            #   empty by the end of the model construction
+            # - `outputs` - the return values of the functions used in the model. A
+            #   single output contains a single numpy array. **All** the final and
+            #   intermediate data may be accessed via outputs. Note: the function
+            #   evaluation is triggered by reading the output.
+            # - `data` - storage with raw (input) data arrays. It is used as an
+            #   intermediate storage, populated with `load_graph_data` and
+            #   `load_record_data` methods.
+            # - `parameters` - already populated storage with parameters.
+            nodes = storage.child("nodes")
+            inputs = storage.child("inputs")
+            outputs = storage.child("outputs")
+            data = storage.child("data")
+            parameters = storage("parameters")
+            parameters_nuisance_normalized = storage("parameters.normalized")
+
+            # In this section the actual parts of the calculation are created as nodes.
+            # First of all the binning is defined for the histograms.
+            # - internal binning for the integration: 240 bins of 50 keV from 0 to
+            #   12 MeV.
+            # - final binning for the statistical analysis: 200 keV from 1.2 MeV to
+            #   8 MeV with two wide bins: below from 0.7 MeV and above up to 12 MeV.
+            # - cosθ (positron angle) edges [-1,1] are defined explicitly for the
+            #   integration of the Inverse Beta Decay (IBD) cross section.
+            in_edges_fine = linspace(0, 12, 241)
+            in_edges_final = concatenate(([0.7], arange(1.2, 8.01, 0.20), [12.0]))
+            in_edges_costheta = [-1, 1]
+
+            # Instantiate the storage nodes for bin edges. In what follows all the
+            # nodes, outputs and inputs are automatically added to the relevant storage
+            # locations. This is done via usage of the `Node.replicate()` class method.
+            # The method is also responsible for creating indexed copies of the classes,
+            # hence the name.
+            edges_costheta, _ = Array.replicate(
+                name="edges.costheta", array=in_edges_costheta
+            )
+            edges_energy_common, _ = Array.replicate(
+                name="edges.energy_common", array=in_edges_fine
+            )
+            edges_energy_final, _ = Array.replicate(
+                name="edges.energy_final", array=in_edges_final
+            )
+            # For example the final energy node is stored as "nodes.edges.energy_final".
+            # The output may be accessed via the node itself, or via the storage of
+            # outputs as "outputs.edges.energy_final".
+            # ```python
+            # node = storage["nodes.edges.energy_final"] # Obtain the node from the
+            #                                            # storage
+            # output = storage["outputs.edges.energy_final"] # Obtain the outputs from
+            #                                                # the storage
+            # output = node.outputs["array"] # Obtain the outputs from the node by name
+            # output = node.outputs[0] # Obtain the outputs from the node by position
+            # print(output.data) # Get the data
+            # ```
+            # The access to the `output.data` triggers calculation recursively and
+            # returns numpy array afterwards. The returned array is read only so the
+            # user has no way to overwrite internal data accidentally.
+
+            # For the fine binning we provide a few views, each of which is associated
+            # to a distinct energy in the energy conversion process:
+            # - Enu - neutrino energy.
+            # - Edep - deposited energy of a positron.
+            # - Escint - energy converted into scintillation.
+            # - Evis - visible energy: scintillation energy after non-linearity
+            #   correction.
+            # - Erec - reconstructed energy: after smearing.
+            View.replicate(name="edges.energy_enu", output=edges_energy_common)
+            edges_energy_edep, _ = View.replicate(
+                name="edges.energy_edep", output=edges_energy_common
+            )
+            edges_energy_escint, _ = View.replicate(
+                name="edges.energy_escint", output=edges_energy_common
+            )
+            edges_energy_evis, _ = View.replicate(
+                name="edges.energy_evis", output=edges_energy_common
+            )
+            edges_energy_erec, _ = View.replicate(
+                name="edges.energy_erec", output=edges_energy_common
+            )
+            # While all these nodes refer to the same array, they will have different
+            # labels, which is needed for making proper plots.
+
+            # Finally, create a node with segment edges for modelling the reactor
+            # electron antineutrino spectra.
+            Array.replicate(
+                name="reactor_anue.spectrum_free_correction.spec_model_edges",
+                array=antineutrino_model_edges,
+            )
+
+            # Initialize the integration nodes. The product of reactor electron
+            # antineutrino spectrum, IBD cross section and electron antineutrino
+            # survival probability is integrated in each bin by a two-fold integral over
+            # deposited energy and positron angle. The precision of integration is
+            # defined beforehand for each Edep bin and independently for each cosθ bin.
+            # As soon as integration precision and bin edges are defined all the values
+            # of Edep and cosθ we need to compute the target functions on are defined as
+            # well.
+            #
+            # Initialize the orders of integration (Gauss-Legendre quadratures) for Edep
+            # and cosθ. The `Array.from_value` method is used to initialize an array
+            # from a single number. The definition of bin edges is used in order to
+            # specify the shape. `store=True` is set so the created nodes are added to
+            # the storage.
+            # In particular, using order 5 for Edep and 3 for cosθ means 15=5×3 points
+            # will be used to integrate each 2d bin.
+            Array.from_value(
+                "kinematics.integration.orders_edep",
+                5,
+                edges=edges_energy_edep,
+                store=True,
+            )
+            Array.from_value(
+                "kinematics.integration.orders_costheta",
+                3,
+                edges=edges_costheta,
+                store=True,
+            )
+
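+            # For intuition, this is what a Gauss-Legendre quadrature of order 5
+            # does on a single bin [a, b] (a plain numpy sketch, independent of
+            # the framework; `f` stands for an arbitrary integrand):
+            # ```python
+            # from numpy.polynomial.legendre import leggauss
+            # x, w = leggauss(5)                        # nodes/weights on [-1, 1]
+            # a, b = 1.0, 1.05                          # a single 50 keV bin
+            # mesh = 0.5 * (b - a) * x + 0.5 * (a + b)  # mapped sample points
+            # integral = 0.5 * (b - a) * (w * f(mesh)).sum()
+            # ```
+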
+            # Instantiate integration nodes. The integration consists of a single
+            # sampling node, which, based on bin edges and integration orders, provides
+            # samples (meshes) of points to compute the integrable function on. In the
+            # case of 2d integration each mesh is a 2d array, similar to one produced by
+            # the numpy.meshgrid function. A dedicated integrator node, which does the
+            # actual integration, is created for each integrable function. In the
+            # Daya Bay case the integrator part is replicated: an instance is created
+            # for each combination of "anue_source.reactor.isotope.detector" indices.
+            # Note that the NEQ part (anue_source) has no contribution from ²³⁸U and
+            # the SNF part has no isotope index at all. In total 384 integration nodes
+            # are created.
+            Integrator.replicate(
+                "gl2d",
+                path="kinematics",
+                names={
+                    "sampler": "sampler",
+                    "integrator": "integral",
+                    "mesh_x": "sampler.mesh_edep",
+                    "mesh_y": "sampler.mesh_costheta",
+                    "orders_x": "sampler.orders_edep",
+                    "orders_y": "sampler.orders_costheta",
+                },
+                replicate_outputs=combinations["anue_source.reactor.isotope.detector"],
+            )
+            # Pass the integration orders to the sampler inputs. The operator `>>` is
+            # used to make a connection `input >> output` or batch connection
+            # `input(s) >> outputs`. The operator connects all the objects on the left
+            # side to all the corresponding objects on the right side. Missing pairs
+            # will cause an exception.
+            outputs.get_value("kinematics.integration.orders_edep") >> inputs.get_value(
+                "kinematics.sampler.orders_edep"
+            )
+            outputs.get_value(
+                "kinematics.integration.orders_costheta"
+            ) >> inputs.get_value("kinematics.sampler.orders_costheta")
+            # Regular way of accessing dictionaries via `[]` operator may be used. Here
+            # we use `storage.get_value(key)` function to ensure the value is an object,
+            # but not a nested dictionary. Similarly `storage.get_dict(key)` may be used
+            # to ensure the value is a nested dictionary.
+            #
+            # There are a few ways to find and access the created inputs and outputs.
+            # 1. get a node as `Node.replicate()` return value.
+            #   - access node's inputs and outputs.
+            # 2. get a node from node storage.
+            #   - access node's inputs and outputs.
+            # 3. access inputs and outputs via the storage.
+            #
+            # If replicate creates a single (main) node, as Integrator does, it is
+            # returned as the first return value. Then print may be used to print
+            # available inputs and outputs.
+            # ```python
+            # integrator, integrator_storage = Integrator.replicate(...)
+            # orders_x >> integrator.inputs["orders_x"] # connect orders X to sampler's
+            #                                           # input
+            # integrator.outputs["x"] >> function_input # connect mesh X to function's
+            #                                           # input
+            # ```
+            # Alternatively, the inputs may be accessed from the storage, as it is done
+            # above. The list of created inputs and outputs may be found by passing
+            # `verbose=True` flag to the replicate function as
+            # `Node.replicate(verbose=True)`. The second return value of the function is
+            # always a created storage with all the inputs and outputs, which can be
+            # printed to the terminal:
+            # ```python
+            # integrator, integrator_storage = Integrator.replicate(...)
+            # integrator_storage.print() # print local storage
+            # storage.print() # print global storage
+            # integrator_storage["inputs"].print() # print inputs from a local storage
+            # ```
+            # The local storage is always merged to the common (context) storage. It is
+            # ensured that there is no overlap in the keys.
+
+            # By now we know all the Edep and cosθ points to compute the target
+            # functions on. The next step is to initialize the functions themselves,
+            # which include: Inverse Beta Decay cross section (IBD), electron
+            # antineutrino survival probability and antineutrino spectrum.
+
+            # Here we create an instance of Inverse Beta Decay cross section, which also
+            # includes conversion from deposited energy Edep to neutrino energy Enu and
+            # corresponding dEnu/dEdep jacobian. The IBD nodes may operate with either
+            # positron energy Ee as input, or deposited energy Edep=Ee+m(e) as input,
+            # which is specified via an argument.
+            ibd, _ = IBDXsecVBO1Group.replicate(
+                path="kinematics.ibd", input_energy="edep"
+            )
+            # IBD cross section depends on a set of parameters, including neutron
+            # lifetime, proton and neutron masses, vector coupling constant, etc. The
+            # values of these parameters were previously loaded and are located in the
+            # 'parameters.constant.ibd' namespace. The IBD node(s) have an input for
+            # each parameter. In order to connect the parameters the `<<` operator is
+            # used as `node << parameters_storage`. It will loop over all the inputs of
+            # the node and find parameters of the same name in the right hand side
+            # namespace. Missing parameters are skipped, extra parameters are ignored.
+            ibd << storage("parameters.constant.ibd")
+            ibd << storage("parameters.constant.ibd.csc")
+            # Connect the integration meshes for Edep and cosθ to the inputs of the
+            # IBD node.
+            outputs.get_value("kinematics.sampler.mesh_edep") >> ibd.inputs["edep"]
+            (
+                outputs.get_value("kinematics.sampler.mesh_costheta")
+                >> ibd.inputs["costheta"]
+            )
+            # There is an output, which yields neutrino energy Enu (mesh), corresponding
+            # to the Edep, cosθ meshes. As it will be used quite often, let us save it
+            # to a variable.
+            kinematic_integrator_enu = ibd.outputs["enu"]
+
+            # Initialize survival probability for reactor electron antineutrinos. As it
+            # is affected by the distance, we replicate it for each combination of
+            # "reactor.detector" indices of count of 48. It is defined for energies in
+            # MeV, while the unit for distance may be choosen between "m" and "km".
+            NueSurvivalProbability.replicate(
+                name="oscprob",
+                distance_unit="m",
+                replicate_outputs=combinations["reactor.detector"],
+                surprobArgConversion=True,
+            )
+            # If created in verbose mode, one can see that the following items are
+            # created:
+            # - nodes.oscprob.DB1.AD11
+            # - nodes.oscprob.DB1.AD12
+            # - ...
+            # - inputs.oscprob.enu.DB1.AD11
+            # - inputs.oscprob.enu.DB1.AD12
+            # - ...
+            # - inputs.oscprob.L.DB1.AD11
+            # - inputs.oscprob.L.DB1.AD12
+            # - ...
+            # - inputs.oscprob.surprobArgConversion.DB1.AD11
+            # - inputs.oscprob.surprobArgConversion.DB1.AD12
+            # - ...
+            # - outputs.oscprob.DB1.AD11
+            # - outputs.oscprob.DB1.AD12
+            # - ...
+            # On one hand each node with its inputs and outputs may be accessed via
+            # "nodes.oscprob.<reactor>.<detector>" address. On the other hand all the
+            # inputs, corresponding to the baselines and input energies, may be accessed
+            # via "inputs.oscprob.L" and "inputs.oscprob.enu" respectively. It is then
+            # under user control whether to provide the same or different data
+            # for them.
+            # Connect the same mesh of neutrino energy to all the 48 inputs:
+            kinematic_integrator_enu >> inputs.get_dict("oscprob.enu")
+            # Connect the corresponding baselines:
+            parameters.get_dict("constant.baseline") >> inputs.get_dict("oscprob.L")
+            # The matching is done based on the index with order being ignored. Thus
+            # baselines stored as "DB1.AD11" or "AD11.DB1" both may be connected to the
+            # input "DB1.AD11". Moreover, if the left part has fewer indices, the
+            # connection will be broadcasted, e.g. "DB1" on the left will be connected
+            # to all the indices on the right, containing "DB1".
+            #
+            # Provide a conversion constant to convert the argument of sin²(...Δm²L/E)
+            # from chosen units to natural ones.
+            parameters.get_value("all.conversion.oscprobArgConversion") >> inputs(
+                "oscprob.surprobArgConversion"
+            )
+            # Also connect free, constrained and constant oscillation parameters to each
+            # instance of the oscillation probability.
+            nodes.get_dict("oscprob") << parameters("free.oscprob")
+            nodes.get_dict("oscprob") << parameters("constrained.oscprob")
+            nodes.get_dict("oscprob") << parameters("constant.oscprob")
+
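+            # For reference, the standard three-flavor survival probability these
+            # parameters enter is expected to have the form (with
+            # Δⱼᵢ = Δm²ⱼᵢL/4E in natural units; the conversion constant connected
+            # above handles the units):
+            #   P(ν̄ₑ→ν̄ₑ) = 1 − sin²2θ₁₃(cos²θ₁₂sin²Δ₃₁ + sin²θ₁₂sin²Δ₃₂)
+            #               − cos⁴θ₁₃sin²2θ₁₂sin²Δ₂₁
+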
+            # The third component is the antineutrino spectrum as dN/dE per fission. We
+            # start from loading the reference antineutrino spectrum (Huber-Mueller)
+            # from input files. There are four spectra for four active isotopes. The
+            # loading is done with the command `load_graph`, which supports hdf5, npz,
+            # root, tsv (files or folder) or compressed tsv.bz2. The command will read
+            # items with names "U235", "U238", "Pu239" and "Pu241" (from
+            # index["isotope"]) as follows:
+            # - hdf5: open with filename, request (X,Y) dataset by name.
+            # - npz: open with filename, get (X,Y)  array from a dictionary by name.
+            # - root: open with filename, get TH1D object by name. Build graph by taking
+            #         left edges of the bins and their heights. `uproot` is used to load
+            #         ROOT files by default. If `$ROOTSYS` is defined, then ROOT is used
+            #         directly.
+            # - tsv: different arrays are kept in distinct files. Therefore for the tsv
+            #        some logic is implemented to find the files. Given 'filename.tsv'
+            #        and 'key', the following files are checked:
+            #        + filename.tsv/key.tsv
+            #        + filename.tsv/filename_key.tsv
+            #        + filename_key.tsv
+            #        + filename.tsv/key.tsv.bz2
+            #        + filename.tsv/filename_key.tsv.bz2
+            #        + filename_key.tsv.bz2
+            #        The graph is expected to be written in 2 columns: X, Y.
+            #
+            # The appropriate loader is chosen based on the extension. The objects are
+            # loaded and stored in the "reactor_anue.neutrino_per_fission_per_MeV_input"
+            # location. As the `merge_x` flag is specified, only one X array is stored,
+            # with no index. A dedicated check is performed to ensure the graphs have
+            # consistent X axes.
+            # Note that each Y node (called spec) will have a reference to the X node,
+            # so it can be used when plotting.
+            load_graph(
+                name="reactor_anue.neutrino_per_fission_per_MeV_input",
+                filenames=path_arrays
+                / f"reactor_anue_spectrum_interp_scaled_approx_50keV.{self.source_type}",
+                x="enu",
+                y="spec",
+                merge_x=True,
+                replicate_outputs=index["isotope"],
+            )
+
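+            # A sketch of the tsv lookup logic described above (an illustration of
+            # the search order, not the actual implementation):
+            # ```python
+            # def find_tsv(filename: Path, key: str) -> Path:
+            #     stem = filename.name.removesuffix(".tsv")
+            #     candidates = [
+            #         filename / f"{key}.tsv",
+            #         filename / f"{stem}_{key}.tsv",
+            #         filename.with_name(f"{stem}_{key}.tsv"),
+            #     ]
+            #     candidates += [c.with_name(c.name + ".bz2") for c in candidates]
+            #     for path in candidates:
+            #         if path.exists():
+            #             return path
+            #     raise FileNotFoundError(f"no file found for {key!r}")
+            # ```
+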
+            # The input antineutrino spectra have a step of 50 keV. They should now be
+            # interpolated to the integration mesh. Similarly to the integration nodes,
+            # interpolation is implemented in two steps by two nodes: `indexer` node
+            # identifies the indexes of segments, which should be used to interpolate
+            # each mesh point. The `interpolator` does the actual interpolation, using
+            # the input coarse data and the indices from the indexer. We instruct the
+            # interpolator to create a distinct node for each isotope by setting the
+            # `replicate_outputs` argument. We use exponential interpolation
+            # (`method="exp"`) and provide names for the nodes and outputs. By default
+            # the interpolator extrapolates in both directions outside of the domain.
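+            # "Exponential" interpolation here presumably means interpolating
+            # linearly in the logarithm of the spectrum, i.e. within a segment
+            # [xₖ, xₖ₊₁]:
+            #   y(x) = yₖ·(yₖ₊₁/yₖ)^((x−xₖ)/(xₖ₊₁−xₖ)),
+            # which suits the steeply falling antineutrino spectrum.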
+            Interpolator.replicate(
+                method="exp",
+                names={
+                    "indexer": "reactor_anue.spec_indexer",
+                    "interpolator": "reactor_anue.neutrino_per_fission_per_MeV_nominal",
+                },
+                replicate_outputs=index["isotope"],
+            )
+            # Connect the common neutrino energy mesh as coarse input of the
+            # interpolator.
+            outputs.get_value(
+                "reactor_anue.neutrino_per_fission_per_MeV_input.enu"
+            ) >> inputs.get_value(
+                "reactor_anue.neutrino_per_fission_per_MeV_nominal.xcoarse"
+            )
+            # Connect the input antineutrino spectra as coarse Y inputs of the
+            # interpolator. This is performed for each of the 4 isotopes.
+            outputs("reactor_anue.neutrino_per_fission_per_MeV_input.spec") >> inputs(
+                "reactor_anue.neutrino_per_fission_per_MeV_nominal.ycoarse"
+            )
+            # All the interpolators use the same target mesh. Use the neutrino energy
+            # mesh provided by the IBD node as the fine X input of the
+            # interpolation.
+            kinematic_integrator_enu >> inputs.get_value(
+                "reactor_anue.neutrino_per_fission_per_MeV_nominal.xfine"
+            )
+
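+            # At this point the interpolated spectra may be inspected, e.g.:
+            # ```python
+            # out = storage["outputs.reactor_anue.neutrino_per_fission_per_MeV_nominal.U235"]
+            # print(out.data)  # ²³⁵U spectrum evaluated on the integration mesh
+            # ```
+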
+            # The antineutrino spectrum in this analysis is subject to five
+            # independent corrections:
+            # 1. Non-EQuilibrium correction (NEQ). A dedicated correction to the ILL
+            #    (Huber) antineutrino spectra from ²³⁵U, ²³⁹Pu, ²⁴¹Pu. Note that ²³⁸U
+            #    has no NEQ applied. In order to handle this an alternative index
+            #    `isotope_neq` is used.
+            # 2. Spent Nuclear Fuel (SNF) correction to account for the existence of
+            #    antineutrino flux from spent nuclear fuel.
+            # 3. Average antineutrino spectrum correction: an unconstrained correction
+            #    to the shape of the average reactor antineutrino spectrum. Having its
+            #    parameters free during the fit effectively implements the relative
+            #    Far-to-Near measurement. The correction curve is single and is applied
+            #    to all the isotopes (correlated between the isotopes).
+            # 4. Huber-Mueller related:
+            #    a. constrained spectrum shape correction due to model uncertainties.
+            #       Uncorrelated between energy intervals and uncorrelated between
+            #       isotopes.
+            #    b. constrained spectrum shape correction due to model uncertainties.
+            #       Correlated between energy intervals and correlated between isotopes.
+            #
+            # For convenience, let us introduce two constants to enable/disable the
+            # SNF and NEQ contributions. The `load_parameters()` function is used. The
+            # syntax is similar to the one used in the yaml files.
+            load_parameters(
+                format="value",
+                state="fixed",
+                parameters={
+                    "reactor": {
+                        "snf_factor": 1.0,
+                        "neq_factor": 1.0,
+                    }
+                },
+                labels={
+                    "reactor": {
+                        "snf_factor": "Common Spent Nuclear Fuel (SNF) factor",
+                        "neq_factor": "Common Non-Equilibrium (NEQ) factor",
+                    }
+                },
+            )
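+            # A minimal sketch of the intended use, assuming parameters expose a
+            # writable `value` attribute (an assumption of this sketch): setting a
+            # factor to 0 disables the corresponding contribution.
+            #
+            #     # parameters.get_value("all.reactor.snf_factor").value = 0.0  # no SNF
+            #     # parameters.get_value("all.reactor.neq_factor").value = 0.0  # no NEQ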
+
+            # Similarly to the case of the electron antineutrino spectrum, load the
+            # corresponding corrections for 3 out of 4 isotopes. The correction C is
+            # applied to the spectrum as follows: S'(Eν) = S(Eν)·(1 + C(Eν))
+            load_graph(
+                name="reactor_nonequilibrium_anue.correction_input",
+                x="enu",
+                y="nonequilibrium_correction",
+                merge_x=True,
+                filenames=path_arrays / f"nonequilibrium_correction.{self.source_type}",
+                replicate_outputs=index["isotope_neq"],
+            )
+
+            # Create interpolators for NEQ correction. Use linear interpolation
+            # (`method="linear"`). The regions outside the domains will be filled with a
+            # constant (0 by default).
+            Interpolator.replicate(
+                method="linear",
+                names={
+                    "indexer": "reactor_nonequilibrium_anue.correction_indexer",
+                    "interpolator": "reactor_nonequilibrium_anue.correction_interpolated",
+                },
+                replicate_outputs=index["isotope_neq"],
+                underflow="constant",
+                overflow="constant",
+            )
+            # Similarly to the case of the antineutrino spectrum, connect the coarse
+            # X, the coarse Y inputs and the target mesh to the interpolator nodes.
+            outputs.get_value(
+                "reactor_nonequilibrium_anue.correction_input.enu"
+            ) >> inputs.get_value(
+                "reactor_nonequilibrium_anue.correction_interpolated.xcoarse"
+            )
+            outputs(
+                "reactor_nonequilibrium_anue.correction_input.nonequilibrium_correction"
+            ) >> inputs("reactor_nonequilibrium_anue.correction_interpolated.ycoarse")
+            kinematic_integrator_enu >> inputs.get_value(
+                "reactor_nonequilibrium_anue.correction_interpolated.xfine"
+            )
+
+            # Now load the SNF correction. The SNF correction is different from NEQ in
+            # the sense that it is computed for each reactor, not for each isotope.
+            # Thus we will use the reactor index for it. Aside from the index, the
+            # loading and interpolation procedure is similar to that of the NEQ
+            # correction.
+            load_graph(
+                name="snf_anue.correction_input",
+                x="enu",
+                y="snf_correction",
+                merge_x=True,
+                filenames=path_arrays / f"snf_correction.{self.source_type}",
+                replicate_outputs=index["reactor"],
+            )
+            Interpolator.replicate(
+                method="linear",
+                names={
+                    "indexer": "snf_anue.correction_indexer",
+                    "interpolator": "snf_anue.correction_interpolated",
+                },
+                replicate_outputs=index["reactor"],
+                underflow="constant",
+                overflow="constant",
+            )
+            outputs.get_value("snf_anue.correction_input.enu") >> inputs.get_value(
+                "snf_anue.correction_interpolated.xcoarse"
+            )
+            outputs("snf_anue.correction_input.snf_correction") >> inputs(
+                "snf_anue.correction_interpolated.ycoarse"
+            )
+            kinematic_integrator_enu >> inputs.get_value(
+                "snf_anue.correction_interpolated.xfine"
+            )
+
+            # Finally create the parametrization of the correction to the shape of the
+            # average reactor electron antineutrino spectrum. The `spec_scale`
+            # correction is defined on user-defined segments `spec_model_edges`. The
+            # values of the correction scale Fᵢ are 0 by default at each edge. There
+            # exist two modes of operation:
+            # - exponential: Sᵢ=exp(Fᵢ) is used for each edge. The result is always
+            #                positive, although non-linear.
+            # - linear: Sᵢ=1+Fᵢ is used. The behavior is always linear, but this
+            #           approach may yield negative results.
+            # The parameters Fᵢ are free parameters of the fit. The correction Sᵢ is
+            # interpolated exponentially within the segments, ensuring the overall
+            # correction is continuous for the whole spectrum.
+            #
+            # To initialize the correction we use a convenience function
+            # `make_y_parameters_for_x`, which uses `load_parameters()` to create
+            # `parameters.free.neutrino_per_fission_factor.spec_scale_00` and other
+            # parameters with proper labels.
+            # The option `hide_nodes` is used to ensure the nodes are not shown on the
+            # graph to keep it less busy. It does not affect the computation mechanism.
+            # Note: in order to create the parameters, the function accesses the edges.
+            # Therefore the node with the edges should be closed. This is typically the
+            # case for Array nodes, which already have data, but may not be the case
+            # for other nodes.
+            make_y_parameters_for_x(
+                outputs.get_value(
+                    "reactor_anue.spectrum_free_correction.spec_model_edges"
+                ),
+                namefmt="spec_scale_{:02d}",
+                format="value",
+                state="variable",
+                key="neutrino_per_fission_factor",
+                values=0.0,
+                labels="Edge {i:02d} ({value:.2f} MeV) reactor antineutrino spectrum correction"
+                + (
+                    " (exp)"
+                    if self.spectrum_correction_mode == "exponential"
+                    else " (linear)"
+                ),
+                hide_nodes=True,
+            )
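+            # A hedged sketch of the result: one free parameter per edge of
+            # `spec_model_edges` is created, named `spec_scale_00`, `spec_scale_01`,
+            # etc. They could be iterated, e.g. (illustration only):
+            #
+            #     for par in parameters("all.neutrino_per_fission_factor").walkvalues():
+            #         print(par)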
+
+            # The created parameters are now available to be used by the minimizer,
+            # but in order to use them conveniently they should be kept as an array.
+            # A Concatenation node is used to assemble the array. The result of the
+            # concatenation will be updated lazily as the minimizer modifies the
+            # parameters.
+            Concatenation.replicate(
+                parameters("all.neutrino_per_fission_factor"),
+                name="reactor_anue.spectrum_free_correction.input",
+            )
+            # For convenience, assign `spec_model_edges` as the X axis for the array
+            # of parameters.
+            outputs.get_value(
+                "reactor_anue.spectrum_free_correction.input"
+            ).dd.axes_meshes = (
+                outputs.get_value(
+                    "reactor_anue.spectrum_free_correction.spec_model_edges"
+                ),
+            )
+
+            # Depending on the chosen mode, convert the parameters Fᵢ to the
+            # correction scale factors Sᵢ.
+            if self.spectrum_correction_mode == "exponential":
+                # Exponentiate the array of values. No `>>` is used as the array is
+                # passed as an argument and the connection is done internally.
+                Exp.replicate(
+                    outputs.get_value("reactor_anue.spectrum_free_correction.input"),
+                    name="reactor_anue.spectrum_free_correction.correction",
+                )
+            else:
+                # Create an array holding a single element equal to 1.
+                Array.from_value(
+                    "reactor_anue.spectrum_free_correction.unity",
+                    1.0,
+                    dtype="d",
+                    mark="1",
+                    label="Array of 1 element =1",
+                    shape=1,
+                    store=True,
+                )
+                # Calculate the sum of [1] and the array of spectral parameters. The
+                # broadcasting is done similarly to numpy, i.e. the result of the
+                # operation has the shape of the spectral parameters and 1 is added
+                # to each element.
+                Sum.replicate(
+                    outputs.get_value("reactor_anue.spectrum_free_correction.unity"),
+                    outputs.get_value("reactor_anue.spectrum_free_correction.input"),
+                    name="reactor_anue.spectrum_free_correction.correction",
+                )
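+                # A numpy analogy of the broadcasting above (illustration only):
+                #
+                #     import numpy as np
+                #     np.array([1.0]) + np.array([f0, f1, f2])  # -> [1+f0, 1+f1, 1+f2]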
+                # For convenience, assign `spec_model_edges` as the X axis for the
+                # array of scale factors.
+                outputs.get_value(
+                    "reactor_anue.spectrum_free_correction.correction"
+                ).dd.axes_meshes = (
+                    outputs.get_value(
+                        "reactor_anue.spectrum_free_correction.spec_model_edges"
+                    ),
+                )
+
+            # Interpolate the spectral correction exponentially. The extrapolation will
+            # be applied to the points outside the domain.
+            Interpolator.replicate(
+                method="exp",
+                names={
+                    "indexer": "reactor_anue.spectrum_free_correction.indexer",
+                    "interpolator": "reactor_anue.spectrum_free_correction.interpolated",
+                },
+            )
+            outputs.get_value(
+                "reactor_anue.spectrum_free_correction.spec_model_edges"
+            ) >> inputs.get_value(
+                "reactor_anue.spectrum_free_correction.interpolated.xcoarse"
+            )
+            outputs.get_value(
+                "reactor_anue.spectrum_free_correction.correction"
+            ) >> inputs.get_value(
+                "reactor_anue.spectrum_free_correction.interpolated.ycoarse"
+            )
+            kinematic_integrator_enu >> inputs.get_value(
+                "reactor_anue.spectrum_free_correction.interpolated.xfine"
+            )
+            # fmt: off
+
+            #
+            # Huber+Mueller spectrum shape uncertainties
+            #   - constrained
+            #   - two parts:
+            #       - uncorrelated between isotopes and energy intervals
+            #       - correlated between isotopes and energy intervals
+            #
+            load_graph(
+                name = "reactor_anue.spectrum_uncertainty",
+                filenames = path_arrays / f"reactor_anue_spectrum_unc_interp_scaled_approx_50keV.{self.source_type}",
+                x = "enu_centers",
+                y = "uncertainty",
+                merge_x = True,
+                replicate_outputs = combinations["anue_unc.isotope"],
+            )
+
+            for isotope in index["isotope"]:
+                make_y_parameters_for_x(
+                        outputs.get_value("reactor_anue.spectrum_uncertainty.enu_centers"),
+                        namefmt = "unc_scale_{:03d}",
+                        format = ("value", "sigma_absolute"),
+                        state = "variable",
+                        key = f"reactor_anue.spectrum_uncertainty.uncorr.{isotope}",
+                        values = (0.0, 1.0),
+                        labels = f"Edge {{i:02d}} ({{value:.2f}} MeV) uncorrelated {index_names[isotope]} spectrum correction",
+                        disable_last_one = False, # True for the constant interpolation, last edge is unused
+                        hide_nodes = True
+                        )
+
+            load_parameters(
+                    path = "reactor_anue.spectrum_uncertainty",
+                    format=("value", "sigma_absolute"),
+                    state="variable",
+                    parameters={
+                        "corr": (0.0, 1.0)
+                        },
+                    labels={
+                        "corr": "Correlated 谓虆 spectrum shape correction"
+                        },
+                    joint_nuisance = False
+                    )
+
+            Concatenation.replicate(
+                    parameters("constrained.reactor_anue.spectrum_uncertainty.uncorr"),
+                    name = "reactor_anue.spectrum_uncertainty.scale.uncorr",
+                    replicate_outputs = index["isotope"]
+                    )
+
+            Product.replicate(
+                    outputs("reactor_anue.spectrum_uncertainty.scale.uncorr"),
+                    outputs("reactor_anue.spectrum_uncertainty.uncertainty.uncorr"),
+                    name = "reactor_anue.spectrum_uncertainty.correction.uncorr",
+                    replicate_outputs = index["isotope"]
+                    )
+
+            Product.replicate(
+                    parameters.get_value("constrained.reactor_anue.spectrum_uncertainty.corr"),
+                    outputs("reactor_anue.spectrum_uncertainty.uncertainty.corr"),
+                    name = "reactor_anue.spectrum_uncertainty.correction.corr",
+                    replicate_outputs = index["isotope"]
+                    )
+
+            single_unity = Array("single_unity", [1.0], dtype="d", mark="1", label="Array of 1 element =1")
+            Sum.replicate(
+                    outputs("reactor_anue.spectrum_uncertainty.correction.uncorr"),
+                    single_unity,
+                    name = "reactor_anue.spectrum_uncertainty.correction.uncorr_factor",
+                    replicate_outputs = index["isotope"]
+                    )
+            Sum.replicate(
+                    outputs("reactor_anue.spectrum_uncertainty.correction.corr"),
+                    single_unity,
+                    name = "reactor_anue.spectrum_uncertainty.correction.corr_factor",
+                    replicate_outputs = index["isotope"]
+                    )
+
+            Product.replicate(
+                    outputs("reactor_anue.spectrum_uncertainty.correction.uncorr_factor"),
+                    outputs("reactor_anue.spectrum_uncertainty.correction.corr_factor"),
+                    name = "reactor_anue.spectrum_uncertainty.correction.full",
+                    replicate_outputs = index["isotope"]
+                    )
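+            # In summary, for each isotope the full correction presumably reads
+            #
+            #     full(Eⱼ) = (1 + uⱼ·σ_uncorr(Eⱼ)) · (1 + c·σ_corr(Eⱼ)),
+            #
+            # where uⱼ are the per-interval uncorrelated nuisance parameters, c is the
+            # single correlated nuisance parameter and σ are the loaded uncertainties.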
+
+            Interpolator.replicate(
+                method = "linear",
+                names = {
+                    "indexer": "reactor_anue.spectrum_uncertainty.correction_index",
+                    "interpolator": "reactor_anue.spectrum_uncertainty.correction_interpolated"
+                    },
+                replicate_outputs=index["isotope"]
+            )
+            outputs.get_value("reactor_anue.spectrum_uncertainty.enu_centers") >> inputs.get_value("reactor_anue.spectrum_uncertainty.correction_interpolated.xcoarse")
+            outputs("reactor_anue.spectrum_uncertainty.correction.full") >> inputs("reactor_anue.spectrum_uncertainty.correction_interpolated.ycoarse")
+            kinematic_integrator_enu >> inputs.get_value("reactor_anue.spectrum_uncertainty.correction_interpolated.xfine")
+
+            #
+            # Antineutrino spectrum with corrections
+            #
+            Product.replicate(
+                    outputs("reactor_anue.neutrino_per_fission_per_MeV_nominal"),
+                    outputs.get_value("reactor_anue.spectrum_free_correction.interpolated"),
+                    outputs("reactor_anue.spectrum_uncertainty.correction_interpolated"),
+                    name = "reactor_anue.part.neutrino_per_fission_per_MeV_main",
+                    replicate_outputs=index["isotope"],
+                    )
+
+            Product.replicate(
+                    outputs("reactor_anue.neutrino_per_fission_per_MeV_nominal"),
+                    outputs("reactor_nonequilibrium_anue.correction_interpolated"),
+                    name = "reactor_anue.part.neutrino_per_fission_per_MeV_neq_nominal",
+                    allow_skip_inputs = True,
+                    skippable_inputs_should_contain = ("U238",),
+                    replicate_outputs=index["isotope_neq"],
+                    )
+
+            #
+            # Livetime
+            #
+            load_record_data(
+                name = "daily_data.detector_all",
+                filenames = path_arrays/f"dayabay_dataset_a/dayabay_a_daily_detector_data.{self.source_type}",
+                replicate_outputs = index["detector"],
+                columns = ("day", "ndet", "livetime", "eff", "efflivetime", "rate_acc"),
+                skip = inactive_detectors
+            )
+            refine_detector_data(
+                data("daily_data.detector_all"),
+                data.child("daily_data.detector"),
+                detectors = index["detector"],
+                skip = inactive_detectors,
+                columns = ("livetime", "eff", "efflivetime", "rate_acc"),
+            )
+
+            if "reactor-28days" in self._future:
+                logger.warning("Future: use merged reactor data, period: 28 days")
+                load_record_data(
+                    name = "daily_data.reactor_all",
+                    filenames = path_arrays/f"reactor_power_28days.{self.source_type}",
+                    replicate_outputs = index["reactor"],
+                    columns = ("period", "day", "ndet", "ndays", "power") + index["isotope_lower"],
+                )
+                assert "reactor-35days" not in self._future, "Mutually exclusive options"
+            elif "reactor-35days" in self._future:
+                logger.warning("Future: use merged reactor data, period: 35 days")
+                load_record_data(
+                    name = "daily_data.reactor_all",
+                    filenames = path_arrays/f"reactor_power_35days.{self.source_type}",
+                    replicate_outputs = index["reactor"],
+                    columns = ("period", "day", "ndet", "ndays", "power") + index["isotope_lower"],
+                )
+            else:
+                load_record_data(
+                    name = "daily_data.reactor_all",
+                    filenames = path_arrays/f"reactor_thermal_power_weekly.{self.source_type}",
+                    replicate_outputs = index["reactor"],
+                    columns = ("period", "day", "ndet", "ndays", "power") + index["isotope_lower"],
+                )
+            refine_reactor_data(
+                data("daily_data.reactor_all"),
+                data.child("daily_data.reactor"),
+                reactors = index["reactor"],
+                isotopes = index["isotope"],
+            )
+
+            sync_reactor_detector_data(
+                    data("daily_data.reactor"),
+                    data("daily_data.detector"),
+                    )
+
+            Array.from_storage(
+                "daily_data.detector.livetime",
+                storage("data"),
+                remove_used_arrays = True,
+                dtype = "d"
+            )
+
+            Array.from_storage(
+                "daily_data.detector.eff",
+                storage("data"),
+                remove_used_arrays = True,
+                dtype = "d"
+            )
+
+            Array.from_storage(
+                "daily_data.detector.efflivetime",
+                storage("data"),
+                remove_used_arrays = True,
+                dtype = "d"
+            )
+
+            if True: # Dataset A
+                logger.warning("Future: create daily accidentals A")
+                Array.from_storage(
+                    "daily_data.detector.rate_acc",
+                    storage("data"),
+                    remove_used_arrays = True,
+                    dtype = "d"
+                )
+
+            Array.from_storage(
+                "daily_data.reactor.power",
+                storage("data"),
+                remove_used_arrays = True,
+                dtype = "d"
+            )
+
+            Array.from_storage(
+                "daily_data.reactor.fission_fraction",
+                storage("data"),
+                remove_used_arrays = True,
+                dtype = "d"
+            )
+            del storage["data.daily_data"]
+
+            #
+            # Neutrino rate
+            #
+            Product.replicate(
+                    parameters("all.reactor.nominal_thermal_power"),
+                    parameters.get_value("all.conversion.reactorPowerConversion"),
+                    name = "reactor.thermal_power_nominal_MeVs",
+                    replicate_outputs = index["reactor"]
+                    )
+
+            Product.replicate(
+                    parameters("central.reactor.nominal_thermal_power"),
+                    parameters.get_value("all.conversion.reactorPowerConversion"),
+                    name = "reactor.thermal_power_nominal_MeVs_central",
+                    replicate_outputs = index["reactor"]
+                    )
+
+            # Time-dependent, fit-dependent (non-nominal) quantities for each reactor core
+            Product.replicate(
+                    parameters("all.reactor.fission_fraction_scale"),
+                    outputs("daily_data.reactor.fission_fraction"),
+                    name = "daily_data.reactor.fission_fraction_scaled",
+                    replicate_outputs=combinations["reactor.isotope.period"],
+                    )
+
+            Product.replicate(
+                    parameters("all.reactor.energy_per_fission"),
+                    outputs("daily_data.reactor.fission_fraction_scaled"),
+                    name = "reactor.energy_per_fission_weighted_MeV",
+                    replicate_outputs=combinations["reactor.isotope.period"],
+                    )
+
+            Sum.replicate(
+                    outputs("reactor.energy_per_fission_weighted_MeV"),
+                    name = "reactor.energy_per_fission_average_MeV",
+                    replicate_outputs=combinations["reactor.period"],
+                    )
+
+            Product.replicate(
+                    outputs("daily_data.reactor.power"),
+                    outputs("daily_data.reactor.fission_fraction_scaled"),
+                    outputs("reactor.thermal_power_nominal_MeVs"),
+                    name = "reactor.thermal_power_isotope_MeV_per_second",
+                    replicate_outputs=combinations["reactor.isotope.period"],
+                    )
+
+            Division.replicate(
+                    outputs("reactor.thermal_power_isotope_MeV_per_second"),
+                    outputs("reactor.energy_per_fission_average_MeV"),
+                    name = "reactor.fissions_per_second",
+                    replicate_outputs=combinations["reactor.isotope.period"],
+                    )
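+            # The chain above presumably implements, per reactor r, isotope i and day,
+            #
+            #     n_fission(r,i) = W(r)·f_i(r) / Σ_k f_k(r)·E_k,
+            #
+            # with W the thermal power in MeV/s, f_i the (scaled) fission fractions
+            # and E_k the energy per fission in MeV.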
+
+            # Nominal, time- and reactor-independent power and fission fractions for SNF
+            # NOTE: central values are used for energy_per_fission
+            Product.replicate(
+                    parameters("central.reactor.energy_per_fission"),
+                    parameters("all.reactor.fission_fraction_snf"),
+                    name = "reactor.energy_per_fission_snf_weighted_MeV",
+                    replicate_outputs=index["isotope"],
+                    )
+
+            Sum.replicate(
+                    outputs("reactor.energy_per_fission_snf_weighted_MeV"),
+                    name = "reactor.energy_per_fission_snf_average_MeV",
+                    )
+
+            # NOTE: central values are used for the thermal power
+            Product.replicate(
+                    parameters("all.reactor.fission_fraction_snf"),
+                    outputs("reactor.thermal_power_nominal_MeVs_central"),
+                    name = "reactor.thermal_power_snf_isotope_MeV_per_second",
+                    replicate_outputs=combinations["reactor.isotope"],
+                    )
+
+            Division.replicate(
+                    outputs("reactor.thermal_power_snf_isotope_MeV_per_second"),
+                    outputs.get_value("reactor.energy_per_fission_snf_average_MeV"),
+                    name = "reactor.fissions_per_second_snf",
+                    replicate_outputs=combinations["reactor.isotope"],
+                    )
+
+            # Effective number of fissions seen in Detector from Reactor from Isotope during Period
+            Product.replicate(
+                    outputs("reactor.fissions_per_second"),
+                    outputs("daily_data.detector.efflivetime"),
+                    name = "reactor_detector.nfissions_daily",
+                    replicate_outputs=combinations["reactor.isotope.detector.period"],
+                    allow_skip_inputs = True,
+                    skippable_inputs_should_contain = inactive_detectors
+                    )
+
+            # Total effective number of fissions from a Reactor seen in the Detector during Period
+            ArraySum.replicate(
+                    outputs("reactor_detector.nfissions_daily"),
+                    name = "reactor_detector.nfissions",
+                    )
+
+            # Baseline factor from Reactor to Detector: 1/(4πL²)
+            InverseSquareLaw.replicate(
+                name="reactor_detector.baseline_factor_per_cm2",
+                scale="m_to_cm",
+                replicate_outputs=combinations["reactor.detector"]
+            )
+            parameters("constant.baseline") >> inputs("reactor_detector.baseline_factor_per_cm2")
+
+            # Number of protons per detector
+            Product.replicate(
+                    parameters.get_value("all.detector.nprotons_nominal_ad"),
+                    parameters("all.detector.nprotons_correction"),
+                    name = "detector.nprotons",
+                    replicate_outputs = index["detector"]
+            )
+
+            # Number of fissions × N protons × ε / (4πL²)  (main)
+            Product.replicate(
+                    outputs("reactor_detector.nfissions"),
+                    outputs("detector.nprotons"),
+                    outputs("reactor_detector.baseline_factor_per_cm2"),
+                    parameters.get_value("all.detector.efficiency"),
+                    name = "reactor_detector.nfissions_nprotons_per_cm2",
+                    replicate_outputs=combinations["reactor.isotope.detector.period"],
+                    )
+
+            Product.replicate(
+                    outputs("reactor_detector.nfissions_nprotons_per_cm2"),
+                    parameters("all.reactor.nonequilibrium_scale"),
+                    parameters.get_value("all.reactor.neq_factor"),
+                    name = "reactor_detector.nfissions_nprotons_per_cm2_neq",
+                    replicate_outputs=combinations["reactor.isotope.detector.period"],
+                    )
+
+            # Detector live time
+            ArraySum.replicate(
+                    outputs("daily_data.detector.livetime"),
+                    name = "detector.livetime",
+                    )
+
+            ArraySum.replicate(
+                    outputs("daily_data.detector.efflivetime"),
+                    name = "detector.efflivetime",
+                    )
+
+            Product.replicate(
+                    outputs("detector.efflivetime"),
+                    parameters.get_value("constant.conversion.seconds_in_day_inverse"),
+                    name="detector.efflivetime_days",
+                    replicate_outputs=combinations["detector.period"],
+                    allow_skip_inputs=True,
+                    skippable_inputs_should_contain=inactive_detectors,
+                    )
+
+            # Collect some summary data for output tables
+            Sum.replicate(
+                    outputs("detector.efflivetime"),
+                    name = "summary.total.efflivetime",
+                    replicate_outputs=index["detector"]
+                    )
+
+            Sum.replicate(
+                    outputs("detector.efflivetime"),
+                    name = "summary.periods.efflivetime",
+                    replicate_outputs=combinations["period.detector"]
+                    )
+
+            Sum.replicate(
+                    outputs("detector.livetime"),
+                    name = "summary.total.livetime",
+                    replicate_outputs=index["detector"]
+                    )
+
+            Sum.replicate(
+                    outputs("detector.livetime"),
+                    name = "summary.periods.livetime",
+                    replicate_outputs=combinations["period.detector"]
+                    )
+
+            Division.replicate(
+                    outputs("summary.total.efflivetime"),
+                    outputs("summary.total.livetime"),
+                    name = "summary.total.eff",
+                    replicate_outputs=index["detector"]
+                    )
+
+            Division.replicate(
+                    outputs("summary.periods.efflivetime"),
+                    outputs("summary.periods.livetime"),
+                    name = "summary.periods.eff",
+                    replicate_outputs=combinations["period.detector"]
+                    )
+
+            # Number of accidentals
+            if True: # Dataset A
+                logger.warning("Future: calculate number of accidentals A")
+                Product.replicate( # TODO: doc
+                        outputs("daily_data.detector.efflivetime"),
+                        outputs("daily_data.detector.rate_acc"),
+                        name="daily_data.detector.num_acc_s_day",
+                        replicate_outputs=combinations["detector.period"],
+                        )
+
+                ArraySum.replicate(
+                        outputs("daily_data.detector.num_acc_s_day"),
+                        name="bkg.count_acc_fixed_s_day",
+                        )
+
+                Product.replicate(
+                        outputs("bkg.count_acc_fixed_s_day"),
+                        parameters["constant.conversion.seconds_in_day_inverse"],
+                        name="bkg.count_fixed.acc",
+                        replicate_outputs=combinations["detector.period"],
+                        )
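+                # Unit bookkeeping for the chain above: rate_acc is presumably a daily
+                # rate (events/day) and efflivetime is in seconds, so their product is
+                # in events·s/day; the final multiplication by seconds_in_day_inverse
+                # (day/s) yields a plain event count.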
+
+            # Effective live time × N protons × ε / (4πL²)  (SNF)
+            Product.replicate(
+                    outputs("detector.efflivetime"),
+                    outputs("detector.nprotons"),
+                    outputs("reactor_detector.baseline_factor_per_cm2"),
+                    parameters("all.reactor.snf_scale"),
+                    parameters.get_value("all.reactor.snf_factor"),
+                    parameters.get_value("all.detector.efficiency"),
+                    name = "reactor_detector.livetime_nprotons_per_cm2_snf",
+                    replicate_outputs=combinations["reactor.detector.period"],
+                    allow_skip_inputs = True,
+                    skippable_inputs_should_contain = inactive_detectors
+                    )
+
+            #
+            # Average SNF Spectrum
+            #
+            Product.replicate(
+                    outputs("reactor_anue.neutrino_per_fission_per_MeV_nominal"),
+                    outputs("reactor.fissions_per_second_snf"),
+                    name = "snf_anue.neutrino_per_second_isotope",
+                    replicate_outputs=combinations["reactor.isotope"],
+                    )
+
+            Sum.replicate(
+                    outputs("snf_anue.neutrino_per_second_isotope"),
+                    name = "snf_anue.neutrino_per_second",
+                    replicate_outputs=index["reactor"],
+                    )
+
+            Product.replicate(
+                    outputs("snf_anue.neutrino_per_second"),
+                    outputs("snf_anue.correction_interpolated"),
+                    name = "snf_anue.neutrino_per_second_snf",
+                    replicate_outputs = index["reactor"]
+                    )
+
+            #
+            # Integrand: flux × oscillation probability × cross section
+            # [Nν·cm²/fission/proton]
+            #
+            Product.replicate(
+                    outputs.get_value("kinematics.ibd.crosssection"),
+                    outputs.get_value("kinematics.ibd.jacobian"),
+                    name="kinematics.ibd.crosssection_jacobian",
+            )
+
+            Product.replicate(
+                    outputs.get_value("kinematics.ibd.crosssection_jacobian"),
+                    outputs("oscprob"),
+                    name="kinematics.ibd.crosssection_jacobian_oscillations",
+                    replicate_outputs=combinations["reactor.detector"]
+            )
+
+            Product.replicate(
+                    outputs("kinematics.ibd.crosssection_jacobian_oscillations"),
+                    outputs("reactor_anue.part.neutrino_per_fission_per_MeV_main"),
+                    name="kinematics.neutrino_cm2_per_MeV_per_fission_per_proton.part.nu_main",
+                    replicate_outputs=combinations["reactor.isotope.detector"]
+            )
+
+            Product.replicate(
+                    outputs("kinematics.ibd.crosssection_jacobian_oscillations"),
+                    outputs("reactor_anue.part.neutrino_per_fission_per_MeV_neq_nominal"),
+                    name="kinematics.neutrino_cm2_per_MeV_per_fission_per_proton.part.nu_neq",
+                    replicate_outputs=combinations["reactor.isotope_neq.detector"]
+            )
+
+            Product.replicate(
+                    outputs("kinematics.ibd.crosssection_jacobian_oscillations"),
+                    outputs("snf_anue.neutrino_per_second_snf"),
+                    name="kinematics.neutrino_cm2_per_MeV_per_fission_per_proton.part.nu_snf",
+                    replicate_outputs=combinations["reactor.detector"]
+            )
+            outputs("kinematics.neutrino_cm2_per_MeV_per_fission_per_proton.part.nu_main") >> inputs("kinematics.integral.nu_main")
+            outputs("kinematics.neutrino_cm2_per_MeV_per_fission_per_proton.part.nu_neq") >> inputs("kinematics.integral.nu_neq")
+            outputs("kinematics.neutrino_cm2_per_MeV_per_fission_per_proton.part.nu_snf") >> inputs("kinematics.integral.nu_snf")
+
+            #
+            # Multiply by the scaling factors:
+            #  - nu_main: fissions_per_second[p,r,i] × effective live time[p,d] × N protons[d] × efficiency[d]
+            #  - nu_neq:  fissions_per_second[p,r,i] × effective live time[p,d] × N protons[d] × efficiency[d] × nonequilibrium scale[r,i] × neq_factor(=1)
+            #  - nu_snf:                               effective live time[p,d] × N protons[d] × efficiency[d] × SNF scale[r]              × snf_factor(=1)
+            #
+            Product.replicate(
+                    outputs("kinematics.integral.nu_main"),
+                    outputs("reactor_detector.nfissions_nprotons_per_cm2"),
+                    name = "eventscount.parts.nu_main",
+                    replicate_outputs = combinations["reactor.isotope.detector.period"]
+                    )
+
+            Product.replicate(
+                    outputs("kinematics.integral.nu_neq"),
+                    outputs("reactor_detector.nfissions_nprotons_per_cm2_neq"),
+                    name = "eventscount.parts.nu_neq",
+                    replicate_outputs = combinations["reactor.isotope_neq.detector.period"],
+                    allow_skip_inputs = True,
+                    skippable_inputs_should_contain = ("U238",)
+                    )
+
+            Product.replicate(
+                    outputs("kinematics.integral.nu_snf"),
+                    outputs("reactor_detector.livetime_nprotons_per_cm2_snf"),
+                    name = "eventscount.parts.nu_snf",
+                    replicate_outputs = combinations["reactor.detector.period"]
+                    )
+
+            Sum.replicate(
+                outputs("eventscount.parts"),
+                name="eventscount.raw",
+                replicate_outputs=combinations["detector.period"]
+            )
+
+            #
+            # Detector effects
+            #
+            load_array(
+                name = "detector.iav",
+                filenames = path_arrays/f"detector_IAV_matrix_P14A_LS.{self.source_type}",
+                replicate_outputs = ("matrix_raw",),
+                name_function = {"matrix_raw": "iav_matrix"},
+                array_kwargs = {
+                    'edges': (edges_energy_escint, edges_energy_edep)
+                    }
+            )
+
+            RenormalizeDiag.replicate(mode="offdiag", name="detector.iav.matrix_rescaled", replicate_outputs=index["detector"])
+            parameters("all.detector.iav_offdiag_scale_factor") >> inputs("detector.iav.matrix_rescaled.scale")
+            outputs.get_value("detector.iav.matrix_raw") >> inputs("detector.iav.matrix_rescaled.matrix")
+
+            VectorMatrixProduct.replicate(name="eventscount.iav", replicate_outputs=combinations["detector.period"])
+            outputs("detector.iav.matrix_rescaled") >> inputs("eventscount.iav.matrix")
+            outputs("eventscount.raw") >> inputs("eventscount.iav.vector")
+
+            load_graph_data(
+                name = "detector.lsnl.curves",
+                x = "edep",
+                y = "evis_parts",
+                merge_x = True,
+                filenames = path_arrays/f"detector_LSNL_curves_Jan2022_newE_v1.{self.source_type}",
+                replicate_outputs = index["lsnl"],
+            )
+
+            # Refine the LSNL curves: interpolate with a smaller step
+            refine_lsnl_data(
+                storage("data.detector.lsnl.curves"),
+                edepname = 'edep',
+                nominalname = 'evis_parts.nominal',
+                refine_times = 4,
+                newmin = 0.5,
+                newmax = 12.1
+            )
+
+            Array.from_storage(
+                "detector.lsnl.curves",
+                storage("data"),
+                meshname = "edep",
+                remove_used_arrays = True
+            )
+
+            Product.replicate(
+                outputs("detector.lsnl.curves.evis_parts"),
+                parameters("constrained.detector.lsnl_scale_a"),
+                name = "detector.lsnl.curves.evis_parts_scaled",
+                allow_skip_inputs = True,
+                skippable_inputs_should_contain = ("nominal",),
+                replicate_outputs=index["lsnl_nuisance"]
+            )
+
+            Sum.replicate(
+                outputs.get_value("detector.lsnl.curves.evis_parts.nominal"),
+                outputs("detector.lsnl.curves.evis_parts_scaled"),
+                name="detector.lsnl.curves.evis_coarse"
+            )
+
+            #
+            # Force Evis(Edep) to grow monotonically
+            # - Required by the matrix calculation algorithm
+            # - Introduced to achieve stable minimization
+            # - Non-monotonic behavior happens for extreme systematic values and is not expected to affect the analysis
+            Monotonize.replicate(
+                    name="detector.lsnl.curves.evis_coarse_monotonous",
+                    index_fraction = 0.5,
+                    gradient = 1.0,
+                    with_x = True
+                    )
+            outputs.get_value("detector.lsnl.curves.edep") >> inputs.get_value("detector.lsnl.curves.evis_coarse_monotonous.x")
+            outputs.get_value("detector.lsnl.curves.evis_coarse") >> inputs.get_value("detector.lsnl.curves.evis_coarse_monotonous.y")
+
+            remap_items(
+                parameters("all.detector.detector_relative"),
+                outputs.child("detector.parameters_relative"),
+                reorder_indices=[
+                    ["detector", "parameters"],
+                    ["parameters", "detector"],
+                ],
+            )
+
+            # Interpolate Evis(Edep)
+            Interpolator.replicate(
+                method = "linear",
+                names = {
+                    "indexer": "detector.lsnl.indexer_fwd",
+                    "interpolator": "detector.lsnl.interpolated_fwd",
+                    },
+            )
+            outputs.get_value("detector.lsnl.curves.edep") >> inputs.get_value("detector.lsnl.interpolated_fwd.xcoarse")
+            outputs.get_value("detector.lsnl.curves.evis_coarse_monotonous") >> inputs.get_value("detector.lsnl.interpolated_fwd.ycoarse")
+            edges_energy_edep >> inputs.get_value("detector.lsnl.interpolated_fwd.xfine")
+
+            # Introduce an energy scale, uncorrelated between detectors, for the interpolated curve: Evis[detector] = s[detector]·Evis(Edep)
+            Product.replicate(
+                outputs.get_value("detector.lsnl.interpolated_fwd"),
+                outputs("detector.parameters_relative.energy_scale_factor"),
+                name="detector.lsnl.curves.evis",
+                replicate_outputs = index["detector"]
+            )
+
+            # Introduce an energy scale, uncorrelated between detectors, for the coarse curve: Evis[detector] = s[detector]·Evis(Edep)
+            Product.replicate(
+                outputs.get_value("detector.lsnl.curves.evis_coarse_monotonous"),
+                outputs("detector.parameters_relative.energy_scale_factor"),
+                name="detector.lsnl.curves.evis_coarse_monotonous_scaled",
+                replicate_outputs = index["detector"]
+            )
+
+            # Interpolate Edep(Evis[detector])
+            Interpolator.replicate(
+                method = "linear",
+                names = {
+                    "indexer": "detector.lsnl.indexer_bwd",
+                    "interpolator": "detector.lsnl.interpolated_bwd",
+                    },
+                replicate_xcoarse = True,
+                replicate_outputs = index["detector"]
+            )
+            outputs.get_dict("detector.lsnl.curves.evis_coarse_monotonous_scaled") >> inputs.get_dict("detector.lsnl.interpolated_bwd.xcoarse")
+            outputs.get_value("detector.lsnl.curves.edep")  >> inputs.get_dict("detector.lsnl.interpolated_bwd.ycoarse")
+            edges_energy_evis.outputs[0] >> inputs.get_dict("detector.lsnl.interpolated_bwd.xfine")
+
+            # Build LSNL matrix
+            AxisDistortionMatrix.replicate(name="detector.lsnl.matrix", replicate_outputs=index["detector"])
+            edges_energy_edep.outputs[0] >> inputs("detector.lsnl.matrix.EdgesOriginal")
+            outputs.get_value("detector.lsnl.interpolated_fwd") >> inputs.get_dict("detector.lsnl.matrix.EdgesModified")
+            outputs.get_dict("detector.lsnl.interpolated_bwd") >> inputs.get_dict("detector.lsnl.matrix.EdgesModifiedBackwards")
+            VectorMatrixProduct.replicate(name="eventscount.evis", replicate_outputs=combinations["detector.period"])
+            outputs("detector.lsnl.matrix") >> inputs("eventscount.evis.matrix")
+            outputs("eventscount.iav") >> inputs("eventscount.evis.vector")
+
+            EnergyResolution.replicate(path="detector.eres")
+            nodes.get_value("detector.eres.sigma_rel") << parameters("constrained.detector.eres")
+            outputs.get_value("edges.energy_evis") >> inputs.get_value("detector.eres.matrix")
+            outputs.get_value("edges.energy_evis") >> inputs.get_value("detector.eres.e_edges")
+
+            VectorMatrixProduct.replicate(name="eventscount.erec", replicate_outputs=combinations["detector.period"])
+            outputs.get_value("detector.eres.matrix") >> inputs("eventscount.erec.matrix")
+            outputs("eventscount.evis") >> inputs("eventscount.erec.vector")
+
+            Product.replicate(
+                parameters.get_value("all.detector.global_normalization"),
+                outputs("detector.parameters_relative.efficiency_factor"),
+                name = "detector.normalization",
+                replicate_outputs=index["detector"],
+            )
+
+            Product.replicate(
+                outputs("detector.normalization"),
+                outputs("eventscount.erec"),
+                name = "eventscount.fine.ibd_normalized",
+                replicate_outputs=combinations["detector.period"],
+            )
+
+            Sum.replicate(
+                outputs("eventscount.fine.ibd_normalized"),
+                name = "eventscount.fine.ibd_normalized_detector",
+                replicate_outputs=combinations["detector"],
+            )
+
+            Rebin.replicate(
+                names={"matrix": "detector.rebin.matrix_ibd", "product": "eventscount.final.ibd"},
+                replicate_outputs=combinations["detector.period"],
+            )
+            edges_energy_erec >> inputs.get_value("detector.rebin.matrix_ibd.edges_old")
+            edges_energy_final >> inputs.get_value("detector.rebin.matrix_ibd.edges_new")
+            outputs("eventscount.fine.ibd_normalized") >> inputs("eventscount.final.ibd")
+
+            #
+            # Backgrounds
+            #
+            if True: # Dataset A
+                logger.warning("Future: use bakckgrounds from dataset A")
+                bkg_names = {
+                    "acc": "accidental",
+                    "lihe": "lithium9",
+                    "fastn": "fastNeutron",
+                    "amc": "amCSource",
+                    "alphan": "carbonAlpha",
+                    "muon": "muonRelated"
+                }
+                load_hist(
+                    name = "bkg",
+                    x = "erec",
+                    y = "spectrum_shape",
+                    merge_x = True,
+                    normalize = True,
+                    filenames = path_arrays/f"dayabay_dataset_a/dayabay_a_bkg_spectra_{{}}.{self.source_type}",
+                    replicate_files = index["period"],
+                    replicate_outputs = combinations["bkg.detector"],
+                    skip = inactive_combinations,
+                    key_order = (
+                        ("period", "bkg", "detector"),
+                        ("bkg", "detector", "period"),
+                    ),
+                    name_function = lambda _, idx: f"spectrum_shape_{idx[0]}_{idx[1]}"
+                )
+
+            # TODO: labels
+            Product.replicate(
+                    parameters("all.bkg.rate"),
+                    outputs("detector.efflivetime_days"),
+                    name = "bkg.count_fixed",
+                    replicate_outputs=combinations["bkg_stable.detector.period"]
+                    )
+
+            # TODO: labels
+            Product.replicate(
+                    parameters("all.bkg.rate_scale.acc"),
+                    outputs("bkg.count_fixed.acc"),
+                    name = "bkg.count.acc",
+                    replicate_outputs=combinations["detector.period"]
+                    )
+
+            remap_items(
+                    parameters.get_dict("constrained.bkg.uncertainty_scale_by_site"),
+                    outputs.child("bkg.uncertainty_scale"),
+                    rename_indices = site_arrangement,
+                    skip_indices_target = inactive_detectors,
+                    )
+
+            # TODO: labels
+            ProductShiftedScaled.replicate(
+                    outputs("bkg.count_fixed"),
+                    parameters("sigma.bkg.rate"),
+                    outputs.get_dict("bkg.uncertainty_scale"),
+                    name = "bkg.count",
+                    shift=1.0,
+                    replicate_outputs=combinations["bkg_site_correlated.detector.period"],
+                    allow_skip_inputs = True,
+                    skippable_inputs_should_contain = combinations["bkg_not_site_correlated.detector.period"]
+                    )
+
+            # TODO: labels
+            ProductShiftedScaled.replicate(
+                    outputs("bkg.count_fixed.amc"),
+                    parameters("sigma.bkg.rate.amc"),
+                    parameters["all.bkg.uncertainty_scale.amc"],
+                    name = "bkg.count.amc",
+                    shift=1.0,
+                    replicate_outputs=combinations["detector.period"],
+                    )
+
+            outputs["bkg.count.alphan"] = outputs.get_dict("bkg.count_fixed.alphan")
+
+            # TODO: labels
+            Product.replicate(
+                    outputs("bkg.count"),
+                    outputs("bkg.spectrum_shape"),
+                    name="bkg.spectrum",
+                    replicate_outputs=combinations["bkg.detector.period"],
+                    )
+
+            # Summary data
+            Sum.replicate(
+                    outputs("bkg.count"),
+                    name = "summary.total.bkg_count",
+                    replicate_outputs=combinations["bkg.detector"],
+                    )
+
+            remap_items(
+                    outputs("bkg.count"),
+                    outputs.child("summary.periods.bkg_count"),
+                    reorder_indices=[
+                        ["bkg", "detector", "period"],
+                        ["bkg", "period", "detector"],
+                    ],
+                    )
+
+            Division.replicate(
+                    outputs("summary.total.bkg_count"),
+                    outputs("summary.total.efflivetime"),
+                    name = "summary.total.bkg_rate_s",
+                    replicate_outputs=combinations["bkg.detector"]
+                    )
+
+            Division.replicate(
+                    outputs("summary.periods.bkg_count"),
+                    outputs("summary.periods.efflivetime"),
+                    name = "summary.periods.bkg_rate_s",
+                    replicate_outputs=combinations["bkg.period.detector"]
+                    )
+
+            Product.replicate(
+                    outputs("summary.total.bkg_rate_s"),
+                    parameters["constant.conversion.seconds_in_day"],
+                    name = "summary.total.bkg_rate",
+                    replicate_outputs=combinations["bkg.detector"]
+                    )
+
+            Product.replicate(
+                    outputs("summary.periods.bkg_rate_s"),
+                    parameters["constant.conversion.seconds_in_day"],
+                    name = "summary.periods.bkg_rate",
+                    replicate_outputs=combinations["bkg.period.detector"]
+                    )
+
+            Sum.replicate(
+                    outputs("summary.total.bkg_rate.fastn"),
+                    outputs("summary.total.bkg_rate.muonx"),
+                    name = "summary.total.bkg_rate_fastn_muonx",
+                    replicate_outputs=index["detector"]
+                    )
+
+            Sum.replicate(
+                    outputs("summary.periods.bkg_rate.fastn"),
+                    outputs("summary.periods.bkg_rate.muonx"),
+                    name = "summary.periods.bkg_rate_fastn_muonx",
+                    replicate_outputs=combinations["period.detector"]
+                    )
+
+            Sum.replicate(
+                    outputs("summary.total.bkg_rate"),
+                    name = "summary.total.bkg_rate_total",
+                    replicate_outputs=index["detector"]
+                    )
+
+            Sum.replicate(
+                    outputs("summary.periods.bkg_rate"),
+                    name = "summary.periods.bkg_rate_total",
+                    replicate_outputs=combinations["period.detector"]
+                    )
+
+            Sum.replicate(
+                    outputs("bkg.spectrum"),
+                    name="eventscount.fine.bkg",
+                    replicate_outputs=combinations["detector.period"],
+                    )
+
+            Sum.replicate(
+                    outputs("eventscount.fine.ibd_normalized"),
+                    outputs("eventscount.fine.bkg"),
+                    name="eventscount.fine.total",
+                    replicate_outputs=combinations["detector.period"],
+                    check_edges_contents=True,
+                    )
+
+            Rebin.replicate(
+                    names={"matrix": "detector.rebin.matrix_bkg", "product": "eventscount.final.bkg"},
+                    replicate_outputs=combinations["detector.period"],
+            )
+            edges_energy_erec >> inputs.get_value("detector.rebin.matrix_bkg.edges_old")
+            edges_energy_final >> inputs.get_value("detector.rebin.matrix_bkg.edges_new")
+            outputs("eventscount.fine.bkg") >> inputs("eventscount.final.bkg")
+
+            Sum.replicate(
+                outputs("eventscount.final.ibd"),
+                outputs("eventscount.final.bkg"),
+                name="eventscount.final.detector_period",
+                replicate_outputs=combinations["detector.period"],
+            )
+
+            Concatenation.replicate(
+                outputs("eventscount.final.detector_period"),
+                name="eventscount.final.concatenated.detector_period",
+            )
+
+            Sum.replicate(
+                outputs("eventscount.final.detector_period"),
+                name="eventscount.final.detector",
+                replicate_outputs=index["detector"],
+            )
+
+            Concatenation.replicate(
+                outputs("eventscount.final.detector"),
+                name="eventscount.final.concatenated.detector"
+            )
+
+            outputs["eventscount.final.concatenated.selected"] = outputs[f"eventscount.final.concatenated.{self.concatenation_mode}"]
+
+            #
+            # Covariance matrices
+            #
+            self._covariance_matrix = CovarianceMatrixGroup(store_to="covariance")
+
+            for name, parameters_source in self.systematic_uncertainties_groups().items():
+                self._covariance_matrix.add_covariance_for(name, parameters_nuisance_normalized[parameters_source])
+            self._covariance_matrix.add_covariance_sum()
+
+            outputs.get_value("eventscount.final.concatenated.selected") >> self._covariance_matrix
+
+            npars_cov = self._covariance_matrix.get_parameters_count()
+            list_parameters_nuisance_normalized = list(parameters_nuisance_normalized.walkvalues())
+            npars_nuisance = len(list_parameters_nuisance_normalized)
+            if npars_cov!=npars_nuisance:
+                raise RuntimeError("Some parameters are missing from covariance matrix")
+
+            parinp_mc = ParArrayInput(
+                name="mc.parameters.inputs",
+                parameters=list_parameters_nuisance_normalized,
+            )
+
+            #
+            # Statistic
+            #
+            # Create Nuisance parameters
+            Sum.replicate(outputs("statistic.nuisance.parts"), name="statistic.nuisance.all")
+
+            MonteCarlo.replicate(
+                name="data.pseudo.self",
+                mode=self.monte_carlo_mode,
+                generator=self._random_generator,
+            )
+            outputs.get_value("eventscount.final.concatenated.selected") >> inputs.get_value("data.pseudo.self.data")
+            self._frozen_nodes["pseudodata"] = (nodes.get_value("data.pseudo.self"),)
+
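+            # The proxy forwards the model's own pseudo data by default and
+            # allows the data source of the likelihood to be switched to an
+            # external input without rebuilding the graph.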
+            Proxy.replicate(
+                name="data.pseudo.proxy",
+            )
+            outputs.get_value("data.pseudo.self") >> inputs.get_value("data.pseudo.proxy.input")
+
+            MonteCarlo.replicate(
+                name="covariance.data.fixed",
+                mode="asimov",
+                generator=self._random_generator,
+            )
+            outputs.get_value("eventscount.final.concatenated.selected") >> inputs.get_value("covariance.data.fixed.data")
+            self._frozen_nodes["covariance_data_fixed"] = (nodes.get_value("covariance.data.fixed"),)
+
+            MonteCarlo.replicate(
+                name="mc.parameters.toymc",
+                mode="normal-unit",
+                shape=(npars_nuisance,),
+                generator=self._random_generator,
+            )
+            outputs.get_value("mc.parameters.toymc") >> parinp_mc
+            nodes["mc.parameters.inputs"] = parinp_mc
+
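+            # Cholesky decompositions of the statistical uncertainties, used
+            # below as the "errors" inputs of the chi-squared nodes: variable
+            # Pearson (current prediction), fixed Pearson (frozen Asimov
+            # data) and Neyman (pseudo data).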
+            Cholesky.replicate(name="cholesky.stat.variable")
+            outputs.get_value("eventscount.final.concatenated.selected") >> inputs.get_value("cholesky.stat.variable")
+
+            Cholesky.replicate(name="cholesky.stat.fixed")
+            outputs.get_value("covariance.data.fixed") >> inputs.get_value("cholesky.stat.fixed")
+
+            Cholesky.replicate(name="cholesky.stat.data.fixed")
+            outputs.get_value("data.pseudo.proxy") >> inputs.get_value("cholesky.stat.data.fixed")
+
+            SumMatOrDiag.replicate(name="covariance.covmat_full_p.stat_fixed")
+            outputs.get_value("covariance.data.fixed") >> nodes.get_value("covariance.covmat_full_p.stat_fixed")
+            outputs.get_value("covariance.covmat_syst.sum") >> nodes.get_value("covariance.covmat_full_p.stat_fixed")
+
+            Cholesky.replicate(name="cholesky.covmat_full_p.stat_fixed")
+            outputs.get_value("covariance.covmat_full_p.stat_fixed") >> inputs.get_value("cholesky.covmat_full_p.stat_fixed")
+
+            SumMatOrDiag.replicate(name="covariance.covmat_full_p.stat_variable")
+            outputs.get_value("eventscount.final.concatenated.selected") >> nodes.get_value("covariance.covmat_full_p.stat_variable")
+            outputs.get_value("covariance.covmat_syst.sum") >> nodes.get_value("covariance.covmat_full_p.stat_variable")
+
+            Cholesky.replicate(name="cholesky.covmat_full_p.stat_variable")
+            outputs.get_value("covariance.covmat_full_p.stat_variable") >> inputs.get_value("cholesky.covmat_full_p.stat_variable")
+
+            SumMatOrDiag.replicate(name="covariance.covmat_full_n")
+            outputs.get_value("data.pseudo.proxy") >> nodes.get_value("covariance.covmat_full_n")
+            outputs.get_value("covariance.covmat_syst.sum") >> nodes.get_value("covariance.covmat_full_n")
+
+            Cholesky.replicate(name="cholesky.covmat_full_n")
+            outputs.get_value("covariance.covmat_full_n") >> inputs.get_value("cholesky.covmat_full_n")
+
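+            # The chi-squared variants below differ in the treatment of the
+            # statistical uncertainties (Pearson, Neyman or CNP; fixed or
+            # variable) and of the systematics (nuisance pull terms or
+            # covariance matrix).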
+
+            # (1) chi-squared Pearson stat (fixed Pearson errors)
+            Chi2.replicate(name="statistic.stat.chi2p_iterative")
+            outputs.get_value("eventscount.final.concatenated.selected") >> inputs.get_value("statistic.stat.chi2p_iterative.theory")
+            outputs.get_value("cholesky.stat.fixed") >> inputs.get_value("statistic.stat.chi2p_iterative.errors")
+            outputs.get_value("data.pseudo.proxy") >> inputs.get_value("statistic.stat.chi2p_iterative.data")
+
+            # (2-2) chi-squared Neyman stat
+            Chi2.replicate(name="statistic.stat.chi2n")
+            outputs.get_value("eventscount.final.concatenated.selected") >> inputs.get_value("statistic.stat.chi2n.theory")
+            outputs.get_value("cholesky.stat.data.fixed") >> inputs.get_value("statistic.stat.chi2n.errors")
+            outputs.get_value("data.pseudo.proxy") >> inputs.get_value("statistic.stat.chi2n.data")
+
+            # (2-1) chi-squared Pearson stat (variable Pearson errors)
+            Chi2.replicate(name="statistic.stat.chi2p")
+            outputs.get_value("eventscount.final.concatenated.selected") >> inputs.get_value("statistic.stat.chi2p.theory")
+            outputs.get_value("cholesky.stat.variable") >> inputs.get_value("statistic.stat.chi2p.errors")
+            outputs.get_value("data.pseudo.proxy") >> inputs.get_value("statistic.stat.chi2p.data")
+
+            # (5) chi-squared Pearson syst (fixed Pearson errors)
+            Chi2.replicate(name="statistic.full.chi2p_covmat_fixed")
+            outputs.get_value("data.pseudo.proxy") >> inputs.get_value("statistic.full.chi2p_covmat_fixed.data")
+            outputs.get_value("eventscount.final.concatenated.selected") >> inputs.get_value("statistic.full.chi2p_covmat_fixed.theory")
+            outputs.get_value("cholesky.covmat_full_p.stat_fixed") >> inputs.get_value("statistic.full.chi2p_covmat_fixed.errors")
+
+            # (2-3) chi-squared Neyman syst
+            Chi2.replicate(name="statistic.full.chi2n_covmat")
+            outputs.get_value("data.pseudo.proxy") >> inputs.get_value("statistic.full.chi2n_covmat.data")
+            outputs.get_value("eventscount.final.concatenated.selected") >> inputs.get_value("statistic.full.chi2n_covmat.theory")
+            outputs.get_value("cholesky.covmat_full_n") >> inputs.get_value("statistic.full.chi2n_covmat.errors")
+
+            # (2-4) chi-squared Pearson syst (variable Pearson errors)
+            Chi2.replicate(name="statistic.full.chi2p_covmat_variable")
+            outputs.get_value("data.pseudo.proxy") >> inputs.get_value("statistic.full.chi2p_covmat_variable.data")
+            outputs.get_value("eventscount.final.concatenated.selected") >> inputs.get_value("statistic.full.chi2p_covmat_variable.theory")
+            outputs.get_value("cholesky.covmat_full_p.stat_variable") >> inputs.get_value("statistic.full.chi2p_covmat_variable.errors")
+
+            CNPStat.replicate(name="statistic.staterr.cnp")
+            outputs.get_value("data.pseudo.proxy") >> inputs.get_value("statistic.staterr.cnp.data")
+            outputs.get_value("eventscount.final.concatenated.selected") >> inputs.get_value("statistic.staterr.cnp.theory")
+
+            # (3) chi-squared CNP stat
+            Chi2.replicate(name="statistic.stat.chi2cnp")
+            outputs.get_value("data.pseudo.proxy") >> inputs.get_value("statistic.stat.chi2cnp.data")
+            outputs.get_value("eventscount.final.concatenated.selected") >> inputs.get_value("statistic.stat.chi2cnp.theory")
+            outputs.get_value("statistic.staterr.cnp") >> inputs.get_value("statistic.stat.chi2cnp.errors")
+
+            # (2) chi-squared Pearson stat + pull (fixed Pearson errors)
+            Sum.replicate(
+                outputs.get_value("statistic.stat.chi2p_iterative"),
+                outputs.get_value("statistic.nuisance.all"),
+                name="statistic.full.chi2p_iterative",
+            )
+            # (4) chi-squared CNP stat + pull (fixed Pearson errors)
+            Sum.replicate(
+                outputs.get_value("statistic.stat.chi2cnp"),
+                outputs.get_value("statistic.nuisance.all"),
+                name="statistic.full.chi2cnp",
+            )
+
+            LogProdDiag.replicate(name="statistic.log_prod_diag")
+            outputs.get_value("cholesky.covmat_full_p.stat_variable") >> inputs.get_value("statistic.log_prod_diag")
+
+            # (7) chi-squared Pearson stat + log|V| (unfixed Pearson errors)
+            Sum.replicate(
+                outputs.get_value("statistic.stat.chi2p"),
+                outputs.get_value("statistic.log_prod_diag"),
+                name="statistic.stat.chi2p_unbiased",
+            )
+
+            # (8) chi-squared Pearson stat + log|V| + pull (unfixed Pearson errors)
+            Sum.replicate(
+                outputs.get_value("statistic.stat.chi2p_unbiased"),
+                outputs.get_value("statistic.nuisance.all"),
+                name="statistic.full.chi2p_unbiased",
+            )
+
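+            # The covariance-matrix CNP chi-squared is a weighted sum of the
+            # Pearson and Neyman chi-squared; for the combined Neyman-Pearson
+            # statistic of X. Ji et al. (arXiv:1903.07185) the weights
+            # all.stats.pearson and all.stats.neyman are expected to be 2/3
+            # and 1/3, respectively.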
+            Product.replicate(
+                parameters.get_value("all.stats.pearson"),
+                outputs.get_value("statistic.full.chi2p_covmat_variable"),
+                name="statistic.helper.pearson",
+            )
+            Product.replicate(
+                parameters.get_value("all.stats.neyman"),
+                outputs.get_value("statistic.full.chi2n_covmat"),
+                name="statistic.helper.neyman",
+            )
+            # (6) chi-squared CNP stat+syst (covariance matrix)
+            Sum.replicate(
+                outputs.get_value("statistic.helper.pearson"),
+                outputs.get_value("statistic.helper.neyman"),
+                name="statistic.full.chi2cnp_covmat",
+            )
+            # fmt: on
+
+        self._setup_labels()
+
+        # Ensure stem nodes are calculated
+        self._touch()
+
+    @staticmethod
+    def _create_random_generator(seed: int) -> Generator:
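+        """Create a MT19937 generator seeded via a child SeedSequence of the given seed."""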
+        from numpy.random import MT19937, SeedSequence
+
+        (sequence,) = SeedSequence(seed).spawn(1)
+        algo = MT19937(seed=sequence.spawn(1)[0])
+        return Generator(algo)
+
+    def _touch(self):
+        for output in (
+            self.storage["outputs"].get_dict("eventscount.final.detector").walkvalues()
+        ):
+            output.touch()
+
+    def update_frozen_nodes(self):
+        for nodes in self._frozen_nodes.values():
+            for node in nodes:
+                node.unfreeze()
+                node.touch()
+
+    def update_covariance_matrix(self):
+        self._covariance_matrix.update_matrices()
+
+    def set_parameters(
+        self,
+        parameter_values: (
+            Mapping[str, float | str] | Sequence[tuple[str, float | str]]
+        ) = (),
+        *,
+        mode: Literal["value", "normvalue"] = "value",
+    ):
+        parameters_storage = self.storage("parameters.all")
+        if isinstance(parameter_values, Mapping):
+            iterable = parameter_values.items()
+        else:
+            iterable = parameter_values
+
+        match mode:
+            case "value":
+
+                def setter(par, parname, value):
+                    par.push(value)
+                    print(f"Push {parname}={value}")
+
+            case "normvalue":
+
+                def setter(par, parname, value):
+                    par.normvalue = value
+                    print(f"Set norm {parname}={value}")
+
+            case _:
+                raise ValueError(mode)
+
+        for parname, svalue in iterable:
+            value = float(svalue)
+            par = parameters_storage[parname]
+            setter(par, parname, value)
+
+    def next_sample(
+        self, *, mc_parameters: bool = True, mc_statistics: bool = True
+    ) -> None:
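+        """Generate a new pseudo-data sample.
+
+        If enabled, the nuisance parameters are set to a fresh random
+        sample, the pseudo data are regenerated with that sample applied,
+        and the parameters are then reset to their central values.
+        """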
+        if mc_parameters:
+            self.storage.get_value("nodes.mc.parameters.toymc").next_sample()
+            self.storage.get_value("nodes.mc.parameters.inputs").touch()
+
+        if mc_statistics:
+            self.storage.get_value("nodes.data.pseudo.self").next_sample()
+
+        if mc_parameters:
+            self.storage.get_value("nodes.mc.parameters.toymc").reset()
+            self.storage.get_value("nodes.mc.parameters.inputs").touch()
+
+    @staticmethod
+    def systematic_uncertainties_groups() -> dict[str, str]:
+        return dict(_SYSTEMATIC_UNCERTAINTIES_GROUPS)
+
+    def _setup_labels(self):
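+        """Load node and output labels from the sibling YAML file.
+
+        In strict mode, raise RuntimeError if any label group was not
+        consumed by the model.
+        """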
+        labels = LoadYaml(relpath(__file__.replace(".py", "_labels.yaml")))
+
+        processed_keys_set = set()
+        self.storage("nodes").read_labels(labels, processed_keys_set=processed_keys_set)
+        self.storage("outputs").read_labels(
+            labels, processed_keys_set=processed_keys_set
+        )
+        self.storage("inputs").remove_connected_inputs()
+        self.storage.read_paths(index=self.index)
+        self.graph.build_index_dict(self.index)
+
+        labels_mk = NestedMKDict(labels, sep=".")
+        if not self._strict:
+            return
+
+        for key in processed_keys_set:
+            labels_mk.delete_with_parents(key)
+
+        if not labels_mk:
+            return
+
+        unused_keys = list(labels_mk.walkjoinedkeys())
+        may_ignore: tuple[str, ...] = ()  # kept for future use
+        for key_may_ignore in may_ignore:
+            for i, key_unused in reversed(tuple(enumerate(unused_keys))):
+                if key_unused.startswith(key_may_ignore):
+                    del unused_keys[i]
+
+        if not unused_keys:
+            return
+
+        raise RuntimeError(
+            f"The following label groups were not used: {', '.join(unused_keys)}"
+        )
+
+    def make_summary_table(
+        self, period: Literal["total", "6AD", "8AD", "7AD"] = "total"
+    ) -> DataFrame:
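+        """Collect per-detector summary values (livetime, efficiency and
+        background rates) from the storage into a DataFrame with one row
+        per quantity and one column per detector."""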
+        match period:
+            case "total":
+                source_fmt = f"summary.{period}.{{name}}"
+            case "6AD" | "8AD" | "7AD":
+                source_fmt = f"summary.periods.{{name}}.{period}"
+            case _:
+                raise ValueError(period)
+
+        columns_sources = {
+            # "count_ibd_candidates": "",
+            "daq_time_day": source_fmt.format(name="livetime"),
+            "eff": source_fmt.format(name="eff"),
+            "rate_acc": source_fmt.format(name="bkg_rate.acc"),
+            "rate_fastn": source_fmt.format(name="bkg_rate.fastn"),
+            "rate_muonx": source_fmt.format(name="bkg_rate.muonx"),
+            "rate_fastn_muonx": source_fmt.format(name="bkg_rate_fastn_muonx"),
+            "rate_lihe": source_fmt.format(name="bkg_rate.lihe"),
+            "rate_amc": source_fmt.format(name="bkg_rate.amc"),
+            "rate_alphan": source_fmt.format(name="bkg_rate.alphan"),
+            "rate_bkg_total": source_fmt.format(name="bkg_rate_total"),
+            # "rate_nu": ""
+        }
+
+        columns = ["name"] + list(self.index["detector"])
+        df = DataFrame(columns=columns, index=range(len(columns_sources)), dtype="f8")
+        df = df.astype({"name": str})
+
+        for i, (key, path) in enumerate(columns_sources.items()):
+            try:
+                source = self.storage["outputs"].get_dict(path)
+            except KeyError:
+                continue
+            for k, output in source.walkitems():
+                value = output.data[0]
+
+                df.loc[i, k] = value
+                df.loc[i, "name"] = key
+        df = df.fillna(0.0)
+        df = df.astype({"name": "S"})
+
+        return df
+
+    def print_summary_table(self):
+        df = self.make_summary_table()
+        print(df.to_string())
diff --git a/models/dayabay_v0e_labels.yaml b/models/dayabay_v0e_labels.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e912a2e56d6af1b5c5b77ddb54925b2f8e9701bb
--- /dev/null
+++ b/models/dayabay_v0e_labels.yaml
@@ -0,0 +1,726 @@
+edges:
+  costheta:
+    text: "Positron angle cosθ"
+    latex: "Positron angle $\\cos\\theta$"
+    axis: "$\\cos\\theta$"
+    plotmethod: 'none'
+  energy_common:
+    text: "Common energy (E) bin edges [MeV]"
+    axis: "$E$ [MeV]"
+    plotmethod: 'none'
+  energy_final:
+    text: "Final reconstructed energy (Erec) bin edges [MeV]"
+    axis: "$E_{\\rm rec}$ [MeV]"
+    plotmethod: 'none'
+  energy_enu:
+    text: "Neutrino energy (Eν) bin edges [MeV]"
+    latex: "Neutrino energy $E_{\\nu}$ bin edges [MeV]"
+    axis: "$E_{\\nu}$ [MeV]"
+    plotmethod: 'none'
+  energy_edep:
+    text: "Deposited energy (Edep) bin edges [MeV]"
+    latex: "Deposited energy $E_{\\rm dep}$ bin edges [MeV]"
+    axis: "$E_{\\rm dep}$ [MeV]"
+    plotmethod: 'none'
+  energy_escint:
+    text: "Deposited in scintillator energy (Escint) bin edges [MeV]"
+    latex: "Deposited in scintillator energy $E_{\\rm scint}$ bin edges [MeV]"
+    axis: "$E_{\\rm scint}$ [MeV]"
+    plotmethod: 'none'
+  energy_evis:
+    text: "Visible energy (Evis) bin edges [MeV]"
+    latex: "Visible energy $E_{\\rm vis}$ bin edges [MeV]"
+    axis: "$E_{\\rm vis}$ [MeV]"
+    plotmethod: 'none'
+  energy_erec:
+    text: "Reconstructed energy (Erec) bin edges [MeV]"
+    latex: "Reconstructed energy $E_{\\rm rec}$ bin edges [MeV]"
+    axis: "$E_{\\rm rec}$ [MeV]"
+    plotmethod: 'none'
+kinematics:
+  integration:
+    orders_edep:
+      text: "Evis integration orders (precision)"
+      latex: "$E_{\\rm vis}$ integration orders (precision)"
+      axis: "order"
+      plotmethod: 'none'
+    orders_costheta:
+      text: "cosθ integration orders (precision)"
+      latex: "$\\cos\\theta$ integration orders (precision)"
+      axis: "order"
+      plotmethod: 'none'
+  sampler:
+    node:
+      text: "dE, dcosθ integration sampler"
+    mesh_edep:
+      text: "Deposited energy integration mesh"
+      axis: "$E_{\\rm dep}$ [MeV]"
+      plotmethod: 'none'
+    mesh_costheta:
+      text: "cosθ integration mesh"
+      axis: "$\\cos\\theta$"
+      plotmethod: 'none'
+  ibd:
+    crosssection:
+      text: "IBD cross section σ(Eν,cosθ) [cm⁻²/proton]"
+      plottitle: "IBD cross section $\\sigma(E_{\\nu}, \\cos\\theta)$"
+      latex: "IBD cross section $\\sigma(E_{\\nu}, \\cos\\theta)$ [cm$^{-2}$]"
+      axis: "$\\sigma(E_{\\nu}, \\cos\\theta)$ [cm$^{-2}$]"
+    enu:
+      text: "Eν mesh [MeV]\\nfor integration"
+      latex: "$E_{\\nu}$ mesh [MeV]"
+      axis: "$E_{\\nu}$ [MeV]"
+    jacobian:
+      text: "dEν/dEdep Jacobian"
+      axis: "$dE_{\\nu}/dE_{\\rm dep}$"
+      latex: "$dE_{\\nu}/dE_{\\rm dep}$ Jacobian"
+    crosssection_jacobian:
+      text: "Integrable σ(Eν,cosθ)×J(Eν,cosθ) [cm⁻²/proton]"
+    crosssection_jacobian_oscillations:
+      group:
+        text: "Integrable σ(Eν,cosθ)×J(Eν,cosθ)×Psur(Eν) {key} [cm⁻²/proton]"
+  neutrino_cm2_per_MeV_per_fission_per_proton:
+    part:
+      nu_main:
+        group:
+          text: "Partial differential IBD rate for reactor (main) for {key} [#ν·cm²/MeV/fission/proton]"
+      nu_neq:
+        group:
+          text: "Partial differential IBD rate for reactor (non-equilibrium) for {key} [#ν·cm²/MeV/fission/proton]"
+      nu_snf:
+        group:
+          text: "Partial differential IBD rate for spent nuclear fuel for {key} [#ν·cm²/MeV/s/proton]"
+  integral:
+    nu_main:
+      group:
+        text: "Partial integrated rate of IBD events from reactor (main) for {key} [#ν·cm²/fission/proton]\\nintegrated"
+        axis: "IBD flux [cm²/fission/proton]"
+    nu_neq:
+      group:
+        text: "Partial integrated rate of IBD events from reactor (non-equilibrium) for {key} [#ν·cm²/fission/proton]\\nintegrated"
+        axis: "IBD flux [cm²/fission/proton]"
+    nu_snf:
+      group:
+        text: "Partial integrated rate of IBD events from spent nuclear fuel for {key} [#ν·cm²/s/proton]\\nintegrated"
+        axis: "IBD flux [cm²/proton]"
+daily_data:
+  detector:
+    eff:
+      group:
+        text: "Daily detector efficiency {key}"
+    efflivetime:
+      group:
+        text: "Daily detector effective livetime {key} [s]"
+    livetime:
+      group:
+        text: "Daily detector wall clock livetime {key} [s]"
+    rate_acc:
+      group:
+        text: "Daily rate of accidental background events [#/day]"
+    num_acc_s_day:
+      group:
+        text: "Daily # of accidental background events × day/s [#]"
+  reactor:
+    power:
+      group:
+        text: "Daily fractional reactor power for {key}"
+    fission_fraction:
+      group:
+        text: "Daily fission fraction for {key}"
+    fission_fraction_scaled:
+      group:
+        text: "Corrected (fit) daily fission fraction for {key}"
+reactor:
+  energy_per_fission_weighted_MeV:
+    group:
+      text: "Partial energy per fission for {key} [MeV/fission]\\nweighted with fission fraction"
+  energy_per_fission_average_MeV:
+    group:
+      text: "Average energy per fission in {index[0]} during {index[1]} [MeV/fission]"
+  thermal_power_nominal_MeVs:
+    group:
+      text: "Reactor {key} thermal power (fit) Wth [MeV/s]"
+  thermal_power_nominal_MeVs_central:
+    group:
+      text: "Reactor {key} fixed (central) thermal power Wth [MeV/s]"
+  thermal_power_isotope_MeV_per_second:
+    group:
+      text: "Fractional thermal power for {index[1]} in {index[0]} during {index[2]} [MeV/s]"
+  fissions_per_second:
+    group:
+      text: "Fissions per second of {index[1]} in {index[0]} during {index[2]} [#fissions/s]"
+  energy_per_fission_snf_weighted_MeV:
+    group:
+      text: "Partial energy per fission for {key} [MeV/fission]\\nfor spent nuclear fuel, weighted with fission fraction"
+  energy_per_fission_snf_average_MeV:
+    text: "Average energy per fission for spent nuclear fuel [MeV/fission]"
+  fissions_per_second_snf:
+    group:
+      text: "# of fissions per second for {key} [#fissions/s]\\nfor spent nuclear fuel"
+  thermal_power_snf_isotope_MeV_per_second:
+    group:
+      text: "Fractional thermal power for {index[1]} in {index[0]} [MeV/s]\\nreference for spent nuclear fuel"
+eventscount:
+  parts:
+    nu_main:
+      group:
+        text: "Partial # of IBD events for {key} [#ν]\\nfrom reactor (main)"
+        axis: "# of IBD events"
+    nu_neq:
+      group:
+        text: "Partial # of IBD events for {key} [#ν]\\nfrom non-equilibrium"
+        axis: "# of IBD events"
+    nu_snf:
+      group:
+        text: "Partial # of IBD events for {key} [#ν]\\nfrom spent nuclear fuel"
+        axis: "# of IBD events"
+  raw:
+    group:
+      text: "# of IBD events in {index[0]} during {index[1]} [#ν]\\nbefore detector effects"
+      axis: "# of IBD events"
+  iav:
+    group:
+      text: "# of IBD events (IAV) in {index[0]} during {index[1]} [#ν]"
+      axis: "# of IBD events"
+  evis:
+    group:
+      text: "# of IBD events (IAV, LSNL) in {index[0]} during {index[1]} [#ν]"
+      axis: "# of IBD events"
+  erec:
+    group:
+      text: "# of IBD events (IAV, LSNL, Erec) in {index[0]} during {index[1]} [#ν]"
+      axis: "# of IBD events"
+  fine:
+    ibd_normalized:
+      group:
+        text: "Scaled (fit) # of IBD events at {index[0]} during {index[1]} [#ν]\\nfine binning, global normalization and relative efficiency applied"
+    ibd_normalized_detector:
+      group:
+        text: "Scaled (fit) # of IBD events at {index[0]} [#ν]\\nfine binning, global normalization and relative efficiency applied"
+    bkg:
+      group:
+        text: "Total # of background events at {index[0]} during {index[1]} [#]\\nfine binning"
+    total:
+      group:
+        text: "# of IBD candidates at {index[0]} during {index[1]} [#]\\nfine binning"
+  final:
+    ibd:
+      group:
+        text: "# of IBD events at {index[0]} during {index[1]} [#]\\nfinal binning, global normalization and relative efficiency applied"
+    bkg:
+      group:
+        text: "Total # of background events at {index[0]} during {index[1]} [#]\\nfinal binning"
+    detector_period:
+      group:
+        text: "# of IBD candidates at {index[0]} during {index[1]} [#ν]\\nfinal binning"
+    detector:
+      group:
+        text: "# of IBD candidates at {index[0]} [#ν]\\nfinal binning"
+    concatenated:
+      detector_period:
+        text: "# of IBD candidates in all detectors and all periods [#ν]\\nconcatenated"
+      detector:
+        text: "# of IBD candidates in all detectors [#ν]\\nconcatenated"
+bkg:
+  erec:
+    text: "Reconstructed energy (Erec) bin edges [MeV]\\nfor background events"
+    latex: "Reconstructed energy $E_{\\rm rec}$ bin edges [MeV] (for background events)"
+    axis: "$E_{\\rm rec}$ [MeV]"
+    plotmethod: 'none'
+  count_acc_fixed_s_day:
+    group:
+      text: "# of accidental background events in {key} × day/s [#]"
+  count_fixed:
+    acc:
+      group:
+        text: "# of accidental background events in {index[0]} during {index[1]} [#]\\nbefore nuisance"
+    alphan:
+      group:
+        text: "# of ¹³C(α,n)¹⁶O background events in {index[0]} during {index[1]} [#]\\nbefore nuisance"
+    amc:
+      group:
+        text: "# of ²⁴¹Am¹³C related background events in {index[0]} during {index[1]} [#]\\nbefore nuisance"
+    fastn:
+      group:
+        text: "# of fast neutron background events in {index[0]} during {index[1]} [#]\\nbefore nuisance"
+    lihe:
+      group:
+        text: "# of ⁹Li/⁸He background events in {index[0]} during {index[1]} [#]\\nbefore nuisance"
+    muonx:
+      group:
+        text: "# of μ decay background events in {index[0]} during {index[1]} [#]\\nbefore nuisance"
+  count:
+    acc:
+      group:
+        text: "# of accidental background events in {index[0]} during {index[1]} [#]"
+    alphan:
+      group:
+        text: "# of ¹³C(α,n)¹⁶O background events in {index[0]} during {index[1]} [#]"
+    amc:
+      group:
+        text: "# of ²⁴¹Am¹³C related background events in {index[0]} during {index[1]} [#]"
+    fastn:
+      group:
+        text: "# of fast neutron background events in {index[0]} during {index[1]} [#]"
+    lihe:
+      group:
+        text: "# of ⁹Li/⁸He background events in {index[0]} during {index[1]} [#]"
+    muonx:
+      group:
+        text: "# of μ decay background events in {index[0]} during {index[1]} [#]"
+  spectrum:
+    acc:
+      group:
+        text: "# of accidental background events in {index[0]} during {index[1]} [#]"
+    alphan:
+      group:
+        text: "# of ¹³C(α,n)¹⁶O background events in {index[0]} during {index[1]} [#]"
+    amc:
+      group:
+        text: "# of ²⁴¹Am¹³C related background events in {index[0]} during {index[1]} [#]"
+    fastn:
+      group:
+        text: "# of fast neutron background events in {index[0]} during {index[1]} [#]"
+    lihe:
+      group:
+        text: "# of ⁹Li/⁸He background events in {index[0]} during {index[1]} [#]"
+    muonx:
+      group:
+        text: "# of μ decay background events in {index[0]} during {index[1]} [#]"
+  spectrum_shape:
+    acc:
+      group:
+        text: "Shape of spectrum of accidental background events in {key}"
+    alphan:
+      group:
+        text: "Shape of spectrum of ¹³C(α,n)¹⁶O background events in {key}"
+    amc:
+      group:
+        text: "Shape of spectrum of ²⁴¹Am¹³C related background events in {key}"
+    fastn:
+      group:
+        text: "Shape of spectrum of fast neutron background events in {key}"
+    lihe:
+      group:
+        text: "Shape of spectrum of ⁹Li/⁸He background events in {key}"
+    muonx:
+      group:
+        text: "Shape of spectrum of μ decay background events in {key}"
+oscprob:
+  group:
+    text: "ν̄(e) survival probability {key}"
+    latex: "$\\bar{{\\nu}}_e$ survival probability {key}"
+    axis: "$P_{{\\rm ee}}$"
+    plotmethod: "slicesy"
+reactor_anue:
+  neutrino_per_fission_per_MeV_nominal:
+    group:
+      text: "Nominal ν̄ spectrum {key} [#ν/fission/MeV]\\ninterpolated"
+  neutrino_per_fission_per_MeV_input:
+    spec:
+      group:
+        text: "Input ν̄ spectrum for {key} [#ν/fission/MeV]"
+    enu:
+      text: "Input ν̄ spectrum neutrino energy (Eν) [MeV]"
+      axis: '$E_{\nu}$ [MeV]'
+  part:
+    neutrino_per_fission_per_MeV_main:
+      group:
+        text: "ν̄ spectrum (main) for {key} [#ν/fission/MeV]\\nfit free and constrained corrections"
+    neutrino_per_fission_per_MeV_neq_nominal:
+      group:
+        text: "Nominal ν̄ spectrum for non-equilibrium for {key} [#ν/fission/MeV]"
+  spec_indexer:
+    text: "Lookup segment index\\nfor interpolation of input ν̄ spectrum"
+  spectrum_free_correction:
+    spec_model_edges:
+      text: "Eν segment edges [MeV]\\nfor parametrization of reactor ν̄ spectrum shape"
+      axis: '$E_{\nu}$ [MeV]'
+    input:
+      text: "Free correction (parameters) for ν̄ spectrum shape\\nat input Eν"
+    correction:
+      text: "Free correction weights for ν̄ spectrum shape\\nat input Eν"
+    interpolated:
+      text: "Free correction weights for ν̄ spectrum shape\\ninterpolated"
+    indexer:
+      text: "Lookup segment index\\nfor interpolation of free ν̄ spectrum shape"
+  spectrum_uncertainty:
+    enu_centers:
+      text: "Eν segment centers [MeV]\\nfor parametrization of uncertainties of ν̄ spectrum shape"
+      axis: '$E_{\nu}$ [MeV]'
+    correction_index:
+      text: "Lookup segment index\\nfor interpolation of uncertainties of ν̄ spectrum shape"
+    correction_interpolated:
+      group:
+        text: "Constrained correction factor for ν̄ spectrum shape of {key}\\ncorrelated × uncorrelated, fit, interpolated"
+    uncertainty:
+      corr:
+        group:
+          text: "Correlated ν̄ spectrum shape uncertainty for {key}\\nat input Eν"
+      uncorr:
+        group:
+          text: "Uncorrelated ν̄ spectrum shape uncertainty for {key}\\nat input Eν"
+    scale:
+      uncorr:
+        group:
+          text: "Constrained correction to ν̄ spectrum shape for {key}\\nnormal unit, uncorrelated, fit, at input Eν"
+    correction:
+      corr:
+        group:
+          text: "Constrained correction to ν̄ spectrum shape for {key}\\ncorrelated, fit, at input Eν"
+      uncorr:
+        group:
+          text: "Constrained correction to ν̄ spectrum shape for {key}\\nuncorrelated, fit, at input Eν"
+      corr_factor:
+        group:
+          text: "Constrained correction factor to ν̄ spectrum shape for {key}\\ncorrelated, fit, at input Eν"
+      uncorr_factor:
+        group:
+          text: "Constrained correction factor to ν̄ spectrum shape for {key}\\nuncorrelated, fit, at input Eν"
+      full:
+        group:
+          text: "Constrained correction factor to ν̄ spectrum shape for {key}\\ncorrelated×uncorrelated, fit, at input Eν"
+reactor_nonequilibrium_anue:
+  correction_input:
+    enu:
+      text: "Eν segment centers [MeV]\\nnon-equilibrium ν̄ spectrum shape correction"
+      axis: '$E_{\nu}$ [MeV]'
+    nonequilibrium_correction:
+      group:
+        text: "Correction to ν̄ spectrum due to non-equilibrium correction to {key}\\nat input Eν"
+  correction_indexer:
+    text: "Lookup segment index\\nfor interpolation of NEQ ν̄ spectrum shape correction"
+  correction_interpolated:
+    group:
+      text: "Correction to ν̄ spectrum due to non-equilibrium correction to {key}\\ninterpolated"
+snf_anue:
+  correction_input:
+    enu:
+      text: "Eν segment centers [MeV]\\nspent nuclear fuel ν̄ spectrum shape correction"
+      axis: '$E_{\nu}$ [MeV]'
+    snf_correction:
+      group:
+        text: "Correction to ν̄ spectrum due to spent nuclear fuel from {key}\\nat input Eν"
+  correction_indexer:
+    text: "Lookup segment index\\nfor interpolation of SNF ν̄ spectrum shape correction"
+  correction_interpolated:
+    group:
+      text: "Correction to ν̄ spectrum due to spent nuclear fuel from {key}\\ninterpolated"
+  neutrino_per_second_isotope:
+    group:
+      text: "Partial rate of ν̄ for {key} [#ν/s/MeV]\\nfor spent nuclear fuel"
+  neutrino_per_second:
+    group:
+      text: "Rate of ν̄ for {key} [#ν/s/MeV]\\nfor spent nuclear fuel"
+  neutrino_per_second_snf:
+    group:
+      text: "Rate of ν̄ from spent nuclear fuel for {key} [#ν/s/MeV]\\ninterpolated"
+reactor_detector:
+  nfissions_daily:
+    group:
+      text: "Daily observable # of fissions for {key} [#fissions]"
+  nfissions:
+    group:
+      text: "Observable # of fissions of {index[1]} in {index[0]} in {index[2]} during {index[3]} [#fissions]"
+  nfissions_nprotons_per_cm2:
+    group:
+      text: "#fissions × #target protons × baseline factor {key} [#fissions·#protons/cm²]\\nfor main IBD flux"
+  nfissions_nprotons_per_cm2_neq:
+    group:
+      text: "Scaled (fit) #fissions × #target protons × baseline factor {key} [#fissions·#protons/cm²]\\nfor non-equilibrium IBD flux"
+  livetime_nprotons_per_cm2_snf:
+    group:
+      text: "Scaled (fit) #target protons × effective livetime × baseline factor {key} [#protons·s/cm²]\\nfor spent nuclear fuel IBD flux"
+  baseline_factor_per_cm2:
+    group:
+      text: "1/(4πL²) factor {key} [cm⁻²]"
+      latex: "$1/\\left(4\\pi L^2\\right)$ factor {key} [cm$^{-2}$]"
+detector:
+  nprotons:
+    group:
+      text: "Actual # of target protons in {key} [#protons]"
+  normalization:
+    group:
+      text: "Detector normalization for {key}"
+  livetime:
+    group:
+      text: "Wall clock detector livetime {key} [s]"
+  efflivetime:
+    group:
+      text: "Effective detector livetime {key} [s]"
+  efflivetime_days:
+    group:
+      text: "Effective detector livetime {key} [day]"
+  iav:
+    matrix_raw:
+      text: "IAV matrix (MC)"
+    matrix_rescaled:
+      group:
+        text: "IAV matrix with adjusted (fit) offdiagonal elements at {key}"
+  lsnl:
+    curves:
+      edep:
+        text: "LSNL input coarse deposited energy for Evis(Edep) [MeV]"
+      evis_parts:
+        group:
+          text: "Input coarse Evisᵢ(Edep) {key} curve for LSNL [MeV]"
+      evis_parts_scaled:
+        group:
+          text: "Input coarse Evisᵢ(Edep) {key} curve for LSNL [MeV]\\nscaled (fit relative energy scale)"
+      evis_coarse:
+        text: "Input coarse Evis(Edep) for LSNL [MeV]"
+      evis_coarse_monotonous:
+        text: "Input coarse LSNL Evis(Edep) [MeV]\\nensured to be monotonous"
+      evis_coarse_monotonous_scaled:
+        group:
+          text: "Input coarse LSNL Evis(Edep) for {key} [MeV]\\nensured to be monotonous and scaled (fit relative energy scale)"
+      evis:
+        group:
+          text: "Visible energy (LSNL×relative energy scale) for {key} [MeV]"
+    indexer_fwd:
+      text: "Segment lookup for Evis(Edep)\\ninterpolation for LSNL correction"
+    interpolated_fwd:
+      text: "Interpolated Evis(Edep) for LSNL correction [MeV]"
+    indexer_bwd:
+      group:
+        text: "Segment lookup for Edep(Evis) for {key}\\ninverse interpolation for LSNL correction"
+    interpolated_bwd:
+      group:
+        text: "Interpolated Edep(Evis) for LSNL correction for {key} [MeV]"
+    matrix:
+      group:
+        text: "Energy scale correction matrix for {key}\\nLSNL×relative energy scale"
+  eres:
+    e_bincenter:
+      text: "Bin centers for visible energy (Evis) [MeV]"
+    sigma_rel:
+      text: "Relative energy resolution width σ(Evis)/Evis"
+    matrix:
+      text: "Energy resolution matrix"
+  rebin:
+    matrix_ibd:
+      text: "Rebinning matrix for IBD events"
+    matrix_bkg:
+      text: "Rebinning matrix for background events"
+mc:
+  parameters:
+    toymc:
+      text: "MC values for nuisance parameters (normal unit)"
+    inputs:
+      text: "Node to set nuisance parameter values"
+data:
+  pseudo:
+    self:
+      text: "Pseudo data (this model) node for likelihood"
+    proxy:
+      text: "Switcher of the data source for likelihood"
+covariance:
+  jacobians:
+    oscprob:
+      text: "Jacobian for constrained neutrino oscillation parameters\\nΔm²₂₁ and sin²2θ₁₂"
+    eres:
+      text: "Jacobian for energy resolution parameters"
+    lsnl:
+      text: "Jacobian for Liquid Scintillator Non-Linearity (LSNL) parameters"
+    iav:
+      text: "Jacobian for Inner Acrylic Vessel (IAV) parameters"
+    detector_relative:
+      text: "Jacobian for relative detector efficiency and energy scale parameters"
+    energy_per_fission:
+      text: "Jacobian for average fission energy parameters"
+    nominal_thermal_power:
+      text: "Jacobian for reactor thermal power parameters"
+    snf:
+      text: "Jacobian for spent nuclear fuel parameters"
+    neq:
+      text: "Jacobian for non-equilibrium correction parameters"
+    fission_fraction:
+      text: "Jacobian for fission fraction parameters"
+    bkg_rate:
+      text: "Jacobian for background rate parameters"
+    hm_corr:
+      text: "Jacobian for correlated ν̄ spectrum parameters"
+    hm_uncorr:
+      text: "Jacobian for uncorrelated ν̄ spectrum parameters"
+  covmat_syst:
+    oscprob:
+      text: "Systematic covariance matrix for constrained neutrino oscillation parameters\\nΔm²₂₁ and sin²2θ₁₂"
+    eres:
+      text: "Systematic covariance matrix for energy resolution parameters"
+    lsnl:
+      text: "Systematic covariance matrix for Liquid Scintillator Non-Linearity (LSNL) parameters"
+    iav:
+      text: "Systematic covariance matrix for Inner Acrylic Vessel (IAV) parameters"
+    detector_relative:
+      text: "Systematic covariance matrix for relative detector efficiency and energy scale parameters"
+    energy_per_fission:
+      text: "Systematic covariance matrix for average fission energy parameters"
+    nominal_thermal_power:
+      text: "Systematic covariance matrix for reactor thermal power parameters"
+    snf:
+      text: "Systematic covariance matrix for spent nuclear fuel parameters"
+    neq:
+      text: "Systematic covariance matrix for non-equilibrium correction parameters"
+    fission_fraction:
+      text: "Systematic covariance matrix for fission fraction parameters"
+    bkg_rate:
+      text: "Systematic covariance matrix for background rate parameters"
+    hm_corr:
+      text: "Systematic covariance matrix for correlated ν̄ spectrum parameters"
+    hm_uncorr:
+      text: "Systematic covariance matrix for uncorrelated ν̄ spectrum parameters"
+    sum:
+      text: "Systematic covariance matrix"
+  data:
+    fixed:
+      text: "Statistical variance (fixed) for covariance matrix calculation"
+  covmat_full_p:
+    stat_fixed:
+      text: "Full covariance matrix\\nstat+syst, fixed Pearson's statistical uncertainties"
+    stat_variable:
+      text: "Full covariance matrix\\nstat+syst, variable Pearson's statistical uncertainties"
+  covmat_full_n:
+    text: "Full covariance matrix\\nstat+syst, Neyman's statistical uncertainties"
+cholesky:
+  stat:
+    variable:
+      text: "Pearson's statistical uncertainty, variable"
+    fixed:
+      text: "Pearson's statistical uncertainty, fixed"
+    data:
+      fixed:
+        text: "Neyman's statistical uncertainty"
+  covmat_full_p:
+    stat_fixed:
+      text: "Cholesky decomposition of full covariance matrix\\nstat+syst, fixed Pearson's statistical uncertainties"
+    stat_variable:
+      text: "Cholesky decomposition of full covariance matrix\\nstat+syst, variable Pearson's statistical uncertainties"
+  covmat_full_n:
+    text: "Cholesky decomposition of full covariance matrix\\nstat+syst, Neyman's statistical uncertainties"
+statistic:
+  staterr:
+    cnp:
+      text: "CNP statistical uncertainty (variable)"
+  log_prod_diag:
+    text: "2ln\\|V\\| unbiasing term for χ²"
+  helper:
+    neyman:
+      text: "Neyman's contribution to CNP χ²\\nstat+syst, covariance matrix"
+    pearson:
+      text: "Pearson's contribution to CNP χ²\\nstat+syst, covariance matrix"
+  stat:
+    chi2cnp:
+      text: "Combined Neyman-Pearson's χ² with statistical uncertainties"
+    chi2p:
+      text: "Pearson's χ² with statistical uncertainties\\nvariable stat uncertainties"
+    chi2p_unbiased:
+      text: "Unbiased Pearson's χ² with statistical uncertainties\\nvariable stat uncertainties"
+    chi2p_iterative:
+      text: "Pearson's χ² with statistical uncertainties\\nfixed stat uncertainties"
+    chi2n:
+      text: "Neyman's χ² with statistical uncertainties"
+  full:
+    chi2cnp:
+      text: "Combined Neyman-Pearson's χ² with statistical and systematic uncertainties\\nnuisance terms"
+    chi2cnp_covmat:
+      text: "Combined Neyman-Pearson's χ² with statistical and systematic uncertainties\\ncovariance matrix"
+    chi2p_unbiased:
+      text: "Unbiased Pearson's χ² with statistical and systematic uncertainties\\nvariable stat uncertainties, nuisance terms"
+    chi2p_covmat_fixed:
+      text: "Pearson's χ² with statistical and systematic uncertainties\\nfixed stat uncertainties, covariance matrix"
+    chi2p_covmat_variable:
+      text: "Pearson's χ² with statistical and systematic uncertainties\\nvariable stat uncertainties, covariance matrix"
+    chi2p_iterative:
+      text: "Pearson's χ² with statistical and systematic uncertainties\\nfixed stat uncertainties"
+    chi2n_covmat:
+      text: "Neyman's χ² with statistical and systematic uncertainties\\ncovariance matrix"
+  nuisance:
+    all:
+      text: "χ² nuisance term"
+    parts:
+      detector:
+        detector_relative:
+          text: "χ² nuisance term for relative detector efficiency and energy scale parameters"
+        eres:
+          text: "χ² nuisance term for energy resolution parameters"
+        iav_offdiag_scale_factor:
+          text: "χ² nuisance term for Inner Acrylic Vessel (IAV) parameters"
+        lsnl_scale_a:
+          text: "χ² nuisance term for Liquid Scintillator Non-Linearity (LSNL) parameters"
+      reactor:
+        fission_fraction_scale:
+          text: "χ² nuisance term for fission fraction uncertainty"
+        energy_per_fission:
+          text: "χ² nuisance term for average fission energy parameters"
+        nominal_thermal_power:
+          text: "χ² nuisance term for reactor thermal power parameters"
+        snf_scale:
+          text: "χ² nuisance term for spent nuclear fuel parameters"
+        nonequilibrium_scale:
+          text: "χ² nuisance term for non-equilibrium correction parameters"
+      reactor_anue:
+        spectrum_uncertainty:
+          corr:
+            text: "χ² nuisance term for correlated ν̄ spectrum parameters"
+          uncorr:
+            group:
+              text: "χ² nuisance term for uncorrelated ν̄ spectrum parameters for {key}"
+      oscprob:
+        text: "χ² nuisance term for constrained neutrino oscillation parameters\\nΔm²₂₁ and sin²2θ₁₂"
+      bkg:
+        rate:
+          alphan:
+            text: "χ² nuisance term for background rate parameters\\n¹³C(α,n)¹⁶O"
+        rate_scale:
+          acc:
+            text: "χ² nuisance term for background rate parameters\\naccidentals"
+        uncertainty_scale:
+          amc:
+            text: "χ² nuisance term for background rate parameters\\n²⁴¹Am¹³C"
+        uncertainty_scale_by_site:
+          fastn:
+            text: "χ² nuisance term for background rate parameters\\nfast neutrons"
+          lihe:
+            text: "χ² nuisance term for background rate parameters\\n⁹Li/⁸He"
+          muonx:
+            text: "χ² nuisance term for background rate parameters\\nμ decay"
+summary:
+  total:
+    bkg_count:
+      group:
+        text: "Total # of {index[0]} background events in {index[1]}"
+    bkg_rate_s:
+      group:
+        text: "Total rate of {index[0]} background events in {index[1]}"
+    bkg_rate:
+      group:
+        text: "Total rate of {index[0]} background events in {index[1]}"
+    eff:
+      group:
+        text: "Total detector {index[0]} efficiency"
+    efflivetime:
+      group:
+        text: "Total detector {index[0]} effective livetime [s]"
+    livetime:
+      group:
+        text: "Total detector {index[0]} wall clock livetime [s]"
+  periods:
+    bkg_count:
+      group:
+        text: "Total # of {index[0]} background events in {index[2]} during {index[1]}"
+    bkg_rate_s:
+      group:
+        text: "Total rate of {index[0]} background events in {index[2]} during {index[1]}"
+    bkg_rate:
+      group:
+        text: "Total rate of {index[0]} background events in {index[2]} during {index[1]}"
+    eff:
+      group:
+        text: "Total detector {index[1]} efficiency during {index[0]}"
+    efflivetime:
+      group:
+        text: "Total detector {index[1]} effective livetime during {index[0]} [s]"
+    livetime:
+      group:
+        text: "Total detector {index[1]} wall clock livetime during {index[0]} [s]"
+
diff --git a/tests/models/test_dayabay_v0.py b/tests/models/test_dayabay_v0.py
index da4683c5c27af0d2f2792b69a519e5de692a9e93..a02a950870c51a5c2e8b5833ed487de4a535829c 100644
--- a/tests/models/test_dayabay_v0.py
+++ b/tests/models/test_dayabay_v0.py
@@ -7,9 +7,6 @@ from models import available_models, load_model
 
 @mark.parametrize("modelname", available_models())
 def test_dayabay_v0(modelname: str):
-    # TODO: remove when the model is done
-    if modelname=="v0d":
-        return
     model = load_model(modelname, close=True, strict=True)
 
     graph = model.graph