diff --git a/subtrees/dagflow/.gitlab-ci.yml b/subtrees/dagflow/.gitlab-ci.yml
index 9e2d801dcf6157cce63bfc00b250530e70301205..b1e3e2e3c2c47b57ff59cda6ab74ee92639cb673 100644
--- a/subtrees/dagflow/.gitlab-ci.yml
+++ b/subtrees/dagflow/.gitlab-ci.yml
@@ -7,7 +7,7 @@ tests:
 
     script:
     - python3 -m pip install -r requirements.txt
-    - coverage run --source=. --omit=subtrees/* -m pytest
+    - coverage run --source=dagflow --omit=subtrees/* -m pytest
     - coverage report
     - coverage xml
     coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
diff --git a/subtrees/dagflow/dagflow/bundles/load_parameters.py b/subtrees/dagflow/dagflow/bundles/load_parameters.py
index 588d2e5d9fb5e947a53657e9178e2f3b44c84e11..dda5f272a7d712d0bdac89b996f604dadb601246 100644
--- a/subtrees/dagflow/dagflow/bundles/load_parameters.py
+++ b/subtrees/dagflow/dagflow/bundles/load_parameters.py
@@ -143,7 +143,7 @@ def get_label(key: tuple, labelscfg: dict) -> dict:
         except KeyError:
             continue
 
-        sidx = '.'.join(key[-n:])
+        sidx = '.'.join(key[n-1:])
         return {k: v.format(sidx) for k, v in lcfg.items()}
 
     return {}
diff --git a/subtrees/dagflow/dagflow/datadescriptor.py b/subtrees/dagflow/dagflow/datadescriptor.py
index 94cdf5a71d42c52f3c3043a9c4f89037c5dd6d63..582978b3e7e652b5b73706fcbc7f092fb43570bb 100755
--- a/subtrees/dagflow/dagflow/datadescriptor.py
+++ b/subtrees/dagflow/dagflow/datadescriptor.py
@@ -1,7 +1,8 @@
-from typing import Optional, Tuple
+from typing import List, Optional
+
 from numpy.typing import DTypeLike
 
-from .types import ShapeLike, EdgesLike
+from .types import EdgesLike, ShapeLike
 
 
 class DataDescriptor:
@@ -11,22 +12,27 @@ class DataDescriptor:
     """
 
     __slots__ = ("dtype", "shape", "axes_edges", "axes_nodes")
-    dtype: DTypeLike # DTypeLike is already Optional
+    dtype: DTypeLike  # DTypeLike is already Optional
     shape: Optional[ShapeLike]
-    axes_edges: Optional[Tuple[EdgesLike]]
-    axes_nodes: Optional[Tuple[EdgesLike]]
+    axes_edges: Optional[List[EdgesLike]]
+    axes_nodes: Optional[List[EdgesLike]]
 
     def __init__(
         self,
-        dtype: DTypeLike, # DTypeLike is already Optional
+        dtype: DTypeLike,  # DTypeLike is already Optional
         shape: Optional[ShapeLike],
-        axes_edges: Optional[Tuple[EdgesLike]] = None,
-        axes_nodes: Optional[Tuple[EdgesLike]] = None,
+        axes_edges: Optional[List[EdgesLike]] = None,
+        axes_nodes: Optional[List[EdgesLike]] = None,
     ) -> None:
         """
         Sets the attributes
         """
         self.dtype = dtype
         self.shape = shape
-        self.axes_edges = axes_edges
-        self.axes_nodes = axes_nodes
+        self.axes_edges = axes_edges or []
+        self.axes_nodes = axes_nodes or []
+
+    @property
+    def dim(self) -> int:
+        """ Return the dimension of the data """
+        return len(self.shape)
diff --git a/subtrees/dagflow/dagflow/input.py b/subtrees/dagflow/dagflow/input.py
index 29a27e285970e2338f010fa86e4d17f28f8efcdf..a632f05ccfb0271acfbe14d18b37d1ce3029be59 100644
--- a/subtrees/dagflow/dagflow/input.py
+++ b/subtrees/dagflow/dagflow/input.py
@@ -1,19 +1,19 @@
 from typing import Iterator, Optional, Tuple, Union
+
 from numpy import zeros
 from numpy.typing import DTypeLike, NDArray
 
-from dagflow.datadescriptor import DataDescriptor
-
+from .datadescriptor import DataDescriptor
 from .edges import EdgeContainer
 from .exception import (
-    ClosedGraphError,
-    ReconnectionError,
     AllocationError,
+    ClosedGraphError,
     InitializationError,
+    ReconnectionError,
 )
+from .iter import StopNesting
 from .output import Output
 from .shift import lshift
-from .iter import StopNesting
 from .types import EdgesLike, InputT, NodeT, ShapeLike
 
 
@@ -264,7 +264,9 @@ class Input:
                 output=self,
             )
         try:
-            self._own_data = zeros(self.own_dd.shape, self.own_dd.dtype, **kwargs)
+            self._own_data = zeros(
+                self.own_dd.shape, self.own_dd.dtype, **kwargs
+            )
         except Exception as exc:
             raise AllocationError(
                 f"Input: {exc.args[0]}", node=self._node, input=self
diff --git a/subtrees/dagflow/dagflow/lib/Array.py b/subtrees/dagflow/dagflow/lib/Array.py
index da6cdeaa30807834472a4ef1456d28ebde7bb575..4d597d3c40ac8ea18173ffeb8a87b983c9158a16 100644
--- a/subtrees/dagflow/dagflow/lib/Array.py
+++ b/subtrees/dagflow/dagflow/lib/Array.py
@@ -1,11 +1,13 @@
+from typing import Optional, Sequence, Union
+
 from numpy import array
+from numpy.typing import ArrayLike, NDArray
 
+from ..exception import InitializationError
 from ..nodes import FunctionNode
 from ..output import Output
-from ..exception import InitializationError
+from ..typefunctions import check_array_edges_consistency, check_edges_type
 
-from numpy.typing import ArrayLike, NDArray
-from typing import Optional
 
 class Array(FunctionNode):
     """Creates a node with a single data output with predefined array"""
@@ -13,11 +15,17 @@ class Array(FunctionNode):
     _mode: str
     _data: NDArray
     _output = Output
-    def __init__(self, name, arr, *,
-        mode: str="store",
+
+    def __init__(
+        self,
+        name,
+        arr,
+        *,
+        mode: str = "store",
         outname="array",
-        mark: Optional[str]=None,
-        **kwargs
+        mark: Optional[str] = None,
+        edges: Optional[Union[Output, Sequence[Output]]] = None,
+        **kwargs,
     ):
         super().__init__(name, **kwargs)
         self._mode = mode
@@ -25,23 +33,44 @@ class Array(FunctionNode):
             self._mark = mark
         self._data = array(arr, copy=True)
 
-        if mode=='store':
+        if mode == "store":
             self._output = self._add_output(outname, data=self._data)
-        elif mode=='store_weak':
-            self._output = self._add_output(outname, data=self._data, owns_buffer=False)
-        elif mode=='fill':
-            self._output = self._add_output(outname, dtype=self._data.dtype, shape=self._data.shape)
+        elif mode == "store_weak":
+            self._output = self._add_output(
+                outname, data=self._data, owns_buffer=False
+            )
+        elif mode == "fill":
+            self._output = self._add_output(
+                outname, dtype=self._data.dtype, shape=self._data.shape
+            )
         else:
-            raise InitializationError(f'Array: invalid mode "{mode}"', node=self)
+            raise InitializationError(
+                f'Array: invalid mode "{mode}"', node=self
+            )
 
-        self._functions.update({
+        self._functions.update(
+            {
                 "store": self._fcn_store,
                 "store_weak": self._fcn_store,
-                "fill": self._fcn_fill
-                })
+                "fill": self._fcn_fill,
+            }
+        )
         self.fcn = self._functions[self._mode]
 
-        if mode=='store':
+        if edges is not None:
+            if isinstance(edges, Output):
+                self._output.dd.axes_edges.append(edges)
+            else:
+                # assume that the edges are Sequence[Output]
+                try:
+                    self._output.dd.axes_edges.extend(edges)
+                except Exception as exc:
+                    raise InitializationError(
+                        "Array: edges must be `Output` or `Sequence[Output]`, "
+                        f"but given {edges=}, {type(edges)=}"
+                    ) from exc
+
+        if mode == "store":
             self.close()
 
     def _fcn_store(self, *args):
@@ -53,13 +82,14 @@ class Array(FunctionNode):
         return data
 
     def _typefunc(self) -> None:
-        pass
+        check_edges_type(self, slice(None), "array") # checks List[Output]
+        check_array_edges_consistency(self, self._output) # checks dim and N+1 size
 
     def _post_allocate(self) -> None:
-        if self._mode=='fill':
+        if self._mode == "fill":
             return
 
         self._data = self._output._data
 
-    def set(self, data: ArrayLike, check_taint: bool=False) -> bool:
+    def set(self, data: ArrayLike, check_taint: bool = False) -> bool:
         return self._output.set(data, check_taint)
diff --git a/subtrees/dagflow/dagflow/lib/Division.py b/subtrees/dagflow/dagflow/lib/Division.py
index b93aef1359294e9fd692a2a0fea2d05df1364384..8fe9805f960772da6e42f366a1957d437a278ffc 100644
--- a/subtrees/dagflow/dagflow/lib/Division.py
+++ b/subtrees/dagflow/dagflow/lib/Division.py
@@ -1,22 +1,10 @@
 from numpy import copyto
 
-from ..input_extra import MissingInputAddOne
-from ..nodes import FunctionNode
-from ..typefunctions import (
-    check_has_inputs,
-    eval_output_dtype,
-    copy_input_shape_to_output,
-)
+from .NodeManyToOne import NodeManyToOne
 
-class Division(FunctionNode):
+class Division(NodeManyToOne):
     """Division of all the inputs together"""
 
-    def __init__(self, *args, **kwargs):
-        kwargs.setdefault(
-            "missing_input_handler", MissingInputAddOne(output_fmt="result")
-        )
-        super().__init__(*args, **kwargs)
-
     def _fcn(self, _, inputs, outputs):
         out = outputs[0].data
         copyto(out, inputs[0].data.copy())
@@ -24,9 +12,3 @@ class Division(FunctionNode):
             for input in inputs[1:]:
                 out /= input.data
         return out
-
-    def _typefunc(self):
-        """A output takes this function to determine the dtype and shape"""
-        check_has_inputs(self)
-        copy_input_shape_to_output(self, 0, "result")
-        eval_output_dtype(self, slice(None), "result")
diff --git a/subtrees/dagflow/dagflow/lib/Integrator.py b/subtrees/dagflow/dagflow/lib/Integrator.py
index aa0597af9b9e68ae379457cc3026adfa9e9eb1bb..2a36a29606dbad2f785308400f7d57b28b2eda46 100644
--- a/subtrees/dagflow/dagflow/lib/Integrator.py
+++ b/subtrees/dagflow/dagflow/lib/Integrator.py
@@ -1,40 +1,43 @@
-from typing import Literal
-
 from numba import njit
-from numpy import floating, integer, issubdtype, multiply, zeros
+from numpy import empty, floating, integer, multiply
 from numpy.typing import NDArray
 
-from ..exception import InitializationError, TypeFunctionError
+from ..exception import TypeFunctionError
 from ..input_extra import MissingInputAddEach
 from ..nodes import FunctionNode
 from ..typefunctions import (
     check_has_inputs,
     check_input_dimension,
     check_input_dtype,
+    check_input_edges_dim,
+    check_input_edges_equivalence,
     check_input_shape,
+    check_input_subtype,
+    check_output_subtype,
 )
+from ..types import ShapeLike
 
 
 @njit(cache=True)
-def _integrate1d(data: NDArray, weighted: NDArray, ordersX: NDArray):
+def _integrate1d(result: NDArray, data: NDArray, ordersX: NDArray):
     """
-    Summing up `weighted` within `ordersX` and puts the result into `data`.
+    Summing up `data` within `ordersX` and puts the result into `result`.
     The 1-dimensional version of integration.
     """
     iprev = 0
     for i, order in enumerate(ordersX):
         inext = iprev + order
-        data[i] = weighted[iprev:inext].sum()
+        result[i] = data[iprev:inext].sum()
         iprev = inext
 
 
 @njit(cache=True)
 def _integrate2d(
-    data: NDArray, weighted: NDArray, ordersX: NDArray, ordersY: NDArray
+    result: NDArray, data: NDArray, ordersX: NDArray, ordersY: NDArray
 ):
     """
-    Summing up `weighted` within `ordersX` and `ordersY` and then
-    puts the result into `data`. The 2-dimensional version of integration.
+    Summing up `data` within `ordersX` and `ordersY` and then
+    puts the result into `result`. The 2-dimensional version of integration.
     """
     iprev = 0
     for i, orderx in enumerate(ordersX):
@@ -42,7 +45,7 @@ def _integrate2d(
         jprev = 0
         for j, ordery in enumerate(ordersY):
             jnext = jprev + ordery
-            data[i, j] = weighted[iprev:inext, jprev:jnext].sum()
+            result[i, j] = data[iprev:inext, jprev:jnext].sum()
             jprev = jnext
         iprev = inext
 
@@ -50,13 +53,12 @@ def _integrate2d(
 class Integrator(FunctionNode):
     """
     The `Integrator` node performs integration (summation)
-    of every input within the `weight`, `ordersX` and `ordersY` (for `2d` mode).
+    of every input within the `weight`, `ordersX` and `ordersY` (for 2 dim).
 
-    The `Integrator` has two modes: `1d` and `2d`.
-    The `mode` must be set in the constructor, while `precision=dtype`
-    of integration is chosen *automaticly* in the type function.
+    The `dim` and `precision=dtype` of integration are chosen *automatically*
+    in the type function within the inputs.
 
-    For `2d` integration the `ordersY` input must be connected.
+    For 2d-integration the `ordersY` input must be connected.
 
     Note that the `Integrator` preallocates temporary buffer.
     For the integration algorithm the `Numba`_ package is used.
@@ -64,24 +66,14 @@ class Integrator(FunctionNode):
     .. _Numba: https://numba.pydata.org
     """
 
-    def __init__(self, *args, mode: Literal["1d", "2d"], **kwargs):
+    __slots__ = ("__buffer",)
+
+    def __init__(self, *args, **kwargs):
         kwargs.setdefault("missing_input_handler", MissingInputAddEach())
         super().__init__(*args, **kwargs)
-        if mode not in {"1d", "2d"}:
-            raise InitializationError(
-                f"Argument `mode` must be '1d' or '2d', but given '{mode}'!",
-                node=self,
-            )
-        self._mode = mode
-        if self._mode == "2d":
-            self._add_input("ordersY", positional=False)
         self._add_input("weights", positional=False)
         self._add_input("ordersX", positional=False)
-        self._functions.update({"1d": self._fcn_1d, "2d": self._fcn_2d})
-
-    @property
-    def mode(self) -> str:
-        return self._mode
+        self._functions.update({1: self._fcn_1d, 2: self._fcn_2d})
 
     def _typefunc(self) -> None:
         """
@@ -90,65 +82,63 @@ class Integrator(FunctionNode):
         determines dtype and shape for outputs
         """
         check_has_inputs(self)
-        check_has_inputs(self, ("ordersX", "weights"))
+        check_has_inputs(self, "weights")
+
         input0 = self.inputs[0]
-        ndim = len(input0.dd.shape)
-        if ndim != int(self.mode[:1]):
+        dim = 1 if self.inputs.get("ordersY", None) is None else 2
+        if (ndim := len(input0.dd.shape)) != dim:
             raise TypeFunctionError(
-                f"The Integrator works only with {self.mode} inputs, but one has ndim={ndim}!",
+                f"The Integrator works only with {dim}d inputs, but the first is {ndim}d!",
                 node=self,
             )
-        check_input_dimension(self, (slice(None), "weights"), ndim)
-        check_input_dimension(self, "ordersX", 1)
+        check_input_dimension(self, (slice(None), "weights"), dim)
         check_input_shape(self, (slice(None), "weights"), input0.dd.shape)
-        ordersX = self.inputs["ordersX"]
-        if not issubdtype(ordersX.dd.dtype, integer):
-            raise TypeFunctionError(
-                "The `ordersX` must be array of integers, but given '{ordersX.dd.dtype}'!",
-                node=self,
-                input=ordersX,
-            )
+        check_input_subtype(self, input0, floating)
         dtype = input0.dd.dtype
-        if not issubdtype(dtype, floating):
-            raise TypeFunctionError(
-                "The Integrator works only within `float` or `double` "
-                f"precision, but given '{dtype}'!",
-                node=self,
-            )
         check_input_dtype(self, (slice(None), "weights"), dtype)
-        if sum(ordersX.data) != input0.dd.shape[0]:
-            raise TypeFunctionError(
-                "ordersX must be consistent with inputs shape, "
-                f"but given {ordersX.data=} and {input0.dd.shape=}!",
-                node=self,
-                input=ordersX,
+
+        edgeslenX, edgesX = self.__check_orders("ordersX", input0.dd.shape[0])
+        shape = [edgeslenX]
+        edges = [edgesX]
+        if dim == 2:
+            edgeslenY, edgesY = self.__check_orders(
+                "ordersY", input0.dd.shape[1]
             )
-        if self.mode == "2d":
-            check_has_inputs(self, "ordersY")
-            check_input_dimension(self, "ordersY", 1)
-            ordersY = self.inputs["ordersY"]
-            if not issubdtype(ordersY.dd.dtype, integer):
-                raise TypeFunctionError(
-                    "The `ordersY` must be array of integers, but given '{ordersY.dd.dtype}'!",
-                    node=self,
-                    input=ordersY,
-                )
-            if sum(ordersY.data) != input0.dd.shape[1]:
-                raise TypeFunctionError(
-                    "ordersY must be consistent with inputs shape, "
-                    f"but given {ordersY.data=} and {input0.dd.shape=}!",
-                    node=self,
-                    input=ordersX,
-                )
-        self.fcn = self._functions[self.mode]
+            shape.append(edgeslenY)
+            edges.append(edgesY)
+        check_input_edges_equivalence(self, slice(None), edges)
+
+        shape = tuple(shape)
+        self.fcn = self._functions[dim]
         for output in self.outputs:
             output.dd.dtype = dtype
-            output.dd.shape = input0.dd.shape
+            output.dd.shape = shape
+            output.dd.axes_edges = edges
+            # TODO: copy axes_nodes?
+
+    def __check_orders(self, name: str, shape: ShapeLike) -> tuple:
+        """
+        The method checks dimension (==1) of the input `name`, type (==`integer`),
+        and `sum(orders) == len(input)`; returns `(nbins, edges)`
+        """
+        check_input_dimension(self, name, 1)
+        orders = self.inputs[name]
+        check_output_subtype(self, orders, integer)
+        if (y := sum(orders.data)) != shape:
+            raise TypeFunctionError(
+                f"Orders '{name}' must be consistent with inputs len={shape}, "
+                f"but given '{y}'!",
+                node=self,
+                input=orders,
+            )
+        check_input_edges_dim(self, name, 1)
+        edges = orders.dd.axes_edges[0]
+        return edges.dd.shape[0] - 1, edges
 
     def _post_allocate(self):
         """Allocates the `buffer` within `weights`"""
-        weights = self.inputs["weights"]
-        self.__buffer = zeros(shape=weights.dd.shape, dtype=weights.dd.dtype)
+        weights = self.inputs["weights"].dd
+        self.__buffer = empty(shape=weights.shape, dtype=weights.dtype)
 
     def _fcn_1d(self, _, inputs, outputs):
         """1d version of integration function"""
@@ -158,15 +148,15 @@ class Integrator(FunctionNode):
             multiply(input, weights, out=self.__buffer)
             _integrate1d(output, self.__buffer, ordersX)
         if self.debug:
-            return [outputs.iter_data()]
+            return list(outputs.iter_data())
 
     def _fcn_2d(self, _, inputs, outputs):
         """2d version of integration function"""
-        weights = inputs["weights"].data
-        ordersX = inputs["ordersX"].data
-        ordersY = inputs["ordersY"].data
+        weights = inputs["weights"].data  # (n, m)
+        ordersX = inputs["ordersX"].data  # (n, )
+        ordersY = inputs["ordersY"].data  # (m, )
         for input, output in zip(inputs.iter_data(), outputs.iter_data()):
             multiply(input, weights, out=self.__buffer)
             _integrate2d(output, self.__buffer, ordersX, ordersY)
         if self.debug:
-            return [outputs.iter_data()]
+            return list(outputs.iter_data())
diff --git a/subtrees/dagflow/dagflow/lib/IntegratorSampler.py b/subtrees/dagflow/dagflow/lib/IntegratorSampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..e03427eabfb277a4e7de96c14357a2367e4c2cd1
--- /dev/null
+++ b/subtrees/dagflow/dagflow/lib/IntegratorSampler.py
@@ -0,0 +1,262 @@
+from typing import Literal, Optional
+
+from numpy import (
+    empty,
+    errstate,
+    integer,
+    linspace,
+    matmul,
+    meshgrid,
+    newaxis,
+)
+from numpy.polynomial.legendre import leggauss
+from numpy.typing import DTypeLike, NDArray
+
+from ..exception import InitializationError
+from ..nodes import FunctionNode
+from ..typefunctions import (
+    check_input_dimension,
+    check_input_edges_dim,
+    check_inputs_number,
+    check_output_subtype,
+)
+
+
+def _gl_sampler(
+    orders: NDArray, sample: NDArray, weights: NDArray, edges: NDArray
+):
+    """
+    Uses `numpy.polynomial.legendre.leggauss` to sample points with weights
+    on the range [-1,1] and transforms to any range [a, b]
+    """
+    offset = 0
+    for i, n in enumerate(orders):
+        if n < 1:
+            continue
+        (
+            sample[offset : offset + n],
+            weights[offset : offset + n],
+        ) = leggauss(n)
+        # transforms to the original range [a, b]
+        sample[offset : offset + n] = 0.5 * (
+            sample[offset : offset + n] * (edges[i + 1] - edges[i])
+            + (edges[i + 1] + edges[i])
+        )
+        weights[offset : offset + n] *= 0.5 * (edges[i + 1] - edges[i])
+        # NOTE: the operations above may allocate additional memory in runtime!
+        offset += n
+
+
+class IntegratorSampler(FunctionNode):
+    """
+    The `IntegratorSampler` node creates a sample for the `Integrator` node.
+
+    There are several samplers for 1d integration (`rect`, `trap`, `gl`) and
+    a single `2d` sampler for 2d integration, where `rect` is the rectangular, `trap` is the trapezoidal,
+    `gl` is the 1d Gauss-Legendre, and `2d` is the 2d Gauss-Legendre.
+
+    There is optional argument `offset` for the `rect` sampler,
+    taking the following values: `left`, `center`, or `right`.
+
+    There are no positional inputs. It is supposed that `orders` already have `edges`.
+    There are two outputs: 0 - `sample`, 1 - `weights`
+    """
+
+    __slots__ = ("__bufferX", "__bufferY")
+
+    def __init__(
+        self,
+        *args,
+        mode: Literal["rect", "trap", "gl", "2d"],
+        dtype: DTypeLike = "d",
+        align: Optional[Literal["left", "center", "right"]] = None,
+        **kwargs,
+    ) -> None:
+        super().__init__(*args, **kwargs)
+        if mode not in {"rect", "trap", "gl", "2d"}:
+            raise InitializationError(
+                f"Argument `mode` must be 'rect', 'trap', 'gl', or '2d', but given '{mode}'!",
+                node=self,
+            )
+        if align is not None and mode != "rect":
+            raise InitializationError(
+                "Argument 'align' is used only within 'rect' mode!", node=self
+            )
+        self._dtype = dtype
+        self._mode = mode
+        self._align = align if align is not None else "center"
+        self._add_input("ordersX", positional=False)
+        self._add_output("x")
+        if mode == "2d":
+            self._add_input("ordersY", positional=False)
+            self._add_output("y")
+        self._add_output("weights", positional=False)
+        self._functions.update(
+            {
+                "rect": self._fcn_rect,
+                "trap": self._fcn_trap,
+                "gl": self._fcn_gl1d,
+                "2d": self._fcn_gl2d,
+            }
+        )
+
+    @property
+    def mode(self) -> str:
+        return self._mode
+
+    @property
+    def dtype(self) -> DTypeLike:
+        return self._dtype
+
+    @property
+    def align(self) -> Optional[str]:
+        return self._align
+
+    def _typefunc(self) -> None:
+        """
+        The function to determine the dtype and shape.
+        Checks inputs dimension and, selects an integration algorithm,
+        determines dtype and shape for outputs
+        """
+        check_inputs_number(self, 0)
+        lenX, edgesX = self.__check_orders("ordersX")
+        if self.mode == "2d":
+            lenY, edgesY = self.__check_orders("ordersY")
+            shape = (lenX, lenY)
+            edges = [edgesX, edgesY]
+        else:
+            shape = (lenX,)
+            edges = [edgesX]
+        for output in (*self.outputs, self.outputs["weights"]):
+            output.dd.dtype = self.dtype
+            output.dd.shape = shape
+            output.dd.axes_edges = edges
+        self.fcn = self._functions[self.mode]
+
+    def __check_orders(self, name: str) -> tuple:
+        """
+        The method checks dimension (==1) of the input `name`, type (==`integer`),
+        and returns the total sample length `sum(orders)` with the edges output
+        """
+        check_input_dimension(self, name, 1)
+        orders = self.inputs[name]
+        check_output_subtype(self, orders, integer)
+        check_input_edges_dim(self, name, 1)
+        return sum(orders.data), orders.dd.axes_edges[0]
+
+    def _post_allocate(self) -> None:
+        """Allocates the `buffer`"""
+        ordersX = self.inputs["ordersX"]
+        edgeshapeX = ordersX.dd.axes_edges[0].dd.shape[0] - 1
+        if self.mode == "rect":
+            shapeX = (4, edgeshapeX)
+        elif self.mode in {"trap", "gl"}:
+            shapeX = (edgeshapeX,)
+        else:
+            lenY = sum(self.inputs["ordersY"].data)
+            shapeY = (2, lenY)
+            self.__bufferY = empty(shape=shapeY, dtype=self.dtype)
+            lenX = sum(ordersX.data)
+            shapeX = (2, lenX)
+        self.__bufferX = empty(shape=shapeX, dtype=self.dtype)
+
+    def _fcn_rect(self, _, inputs, outputs) -> Optional[list]:
+        """The rectangular sampling"""
+        ordersX = inputs["ordersX"]
+        edges = ordersX.dd.axes_edges[0]._data  # n+1
+        orders = ordersX.data  # n
+        sample = outputs[0].data  # m = sum(orders)
+        weights = outputs["weights"].data
+        binwidths = self.__bufferX[0]  # n
+        samplewidths = self.__bufferX[1]  # n
+        low = self.__bufferX[2]  # n
+        high = self.__bufferX[3]  # n
+
+        binwidths[:] = edges[1:] - edges[:-1]
+        with errstate(invalid="ignore"):  # to ignore division by zero
+            samplewidths[:] = binwidths / orders
+        if self.align == "left":
+            low[:] = edges[:-1]
+            high[:] = edges[1:] - samplewidths
+        elif self.align == "center":
+            low[:] = edges[:-1] + samplewidths * 0.5
+            high[:] = edges[1:] - samplewidths * 0.5
+        else:
+            low[:] = edges[:-1] + samplewidths
+            high[:] = edges[1:]
+
+        offset = 0
+        for i, n in enumerate(orders):
+            if n > 1:
+                sample[offset : offset + n] = linspace(low[i], high[i], n)
+                weights[offset : offset + n] = samplewidths[i]
+            else:
+                sample[offset : offset + n] = low[i]
+                weights[offset : offset + n] = binwidths[i]
+            offset += n
+
+        if self.debug:
+            return list(outputs.iter_data())
+
+    def _fcn_trap(self, _, inputs, outputs) -> Optional[list]:
+        """The trapezoidal sampling"""
+        ordersX = inputs["ordersX"]
+        edges = ordersX.dd.axes_edges[0]._data  # n+1
+        orders = ordersX.data  # n
+        sample = outputs[0].data  # m = sum(orders)
+        weights = outputs["weights"].data
+        samplewidths = self.__bufferX  # n
+
+        samplewidths[:] = edges[1:] - edges[:-1]
+        with errstate(invalid="ignore"):  # to ignore division by zero
+            samplewidths[:] = samplewidths[:] / (orders - 2.0)
+
+        offset = 0
+        for i, n in enumerate(orders):
+            sample[offset : offset + n] = linspace(edges[i], edges[i + 1], n)
+            weights[offset] = samplewidths[i] * 0.5
+            if n > 2:
+                weights[offset + 1 : offset + n - 2] = samplewidths[i]
+            offset += n - 1
+        weights[-1] = samplewidths[-1] * 0.5
+
+        if self.debug:
+            return list(outputs.iter_data())
+
+    def _fcn_gl1d(self, _, inputs, outputs) -> Optional[list]:
+        """The 1d Gauss-Legendre sampling"""
+        ordersX = inputs["ordersX"]
+        edges = ordersX.dd.axes_edges[0]._data
+        orders = ordersX.data
+        sample = outputs[0].data
+        weights = outputs["weights"].data
+
+        _gl_sampler(orders, sample, weights, edges)
+
+        if self.debug:
+            return list(outputs.iter_data())
+
+    def _fcn_gl2d(self, _, inputs, outputs) -> Optional[list]:
+        """The 2d Gauss-Legendre sampling"""
+        ordersX = inputs["ordersX"]
+        ordersY = inputs["ordersY"]
+        edgesX = ordersX.dd.axes_edges[0]._data  # p + 1
+        edgesY = ordersY.dd.axes_edges[0]._data  # q + 1
+        ordersX = ordersX.data
+        ordersY = ordersY.data
+        weightsX = self.__bufferX[0]  # (n, )
+        weightsY = self.__bufferY[0]  # (m, )
+        sampleX = self.__bufferX[1]  # (n, )
+        sampleY = self.__bufferY[1]  # (m, )
+        X = outputs[0].data  # (n, m)
+        Y = outputs[1].data  # (n, m)
+        weights = outputs["weights"].data  # (n, m)
+
+        _gl_sampler(ordersX, sampleX, weightsX, edgesX)
+        _gl_sampler(ordersY, sampleY, weightsY, edgesY)
+
+        X[:], Y[:] = meshgrid(sampleX, sampleY, indexing="ij")
+        matmul(weightsX[newaxis].T, weightsY[newaxis], out=weights)
+
+        if self.debug:
+            return list(outputs.iter_data())
diff --git a/subtrees/dagflow/dagflow/lib/NodeManyToOne.py b/subtrees/dagflow/dagflow/lib/NodeManyToOne.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ef594b7ac2a1b9e8ce7fc52d07febf8a7496f56
--- /dev/null
+++ b/subtrees/dagflow/dagflow/lib/NodeManyToOne.py
@@ -0,0 +1,31 @@
+from ..input_extra import MissingInputAddOne
+from ..nodes import FunctionNode
+from ..typefunctions import (
+    AllPositionals,
+    check_has_inputs,
+    check_inputs_equivalence,
+    copy_input_edges_to_output,
+    copy_input_shape_to_output,
+    eval_output_dtype,
+)
+
+
+class NodeManyToOne(FunctionNode):
+    """
+    The abstract node with only one output `result`,
+    which is the result of some function on all the positional inputs
+    """
+
+    def __init__(self, *args, **kwargs):
+        kwargs.setdefault(
+            "missing_input_handler", MissingInputAddOne(output_fmt="result")
+        )
+        super().__init__(*args, **kwargs)
+
+    def _typefunc(self) -> None:
+        """A output takes this function to determine the dtype and shape"""
+        check_has_inputs(self) # at least one input
+        check_inputs_equivalence(self) # all the inputs have the same dd fields
+        copy_input_shape_to_output(self, 0, "result") # copy shape to result
+        copy_input_edges_to_output(self, 0, "result") # copy edges to result
+        eval_output_dtype(self, AllPositionals, "result") # eval dtype of result
diff --git a/subtrees/dagflow/dagflow/lib/NodeOneToOne.py b/subtrees/dagflow/dagflow/lib/NodeOneToOne.py
new file mode 100644
index 0000000000000000000000000000000000000000..98b8e37ef0afca7270fbdabb7029800d81c765ee
--- /dev/null
+++ b/subtrees/dagflow/dagflow/lib/NodeOneToOne.py
@@ -0,0 +1,22 @@
+from ..input_extra import MissingInputAddEach
+from ..nodes import FunctionNode
+from ..typefunctions import check_has_inputs
+
+
+class NodeOneToOne(FunctionNode):
+    """
+    The abstract node with an output for every positional input
+    """
+
+    def __init__(self, *args, **kwargs):
+        kwargs.setdefault("missing_input_handler", MissingInputAddEach())
+        super().__init__(*args, **kwargs)
+
+    def _typefunc(self) -> None:
+        """A output takes this function to determine the dtype and shape"""
+        check_has_inputs(self)
+        for inp, out in zip(self.inputs, self.outputs):
+            out.dd.axes_edges = inp.dd.axes_edges
+            out.dd.axes_nodes = inp.dd.axes_nodes
+            out.dd.dtype = inp.dd.dtype
+            out.dd.shape = inp.dd.shape
diff --git a/subtrees/dagflow/dagflow/lib/Product.py b/subtrees/dagflow/dagflow/lib/Product.py
index 967ed1641a4444d71fa37671e01a3bb1f1ece264..524831af867239acc36902caf9981fafae55d1a2 100644
--- a/subtrees/dagflow/dagflow/lib/Product.py
+++ b/subtrees/dagflow/dagflow/lib/Product.py
@@ -1,23 +1,10 @@
 from numpy import copyto
 
-from ..input_extra import MissingInputAddOne
-from ..nodes import FunctionNode
-from ..typefunctions import (
-    check_has_inputs,
-    eval_output_dtype,
-    copy_input_shape_to_output,
-    check_inputs_equivalence,
-    AllPositionals
-)
+from .NodeManyToOne import NodeManyToOne
 
-class Product(FunctionNode):
-    """Product of all the inputs together"""
 
-    def __init__(self, *args, **kwargs):
-        kwargs.setdefault(
-            "missing_input_handler", MissingInputAddOne(output_fmt="result")
-        )
-        super().__init__(*args, **kwargs)
+class Product(NodeManyToOne):
+    """Product of all the inputs together"""
 
     def _fcn(self, _, inputs, outputs):
         out = outputs["result"].data
@@ -26,10 +13,3 @@ class Product(FunctionNode):
             for input in inputs[1:]:
                 out *= input.data
         return out
-
-    def _typefunc(self) -> None:
-        """A output takes this function to determine the dtype and shape"""
-        check_has_inputs(self)
-        copy_input_shape_to_output(self, 0, "result")
-        check_inputs_equivalence(self)
-        eval_output_dtype(self, AllPositionals, "result")
diff --git a/subtrees/dagflow/dagflow/lib/Sum.py b/subtrees/dagflow/dagflow/lib/Sum.py
index e58d9ab416eeb227ace67eacf88c4d209eb9e49f..1751902f37b47a2dc1894d2ed5775d8986bd497f 100644
--- a/subtrees/dagflow/dagflow/lib/Sum.py
+++ b/subtrees/dagflow/dagflow/lib/Sum.py
@@ -1,23 +1,13 @@
-from numpy import copyto, add
+from numpy import add, copyto
 
-from ..input_extra import MissingInputAddOne
-from ..nodes import FunctionNode
-from ..typefunctions import (
-    check_has_inputs,
-    eval_output_dtype,
-    copy_input_shape_to_output,
-    check_inputs_equivalence,
-    AllPositionals
-)
+from .NodeManyToOne import NodeManyToOne
 
-class Sum(FunctionNode):
+
+class Sum(NodeManyToOne):
     """Sum of all the inputs together"""
 
     _mark = '危'
     def __init__(self, *args, **kwargs):
-        kwargs.setdefault(
-            "missing_input_handler", MissingInputAddOne(output_fmt="result")
-        )
         super().__init__(*args, **kwargs)
 
     def _fcn(self, _, inputs, outputs):
@@ -27,10 +17,3 @@ class Sum(FunctionNode):
             for input in inputs[1:]:
                 add(out, input.data, out=out)
         return out
-
-    def _typefunc(self) -> None:
-        """A output takes this function to determine the dtype and shape"""
-        check_has_inputs(self)
-        copy_input_shape_to_output(self, 0, "result")
-        check_inputs_equivalence(self)
-        eval_output_dtype(self, AllPositionals, "result")
diff --git a/subtrees/dagflow/dagflow/lib/trigonometry.py b/subtrees/dagflow/dagflow/lib/trigonometry.py
new file mode 100644
index 0000000000000000000000000000000000000000..01f5b23a6327a027246041787ed4118a08295bcc
--- /dev/null
+++ b/subtrees/dagflow/dagflow/lib/trigonometry.py
@@ -0,0 +1,56 @@
+from numpy import arctan, cos, sin, tan, arccos, arcsin
+
+from .NodeOneToOne import NodeOneToOne
+
+
+class Cos(NodeOneToOne):
+    """Cos function"""
+
+    def _fcn(self, _, inputs, outputs):
+        for inp, out in zip(inputs, outputs):
+            cos(inp.data, out=out.data)
+        return list(outputs.iter_data())
+
+
+class Sin(NodeOneToOne):
+    """Sin function"""
+
+    def _fcn(self, _, inputs, outputs):
+        for inp, out in zip(inputs, outputs):
+            sin(inp.data, out=out.data)
+        return list(outputs.iter_data())
+
+class ArcCos(NodeOneToOne):
+    """ArcCos function"""
+
+    def _fcn(self, _, inputs, outputs):
+        for inp, out in zip(inputs, outputs):
+            arccos(inp.data, out=out.data)
+        return list(outputs.iter_data())
+
+
+class ArcSin(NodeOneToOne):
+    """ArcSin function"""
+
+    def _fcn(self, _, inputs, outputs):
+        for inp, out in zip(inputs, outputs):
+            arcsin(inp.data, out=out.data)
+        return list(outputs.iter_data())
+
+
+class Tan(NodeOneToOne):
+    """Tan function"""
+
+    def _fcn(self, _, inputs, outputs):
+        for inp, out in zip(inputs, outputs):
+            tan(inp.data, out=out.data)
+        return list(outputs.iter_data())
+
+
+class Arctan(NodeOneToOne):
+    """Arctan function"""
+
+    def _fcn(self, _, inputs, outputs):
+        for inp, out in zip(inputs, outputs):
+            arctan(inp.data, out=out.data)
+        return list(outputs.iter_data())
diff --git a/subtrees/dagflow/dagflow/node.py b/subtrees/dagflow/dagflow/node.py
index 56b94ecb5425de5f570704d8dd3b66c80c5bf86f..bc684028a55e834457c147ae458047d08a9009da 100644
--- a/subtrees/dagflow/dagflow/node.py
+++ b/subtrees/dagflow/dagflow/node.py
@@ -1,19 +1,21 @@
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
 from .exception import (
     AllocationError,
-    CriticalError,
     ClosedGraphError,
     ClosingError,
-    OpeningError,
+    CriticalError,
     DagflowError,
+    InitializationError,
+    OpeningError,
     ReconnectionError,
     UnclosedGraphError,
-    InitializationError,
 )
 from .input import Input
+from .iter import IsIterable
 from .legs import Legs
 from .logger import Logger, get_logger
 from .output import Output
-from .iter import IsIterable
 from .types import GraphT
 from typing import Optional, List, Dict, Union, Callable, Any, Tuple, Generator
 
@@ -44,9 +46,10 @@ class Node(Legs):
     # _always_tainted: bool = False
 
     def __init__(
-        self, name,
+        self,
+        name,
         *,
-        label: Union[str, dict, None]=None,
+        label: Union[str, dict, None] = None,
         graph: Optional[GraphT] = None,
         fcn: Optional[Callable] = None,
         typefunc: Optional[Callable] = None,
@@ -56,7 +59,7 @@ class Node(Legs):
         immediate: bool = False,
         auto_freeze: bool = False,
         frozen: bool = False,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(missing_input_handler=missing_input_handler)
         self._name = name
@@ -70,6 +73,7 @@ class Node(Legs):
         self._fcn_chain = []
         if graph is None:
             from .graph import Graph
+
             self.graph = Graph.current()
         else:
             self.graph = graph
@@ -221,13 +225,14 @@ class Node(Legs):
     #
     # Methods
     #
-    def __call__(self, name, child_output: Optional[Output]=None):
+    def __call__(self, name, child_output: Optional[Output] = None, **kwargs):
         self.logger.debug(f"Node '{self.name}': Get input '{name}'")
+        kwargs.setdefault("positional", False)
         inp = self.inputs.get(name, None)
         if inp is None:
             if self.closed:
                 raise ClosedGraphError(node=self)
-            return self._add_input(name, child_output=child_output)
+            return self._add_input(name, child_output=child_output, **kwargs)
         elif inp.connected and (output := inp.parent_output):
             raise ReconnectionError(input=inp, node=self, output=output)
         return inp
@@ -282,20 +287,18 @@ class Node(Legs):
             return self._add_output(name, **kwargs)
         raise ClosedGraphError(node=self)
 
-    def _add_output(self, name, *, keyword: bool=True, positional: bool=True, **kwargs) -> Union[Output, Tuple[Output]]:
+    def _add_output(
+        self, name, *, keyword: bool = True, positional: bool = True, **kwargs
+    ) -> Union[Output, Tuple[Output]]:
         if IsIterable(name):
-            return tuple(
-                self._add_output(n, **kwargs) for n in name
-            )
+            return tuple(self._add_output(n, **kwargs) for n in name)
         self.logger.debug(f"Node '{self.name}': Add output '{name}'")
         if isinstance(name, Output):
             if name.name in self.outputs or name.node:
                 raise ReconnectionError(output=name, node=self)
             name._node = self
             return self.__add_output(
-                name,
-                positional=positional,
-                keyword=keyword
+                name, positional=positional, keyword=keyword
             )
         if name in self.outputs:
             raise ReconnectionError(output=name, node=self)
@@ -303,21 +306,31 @@ class Node(Legs):
         return self.__add_output(
             Output(name, self, **kwargs),
             positional=positional,
-            keyword=keyword
+            keyword=keyword,
         )
 
-    def __add_output(self, out, positional: bool = True, keyword: bool = True) -> Union[Output, Tuple[Output]]:
+    def __add_output(
+        self, out, positional: bool = True, keyword: bool = True
+    ) -> Union[Output, Tuple[Output]]:
         self.outputs.add(out, positional=positional, keyword=keyword)
         if self._graph:
             self._graph._add_output(out)
         return out
 
-    def add_pair(self, iname: str, oname: str, **kwargs) -> Tuple[Input, Output]:
+    def add_pair(
+        self, iname: str, oname: str, **kwargs
+    ) -> Tuple[Input, Output]:
         if not self.closed:
             return self._add_pair(iname, oname, **kwargs)
         raise ClosedGraphError(node=self)
 
-    def _add_pair(self, iname: str, oname: str, input_kws: Optional[dict]=None, output_kws: Optional[dict]=None) -> Tuple[Input, Output]:
+    def _add_pair(
+        self,
+        iname: str,
+        oname: str,
+        input_kws: Optional[dict] = None,
+        output_kws: Optional[dict] = None,
+    ) -> Tuple[Input, Output]:
         input_kws = input_kws or {}
         output_kws = output_kws or {}
         output = self._add_output(oname, **output_kws)
@@ -450,7 +463,9 @@ class Node(Legs):
         if not self._types_tainted:
             return True
         if recursive:
-            self.logger.debug(f"Node '{self.name}': Trigger recursive update types...")
+            self.logger.debug(
+                f"Node '{self.name}': Trigger recursive update types..."
+            )
             for input in self.inputs.iter_all():
                 input.parent_node.update_types(recursive)
         self.logger.debug(f"Node '{self.name}': Update types...")
@@ -461,9 +476,12 @@ class Node(Legs):
         if self._allocated:
             return True
         if recursive:
-            self.logger.debug(f"Node '{self.name}': Trigger recursive memory allocation...")
+            self.logger.debug(
+                f"Node '{self.name}': Trigger recursive memory allocation..."
+            )
             if not all(
-                input.parent_node.allocate(recursive) for input in self.inputs.iter_all()
+                input.parent_node.allocate(recursive)
+                for input in self.inputs.iter_all()
             ):
                 return False
         self.logger.debug(f"Node '{self.name}': Allocate memory on inputs")
@@ -481,7 +499,9 @@ class Node(Legs):
         self._allocated = True
         return True
 
-    def close(self, recursive: bool = True, together: List['Node'] = []) -> bool:
+    def close(
+        self, recursive: bool = True, together: List["Node"] = []
+    ) -> bool:
         # Caution: `together` list should not be written in!
 
         if self._closed:
@@ -489,21 +509,22 @@ class Node(Legs):
         if self.invalid:
             raise ClosingError("Cannot close an invalid node!", node=self)
         self.logger.debug(f"Node '{self.name}': Trigger recursive close")
-        for node in [self]+together:
+        for node in [self] + together:
             node.update_types(recursive=recursive)
-        for node in [self]+together:
+        for node in [self] + together:
             node.allocate(recursive=recursive)
         if recursive and not all(
-            input.parent_node.close(recursive) for input in self.inputs.iter_all()
+            input.parent_node.close(recursive)
+            for input in self.inputs.iter_all()
         ):
             return False
         for node in together:
             if not node.close(recursive=recursive):
                 return False
-        self.logger.debug(f"Node '{self.name}': Close")
         self._closed = self._allocated
         if not self._closed:
             raise ClosingError(node=self)
+        self.logger.debug(f"Node '{self.name}': Closed")
         return self._closed
 
     def open(self, force: bool = False) -> bool:
diff --git a/subtrees/dagflow/dagflow/nodes.py b/subtrees/dagflow/dagflow/nodes.py
index 668976652d021486cdbcc2fd5c6cb3cff0e6fcf7..42b4dce1a6489691c1057aec5bb3903c8ea20643 100644
--- a/subtrees/dagflow/dagflow/nodes.py
+++ b/subtrees/dagflow/dagflow/nodes.py
@@ -1,4 +1,3 @@
-from dagflow.exception import CriticalError
 from .node import Node
 
 
diff --git a/subtrees/dagflow/dagflow/parameters.py b/subtrees/dagflow/dagflow/parameters.py
index 0c218a0b2e1b2495d8926ecffa2e428ed16168c8..e7581d4c4ad04cb996d41f18748b418628eb6a16 100644
--- a/subtrees/dagflow/dagflow/parameters.py
+++ b/subtrees/dagflow/dagflow/parameters.py
@@ -7,7 +7,7 @@ from .lib.CovmatrixFromCormatrix import CovmatrixFromCormatrix
 
 from numpy import zeros_like, array
 from numpy.typing import DTypeLike
-from typing import Optional, Dict, List, Generator
+from typing import Optional, Dict, List
 
 class Parameter:
     __slots__ = ('_idx','_parent', '_value_output', '_labelfmt')
diff --git a/subtrees/dagflow/dagflow/tools/schema.py b/subtrees/dagflow/dagflow/tools/schema.py
index 629241dad5cfa61168253a8fa81983702c57d5f6..598f586333b198088e8f4f7d2bfc228e1ba2dffb 100644
--- a/subtrees/dagflow/dagflow/tools/schema.py
+++ b/subtrees/dagflow/dagflow/tools/schema.py
@@ -26,7 +26,7 @@ def LoadFileWithExt(*, key: Union[str, dict]=None, update: bool=False, **kwargs:
             dct = None
         for ext, loader in kwargs.items():
             if filename.endswith(f'.{ext}'):
-                logger.log(SUBINFO, f'Read filename')
+                logger.log(SUBINFO, f'Read: {filename}')
                 ret = loader(filename)
 
                 if update and dct is not None:
diff --git a/subtrees/dagflow/dagflow/typefunctions.py b/subtrees/dagflow/dagflow/typefunctions.py
index f923f66710769f577aacd3e9461374b535b91c14..89a08e3de8d50390b0f8bc250a08b32428f82239 100644
--- a/subtrees/dagflow/dagflow/typefunctions.py
+++ b/subtrees/dagflow/dagflow/typefunctions.py
@@ -1,10 +1,13 @@
 from collections.abc import Sequence
-from typing import Union
-
-from numpy import result_type
 from itertools import repeat
+from typing import Optional, Tuple, Union
+
+from numpy import issubdtype, result_type
+from numpy.typing import DTypeLike
 
 from .exception import TypeFunctionError
+from .input import Input
+from .output import Output
 from .types import NodeT
 
 AllPositionals = slice(None)
@@ -24,6 +27,35 @@ except TypeError:
             yield combo
 
 
+class MethodSequenceCaller:
+    """Class to call a sequence of methods"""
+
+    methods: list
+
+    def __init__(self) -> None:
+        self.methods = []
+
+    def __call__(self, inputs, outputs):
+        for method in self.methods:
+            method(inputs, outputs)
+
+
+def cpy_dtype(input, output):
+    output.dd.dtype = input.dd.dtype
+
+
+def cpy_shape(input, output):
+    output.dd.shape = input.dd.shape
+
+
+def cpy_edges(input, output):
+    output.dd.axes_edges = input.dd.axes_edges
+
+
+def cpy_nodes(input, output):
+    output.dd.axes_nodes = input.dd.axes_nodes
+
+
 def check_has_inputs(
     node: NodeT, inputkey: Union[str, int, slice, Sequence, None] = None
 ) -> None:
@@ -44,6 +76,14 @@ def check_has_inputs(
             ) from exc
 
 
+def check_inputs_number(node: NodeT, n: int) -> None:
+    """Checking if the node has only `n` inputs"""
+    if (ninp := len(node.inputs)) != n:
+        raise TypeFunctionError(
+            f"The node must have only {n} inputs, but given {ninp}!", node=node
+        )
+
+
 def eval_output_dtype(
     node: NodeT,
     inputkey: Union[str, int, slice, Sequence] = AllPositionals,
@@ -64,35 +104,31 @@ def copy_input_to_output(
     outputkey: Union[str, int, slice, Sequence] = AllPositionals,
     dtype: bool = True,
     shape: bool = True,
+    edges: bool = True,
+    nodes: bool = True,
 ) -> None:
     """Coping input dtype and setting for the output"""
     inputs = tuple(node.inputs.iter(inputkey))
     outputs = tuple(node.outputs.iter(outputkey))
 
-    if dtype and shape:
-
-        def cpy(input, output):
-            output.dd.dtype = input.dd.dtype
-            output.dd.shape = input.dd.shape
-
-    elif dtype:
-
-        def cpy(input, output):
-            output.dd.dtype = input.dd.dtype
-
-    elif shape:
-
-        def cpy(input, output):
-            output.dd.shape = input.dd.shape
-
-    else:
+    if not any((dtype, shape, edges, nodes)):
         return
 
+    caller = MethodSequenceCaller()
+    if dtype:
+        caller.methods.append(cpy_dtype)
+    if shape:
+        caller.methods.append(cpy_shape)
+    if edges:
+        caller.methods.append(cpy_edges)
+    if nodes:
+        caller.methods.append(cpy_nodes)
+
     if len(inputs) == 1:
         inputs = repeat(inputs[0], len(outputs))
 
     for input, output in zip(inputs, outputs, strict=True):
-        cpy(input, output)
+        caller(input, output)
 
 
 def copy_input_dtype_to_output(
@@ -127,6 +163,22 @@ def copy_input_shape_to_output(
         output.dd.shape = input.dd.shape
 
 
+def copy_input_edges_to_output(
+    node: NodeT,
+    inputkey: Union[str, int] = 0,
+    outputkey: Union[str, int, slice, Sequence] = AllPositionals,
+) -> None:
+    """Copying input edges and setting for the output"""
+    inputs = tuple(node.inputs.iter(inputkey))
+    outputs = tuple(node.outputs.iter(outputkey))
+
+    if len(inputs) == 1:
+        inputs = repeat(inputs[0], len(outputs))
+
+    for input, output in zip(inputs, outputs, strict=True):
+        output.dd.axes_edges = input.dd.axes_edges
+
+
 def combine_inputs_shape_to_output(
     node: NodeT,
     inputkey: Union[str, int, slice, Sequence] = AllPositionals,
@@ -188,6 +240,7 @@ def check_input_square_or_diag(
             )
     return dim_max
 
+
 def check_input_shape(
     node: NodeT, inputkey: Union[str, int, slice, Sequence], shape: tuple
 ):
@@ -219,19 +272,31 @@ def check_input_dtype(
 def check_inputs_equivalence(
     node: NodeT, inputkey: Union[str, int, slice, Sequence] = AllPositionals
 ):
-    """Checking the equivalence of the dtype and shape of all the inputs"""
+    """Checking the equivalence of the dtype, shape, axes_edges and axes_nodes of all the inputs"""
     inputs = tuple(node.inputs.iter(inputkey))
     input0, inputs = inputs[0], inputs[1:]
 
-    dtype, shape = input0.dd.dtype, input0.dd.shape
+    dtype, shape, edges, nodes = (
+        input0.dd.dtype,
+        input0.dd.shape,
+        input0.dd.axes_edges,
+        input0.dd.axes_nodes,
+    )
     for input in inputs:
-        if input.dd.dtype != dtype or input.dd.shape != shape:
+        if (
+            input.dd.dtype != dtype
+            or input.dd.shape != shape
+            or input.dd.axes_edges != edges
+            or input.dd.axes_nodes != nodes
+        ):
             raise TypeFunctionError(
-                f"Input data {input.dtype} [{input.shape}] is inconsistent with {dtype} [{shape}]",
+                f"Input data [{input.dtype=}, {input.shape=}, {input.axes_edges=}, {input.axes_nodes=}]"
+                f" is inconsistent with [{dtype=}, {shape=}, {edges=}, {nodes=}]",
                 node=node,
                 input=input,
             )
 
+
 def check_inputs_square_or_diag(
     node: NodeT,
     inputkey: Union[str, int, slice, Sequence] = AllPositionals,
@@ -247,7 +312,9 @@ def check_inputs_square_or_diag(
         shape = input.dd.shape
         dim = len(shape)
         dim_max = max(dim, dim_max)
-        if shape0 != shape[0] or ((dim == 2 and shape[0] != shape[1]) and dim != 1):
+        if shape0 != shape[0] or (
+            (dim == 2 and shape[0] != shape[1]) and dim != 1
+        ):
             raise TypeFunctionError(
                 f"The node supports only square inputs (or 1d as diagonal) of size {shape0}x{shape0}. Got {shape}!",
                 node=node,
@@ -273,6 +340,26 @@ def check_inputs_same_dtype(
             )
 
 
+def check_input_subtype(node: NodeT, input: Input, dtype: DTypeLike):
+    """Checks if the input dtype is some subtype of `dtype`."""
+    if not issubdtype(input.dd.dtype, dtype):
+        raise TypeFunctionError(
+            f"The input must be an array of {dtype}, but given '{input.dd.dtype}'!",
+            node=node,
+            input=input,
+        )
+
+
+def check_output_subtype(node: NodeT, output: Output, dtype: DTypeLike):
+    """Checks if the output dtype is some subtype of `dtype`."""
+    if not issubdtype(output.dd.dtype, dtype):
+        raise TypeFunctionError(
+            f"The output must be an array of {dtype}, but given '{output.dd.dtype}'!",
+            node=node,
+            output=output,
+        )
+
+
 def check_inputs_multiplicable_mat(
     node: NodeT,
     inputkey1: Union[str, int, slice, Sequence],
@@ -299,3 +386,118 @@ def check_inputs_multiplicable_mat(
                 node=node,
                 input=input,
             )
+
+
+def check_input_edges_dim(
+    node: NodeT,
+    inputkey: Union[str, int, slice, Sequence] = AllPositionals,
+    dim: int = 1,
+):
+    """Checking the existence and dim of the edges of the inputs"""
+    for input in node.inputs.iter(inputkey):
+        edges = input.dd.axes_edges
+        if len(edges) == 0:
+            raise TypeFunctionError(
+                f"The input must have edges, but given {edges=}!",
+                node=node,
+                input=input,
+            )
+        for edge in edges:
+            if not isinstance(edge, Output):
+                raise TypeFunctionError(
+                    f"The input edge must be an `Output`, but given {edge=}!",
+                    node=node,
+                    input=input,
+                )
+            if edge.dd.dim != dim:
+                raise TypeFunctionError(
+                    f"The input edge must be a {dim}d array, but given {edge.dd.dim=}!",
+                    node=node,
+                    input=input,
+                )
+
+
+def check_input_edges_equivalence(
+    node: NodeT,
+    inputkey: Union[str, int, slice, Sequence] = AllPositionals,
+    reference: Optional[Tuple[Output]] = None,
+):
+    """Checking the equivalence of the edges of the inputs."""
+    inputs = tuple(node.inputs.iter(inputkey))
+    if reference is None:
+        input0, inputs = inputs[0], inputs[1:]
+        reference = input0.dd.axes_edges
+    for input in inputs:
+        edges = input.dd.axes_edges
+        if edges != reference:
+            raise TypeFunctionError(
+                f"The input edge must be {reference}, but given {edges=}!",
+                node=node,
+                input=input,
+            )
+
+
+def check_edges_type(
+    node: NodeT,
+    inputkey: Union[str, int, slice, Sequence] = AllPositionals,
+    outputkey: Union[str, int, slice, Sequence] = AllPositionals,
+):
+    """Checking that the edges of the inputs and outputs are `List[Output]`."""
+    # check inputs
+    for input in node.inputs.iter(inputkey):
+        edges = input.dd.axes_edges
+        if not isinstance(edges, list):
+            raise TypeFunctionError(
+                f"The `input.dd.axes_edges` must be `List[Output]`, but given {edges=}!",
+                node=node,
+                input=input,
+            )
+        for edge in edges:
+            if not isinstance(edge, Output):
+                raise TypeFunctionError(
+                    f"The edge must be `Output`, but given {edge=}!",
+                    node=node,
+                    input=input,
+                )
+    # check outputs
+    for output in node.outputs.iter(outputkey):
+        edges = output.dd.axes_edges
+        if not isinstance(edges, list):
+            raise TypeFunctionError(
+                f"The `output.dd.axes_edges` must be `List[Output]`, but given {edges=}!",
+                node=node,
+                output=output,
+            )
+        for edge in edges:
+            if not isinstance(edge, Output):
+                raise TypeFunctionError(
+                    f"The edge must be `Output`, but given {edge=}!",
+                    node=node,
+                    output=output,
+                )
+
+
+def check_array_edges_consistency(node: NodeT, output: Output):
+    """
+    Checks the dimension equivalence of edges and the output, then checks that
+    `len(output) = N` and `len(edges) = N+1` for each dimension.
+    The type function is passed if the edges are empty.
+    """
+    dd = output.dd
+    edges = dd.axes_edges
+    if (y := len(edges)) > 0:
+        if y != dd.dim:
+            raise TypeFunctionError(
+                f"Array: the data ({dd.dim}d) and edges "
+                f"({len(edges)}d) must have the same dimension!",
+                node=node,
+                output=output,
+            )
+        for i, edge in enumerate(edges):
+            if edge.dd.shape[0] != dd.shape[i] + 1:
+                raise TypeFunctionError(
+                    f"Array: the data length (={dd.shape[i]} + 1) must be "
+                    f"consistent with edges (={edge.dd.shape[0]})!",
+                    node=node,
+                    output=output,
+                )
diff --git a/subtrees/dagflow/requirements.txt b/subtrees/dagflow/requirements.txt
index 1d87e5b26ad044f5844f5999cbe6ee098f589a04..5e2a13d7898d88e0827c45802a0ca6833ca4616a 100644
--- a/subtrees/dagflow/requirements.txt
+++ b/subtrees/dagflow/requirements.txt
@@ -1,7 +1,7 @@
 contextlib2
 coverage
 numba
-numpy==1.23.1
+numpy==1.23.5
 pygraphviz
 pytest
 pytest-cov
diff --git a/subtrees/dagflow/subtrees/dictwrapper/.envrc b/subtrees/dagflow/subtrees/dictwrapper/.envrc
deleted file mode 100644
index e780e09108ca62a21880576ed949d9b5d515ac90..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/dictwrapper/.envrc
+++ /dev/null
@@ -1 +0,0 @@
-export PYTHONPATH=$PWD
diff --git a/subtrees/dagflow/subtrees/dictwrapper/multikeydict/flatmkdict.py b/subtrees/dagflow/subtrees/dictwrapper/multikeydict/flatmkdict.py
index a3e1756a0a41ed0eadc11054191e882c6958c751..bf01147b082f51c3c510308e999e42753fb9d888 100644
--- a/subtrees/dagflow/subtrees/dictwrapper/multikeydict/flatmkdict.py
+++ b/subtrees/dagflow/subtrees/dictwrapper/multikeydict/flatmkdict.py
@@ -5,19 +5,20 @@ from collections.abc import Sequence
 from typing import Any, Callable, Generator, Optional
 
 class FlatMKDict(UserDict):
-    __slots__ = ('_protect',)
+    __slots__ = ('_protect', '_merge_flatdicts')
     _protect: bool
 
     def __init__(*args, protect: bool = False, **kwargs) -> None:
         self = args[0]
         self._protect = protect
+        self._merge_flatdicts = True
         UserDict.__init__(*args, **kwargs)
 
     def _process_key(self, key: Any) -> tuple:
-        if isinstance(key, Sequence):
-            return tuple(sorted(key))
-        else:
-            return frozenset((key,))
+        # if isinstance(key, Sequence):
+        return tuple(sorted(key))
+        # else:
+        #     return frozenset((key,))
 
     def __getitem__(self, key: Any) -> Any:
         key = self._process_key(key)
@@ -30,6 +31,15 @@ class FlatMKDict(UserDict):
                 f"Reassigning of the existed key '{key}' is restricted, "
                 "due to the protection!"
             )
+
+        if self._merge_flatdicts and isinstance(val, FlatMKDict):
+            # merge the nested FlatMKDict's items under the composite key
+            for subkey, subval in val.items():
+                newkey = key+subkey
+                self[newkey] = subval
+
+            return
+
         super().__setitem__(key, val)
 
     def __contains__(self, key: Any) -> bool:
@@ -49,7 +59,7 @@ class FlatMKDict(UserDict):
         *args,
         filterkey: Optional[Callable[[Any], bool]] = None,
         filterkeyelem: Optional[Callable[[Any], bool]] = None,
-    ) -> tuple:
+    ) -> Generator:
         """
         Returns items from the slice by `args`.
         If `args` are empty returns all items.
diff --git a/subtrees/dagflow/subtrees/dictwrapper/multikeydict/flatten.py b/subtrees/dagflow/subtrees/dictwrapper/multikeydict/flatten.py
new file mode 100644
index 0000000000000000000000000000000000000000..c03832857e390cb719ddade6bad5060848d04915
--- /dev/null
+++ b/subtrees/dagflow/subtrees/dictwrapper/multikeydict/flatten.py
@@ -0,0 +1,35 @@
+from .nestedmkdict import NestedMKDict
+from .flatmkdict import FlatMKDict
+
+from typing import Sequence, Tuple
+
+def _select(seq: Sequence, elems_mask: set) -> Tuple[Tuple, Tuple]:
+	selected = []
+	rest = ()
+	for i, el in enumerate(reversed(seq), 0):
+		if el in elems_mask:
+			selected.append(el)
+		else:
+			rest = tuple(seq[:len(seq)-i])
+			break
+	return tuple(rest), tuple(reversed(selected))
+
+def flatten(mkdict, selkeys: Sequence=()) -> NestedMKDict:
+	selkeys_set = set(selkeys)
+	newdict = mkdict._wrap({}, parent=mkdict)
+	newdict._parent = None
+
+	for key, v in mkdict.walkitems():
+		keys_nested, keys_flat = _select(key, selkeys_set)
+		if keys_flat:
+			flatdict = newdict.get(keys_nested, None)
+			if flatdict is None:
+				newdict[keys_nested] = (flatdict:=FlatMKDict(((keys_flat, v),),))
+			elif isinstance(flatdict, FlatMKDict):
+				flatdict[keys_flat] = v
+			else:
+				raise KeyError(f'Unable to flatten: {".".join(key)}')
+		else:
+			newdict[key] = v
+
+	return newdict
diff --git a/subtrees/dagflow/subtrees/dictwrapper/multikeydict/nestedmkdict.py b/subtrees/dagflow/subtrees/dictwrapper/multikeydict/nestedmkdict.py
index 1d588ac8d98908b132c827374edf3baacef6dbb3..7b5406bc98ebf96d93106e9e335910eebd196ae1 100644
--- a/subtrees/dagflow/subtrees/dictwrapper/multikeydict/nestedmkdict.py
+++ b/subtrees/dagflow/subtrees/dictwrapper/multikeydict/nestedmkdict.py
@@ -3,7 +3,7 @@ from .visitor import MakeNestedMKDictVisitor
 from .nestedmkdictaccess import NestedMKDictAccess
 
 from collections.abc import Sequence, MutableMapping
-from typing import Any
+from typing import Any, Optional
 
 class NestedMKDict(ClassWrapper):
     """Dictionary wrapper managing nested dictionaries
@@ -22,7 +22,7 @@ class NestedMKDict(ClassWrapper):
             return dic
         return ClassWrapper.__new__(cls)
 
-    def __init__(self, dic, *, sep: str=None, parent=None, recursive_to_others: bool=False):
+    def __init__(self, dic, *, sep: str=None, parent: Optional[Any]=None, recursive_to_others: bool=False):
         if isinstance(dic, NestedMKDict):
             if sep is None:
                 sep = dic._sep
@@ -105,20 +105,25 @@ class NestedMKDict(ClassWrapper):
     def __getitem__(self, key):
         if key==():
             return self
-        key, rest=self.splitkey(key)
+        head, rest=self.splitkey(key)
 
-        sub = self._object.__getitem__(key)
-        sub = self._wrap(sub, parent=self)
+        sub = self._object.__getitem__(head)
         if not rest:
+            sub = self._wrap(sub, parent=self)
             return sub
 
         if sub is None:
             raise KeyError( f"No nested key '{key}'" )
 
+        sub = self._wrap(sub, parent=self)
         if self._not_recursive_to_others and not isinstance(sub, NestedMKDict):
             raise TypeError(f"Nested value for '{key}' has wrong type")
 
-        return sub[rest]
+        try:
+            return sub[rest]
+        except KeyError as e:
+            raise KeyError(key) from e
+
 
     def __delitem__(self, key):
         if key==():
@@ -171,7 +176,7 @@ class NestedMKDict(ClassWrapper):
         if self._not_recursive_to_others and not isinstance(sub, NestedMKDict):
             raise TypeError(f"Nested value for '{key}' has wrong type")
 
-        return sub.set(rest, value)
+        return sub.__setitem__(rest, value)
 
     __setitem__= set
 
@@ -308,3 +313,4 @@ class NestedMKDict(ClassWrapper):
         return self
 
     __ixor__ = update_missing
+
diff --git a/subtrees/dagflow/subtrees/dictwrapper/test/test_flatmkdict.py b/subtrees/dagflow/subtrees/dictwrapper/test/test_flatmkdict.py
index d1199579df83c3b4e1ce0be4bb14c0798c251832..f30ef435ebe52d7834f3b5525e557e450804ee8c 100644
--- a/subtrees/dagflow/subtrees/dictwrapper/test/test_flatmkdict.py
+++ b/subtrees/dagflow/subtrees/dictwrapper/test/test_flatmkdict.py
@@ -60,3 +60,19 @@ def test_slice_filter():
         ("a", "b", "c"): 2,
         ("a", "b"): 1,
     }
+
+def test_merge():
+    fd = FlatMKDict()
+    fdsub = FlatMKDict()
+    fdsub['d', 'e', 'f'] = 3
+    fdsub['d', 'e', 'g'] = 4
+
+    fd['a', 'b', 'c1'] = 1
+    fd['a', 'b', 'c2'] = 2
+    fd['a', 'b', 'c4'] = fdsub
+
+    assert fd['a', 'b', 'c1'] == 1
+    assert fd['a', 'b', 'c2'] == 2
+    fd['a', 'b', 'c4', 'd', 'e', 'f'] = 3
+    fd['a', 'b', 'c4', 'd', 'e', 'g'] = 4
+
diff --git a/subtrees/dagflow/subtrees/dictwrapper/test/test_nestedmkdict.py b/subtrees/dagflow/subtrees/dictwrapper/test/test_nestedmkdict.py
index 7351a6dd6a9c3bde1d810117e842d55316dd1b2a..e0f8eae4f682615b5a5aae799fbc873c44fb4b98 100644
--- a/subtrees/dagflow/subtrees/dictwrapper/test/test_nestedmkdict.py
+++ b/subtrees/dagflow/subtrees/dictwrapper/test/test_nestedmkdict.py
@@ -26,7 +26,7 @@ def test_nestedmkdict_03():
     assert tuple(dw.keys())==('a','b','c')
 
 @pytest.mark.parametrize('sep', [None, '.'])
-def test_nestedmkdict_03(sep):
+def test_nestedmkdict_04(sep):
     dct = dict(a=1, b=2, c=3, d=dict(e=4), f=dict(g=dict(h=5)))
     dct['z.z.z'] = 0
     print(dct)
diff --git a/subtrees/dagflow/subtrees/dictwrapper/test/test_nestedmkdict_flatten.py b/subtrees/dagflow/subtrees/dictwrapper/test/test_nestedmkdict_flatten.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9a6ded7afa6ae9a1efe87a94a876b68af6785f6
--- /dev/null
+++ b/subtrees/dagflow/subtrees/dictwrapper/test/test_nestedmkdict_flatten.py
@@ -0,0 +1,141 @@
+from multikeydict.nestedmkdict import NestedMKDict
+from multikeydict.flatten import flatten, _select
+from pytest import raises
+
+from pprint import pprint
+
+def test__select():
+    a, b = _select(tuple('abcd'), set('cd'))
+    assert a==tuple('ab')
+    assert b==tuple('cd')
+
+    a, b = _select(tuple('abcd'), set('bd'))
+    assert a==tuple('abc')
+    assert b==tuple('d')
+
+    a, b = _select(tuple('abcd'), set('ab'))
+    assert a==tuple('abcd')
+    assert b==tuple()
+
+    a, b = _select(tuple('abcd'), set('ef'))
+    assert a==tuple('abcd')
+    assert b==tuple()
+
+
+def test_nestedmkdict_flatten_v01():
+    dct = {'root': {
+        'subfolder1': {
+            'key1': 'value1',
+            'key2': 'value2'
+        },
+        'subfolder2': {
+            'key1': 'value1',
+            'key2': 'value2',
+            'st': {
+                'a1': {
+                       'b1': {
+                           'c1': 'v1'
+                           }
+                       },
+                'a2': {
+                       'b2': {
+                           'c2': 'v2'
+                           }
+                       },
+                }
+        },
+        'key0': 'value0'
+    }}
+    dw = NestedMKDict(dct, recursive_to_others=True)
+    dws = NestedMKDict(dct, sep='.', recursive_to_others=True)
+
+    dwf = flatten(dw, ('a1', 'b1', 'c1', 'a2', 'b2', 'c2'))
+    dwsf = flatten(dws, ('a1', 'b1', 'c1', 'a2', 'b2', 'c2'))
+
+    for obj in (dwf, dwsf):
+        assert obj['root', 'subfolder2', 'st', 'a1', 'b1', 'c1']=='v1'
+        assert obj['root', 'subfolder2', 'st', 'b1', 'a1', 'c1']=='v1'
+        assert obj['root', 'subfolder2', 'st', 'c1', 'a1', 'b1']=='v1'
+        assert obj['root', 'subfolder2', 'st', 'a2', 'b2', 'c2']=='v2'
+        assert obj['root', 'subfolder2', 'st', 'b2', 'a2', 'c2']=='v2'
+        assert obj['root', 'subfolder2', 'st', 'c2', 'a2', 'b2']=='v2'
+
+def test_nestedmkdict_flatten_v02():
+    dct = {'root': {
+        'subfolder1': {
+            'key1': 'value1',
+            'key2': 'value2'
+        },
+        'subfolder2': {
+            'key1': 'value1',
+            'key2': 'value2',
+            'st': {
+                'a1': {
+                       'b1': {
+                           'd1': 'extra',
+                           'c1': 'v1'
+                           }
+                       },
+                'a2': {
+                       'b2': {
+                           'c2': 'v2'
+                           }
+                       },
+                }
+        },
+        'key0': 'value0'
+    }}
+    dw = NestedMKDict(dct, recursive_to_others=True)
+
+    with raises(KeyError):
+        dwf = flatten(dw, ('a1', 'b1', 'c1', 'a2', 'b2', 'c2'))
+    # import IPython; IPython.embed(colors='neutral')
+    # for obj in (dwf,):
+    #     assert obj['root', 'subfolder2', 'st', 'a1', 'b1', 'c1']=='v1'
+    #     assert obj['root', 'subfolder2', 'st', 'b1', 'a1', 'c1']=='v1'
+    #     assert obj['root', 'subfolder2', 'st', 'c1', 'a1', 'b1']=='v1'
+    #     assert obj['root', 'subfolder2', 'st', 'a2', 'b2', 'c2']=='v2'
+    #     assert obj['root', 'subfolder2', 'st', 'b2', 'a2', 'c2']=='v2'
+    #     assert obj['root', 'subfolder2', 'st', 'c2', 'a2', 'b2']=='v2'
+    #     # FlatDict is unable to pass keys
+    #     # assert obj['root', 'subfolder2', 'st', 'd1', 'a2', 'b2']=='extra'
+
+def test_nestedmkdict_flatten_v03():
+    dct = {'root': {
+        'subfolder1': {
+            'key1': 'value1',
+            'key2': 'value2'
+        },
+        'subfolder2': {
+            'key1': 'value1',
+            'key2': 'value2',
+            'st': {
+                'a1': {
+                       'b1': {
+                           'c1': 'v1'
+                           }
+                       },
+                'a2': {
+                       'b2': {
+                           'c2': 'v2',
+                           'd1': 'extra'
+                           }
+                       },
+                }
+        },
+        'key0': 'value0'
+    }}
+    dw = NestedMKDict(dct, recursive_to_others=True)
+    dwf = flatten(dw, ('a1', 'b1', 'c1', 'a2', 'b2', 'c2'))
+    # TODO: this test is inconsistent with test_nestedmkdict_flatten_v02
+    # It does the same, but in different order.
+    pprint(dwf.object)
+    for obj in (dwf,):
+        assert obj['root', 'subfolder2', 'st', 'a1', 'b1', 'c1']=='v1'
+        assert obj['root', 'subfolder2', 'st', 'b1', 'a1', 'c1']=='v1'
+        assert obj['root', 'subfolder2', 'st', 'c1', 'a1', 'b1']=='v1'
+        assert obj['root', 'subfolder2', 'st', 'a2', 'b2', 'c2']=='v2'
+        assert obj['root', 'subfolder2', 'st', 'b2', 'a2', 'c2']=='v2'
+        assert obj['root', 'subfolder2', 'st', 'c2', 'a2', 'b2']=='v2'
+        assert obj['root', 'subfolder2', 'st', 'd1', 'a2', 'b2']=='extra'
+
diff --git a/subtrees/dagflow/subtrees/gindex/gindex/gnindex.py b/subtrees/dagflow/subtrees/gindex/gindex/gnindex.py
index 3a4e95586b5ea7a8b4ccf3284d0e38b342fa87db..605ca28847b854da04f197970aa7146e828c5380 100644
--- a/subtrees/dagflow/subtrees/gindex/gindex/gnindex.py
+++ b/subtrees/dagflow/subtrees/gindex/gindex/gnindex.py
@@ -56,7 +56,7 @@ class GIndexNameDict(UserDict):
 @define(hash=True, slots=True)
 class GNIndexInstance:
     """
-    The n-dimensional index instance class, storing `indices`
+    The n-dimensional index instance class, storing `instances`
     (`type=list[GIndexInstance]`) and `names` (`type=dict[GIndexName, ]`).
     Contains `format` method, which substitutes `value` instead of `name.short`
     and `name.full`.
@@ -66,7 +66,7 @@ class GNIndexInstance:
         that is the `string` for formatting
     """
 
-    _indices: Tuple[GIndexInstance, ...] = field(default=tuple(), alias='indices')
+    _instances: Tuple[GIndexInstance, ...] = field(default=tuple(), alias='instances')
     order: tuple = field(default=tuple())
     sep: str = field(validator=instance_of(str), default="_")
     withname: bool = field(validator=instance_of(bool), default=False)
@@ -81,10 +81,10 @@ class GNIndexInstance:
 
     @property
     def values(self) -> Tuple[str]:
-        return tuple(instance.value for instance in self._indices)
+        return tuple(instance.value for instance in self._instances)
 
     def __attrs_post_init__(self) -> None:
-        self._check_indices()
+        self._check_instances()
         if not self.order:
             self.order = self._auto_order()
         else:
@@ -96,21 +96,21 @@ class GNIndexInstance:
     ) -> None:
         if not order:
             order = self.order
-        indices = [self.dict[name] for name in order if name in self.dict]
-        if len(self._indices) != len(indices):
+        values = [self.dict[name] for name in order if name in self.dict]
+        if len(self._instances) != len(values):
             names = set(self.dict.keys()) - set(order)
             for name in names:
                 if rest2end:
-                    indices.append(self.dict[name])
+                    values.append(self.dict[name])
                 else:
-                    indices.insert(0, self.dict[name])
-        self._indices = tuple(indices)
+                    values.insert(0, self.dict[name])
+        self._instances = tuple(values)
 
     def _create_dict(self) -> GIndexNameDict:
-        return GIndexNameDict({val.name: val for val in self._indices})
+        return GIndexNameDict({val.name: val for val in self._instances})
 
     def _auto_order(self) -> tuple:
-        return (True,) + tuple(val.name.s for val in self._indices)
+        return (True,) + tuple(val.name.s for val in self._instances)
 
     def _check_order(self, order: Sequence) -> None:
         if not isinstance(order, Sequence):
@@ -120,18 +120,18 @@ class GNIndexInstance:
         elif not isinstance(order, tuple):
             order = tuple(order)
 
-    def _check_indices(self) -> None:
-        if not isinstance(self._indices, (Sequence, set)):
+    def _check_instances(self) -> None:
+        if not isinstance(self._instances, (Sequence, set)):
             raise TypeError(
-                f"'indices' must be `Sequence`, but given '{type(self._indices)}'!"
+                f"'instances' must be `Sequence`, but given '{type(self._instances)}'!"
             )
-        elif not all(isinstance(x, GIndexInstance) for x in self._indices):
+        elif not all(isinstance(x, GIndexInstance) for x in self._instances):
             raise ValueError(
-                "'indices' must be `Sequence[GIndexInstance]`, "
-                f"but given '{self._indices}'!"
+                "'instances' must be `Sequence[GIndexInstance]`, "
+                f"but given '{self._instances}'!"
             )
-        elif not isinstance(self._indices, tuple):
-            self._indices = tuple(self._indices)
+        elif not isinstance(self._instances, tuple):
+            self._instances = tuple(self._instances)
 
     def formatwith(
         self,
@@ -214,69 +214,60 @@ class GNIndexInstance:
         )
 
     def size(self) -> int:
-        """Returns the size of the list with indices (number of variants)"""
-        return len(self._indices)
+        """Returns the size of the list with instances (number of variants)"""
+        return len(self._instances)
 
     def copy(self, deep: bool = False) -> GNIndexInstance:
         """Returns a copy of the object"""
-
-        if deep:
-            ret = GNIndexInstance(
-                indices=tuple(self._indices),
+        return (
+            GNIndexInstance(
+                instances=tuple(self._instances),
                 order=tuple(self.order),
                 sep=str(self.sep),
                 withname=bool(self.withname),
                 namemode=str(self.namemode),  # type: ignore
                 namesep=str(self.namesep),
             )
-        else:
-            ret = GNIndexInstance(
-                indices=self._indices,
+            if deep
+            else GNIndexInstance(
+                instances=self._instances,
                 order=self.order,
                 sep=self.sep,
                 withname=self.withname,
                 namemode=self.namemode,
                 namesep=self.namesep,
             )
-
-        if kwargs:
-            raise RuntimeError(f"GNIndexInstance.copy() unparsed arguments: {kwargs}")
-
-        return ret
+        )
 
     def copywith(self, **kwargs) -> GNIndexInstance:
         """Returns a copy of the object with updated fields from `kwargs`"""
-        if kwargs.pop("deep", True):
-            ret = GNIndexInstance(
-                indices=kwargs.pop("indices", tuple(self._indices)),
+        return (
+            GNIndexInstance(
+                instances=kwargs.pop("instances", tuple(self._instances)),
                 order=kwargs.pop("order", tuple(self.order)),
                 sep=kwargs.pop("sep", str(self.sep)),
                 withname=kwargs.pop("withname", bool(self.withname)),
                 namemode=kwargs.pop("namemode", str(self.namemode)),
                 namesep=kwargs.pop("namesep", str(self.namesep)),
             )
-        else:
-            ret = GNIndexInstance(
-                indices=kwargs.pop("indices", self._indices),
+            if kwargs.pop("deep", True)
+            else GNIndexInstance(
+                instances=kwargs.pop("instances", self._instances),
                 order=kwargs.pop("order", self.order),
                 sep=kwargs.pop("sep", self.sep),
                 withname=kwargs.pop("withname", self.withname),
                 namemode=kwargs.pop("namemode", self.namemode),
                 namesep=kwargs.pop("namesep", self.namesep),
             )
-
-        if kwargs:
-            raise RuntimeError(f"GNIndexInstance.copywith() unparsed arguments: {kwargs}")
-
-        return ret
+        )
 
     def __iter__(self) -> Iterator[GIndexInstance]:
-        yield from self._indices
+        yield from self._instances
 
     def __getitem__(self, key: int) -> GIndexInstance:
         if not isinstance(key, int):
             raise TypeError(f"'key' must be 'int', but given '{type(key)}'!")
-        return self._indices[key]
+        return self._instances[key]
 
 
 @define(hash=True, slots=True)
@@ -286,7 +277,7 @@ class GNIndex:
     (set of the 1-dim indices), the indices `order` and usefull methods
     """
 
-    _indices: Tuple[GIndex] = field(default=tuple(), alias='indices')
+    values: Tuple[GIndex] = field(default=tuple())
     order: tuple = field(default=tuple())
     sep: str = field(validator=instance_of(str), default="_")
     withname: bool = field(validator=instance_of(bool), default=False)
@@ -310,7 +301,7 @@ class GNIndex:
                 )
 
     def __attrs_post_init__(self) -> None:
-        self._check_indices()
+        self._check_values()
         if not self.order:
             self.order = self._auto_order()
         else:
@@ -318,7 +309,7 @@ class GNIndex:
         self.sort()
 
     def _auto_order(self) -> tuple:
-        return (True,) + tuple(val.name.s for val in self._indices)
+        return (True,) + tuple(val.name.s for val in self.values)
 
     def _check_order(self, order: Sequence) -> None:
         if not isinstance(order, Sequence):
@@ -328,21 +319,21 @@ class GNIndex:
         elif not isinstance(order, tuple):
             order = tuple(order)
 
-    def _check_indices(self) -> None:
-        if not isinstance(self._indices, (Sequence, set)):
+    def _check_values(self) -> None:
+        if not isinstance(self.values, (Sequence, set)):
             raise TypeError(
-                f"'indices' must be `Sequence`, but given '{type(self._indices)}'!"
+                f"'indices' must be `Sequence`, but given '{type(self.values)}'!"
             )
-        elif not all(isinstance(x, GIndex) for x in self._indices):
+        elif not all(isinstance(x, GIndex) for x in self.values):
             raise ValueError(
                 "'indices' must be `Sequence[GIndex]`, "
-                f"but given '{self._indices}'!"
+                f"but given '{self.values}'!"
             )
-        elif not isinstance(self._indices, tuple):
-            self._indices = tuple(self._indices)
+        elif not isinstance(self.values, tuple):
+            self.values = tuple(self.values)
 
     def _create_dict(self) -> GIndexNameDict:
-        return GIndexNameDict({val.name: val for val in self._indices})
+        return GIndexNameDict({val.name: val for val in self.values})
 
     def rest(
         self,
@@ -362,14 +353,14 @@ class GNIndex:
 
         return: A `GNIndex` with tuple of the rest indices
         """
-        if len(self._indices) == 0:
+        if len(self.values) == 0:
             return None
         if isinstance(names, (list, tuple, set)):
             return (
-                self.copywith(indices=indices)
+                self.copywith(values=values)
                 if len(names) != 0
                 and (
-                    indices := tuple(
+                    values := tuple(
                         self._rest(self.dict.copy(), names).values()
                     )
                 )
@@ -378,7 +369,7 @@ class GNIndex:
         elif isinstance(names, (str, GIndexName)):
             tmpdict = self.dict.copy()
             tmpdict.pop(names)
-            return self.copywith(indices=tuple(tmpdict.values()))
+            return self.copywith(values=tuple(tmpdict.values()))
         raise TypeError(
             f"'names' must be `Sequence[str]`, but given '{type(names)}'!"
         )
@@ -427,7 +418,7 @@ class GNIndex:
             res.extend(self.__split(name) for name in names)
         else:
             res.append(self.__split(names))
-        return self.copywith(indices=tuple(res))
+        return self.copywith(values=tuple(res))
 
     def __split(self, name: Any) -> GIndex:
         if not isinstance(name, (str, GIndexName)):
@@ -440,9 +431,9 @@ class GNIndex:
 
     def sub(self, names: tuple) -> GNIndex:
         return self.copywith(
-            indices=tuple(
+            values=tuple(
                 val
-                for val in self._indices
+                for val in self.values
                 if (val.name.s in names or val.name.f in names)
             )
         )
@@ -450,15 +441,15 @@ class GNIndex:
     subindex = sub
 
     def union(self, *args, **kwargs) -> GNIndex:
-        indices = [*self._indices]
+        values = [*self.values]
         for arg in args:
             if not isinstance(arg, GNIndex):
                 raise TypeError(
                     f"'args' must be `GNIndex`, but given '{type(arg)}'"
                 )
 
-            indices.extend(index for index in arg._indices if index not in indices)
-        return self.copywith(indices=indices, **kwargs)
+            values.extend(value for value in arg.values if value not in values)
+        return self.copywith(values=values, **kwargs)
 
     def __add__(self, right: GNIndex) -> GNIndex:
         if not isinstance(right, GNIndex):
@@ -470,7 +461,7 @@ class GNIndex:
                 "'right' must have the same `order` as the left,"
                 f"but given '{self.order=}', '{right.order=}'"
             )
-        return self.copywith(indices=set(self._indices + right._indices))
+        return self.copywith(values=set(self.values + right.values))
 
     def __or__(self, right: GNIndex) -> GNIndex:
         return self.__add__(right)
@@ -485,7 +476,7 @@ class GNIndex:
                 "'right' must have the same `order` as the left,"
                 f"but given '{self.order=}', '{right.order=}'"
             )
-        return self.copywith(indices=set(self._indices) - set(right._indices))
+        return self.copywith(values=set(self.values) - set(right.values))
 
     def __xor__(self, right: GNIndex) -> GNIndex:
         return self.__sub__(right)
@@ -494,35 +485,35 @@ class GNIndex:
         if not order:
             order = self.order
         tmpdict = self.dict.copy()
-        indices = [tmpdict.pop(name) for name in order if name in tmpdict]
-        if idxs := tmpdict.values():
-            indices.extend(idxs)
-        self._indices = tuple(indices)
+        values = [tmpdict.pop(name) for name in order if name in tmpdict]
+        if vals := tmpdict.values():
+            values.extend(vals)
+        self.values = tuple(values)
 
     def dim(self) -> int:
         """Returns the dimension of the index (size of the indices list)"""
-        return len(self._indices)
+        return len(self.values)
 
     def instances(self) -> Tuple[Tuple[GNIndexInstance, ...], ...]:
         """Returns a tuple of the indices instances tuples (2D version)"""
-        return tuple(ind.instances() for ind in self._indices)
+        return tuple(ind.instances() for ind in self.values)
 
     def instances1d(self) -> Tuple[GNIndexInstance, ...]:
         """Returns a tuple of the indices instances (1D version)"""
-        return tuple(inst for ind in self._indices for inst in ind.instances())
+        return tuple(inst for ind in self.values for inst in ind.instances())
 
     def names(self) -> tuple:
-        return tuple(val.name for val in self._indices)
+        return tuple(val.name for val in self.values)
 
     def names1d(
         self, namemode: Literal["s", "f", "short", "full"] = "s"
     ) -> tuple:
-        return tuple(val.name[namemode] for val in self._indices)
+        return tuple(val.name[namemode] for val in self.values)
 
     def __iter__(self) -> Iterator[GNIndexInstance]:
         for val in product(*self.instances()):
             yield GNIndexInstance(
-                indices=val,  # type:ignore
+                instances=val,  # type:ignore
                 order=self.order,
                 sep=self.sep,
                 withname=self.withname,
@@ -532,53 +523,44 @@ class GNIndex:
 
     def copy(self, deep: bool = False) -> GNIndex:
         """Returns a copy of the object"""
-        if deep:
-            ret = GNIndex(
-                indices=tuple(self._indices),
+        return (
+            GNIndex(
+                values=tuple(self.values),
                 order=tuple(self.order),
                 sep=str(self.sep),
                 withname=bool(self.withname),
                 namemode=str(self.namemode),  # type:ignore
                 namesep=str(self.namesep),
             )
-        else:
-            ret = GNIndex(
-                indices=self._indices,
+            if deep
+            else GNIndex(
+                values=self.values,
                 order=self.order,
                 sep=self.sep,
                 withname=self.withname,
                 namemode=self.namemode,
                 namesep=self.namesep,
             )
-
-        if kwargs:
-            raise RuntimeError(f"GNIndex.copy() unparsed arguments: {kwargs}")
-
-        return ret
+        )
 
     def copywith(self, **kwargs) -> GNIndex:
         """Returns a copy of the object with updated fields from `kwargs`"""
-
-        if kwargs.pop("deep", True):
-            ret = GNIndex(
-                indices=kwargs.pop("indices", tuple(self._indices)),
+        return (
+            GNIndex(
+                values=kwargs.pop("values", tuple(self.values)),
                 order=kwargs.pop("order", tuple(self.order)),
                 sep=kwargs.pop("sep", str(self.sep)),
                 withname=kwargs.pop("withname", bool(self.withname)),
                 namemode=kwargs.pop("namemode", str(self.namemode)),
                 namesep=kwargs.pop("namesep", str(self.namesep)),
             )
-        else:
-            ret = GNIndex(
-                indices=kwargs.pop("indices", self._indices),
+            if kwargs.pop("deep", True)
+            else GNIndex(
+                values=kwargs.pop("values", self.values),
                 order=kwargs.pop("order", self.order),
                 sep=kwargs.pop("sep", self.sep),
                 withname=kwargs.pop("withname", self.withname),
                 namemode=kwargs.pop("namemode", self.namemode),
                 namesep=kwargs.pop("namesep", self.namesep),
             )
-
-        if kwargs:
-            raise RuntimeError(f"GNIndex.copywith() unparsed arguments: {kwargs}")
-
-        return ret
+        )
diff --git a/subtrees/dagflow/subtrees/gindex/tests/test_gindex.py b/subtrees/dagflow/subtrees/gindex/tests/test_gindex.py
index f0e24bdf4d353d2fc31e6cb48871740b611f6095..ae4b1912e3b134060834bda2dce40ae5c7850981 100644
--- a/subtrees/dagflow/subtrees/gindex/tests/test_gindex.py
+++ b/subtrees/dagflow/subtrees/gindex/tests/test_gindex.py
@@ -109,7 +109,7 @@ def test_gindex_format(detector01, detector_name):
 
 
 def test_gnindex(detector, subdetector):
-    nind = GNIndexInstance(indices=(detector[0], subdetector[0]))
+    nind = GNIndexInstance(instances=(detector[0], subdetector[0]))
     assert nind
     assert nind.format(string="Spectrum") == (
         f"Spectrum{detector[0].sep}{detector[0].value}"
@@ -130,7 +130,7 @@ def test_gnindex(detector, subdetector):
 
 def test_gnindex_iter(detector, subdetector, index_values):
     sep = "_"
-    nind = GNIndex(indices=(detector, subdetector), sep=sep)
+    nind = GNIndex(values=(detector, subdetector), sep=sep)
     nvals = tuple(
         sep.join(pair) for pair in product(index_values, index_values)
     )
@@ -141,29 +141,29 @@ def test_gnindex_iter(detector, subdetector, index_values):
 
 def test_gnindex_arithmetic(detector, subdetector):
     gorder = ("det", "subdet", "i")
-    nind = GNIndex(indices=(detector, subdetector), order=gorder)
+    nind = GNIndex(values=(detector, subdetector), order=gorder)
     ind = GIndex(GIndexName("i", "index"), ("1", "2"))
-    nind2 = GNIndex(indices=(detector, ind), order=gorder)
-    nind3 = GNIndex(indices=(ind,), order=gorder)
+    nind2 = GNIndex(values=(detector, ind), order=gorder)
+    nind3 = GNIndex(values=(ind,), order=gorder)
     # `sub` and `-`
-    assert all(x - x == x.copywith(indices=tuple()) for x in (nind, nind2))
+    assert all(x - x == x.copywith(values=tuple()) for x in (nind, nind2))
     assert all(
-        x.sub(("new",)) == x.copywith(indices=tuple()) for x in (nind, nind2)
+        x.sub(("new",)) == x.copywith(values=tuple()) for x in (nind, nind2)
     )
     assert all(x.sub(x.names1d()) == x for x in (nind, nind2))
-    assert nind2.sub(("i",)) == nind.copywith(indices=(ind,))
+    assert nind2.sub(("i",)) == nind.copywith(values=(ind,))
     # `merge` and  `+`
     assert all(
-        len(x._indices) == len(nind._indices)
-        and set(x._indices) == set(nind._indices)
+        len(x.values) == len(nind.values)
+        and set(x.values) == set(nind.values)
         and x.order == gorder
         for x in (nind + nind, nind | nind, nind.union(nind))
     )
     assert all(
         (y := nind + nind2) and y == x and y.order == gorder
         for x in (
-            nind.copywith(indices={detector, subdetector, ind}),
-            nind2.copywith(indices={detector, subdetector, ind}),
+            nind.copywith(values={detector, subdetector, ind}),
+            nind2.copywith(values={detector, subdetector, ind}),
             nind.union(nind3),
             nind | nind2,
         )
@@ -176,7 +176,7 @@ def test_gnindex_rest_split(
     gorder = ("det", "subdet", "i")
     iname = GIndexName("i", "index")
     ind = GIndex(iname, ("1", "2"))
-    nind = GNIndex(indices=(detector, subdetector, ind), order=gorder)
+    nind = GNIndex(values=(detector, subdetector, ind), order=gorder)
     # test `dict`
     assert all(
         x in nind.dict
@@ -197,22 +197,22 @@ def test_gnindex_rest_split(
     ):
         assert isinstance(elem, GNIndex)
         assert elem.order == nind.order
-        assert elem._indices == (subdetector, ind)
+        assert elem.values == (subdetector, ind)
     for elem in (
         nind.rest(val) for val in (iname, "i", "index", ("i",), ("index",))
     ):
         assert isinstance(elem, GNIndex)
         assert elem.order == nind.order
-        assert elem._indices == (detector, subdetector)
+        assert elem.values == (detector, subdetector)
     # test `split`
     assert nind, None == nind.split(nind.names1d())
-    assert nind.copywith(indices=tuple()), nind == nind.split(tuple())
+    assert nind.copywith(values=tuple()), nind == nind.split(tuple())
     for elem, rest in (
         nind.split(val) for val in (("det",), ("detector",), (detector_name,))
     ):
         assert isinstance(elem, GNIndex) and isinstance(rest, GNIndex)
         assert elem.order == nind.order and rest.order == nind.order
-        assert elem._indices == (detector,) and rest._indices == (subdetector, ind)
+        assert elem.values == (detector,) and rest.values == (subdetector, ind)
     for elem, rest in (
         nind.split(val)
         for val in (
@@ -223,7 +223,7 @@ def test_gnindex_rest_split(
     ):
         assert isinstance(elem, GNIndex) and isinstance(rest, GNIndex)
         assert elem.order == nind.order and rest.order == nind.order
-        assert elem._indices == (subdetector,) and rest._indices == (detector, ind)
+        assert elem.values == (subdetector,) and rest.values == (detector, ind)
     for elem, rest in (
         nind.split(val)
         for val in (
@@ -234,12 +234,12 @@ def test_gnindex_rest_split(
     ):
         assert isinstance(elem, GNIndex) and isinstance(rest, GNIndex)
         assert elem.order == nind.order and rest.order == nind.order
-        assert elem._indices == (detector, subdetector) and rest._indices == (ind,)
+        assert elem.values == (detector, subdetector) and rest.values == (ind,)
 
 
 def test_gnindex_order_exception(detector, subdetector, detector_name):
     orders = (object, 12, {4, 3, 2}, detector_name, detector)
     with pytest.raises(TypeError):
         for order in orders:
-            GNIndexInstance(indices=(detector[0], subdetector[0]), order=order)  # type: ignore
+            GNIndexInstance(instances=(detector[0], subdetector[0]), order=order)  # type: ignore
             GNIndex(values=(detector, subdetector), order=order)  # type: ignore
diff --git a/subtrees/dagflow/test/nodes/test_Integrator.py b/subtrees/dagflow/test/nodes/test_Integrator.py
index 1463982a4278ad6febae798d86bb106b759079a5..23459b5a0ced4755b8996d3454d62743bb809216 100644
--- a/subtrees/dagflow/test/nodes/test_Integrator.py
+++ b/subtrees/dagflow/test/nodes/test_Integrator.py
@@ -3,126 +3,197 @@ from dagflow.exception import TypeFunctionError
 from dagflow.graph import Graph
 from dagflow.lib.Array import Array
 from dagflow.lib.Integrator import Integrator
+from dagflow.lib.IntegratorSampler import IntegratorSampler
+from dagflow.lib.NodeManyToOne import NodeManyToOne
+from dagflow.lib.NodeOneToOne import NodeOneToOne
+from dagflow.lib.trigonometry import Cos, Sin
+from numpy import allclose, linspace, meshgrid, pi, vectorize
+from pytest import mark, raises
 
-from pytest import raises
 
-
-def test_Integrator_00(debug_graph):
+@mark.parametrize("align", ("left", "center", "right"))
+def test_Integrator_rect_center(align, debug_graph):
     with Graph(debug=debug_graph, close=True):
-        arr1 = Array("array", [1.0, 2.0, 3.0])
-        arr2 = Array("array", [3.0, 2.0, 1.0])
-        weights = Array("weights", [2.0, 2.0, 2.0])
-        ordersX = Array("ordersX", [1, 1, 1])
-        integrator = Integrator("integrator", mode="1d")
-        arr1 >> integrator
-        arr2 >> integrator
-        weights >> integrator("weights")
+        npoints = 10
+        edges = Array("edges", linspace(0, pi, npoints + 1))
+        ordersX = Array("ordersX", [1000] * npoints, edges=edges["array"])
+        A = Array("A", edges._data[:-1])
+        B = Array("B", edges._data[1:])
+        sampler = IntegratorSampler("sampler", mode="rect", align=align)
+        integrator = Integrator("integrator")
+        cosf = Cos("cos")
+        sinf = Sin("sin")
+        ordersX >> sampler("ordersX")
+        sampler.outputs["x"] >> cosf
+        A >> sinf
+        B >> sinf
+        sampler.outputs["weights"] >> integrator("weights")
+        cosf.outputs[0] >> integrator
         ordersX >> integrator("ordersX")
-    assert (integrator.outputs[0].data == [2, 4, 6]).all()
-    assert (integrator.outputs[1].data == [6, 4, 2]).all()
+    res = sinf.outputs[1].data - sinf.outputs[0].data
+    assert allclose(integrator.outputs[0].data, res, atol=1e-4)
+    assert integrator.outputs[0].dd.axes_edges == [edges["array"]]
 
 
-def test_Integrator_01(debug_graph):
+def test_Integrator_trap(debug_graph):
     with Graph(debug=debug_graph, close=True):
-        arr1 = Array("array", [1.0, 2.0, 3.0])
-        arr2 = Array("array", [3.0, 2.0, 1.0])
-        weights = Array("weights", [2.0, 2.0, 2.0])
-        ordersX = Array("ordersX", [2, 0, 1])
-        integrator = Integrator("integrator", mode="1d")
-        arr1 >> integrator
-        arr2 >> integrator
-        weights >> integrator("weights")
+        npoints = 10
+        edges = Array("edges", linspace(0, pi, npoints + 1))
+        ordersX = Array("ordersX", [1000] * npoints, edges=edges["array"])
+        A = Array("A", edges._data[:-1])
+        B = Array("B", edges._data[1:])
+        sampler = IntegratorSampler("sampler", mode="trap")
+        integrator = Integrator("integrator")
+        cosf = Cos("cos")
+        sinf = Sin("sin")
+        ordersX >> sampler("ordersX")
+        sampler.outputs["x"] >> cosf
+        A >> sinf
+        B >> sinf
+        sampler.outputs["weights"] >> integrator("weights")
+        cosf.outputs[0] >> integrator
         ordersX >> integrator("ordersX")
-    assert (integrator.outputs[0].data == [6, 0, 6]).all()
-    assert (integrator.outputs[1].data == [10, 0, 2]).all()
+    res = sinf.outputs[1].data - sinf.outputs[0].data
+    assert allclose(integrator.outputs[0].data, res, atol=1e-2)
+    assert integrator.outputs[0].dd.axes_edges == [edges["array"]]
 
 
-def test_Integrator_02(debug_graph):
-    arr123 = [1.0, 2.0, 3.0]
-    with Graph(debug=debug_graph, close=True):
-        arr1 = Array("array", [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
-        arr2 = Array("array", [arr123, arr123])
-        weights = Array("weights", [[1.0, 1.0, 1.0], arr123])
-        ordersX = Array("ordersX", [1, 1])
-        ordersY = Array("ordersY", [1, 1, 1])
-        integrator = Integrator("integrator", mode="2d")
-        arr1 >> integrator
-        arr2 >> integrator
-        weights >> integrator("weights")
-        ordersX >> integrator("ordersX")
-        ordersY >> integrator("ordersY")
-    assert (integrator.outputs[0].data == [[1, 1, 1], [1, 2, 3]]).all()
-    assert (integrator.outputs[1].data == [[1, 2, 3], [1, 4, 9]]).all()
+def f0(x: float) -> float:
+    return 4 * x**3 + 3 * x**2 + 2 * x - 1
 
 
-def test_Integrator_03(debug_graph):
-    arr123 = [1.0, 2.0, 3.0]
+def fres(x: float) -> float:
+    return x**4 + x**3 + x**2 - x
+
+
+vecF0 = vectorize(f0)
+vecFres = vectorize(fres)
+
+
+class Polynomial0(NodeOneToOne):
+    def _fcn(self, _, inputs, outputs):
+        for inp, out in zip(inputs, outputs):
+            out.data[:] = vecF0(inp.data)
+        return list(outputs.iter_data())
+
+
+class PolynomialRes(NodeOneToOne):
+    def _fcn(self, _, inputs, outputs):
+        for inp, out in zip(inputs, outputs):
+            out.data[:] = vecFres(inp.data)
+        return list(outputs.iter_data())
+
+
+def test_Integrator_gl1d(debug_graph):
     with Graph(debug=debug_graph, close=True):
-        arr1 = Array("array", [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
-        arr2 = Array("array", [arr123, arr123])
-        weights = Array("weights", [[1.0, 1.0, 1.0], arr123])
-        ordersX = Array("ordersX", [1, 1])
-        ordersY = Array("ordersY", [1, 2, 0])
-        integrator = Integrator("integrator", mode="2d")
-        arr1 >> integrator
-        arr2 >> integrator
-        weights >> integrator("weights")
+        npoints = 10
+        edges = Array("edges", linspace(0, 10, npoints + 1))
+        ordersX = Array("ordersX", [2] * npoints, edges=edges["array"])
+        A = Array("A", edges._data[:-1])
+        B = Array("B", edges._data[1:])
+        sampler = IntegratorSampler("sampler", mode="gl")
+        integrator = Integrator("integrator")
+        poly0 = Polynomial0("poly0")
+        polyres = PolynomialRes("polyres")
+        ordersX >> sampler("ordersX")
+        sampler.outputs["x"] >> poly0
+        A >> polyres
+        B >> polyres
+        sampler.outputs["weights"] >> integrator("weights")
+        poly0.outputs[0] >> integrator
         ordersX >> integrator("ordersX")
-        ordersY >> integrator("ordersY")
-    assert (integrator.outputs[0].data == [[1, 2, 0], [1, 5, 0]]).all()
-    assert (integrator.outputs[1].data == [[1, 5, 0], [1, 13, 0]]).all()
+    res = polyres.outputs[1].data - polyres.outputs[0].data
+    assert allclose(integrator.outputs[0].data, res, atol=1e-10)
+    assert integrator.outputs[0].dd.axes_edges == [edges["array"]]
 
 
-def test_Integrator_04(debug_graph):
-    arr123 = [1.0, 2.0, 3.0]
+def test_Integrator_gl2d(debug_graph):
+    class Polynomial1(NodeManyToOne):
+        def _fcn(self, _, inputs, outputs):
+            outputs["result"].data[:] = vecF0(inputs[1].data) * vecF0(
+                inputs[0].data
+            )
+            return list(outputs.iter_data())
+
     with Graph(debug=debug_graph, close=True):
-        arr1 = Array("array", [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
-        arr2 = Array("array", [arr123, arr123])
-        weights = Array("weights", [[1.0, 1.0, 1.0], arr123])
-        ordersX = Array("ordersX", [0, 2])
-        ordersY = Array("ordersY", [1, 1, 1])
-        integrator = Integrator("integrator", mode="2d")
+        npointsX, npointsY = 10, 20
+        edgesX = Array("edgesX", linspace(0, 10, npointsX + 1))
+        edgesY = Array("edgesY", linspace(0, 10, npointsY + 1))
+        ordersX = Array("ordersX", [2] * npointsX, edges=edgesX["array"])
+        ordersY = Array("ordersY", [2] * npointsY, edges=edgesY["array"])
+        x0, y0 = meshgrid(edgesX._data[:-1], edgesY._data[:-1], indexing="ij")
+        x1, y1 = meshgrid(edgesX._data[1:], edgesY._data[1:], indexing="ij")
+        X0, X1 = Array("X0", x0), Array("X1", x1)
+        Y0, Y1 = Array("Y0", y0), Array("Y1", y1)
+        sampler = IntegratorSampler("sampler", mode="2d")
+        integrator = Integrator("integrator")
+        poly0 = Polynomial1("poly0")
+        polyres = PolynomialRes("polyres")
+        ordersX >> sampler("ordersX")
+        ordersY >> sampler("ordersY")
+        sampler.outputs["x"] >> poly0
+        sampler.outputs["y"] >> poly0
+        X0 >> polyres
+        X1 >> polyres
+        Y0 >> polyres
+        Y1 >> polyres
+        sampler.outputs["weights"] >> integrator("weights")
+        poly0.outputs[0] >> integrator
+        ordersX >> integrator("ordersX")
+        ordersY >> integrator("ordersY")
+    res = (polyres.outputs[1].data - polyres.outputs[0].data) * (
+        polyres.outputs[3].data - polyres.outputs[2].data
+    )
+    assert allclose(integrator.outputs[0].data, res, atol=1e-10)
+    assert integrator.outputs[0].dd.axes_edges == [
+        edgesX["array"],
+        edgesY["array"],
+    ]
+
+
+# test wrong ordersX: edges not given
+def test_Integrator_edges_0(debug_graph):
+    arr = [1.0, 2.0, 3.0]
+    with Graph(debug=debug_graph):
+        arr1 = Array("array", arr)
+        weights = Array("weights", arr)
+        ordersX = Array("ordersX", [1, 2, 3])
+        integrator = Integrator("integrator")
         arr1 >> integrator
-        arr2 >> integrator
         weights >> integrator("weights")
         ordersX >> integrator("ordersX")
-        ordersY >> integrator("ordersY")
-    assert (integrator.outputs[0].data == [[0, 0, 0], [2, 3, 4]]).all()
-    assert (integrator.outputs[1].data == [[0, 0, 0], [2, 6, 12]]).all()
+    with raises(TypeFunctionError):
+        integrator.close()
 
 
-def test_Integrator_05(debug_graph):
-    unity = [1.0, 1.0, 1.0]
-    with Graph(debug=debug_graph, close=True):
-        arr1 = Array(
-            "array", [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
-        )
-        arr2 = Array("array", [unity, unity, unity])
-        weights = Array("weights", [unity, unity, unity])
-        ordersX = Array("ordersX", [1, 1, 1])
-        ordersY = Array("ordersY", [1, 0, 2])
-        integrator = Integrator("integrator", mode="2d")
+# test wrong ordersX: edges is wrong
+def test_Integrator_edges_1(debug_graph):
+    arr = [1.0, 2.0, 3.0]
+    with Graph(debug=debug_graph, close=False):
+        edges = Array("edges", [0.0, 1.0, 2.0])
+        with raises(TypeFunctionError):
+            arr1 = Array("array", arr, edges=edges["array"])
+        edges = Array("edges", [0.0, 1.0, 2.0, 3.0])
+        arr1 = Array("array", arr, edges=edges["array"])
+        weights = Array("weights", arr)
+        ordersX = Array("ordersX", [1, 2, 3])
+        integrator = Integrator("integrator")
         arr1 >> integrator
-        arr2 >> integrator
         weights >> integrator("weights")
         ordersX >> integrator("ordersX")
-        ordersY >> integrator("ordersY")
-    assert (
-        integrator.outputs[0].data == [[1, 0, 0], [0, 0, 1], [0, 0, 1]]
-    ).all()
-    assert (
-        integrator.outputs[1].data == [[1, 0, 2], [1, 0, 2], [1, 0, 2]]
-    ).all()
+    with raises(TypeFunctionError):
+        integrator.close()
 
 
 # test wrong ordersX: sum(ordersX) != shape
-def test_Integrator_06(debug_graph):
+def test_Integrator_02(debug_graph):
     arr = [1.0, 2.0, 3.0]
     with Graph(debug=debug_graph):
-        arr1 = Array("array", arr)
-        weights = Array("weights", arr)
-        ordersX = Array("ordersX", [1, 2, 3])
-        integrator = Integrator("integrator", mode="1d")
+        edges = Array("edges", [0.0, 1.0, 2.0, 3.0])
+        arr1 = Array("array", arr, edges=edges["array"])
+        weights = Array("weights", arr, edges=edges["array"])
+        ordersX = Array("ordersX", [1, 2, 3], edges=edges["array"])
+        integrator = Integrator("integrator")
         arr1 >> integrator
         weights >> integrator("weights")
         ordersX >> integrator("ordersX")
@@ -131,14 +202,20 @@ def test_Integrator_06(debug_graph):
 
 
 # test wrong ordersX: sum(ordersX[i]) != shape[i]
-def test_Integrator_07(debug_graph):
+def test_Integrator_03(debug_graph):
     arr = [1.0, 2.0, 3.0]
-    with Graph(debug=debug_graph):
-        arr1 = Array("array", [arr, arr])
-        weights = Array("weights", [arr, arr])
-        ordersX = Array("ordersX", [1, 3])
-        ordersY = Array("ordersY", [1, 0, 0])
-        integrator = Integrator("integrator", mode="2d")
+    with Graph(debug=debug_graph, close=False):
+        edgesX = Array("edgesX", [-1.0, 0.0, 1.0])
+        edgesY = Array("edgesY", [-2.0, -1, 0.0, 1.0])
+        arr1 = Array(
+            "array", [arr, arr], edges=[edgesX["array"], edgesY["array"]]
+        )
+        weights = Array(
+            "weights", [arr, arr], edges=[edgesX["array"], edgesY["array"]]
+        )
+        ordersX = Array("ordersX", [1, 3], edges=edgesX["array"])
+        ordersY = Array("ordersY", [1, 0, 0], edges=edgesY["array"])
+        integrator = Integrator("integrator")
         arr1 >> integrator
         weights >> integrator("weights")
         ordersX >> integrator("ordersX")
@@ -148,14 +225,14 @@ def test_Integrator_07(debug_graph):
 
 
 # test wrong shape
-def test_Integrator_08(debug_graph):
+def test_Integrator_04(debug_graph):
     with Graph(debug=debug_graph, close=False):
         arr1 = Array("array", [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
         arr2 = Array("array", [[1.0, 2.0, 3.0, 4.0], [1.0, 2.0, 3.0, 4.0]])
         weights = Array("weights", [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
         ordersX = Array("ordersX", [0, 2])
         ordersY = Array("ordersY", [1, 1, 1, 3])
-        integrator = Integrator("integrator", mode="2d")
+        integrator = Integrator("integrator")
         arr1 >> integrator
         arr2 >> integrator
         weights >> integrator("weights")