diff --git a/subtrees/dagflow/.gitignore b/subtrees/dagflow/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..f11cd0888ffc8c5026b90c05182545a12c042b41
--- /dev/null
+++ b/subtrees/dagflow/.gitignore
@@ -0,0 +1,54 @@
+output
+build
+_build
+__pycache__
+
+# Local configuration files and folders
+config_local
+.local/
+.vscode
+.direnv/
+.envrc
+.fish_functions
+matplotlibrc
+.coverage
+cov.*
+
+# Transient files (vim, etc)
+*~
+*.swp
+\#*
+.\#*
+.cache
+.lark_cache*
+.lark-cache*
+*.bak
+*.backup
+
+# vim
+UltiSnips/*
+.viminfo
+.vimrc
+.nvimrc
+*.vim
+.ycm_extra_conf.py
+
+# Latex
+*.aux
+*.pda
+*.toc
+*.log
+*.fdb*
+*.out
+*.pdf
+*.png
+*.blg
+*.snm
+*.nav
+
+# Code
+tags
+*.pyc
+*.o
+coverage.json
diff --git a/subtrees/dagflow/.gitlab-ci.yml b/subtrees/dagflow/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..cd58f936bd5dacc3d4934dfc95b53924ea02d579
--- /dev/null
+++ b/subtrees/dagflow/.gitlab-ci.yml
@@ -0,0 +1,24 @@
+stages:
+    - tests
+
+tests:
+    image: git.jinr.ru:5005/gna/gna-base-docker-image:latest
+    stage: tests
+
+    script:
+    - python3 -m pip install -r requirements.txt
+    - coverage run --source=. -m pytest
+    - coverage report
+    - coverage xml
+    coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
+    artifacts:
+        paths:
+            - test/output
+        reports:
+            coverage_report:
+                coverage_format: cobertura
+                path: coverage.xml
+    only:
+        - master
+        - update-to-data-preservation
+        - merge_requests
diff --git a/subtrees/dagflow/README.md b/subtrees/dagflow/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..d691822eb40f841818fa3a1b0abe16aeb134022b
--- /dev/null
+++ b/subtrees/dagflow/README.md
@@ -0,0 +1,80 @@
+# Summary
+
+[![python](https://img.shields.io/badge/python-3.10-purple.svg)](https://www.python.org/)
+[![pipeline](https://git.jinr.ru/dag-computing/dag-flow/badges/master/pipeline.svg)](https://git.jinr.ru/dag-computing/dag-flow/commits/master)
+[![coverage report](https://git.jinr.ru/dag-computing/dag-flow/badges/master/coverage.svg)](https://git.jinr.ru/dag-computing/dag-flow/-/commits/master)
+<!--- Uncomment here after adding docs!
+[![pages](https://img.shields.io/badge/pages-link-white.svg)](http://dag-computing.pages.jinr.ru/dag-flow)
+-->
+
+DAGFlow is a python implementation of dataflow programming with lazy graph evaluation.
+
+Main goals:
+*  Lazy evaluated directed acyclic graph
+*  Concise connection syntax
+*  Plotting with graphviz
+*  Flexibility: the goal of DAGFlow is not to be efficient, but rather flexible
+
+Here is an animation showing the process of graph evaluation:
+
+![Image](example/graph_evaluation.gif)
+
+# Minimal example
+An example of a small graph calculating the formula `(n1 + n2 + n3) * n4` may be
+found in the [example](example/example.py):
+```python
+#!/usr/bin/env python
+
+from dagflow.node_deco import NodeClass
+from dagflow.graph import Graph
+from dagflow.graphviz import savegraph
+from dagflow.input_extra import MissingInputAddOne
+import numpy as N
+
+# Node functions
+@NodeClass(output='array')
+def Array(node, inputs, outputs):
+    """Creates a node with a single data output holding a predefined array"""
+    outputs[0].data = N.arange(5, dtype='d')
+
+@NodeClass(missing_input_handler=MissingInputAddOne(output_fmt='result'))
+def Adder(node, inputs, outputs):
+    """Adds all the inputs together"""
+    out = None
+    for input in inputs:
+        if out is None:
+            out = outputs[0].data = input.data.copy()
+        else:
+            out += input.data
+
+@NodeClass(missing_input_handler=MissingInputAddOne(output_fmt='result'))
+def Multiplier(node, inputs, outputs):
+    """Multiplies all the inputs together"""
+    out = None
+    for input in inputs:
+        if out is None:
+            out = outputs[0].data = input.data.copy()
+        else:
+            out *= input.data
+
+# The actual code
+with Graph() as graph:
+    (in1, in2, in3, in4) = [Array(name) for name in ['n1', 'n2', 'n3', 'n4']]
+    s = Adder('add')
+    m = Multiplier('mul')
+
+(in1, in2, in3) >> s
+(in4, s) >> m
+
+print('Result is:', m.outputs["result"].data)
+savegraph(graph, 'output/dagflow_example.png')
+```
+
+The code produces the following graph:
+
+![Image](example/dagflow_example.png)
+
+For `n1 = n2 = n3 = n4 = [0, 1, 2, 3, 4]` the output is:
+```
+Result is: [ 0.  3. 12. 27. 48.]
+```
diff --git a/subtrees/dagflow/TODO.md b/subtrees/dagflow/TODO.md
new file mode 100644
index 0000000000000000000000000000000000000000..502b50a7ae594b309f53aa52e46c1efab793367a
--- /dev/null
+++ b/subtrees/dagflow/TODO.md
@@ -0,0 +1,72 @@
+# Update to Daya Bay data preservation
+
+## Common tasks
+
+- [x] Input renaming: `output -> input`, `corresponding_output -> output`
+- [x] Automatic creation of outputs is **restricted**
+- [x] Parentheses operator `()` as getter `[]` of inputs, but with creation
+of the input, instead of `KeyError`
+- [x] Implement flexible shift operators `>>` and `<<` or *use the current ones*?
+  - Now using the current implementation.
+- [x] Implement `hooks`:
+  - At an input connection
+  - At a function evaluation
+- [x] Two types of `Exceptions`:
+  - connection and type checking (`non-critical` exception)
+  - call function (`critical`)
+- [x] Recursive close of a graph
+- [x] Solve troubles with a connection of an input or output and closure
+- [x] Implement 2 descriptors for the `Output`:
+  - `Shape` and `dtype`
+  - `Allocation` and `view`
+- [x] Move `handlers` to the `binding` stage
+- [x] Memory allocation:
+  - See `core/transformation/TransformationEntry.cc` method `updateTypes()`
+- [x] Datatype: `allocatable`, `non-alloc`
+- [x] Datadescr: `dtype`, `shape`
+- [x] Dict as `kwargs`:
+  - `ws = WeightedSum()`;
+  - `{'weight': data} >> ws` is the same as `data >> ws('weight')`
+- [x] Logging
+- [x] Inputs problem: there is a difference between node and output inputs
+- [x] Update naming for the second order `input` and `output`: `parent`, `child`
+- [x] `iinput` is a meta data, do not use in allocation and closure;
+use `Node` to do this stuff; do not use second order `input` and `output`
+- [x] Loops scheme:
+  1) Close:
+      - Typing:
+        - Update types
+        - Update shapes
+      - Allocation
+  2) Graph:
+      - Node:
+        - Inputs
+        - Outputs
+  3) See <https://hackmd.io/mMNrlOp7Q7i9wkVFvP4W4Q>
+- [x] `Tainted`
+- [x] Fix decorators
+- [x] Move common checks in `typefunc` into standalone module
+- [ ] Update wrapping
+
+## Transformations
+
+- [x] Implement some simple transformations with only `args` in the function:
+`Sum`, `Product`, `Division`, ...
+- [x] Implement some simple transformations with `args` and `kwargs`:
+`WeightedSum` with `weight`, ...
+- [x] Check the style of the implementation
+- [x] Update the inputs checks before evaluation
+- [x] Concatenation
+- [x] Update `WeightedSum`
+- [ ] Implement `Integrator`
+
+## Tests
+
+- [x] Test the graph workflow with transformations
+- [x] Test opening and closing of several graphs
+
+## Questions and suggestions
+
+- [x] Should we use only `numpy.ndarray` or also `numpy.number` for a single element?
+  1) only `numpy.ndarray`!
+- [ ] Should we implement `zero`, `unity` objects with automatic dimension?
diff --git a/subtrees/dagflow/conftest.py b/subtrees/dagflow/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e7b36a0544b50a29f1af6cfdeef050ee9cc1d22
--- /dev/null
+++ b/subtrees/dagflow/conftest.py
@@ -0,0 +1,37 @@
+from os import chdir, getcwd, mkdir
+from os.path import isdir
+
+from pytest import fixture, skip
+
+
+def pytest_sessionstart(session):
+    """
+    Called after the Session object has been created and
+    before performing collection and entering the run test loop.
+
+    Automatically changes the path to `dag-flow/test` and creates the `test/output` dir
+    """
+    path = getcwd()
+    lastdir = path.split("/")[-1]
+    if lastdir == "dag-flow":  # rootdir
+        chdir("./test")
+    elif lastdir in (
+        "dagflow",
+        "example",
+        "doc",
+        "docs",
+        "source",
+        "sources",
+    ):  # childdir
+        chdir("../test")
+    if not isdir("output"):
+        mkdir("output")
+
+
+def pytest_addoption(parser):
+    parser.addoption("--debug_graph", action="store_true", default=False)
+
+
+@fixture(scope="session")
+def debug_graph(request):
+    return request.config.option.debug_graph
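For reference, a minimal sketch of how the fixture is consumed; the test body and the `Graph(debug=...)` usage are illustrative only:

```python
# Hypothetical test (e.g. test/test_debug.py): pytest injects the
# session-scoped fixture by parameter name.
from dagflow.graph import Graph

def test_with_debug_flag(debug_graph):
    # debug_graph is True only when pytest is invoked with --debug_graph
    with Graph(debug=debug_graph) as graph:
        pass  # build and connect nodes here
```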
diff --git a/subtrees/dagflow/dagflow/__init__.py b/subtrees/dagflow/dagflow/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/subtrees/dagflow/dagflow/bundles/__init__.py b/subtrees/dagflow/dagflow/bundles/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/subtrees/dagflow/dagflow/bundles/load_variables.py b/subtrees/dagflow/dagflow/bundles/load_variables.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd9af0a5118ee367097062214dd553f72da15394
--- /dev/null
+++ b/subtrees/dagflow/dagflow/bundles/load_variables.py
@@ -0,0 +1,153 @@
+from dictwrapper.dictwrapper import DictWrapper
+# from storage.storage import Storage # To be used later
+
+from schema import Schema, Or, Optional, Use, And, Schema, SchemaError
+
+from ..tools.schema import NestedSchema, LoadFileWithExt, LoadYaml
+
+class ParsCfgHasProperFormat(object):
+    def validate(self, data: dict) -> dict:
+        format = data['format']
+        if isinstance(format, str):
+            nelements = 1
+        else:
+            nelements = len(format)
+
+        dtin = DictWrapper(data)
+        for key, subdata in dtin['variables'].walkitems():
+            if isinstance(subdata, tuple):
+                if len(subdata)==nelements: continue
+            else:
+                if nelements==1: continue
+
+            key = ".".join(str(k) for k in key)
+            raise SchemaError(f'Key "{key}" has value "{subdata}" inconsistent with format "{format}"')
+
+        return data
+
+IsNumber = Or(float, int, error='Invalid number "{}", expect int or float')
+IsNumberOrTuple = Or(IsNumber, (IsNumber,), error='Invalid number/tuple {}')
+IsLabel = Or({
+        'text': str,
+        Optional('latex'): str,
+        Optional('graph'): str,
+        Optional('mark'): str,
+        Optional('name'): str
+    },
+    And(str, Use(lambda s: {'text': s}), error='Invalid string: {}')
+)
+IsValuesDict = NestedSchema(IsNumberOrTuple)
+IsLabelsDict = NestedSchema(IsLabel, processdicts=True)
+def IsFormatOk(format):
+    if not isinstance(format, tuple):
+        return format=='value'
+
+    if len(format)==1:
+        f1,=format
+        return f1=='value'
+    else:
+        if len(format)==2:
+            # (value or central, sigma specification)
+            f1, f3 = format
+        elif len(format)==3:
+            # (value, central, sigma specification)
+            f1, f2, f3 = format
+
+            if f2 not in ('value', 'central') or f1==f2:
+                return False
+        else:
+            return False
+
+        if f3 not in ('sigma_absolute', 'sigma_relative', 'sigma_percent'):
+            return False
+
+        return f1 in ('value', 'central')
+
+IsFormat = Schema(IsFormatOk, error='Invalid variable format "{}".')
+IsVarsCfgDict = Schema({
+    'variables': IsValuesDict,
+    'labels': IsLabelsDict,
+    'format': IsFormat
+    },
+    error = 'Invalid parameters configuration: {}'
+)
+IsProperVarsCfgDict = And(IsVarsCfgDict, ParsCfgHasProperFormat())
+IsLoadableDict = And(
+            {'load': str},
+            Use(LoadFileWithExt(yaml=LoadYaml, key='load'), error='Failed to load {}'),
+            IsProperVarsCfgDict,
+            error = 'Failed to load parameters configuration file: {}'
+        )
+IsProperVarsCfg = Or(IsProperVarsCfgDict, IsLoadableDict)
+
+def process_var_fixed1(vcfg, _, __):
+    return {'central': vcfg, 'value': vcfg, 'sigma': None}
+
+def process_var_fixed2(vcfg, format, hascentral) -> dict:
+    ret = dict(zip(format, vcfg))
+    if hascentral:
+        ret.setdefault('value', ret['central'])
+    else:
+        ret.setdefault('central', ret['value'])
+    ret['sigma'] = None
+    return ret
+
+def process_var_absolute(vcfg, format, hascentral) -> dict:
+    ret = process_var_fixed2(vcfg, format, hascentral)
+    ret['sigma'] = ret['sigma_absolute']
+    return ret
+
+def process_var_relative(vcfg, format, hascentral) -> dict:
+    ret = process_var_fixed2(vcfg, format, hascentral)
+    ret['sigma'] = ret['sigma_relative']*ret['central']
+    return ret
+
+def process_var_percent(vcfg, format, hascentral) -> dict:
+    ret = process_var_fixed2(vcfg, format, hascentral)
+    ret['sigma'] = 0.01*ret['sigma_percent']*ret['central']
+    return ret
+
+def get_format_processor(format):
+    if isinstance(format, str):
+        return process_var_fixed1
+
+    errfmt = format[-1]
+    if not errfmt.startswith('sigma'):
+        return process_var_fixed2
+
+    if errfmt.endswith('_absolute'):
+        return process_var_absolute
+    elif errfmt.endswith('_relative'):
+        return process_var_relative
+    else:
+        return process_var_percent
+
+def iterate_varcfgs(cfg: DictWrapper):
+    variablescfg = cfg['variables']
+    labelscfg = cfg['labels']
+    format = cfg['format']
+
+    hascentral = 'central' in format
+    process = get_format_processor(format)
+
+    for key, varcfg in variablescfg.walkitems():
+        varcfg = process(varcfg, format, hascentral)
+        try:
+            varcfg['label'] = labelscfg[key]
+        except KeyError:
+            varcfg['label'] = {}
+        yield key, varcfg
+
+from dagflow.variable import Parameters
+
+def load_variables(acfg):
+    cfg = IsProperVarsCfg.validate(acfg)
+    cfg = DictWrapper(cfg)
+
+    ret = DictWrapper({}, sep='.')
+    for key, varcfg in iterate_varcfgs(cfg):
+        skey = '.'.join(key)
+        label = varcfg['label']
+        label['key'] = skey
+        label.setdefault('text', skey)
+        ret[key] = Parameters.from_numbers(**varcfg)
+
+    return ret
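A minimal configuration sketch for `load_variables()`. The config keys follow the schema above; whether `Parameters.from_numbers` (from `dagflow.variable`) accepts the produced keys is assumed here, so treat this as illustrative:

```python
# Hypothetical configuration; the two-element tuples follow the declared
# format (value, sigma_absolute).
cfg = {
    'format': ('value', 'sigma_absolute'),
    'variables': {'eff': (0.95, 0.01)},
    'labels': {'eff': 'Detection efficiency'},
}
params = load_variables(cfg)  # DictWrapper holding a Parameters object at 'eff'
```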
diff --git a/subtrees/dagflow/dagflow/datadescriptor.py b/subtrees/dagflow/dagflow/datadescriptor.py
new file mode 100755
index 0000000000000000000000000000000000000000..94cdf5a71d42c52f3c3043a9c4f89037c5dd6d63
--- /dev/null
+++ b/subtrees/dagflow/dagflow/datadescriptor.py
@@ -0,0 +1,32 @@
+from typing import Optional, Tuple
+from numpy.typing import DTypeLike
+
+from .types import ShapeLike, EdgesLike
+
+
+class DataDescriptor:
+    """
+    The data descriptor class stores `dtype`, `shape`,
+    `axes_edges` and `axes_nodes` information.
+    """
+
+    __slots__ = ("dtype", "shape", "axes_edges", "axes_nodes")
+    dtype: DTypeLike # DTypeLike is already Optional
+    shape: Optional[ShapeLike]
+    axes_edges: Optional[Tuple[EdgesLike]]
+    axes_nodes: Optional[Tuple[EdgesLike]]
+
+    def __init__(
+        self,
+        dtype: DTypeLike, # DTypeLike is already Optional
+        shape: Optional[ShapeLike],
+        axes_edges: Optional[Tuple[EdgesLike]] = None,
+        axes_nodes: Optional[Tuple[EdgesLike]] = None,
+    ) -> None:
+        """
+        Sets the attributes
+        """
+        self.dtype = dtype
+        self.shape = shape
+        self.axes_edges = axes_edges
+        self.axes_nodes = axes_nodes
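Usage is straightforward; a short sketch:

```python
from dagflow.datadescriptor import DataDescriptor

# Descriptor for a 1-D array of 10 doubles; axes metadata left unset
dd = DataDescriptor(dtype='d', shape=(10,))
print(dd.dtype, dd.shape)  # d (10,)
```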
diff --git a/subtrees/dagflow/dagflow/edges.py b/subtrees/dagflow/dagflow/edges.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a094fdc4e5a3774a6732d7abd1d3f84999c5a92
--- /dev/null
+++ b/subtrees/dagflow/dagflow/edges.py
@@ -0,0 +1,141 @@
+from collections.abc import Sequence
+
+from .exception import CriticalError
+from .iter import IsIterable
+
+from typing import List, Dict, Union
+
+class EdgeContainer:
+    _kw_edges: Dict
+    _pos_edges: List
+    _all_edges: Dict
+    _dtype = None
+
+    def __init__(self, iterable=None):
+        self._kw_edges = {}
+        self._pos_edges = []
+        self._all_edges = {}
+        if iterable:
+            self.add(iterable)
+
+    def add(self, value, *, positional: bool=True, keyword: bool=True):
+        if not (positional or keyword):
+            raise RuntimeError('Edge should be positional, a keyword, or both')
+
+        if IsIterable(value):
+            for v in value:
+                self.add(v, positional=positional, keyword=keyword)
+            return self
+        if self._dtype and not isinstance(value, self._dtype):
+            raise RuntimeError(
+                f"The type {type(value)} of the data doesn't correpond "
+                f"to {self._dtype}!"
+            )
+        name = value.name
+        if not name:
+            raise RuntimeError("May not add objects with undefined name")
+        if name in self._all_edges:
+            raise RuntimeError("May not add duplicated items")
+
+        if positional:
+            self._pos_edges.append(value)
+        if keyword:
+            self._kw_edges[name] = value
+        self._all_edges[name]=value
+        return self
+
+    def allocate(self) -> bool:
+        return all(edge.allocate() for edge in self._all_edges.values())
+
+    def __getitem__(self, key):
+        if isinstance(key, str):
+            return self._kw_edges[key]
+        elif isinstance(key, (int, slice)):
+            return self._pos_edges[key]
+        elif isinstance(key, Sequence):
+            return tuple(self.__getitem__(k) for k in key)
+        raise TypeError(f"Unsupported key type: {type(key).__name__}")
+
+    def get(self, key, default = None):
+        try:
+            return self.__getitem__(key)
+        except Exception:
+            return default
+
+    def has_key(self, key: str) -> bool:
+        return key in self._kw_edges
+
+    def get_pos(self, idx: int):
+        """Get positional leg"""
+        return self._pos_edges[idx]
+    iat = get_pos
+
+    def index(self, arg):
+        return self._pos_edges.index(arg)
+
+    def get_kw(self, key: str):
+        """Return keyword leg"""
+        return self._kw_edges[key]
+    kat = get_kw
+
+    def len_pos(self) -> int:
+        """Returns the number of positional legs"""
+        return len(self._pos_edges)
+    __len__ = len_pos
+
+    def len_kw(self) -> int:
+        """Returns the number of keyword legs"""
+        return len(self._kw_edges)
+
+    def len_all(self) -> int:
+        """Returns the total number of legs"""
+        return len(self._all_edges)
+
+    def __iter__(self):
+        return iter(self._pos_edges)
+
+    def iter_all(self):
+        return iter(self._all_edges.values())
+
+    def iter_data(self):
+        for edge in self._pos_edges:
+            yield edge.data
+
+    def iter(self, key: Union[int, str, slice, Sequence]):
+        if isinstance(key, int):
+            yield self._pos_edges[key]
+        elif isinstance(key, str):
+            yield self._kw_edges[key]
+        elif isinstance(key, slice):
+            yield from self._pos_edges[key]
+        elif isinstance(key, Sequence):
+            for subkey in key:
+                if isinstance(subkey, int):
+                    yield self._pos_edges[subkey]
+                elif isinstance(subkey, str):
+                    yield self._kw_edges[subkey]
+                elif isinstance(subkey, slice):
+                    yield from self._pos_edges[subkey]
+                else:
+                    raise CriticalError(f'Invalid subkey type {type(subkey).__name__}')
+        else:
+            raise CriticalError(f'Invalid key type {type(key).__name__}')
+
+    def __contains__(self, name):
+        return name in self._all_edges
+
+    def _replace(self, old, new):
+        replaced = False
+
+        for k, v in self._kw_edges.items():
+            if old is v:
+                self._kw_edges[k] = new
+                replaced = True
+
+        for i, v in enumerate(self._pos_edges):
+            if old is v:
+                self._pos_edges[i] = new
+                replaced = True
+
+        if not replaced:
+            raise CriticalError('Unable to replace an output/input (not found)')
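A sketch of the container semantics with a hypothetical stand-in edge type; real edges are `Input`/`Output` instances, which provide the required `.name`:

```python
from dagflow.edges import EdgeContainer

class _Edge:  # hypothetical stand-in for Input/Output
    def __init__(self, name):
        self.name = name

c = EdgeContainer()
c.add([_Edge('a'), _Edge('b')])
c.add(_Edge('kw_only'), positional=False)
assert len(c) == 2       # positional legs only
assert c.len_all() == 3  # includes the keyword-only leg
assert c['a'] is c[0]    # str -> keyword lookup, int -> positional
```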
diff --git a/subtrees/dagflow/dagflow/exception.py b/subtrees/dagflow/dagflow/exception.py
new file mode 100644
index 0000000000000000000000000000000000000000..b57aaf94616a82daa43c318f0667524d6d5beb1c
--- /dev/null
+++ b/subtrees/dagflow/dagflow/exception.py
@@ -0,0 +1,93 @@
+from typing import Optional
+from .types import NodeT, InputT, OutputT
+
+
+class DagflowError(Exception):
+    node: Optional[NodeT]
+    input: Optional[InputT]
+    output: Optional[OutputT]
+
+    def __init__(
+        self,
+        message: str,
+        node: Optional[NodeT] = None,
+        *,
+        input: Optional[InputT] = None,
+        output: Optional[OutputT] = None,
+    ):
+        if node:
+            message = f"{message} [node={node.name if 'name' in dir(node) else node}]"
+        if input:
+            message = f"{message} [input={input.name if 'name' in dir(input) else input}]"
+        if output:
+            message = f"{message} [output={output.name if 'name' in dir(output) else output}]"
+        super().__init__(message)
+        self.node = node
+        self.input = input
+        self.output = output
+
+        if node is not None:
+            node._exception = message
+
+class CriticalError(DagflowError):
+    pass
+
+
+class NoncriticalError(DagflowError):
+    pass
+
+class InitializationError(CriticalError):
+    def __init__(self, message: Optional[str] = None, *args, **kwargs):
+        if not message:
+            message = "Wrong initialization!"
+        super().__init__(message, *args, **kwargs)
+
+
+class AllocationError(CriticalError):
+    def __init__(self, message: Optional[str] = None, *args, **kwargs):
+        if not message:
+            message = "Unable to allocate memory!"
+        super().__init__(message, *args, **kwargs)
+
+class ClosingError(CriticalError):
+    def __init__(self, message: Optional[str] = None, *args, **kwargs):
+        if not message:
+            message = "An exception occured during closing procedure!"
+        super().__init__(message, *args, **kwargs)
+
+class OpeningError(CriticalError):
+    def __init__(self, message: Optional[str] = None, *args, **kwargs):
+        if not message:
+            message = "An exception occured during opening procedure!"
+        super().__init__(message, *args, **kwargs)
+
+class ClosedGraphError(CriticalError):
+    def __init__(self, message: Optional[str] = None, *args, **kwargs):
+        if not message:
+            message = "Unable to modify a closed graph!"
+        super().__init__(message, *args, **kwargs)
+
+class UnclosedGraphError(CriticalError):
+    def __init__(self, message : Optional[str]=None, *args, **kwargs):
+        if not message:
+            message = "The graph is not closed!"
+        super().__init__(message, *args, **kwargs)
+
+
+class TypeFunctionError(CriticalError):
+    def __init__(self, message: Optional[str] = None, *args, **kwargs):
+        if not message:
+            message = "An exception occurred during type function processing!"
+        super().__init__(message, *args, **kwargs)
+
+class ReconnectionError(CriticalError):
+    def __init__(self, message: Optional[str] = None, *args, **kwargs):
+        if not message:
+            message = "The object is already connected!"
+        super().__init__(message, *args, **kwargs)
+
+class ConnectionError(CriticalError):
+    def __init__(self, message: Optional[str] = None, *args, **kwargs):
+        if not message:
+            message = "An exception occurred during connection!"
+        super().__init__(message, *args, **kwargs)
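A short sketch of how the two branches of the hierarchy are meant to be caught; note that in this implementation `TypeFunctionError` derives from `CriticalError`:

```python
from dagflow.exception import CriticalError, NoncriticalError, TypeFunctionError

try:
    raise TypeFunctionError()
except NoncriticalError:
    pass  # not reached: TypeFunctionError is a CriticalError
except CriticalError as exc:
    print(exc)  # "An exception occurred during type function processing!"
```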
diff --git a/subtrees/dagflow/dagflow/graph.py b/subtrees/dagflow/dagflow/graph.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd0b9f1c32dca1f8b5f608bd16c48e6b43db506e
--- /dev/null
+++ b/subtrees/dagflow/dagflow/graph.py
@@ -0,0 +1,138 @@
+from .exception import (
+    UnclosedGraphError,
+    ClosedGraphError,
+    InitializationError
+)
+from .logger import Logger, get_logger
+from .node_group import NodeGroup
+
+from typing import Optional
+
+class Graph(NodeGroup):
+    """
+    The graph class:
+    holds nodes as a list, has name, label, logger and uses context
+    """
+
+    _context_graph: Optional['Graph'] = None
+    _label: Optional[str] = None
+    _name = "graph"
+    _close: bool = False
+    _closed: bool = False
+    _debug: bool = False
+    _logger: Logger
+
+    def __init__(self, *args, close: bool = False, **kwargs):
+        super().__init__(*args)
+        self._label = kwargs.pop("label", None)
+        self._name = kwargs.pop("name", "graph")
+        self._debug = kwargs.pop("debug", False)
+        self._close = close
+        # init or get default logger
+        self._logger = get_logger(
+            filename=kwargs.pop("logfile", None),
+            debug=self.debug,
+            console=kwargs.pop("console", True),
+            formatstr=kwargs.pop("logformat", None),
+            name=kwargs.pop("loggername", None),
+        )
+        if kwargs:
+            raise InitializationError(f"Unparsed arguments: {kwargs}!")
+
+    @property
+    def debug(self) -> bool:
+        return self._debug
+
+    @property
+    def logger(self) -> Logger:
+        return self._logger
+
+    @property
+    def name(self) -> str:
+        return self._name
+
+    @property
+    def closed(self) -> bool:
+        return self._closed
+
+    def _add_output(self, *args, **kwargs):
+        """Dummy method"""
+        pass
+
+    def _add_input(self, *args, **kwargs):
+        """Dummy method"""
+        pass
+
+    def label(self):
+        """Returns formatted label"""
+        if self._label:
+            return self._label.format(self._label, nodes=len(self._nodes))
+
+    def add_node(self, name, **kwargs):
+        """
+        Adds a node, if the graph is opened.
+        It is possible to pass the node class via the `nodeclass` arg
+        (default: `FunctionNode`)
+        """
+        if not self.closed:
+            from .nodes import FunctionNode
+            return kwargs.pop("nodeclass", FunctionNode)(
+                name, graph=self, **kwargs
+            )
+        raise ClosedGraphError(node=name)
+
+    def add_nodes(self, nodes, **kwargs):
+        """Adds nodes"""
+        if not self.closed:
+            return (self.add_node(node, **kwargs) for node in nodes)
+        raise ClosedGraphError(node=nodes)
+
+    def print(self):
+        print(f"Graph with {len(self._nodes)} nodes")
+        for node in self._nodes:
+            node.print()
+
+    @classmethod
+    def current(cls):
+        return cls._context_graph
+
+    def __enter__(self):
+        Graph._context_graph = self
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        Graph._context_graph = None
+        if exc_val is not None:
+            raise exc_val
+
+        if self._close:
+            self.close()
+
+    def close(self, **kwargs) -> bool:
+        """Closes the graph"""
+        # TODO: implement cross-closure of several graphs
+        if self._closed:
+            return True
+        self.logger.debug(f"Graph '{self.name}': Closing...")
+        self.logger.debug(f"Graph '{self.name}': Update types...")
+        for node in self._nodes:
+            node.update_types()
+        self.logger.debug(f"Graph '{self.name}': Allocate memory...")
+        for node in self._nodes:
+            node.allocate(**kwargs)
+        self.logger.debug(f"Graph '{self.name}': Closing nodes...")
+        self._closed = all(node.close(**kwargs) for node in self._nodes)
+        if not self._closed:
+            raise UnclosedGraphError("The graph is still open!")
+        self.logger.debug(f"Graph '{self.name}': The graph is closed!")
+        return self._closed
+
+    def open(self, force: bool = False) -> bool:
+        """Opens the graph recursively"""
+        if not self._closed and not force:
+            return True
+        self.logger.debug(f"Graph '{self.name}': Opening...")
+        self._closed = not all(node.open(force) for node in self._nodes)
+        if self._closed:
+            raise UnclosedGraphError("The graph is still closed!")
+        return not self._closed
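A usage sketch of the context manager; `add_node` instantiates `FunctionNode` unless `nodeclass` is given, and the graph registers itself as the current context:

```python
from dagflow.graph import Graph

g = Graph(name='demo')
with g:
    node = g.add_node('n0')      # FunctionNode bound to this graph
    assert Graph.current() is g  # context registration
assert Graph.current() is None   # cleared on exit; close() not called (close=False)
```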
diff --git a/subtrees/dagflow/dagflow/graphviz.py b/subtrees/dagflow/dagflow/graphviz.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6a9cb74a9bb3f8de874838c7aeaba706456dc16
--- /dev/null
+++ b/subtrees/dagflow/dagflow/graphviz.py
@@ -0,0 +1,373 @@
+from .input import Input
+from .output import Output
+from .printl import printl
+from .types import NodeT
+
+from numpy import square
+from collections.abc import Sequence
+from typing import Union, Set, Optional, Dict
+
+try:
+    import pygraphviz as G
+except ImportError:
+    GraphDot = None
+    savegraph = None
+else:
+
+    def savegraph(graph, *args, **kwargs):
+        gd = GraphDot(graph, **kwargs)
+        gd.savegraph(*args)
+
+    class EdgeDef:
+        __slots__ = ('nodein', 'nodemid', 'nodeout', 'edges')
+        def __init__(self, nodeout, nodemid, nodein, edge):
+            self.nodein = nodein
+            self.nodemid = nodemid
+            self.nodeout = nodeout
+            self.edges = [edge]
+
+        def append(self, edge):
+            self.edges.append(edge)
+
+    class GraphDot:
+        _graph = None
+        _node_id_map: dict
+
+        _show: Set[str]
+        def __init__(
+            self,
+            dag,
+            graphattr: dict={}, edgeattr: dict={}, nodeattr: dict={},
+            show: Union[Sequence,str] = ['type', 'mark', 'label'],
+            **kwargs
+        ):
+            if show=='all' or 'all' in show:
+                self._show = {'type', 'mark', 'label', 'status', 'data', 'data_summary'}
+            else:
+                self._show = set(show)
+
+            graphattr = dict(graphattr)
+            graphattr.setdefault("rankdir", "LR")
+            graphattr.setdefault("dpi", 300)
+
+            edgeattr = dict(edgeattr)
+            edgeattr.setdefault("fontsize", 10)
+            edgeattr.setdefault("labelfontsize", 9)
+            edgeattr.setdefault("labeldistance", 1.2)
+
+            nodeattr = dict(nodeattr)
+
+            self._node_id_map = {}
+            self._nodes = {}
+            self._nodes_open_input = {}
+            self._nodes_open_output = {}
+            self._edges: Dict[str, EdgeDef] = {}
+            self._graph = G.AGraph(directed=True, strict=False, **kwargs)
+
+            if graphattr:
+                self._graph.graph_attr.update(graphattr)
+            if edgeattr:
+                self._graph.edge_attr.update(edgeattr)
+            if nodeattr:
+                self._graph.node_attr.update(nodeattr)
+
+            if label := kwargs.pop("label", dag.label()):
+                self.set_label(label)
+            self._transform(dag)
+
+        def _transform(self, dag):
+            for nodedag in dag._nodes:
+                self._add_node(nodedag)
+            for nodedag in dag._nodes:
+                self._add_open_inputs(nodedag)
+                self._add_edges(nodedag)
+            self.update_style()
+
+        def get_id(self, object, suffix: str="") -> str:
+            name = type(object).__name__
+            omap = self._node_id_map.setdefault(name, {})
+            onum = omap.setdefault(object, len(omap))
+            return f"{name}_{onum}{suffix}"
+
+        def get_label(self, node: NodeT) -> str:
+            text = node.label('graph') or node.name
+            try:
+                out0 = node.outputs[0]
+            except IndexError:
+                shape0 = '?'
+                dtype0 = '?'
+            else:
+                shape0 = out0.dd.shape
+                if shape0 is None:
+                    shape0 = '?'
+                shape0="x".join(str(s) for s in shape0)
+
+                dtype0 = out0.dd.dtype
+                if dtype0 is None:
+                    dtype0 = '?'
+                else:
+                    dtype0 = dtype0.char
+
+            nout_pos = len(node.outputs)
+            nout_nonpos = node.outputs.len_all()-nout_pos
+            if nout_nonpos==0:
+                if nout_pos>1:
+                    nout = f'→{nout_pos}'
+                else:
+                    nout = ''
+            else:
+                nout=f'→{nout_pos}+{nout_nonpos}'
+
+            nin_pos = len(node.inputs)
+            nin_nonpos = node.inputs.len_all() - nin_pos
+            if nin_nonpos==0:
+                if nin_pos>1:
+                    nin = f'{nin_pos}→'
+                else:
+                    nin = ''
+            else:
+                nin=f'{nin_pos}+{nin_nonpos}→'
+
+            nlegs = f' {nin}{nout}'.replace('→→', '→')
+
+            left, right = [], []
+            info_type = f"[{shape0}]{dtype0}{nlegs}"
+            if 'type' in self._show:
+                left.append(info_type)
+            if 'mark' in self._show and node.mark is not None:
+                left.append(node.mark)
+            if 'label' in self._show:
+                right.append(text)
+            if 'status' in self._show:
+                status = []
+                if node.types_tainted: status.append('types_tainted')
+                if node.tainted: status.append('tainted')
+                if node.frozen: status.append('frozen')
+                if node.frozen_tainted: status.append('frozen_tainted')
+                if node.invalid: status.append('invalid')
+                if not node.closed: status.append('open')
+                if status:
+                    right.append(status)
+
+            show_data = 'data' in self._show
+            show_data_summary = 'data_summary' in self._show
+            if show_data or show_data_summary:
+                data = None
+                tainted = 'tainted' if out0.tainted else 'updated'
+                try:
+                    data = out0.data
+                except Exception:
+                    right.append('caught exception')
+                    data = out0._data
+
+                if show_data:
+                    right.append(str(data).replace('\n', '\\l')+'\\l')
+                if show_data_summary:
+                    sm = data.sum()
+                    sm2 = square(data).sum()
+                    mn = data.min()
+                    mx = data.max()
+                    right.append((f'Σ={sm:.2g}', f'Σ²={sm2:.2g}', f'min={mn:.2g}', f'max={mx:.2g}', f'{tainted}'))
+
+            if node.exception is not None:
+                right.append(node.exception)
+
+            return self._combine_labels((left, right))
+
+        def _combine_labels(self, labels: Union[Sequence,str]) -> str:
+            if isinstance(labels, str):
+                return labels
+
+            slabels = [self._combine_labels(l) for l in labels]
+            return f"{{{'|'.join(slabels)}}}"
+
+        def _add_node(self, nodedag):
+            styledict = {
+                "shape": "Mrecord",
+                "label": self.get_label(nodedag)
+            }
+            target = self.get_id(nodedag)
+            self._graph.add_node(target, **styledict)
+            nodedot = self._graph.get_node(target)
+            self._nodes[nodedag] = nodedot
+
+        def _add_open_inputs(self, nodedag):
+            for input in nodedag.inputs:
+                if not input.connected():
+                    self._add_open_input(input, nodedag)
+
+        def _add_open_input(self, input, nodedag):
+            styledict = {}
+            source = self.get_id(input, "_in")
+            target = self.get_id(nodedag)
+
+            self._graph.add_node(source, label="", shape="none", **styledict)
+            self._graph.add_edge(source, target, **styledict)
+
+            nodein = self._graph.get_node(source)
+            edge = self._graph.get_edge(source, target)
+            nodeout = self._graph.get_node(target)
+
+            self._nodes_open_input[input] = nodein
+            self._edges[input] = EdgeDef(nodein, None, nodeout, edge)
+
+        def _add_edges(self, nodedag):
+            for output in nodedag.outputs:
+                if output.connected():
+                    if len(output.child_inputs)>1:
+                        self._add_edges_multi(nodedag, output)
+                    else:
+                        self._add_edge(nodedag, output, output.child_inputs[0])
+                else:
+                    self._add_open_output(nodedag, output)
+
+        def _add_edges_multi(self, nodedag, output):
+            vnode = self.get_id(output, "_mid")
+            self._graph.add_node(vnode, label="", shape="none", width=0, height=0, penwidth=0, weight=10)
+            firstinput = output.child_inputs[0]
+            self._add_edge(nodedag, output, firstinput, vtarget=vnode)
+            for input in output.child_inputs:
+                self._add_edge(nodedag, output, input, vsource=vnode)
+
+        def _add_open_output(self, nodedag, output):
+            styledict = {}
+            source = self.get_id(nodedag)
+            target = self.get_id(output, "_out")
+            self._get_index(output, styledict, 'taillabel')
+
+            self._graph.add_node(target, label="", shape="none", **styledict)
+            self._graph.add_edge(
+                source, target, arrowhead="empty", **styledict
+            )
+            nodein = self._graph.get_node(source)
+            edge = self._graph.get_edge(source, target)
+            nodeout = self._graph.get_node(target)
+
+            self._nodes_open_output[output] = nodeout
+            self._edges[output] = EdgeDef(nodein, None, nodeout, edge)
+
+        def _get_index(self, leg, styledict: dict, target: str):
+            if isinstance(leg, Input):
+                container = leg.node.inputs
+            else:
+                container = leg.node.outputs
+            if container.len_all()<2:
+                return
+
+            try:
+                idx = container.index(leg)
+            except ValueError:
+                pass
+            else:
+                styledict[target] = str(idx)
+
+        def _add_edge(self, nodedag, output, input, *, vsource: Optional[str]=None, vtarget: Optional[str]=None) -> None:
+            styledict = {}
+
+            if vsource is not None:
+                source = vsource
+                styledict['arrowtail'] = 'none'
+            else:
+                source = self.get_id(nodedag)
+                self._get_index(output, styledict, 'taillabel')
+
+            if vtarget is not None:
+                target = vtarget
+                styledict['arrowhead'] = 'none'
+            else:
+                target = self.get_id(input.node)
+                self._get_index(input, styledict, 'headlabel')
+
+            self._graph.add_edge(source, target, **styledict)
+
+            nodein = self._graph.get_node(source)
+            edge = self._graph.get_edge(source, target)
+            nodeout = self._graph.get_node(target)
+
+            edgedef = self._edges.get(input, None)
+            if edgedef is None:
+                self._edges[input] = EdgeDef(nodein, None, nodeout, edge)
+            else:
+                edgedef.append(edge)
+
+        def _set_style_node(self, node, attr):
+            if node is None:
+                attr["color"] = "gray"
+            else:
+                if node.invalid:
+                    attr["color"] = "black"
+                elif node.being_evaluated:
+                    attr["color"] = "gold"
+                elif node.tainted:
+                    attr["color"] = "red"
+                elif node.frozen_tainted:
+                    attr["color"] = "blue"
+                elif node.frozen:
+                    attr["color"] = "cyan"
+                elif node.immediate:
+                    attr["color"] = "green"
+                else:
+                    attr["color"] = "forestgreen"
+
+                if node.exception is not None:
+                    attr["color"] = "magenta"
+
+        def _set_style_edge(self, obj, attrin, attr, attrout):
+            if isinstance(obj, Input):
+                if obj.connected():
+                    node = obj.parent_output.node
+                else:
+                    node = None
+                    self._set_style_node(node, attrin)
+            else:
+                node = obj.node
+                self._set_style_node(node, attrout)
+
+            self._set_style_node(node, attr)
+
+            if isinstance(obj, Input):
+                allocated_on_input = obj.owns_buffer
+                try:
+                    allocated_on_output = obj.parent_output.owns_buffer
+                except AttributeError:
+                    allocated_on_output = True
+            elif isinstance(obj, Output):
+                allocated_on_input = False
+                allocated_on_output = obj.owns_buffer
+            attr.update({
+                "dir": "both",
+                "arrowsize": 0.5
+                })
+            attr["arrowhead"] = attr["arrowhead"] or allocated_on_input  and 'dotopen' or 'odotopen'
+            attr["arrowtail"] = attr["arrowtail"] or allocated_on_output and 'dot' or 'odot'
+
+            if node:
+                if node.frozen:
+                    attrin["style"] = "dashed"
+                    attr["style"] = "dashed"
+                    # attr['arrowhead']='tee'
+                else:
+                    attr["style"] = ""
+
+        def update_style(self):
+            for nodedag, nodedot in self._nodes.items():
+                self._set_style_node(nodedag, nodedot.attr)
+
+            for object, edgedef in self._edges.items():
+                for edge in edgedef.edges:
+                    self._set_style_edge(
+                        object, edgedef.nodein.attr, edge.attr, edgedef.nodeout.attr
+                    )
+
+        def set_label(self, label):
+            self._graph.graph_attr["label"] = label
+
+        def savegraph(self, fname, verbose=True):
+            if verbose:
+                printl("Write output file:", fname)
+
+            if fname.endswith(".dot"):
+                self._graph.write(fname)
+            else:
+                self._graph.layout(prog="dot")
+                self._graph.draw(fname)
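A rendering sketch, assuming pygraphviz is installed (otherwise `savegraph` is `None`) and `graph` is a `Graph` built as in the README example:

```python
from dagflow.graphviz import savegraph

if savegraph is not None:
    # 'show' picks the node-label annotations; 'all' enables everything
    savegraph(graph, 'output/graph.png', show=['type', 'label', 'status'])
```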
diff --git a/subtrees/dagflow/dagflow/input.py b/subtrees/dagflow/dagflow/input.py
new file mode 100644
index 0000000000000000000000000000000000000000..29a27e285970e2338f010fa86e4d17f28f8efcdf
--- /dev/null
+++ b/subtrees/dagflow/dagflow/input.py
@@ -0,0 +1,299 @@
+from typing import Iterator, Optional, Tuple, Union
+from numpy import zeros
+from numpy.typing import DTypeLike, NDArray
+
+from dagflow.datadescriptor import DataDescriptor
+
+from .edges import EdgeContainer
+from .exception import (
+    ClosedGraphError,
+    ReconnectionError,
+    AllocationError,
+    InitializationError,
+)
+from .output import Output
+from .shift import lshift
+from .iter import StopNesting
+from .types import EdgesLike, InputT, NodeT, ShapeLike
+
+
+class Input:
+    _own_data: Optional[NDArray] = None
+    _own_dd: DataDescriptor
+
+    _node: Optional[NodeT]
+    _name: Optional[str]
+
+    _parent_output: Optional[Output]
+    _child_output: Optional[Output]
+
+    _allocatable: bool = False
+    _owns_buffer: bool = False
+
+    _debug: bool = False
+
+    def __init__(
+        self,
+        name: Optional[str] = None,
+        node: Optional[NodeT] = None,
+        *,
+        child_output: Optional[Output] = None,
+        parent_output: Optional[Output] = None,
+        debug: Optional[bool] = None,
+        allocatable: bool = False,
+        data: Optional[NDArray] = None,
+        dtype: DTypeLike = None,
+        shape: Optional[ShapeLike] = None,
+        axes_edges: Optional[Tuple[EdgesLike]] = None,
+        axes_nodes: Optional[Tuple[EdgesLike]] = None,
+    ):
+        if data is not None and (
+            allocatable or dtype is not None or shape is not None
+        ):
+            raise InitializationError("Data conflicts with allocatable/dtype/shape", node=node)
+
+        self._name = name
+        self._node = node
+        self._child_output = child_output
+        self._parent_output = parent_output
+        self._allocatable = allocatable
+        if debug is not None:
+            self._debug = debug
+        elif node:
+            self._debug = node.debug
+        else:
+            self._debug = False
+
+        self._own_dd = DataDescriptor(dtype, shape, axes_edges, axes_nodes)
+
+        if data is not None:
+            self.set_own_data(data, owns_buffer=True)
+
+    def __str__(self) -> str:
+        return (
+            f"→● {self._name}"
+            if self._owns_buffer
+            else f"→○ {self._name}"
+        )
+
+    def __repr__(self) -> str:
+        return self.__str__()
+
+    @property
+    def own_data(self) -> Optional[NDArray]:
+        return self._own_data
+
+    @property
+    def own_dd(self) -> DataDescriptor:
+        return self._own_dd
+
+    @property
+    def owns_buffer(self) -> bool:
+        return self._owns_buffer
+
+    def set_own_data(
+        self,
+        data,
+        *,
+        owns_buffer: bool,
+        axes_edges: EdgesLike = None,
+        axes_nodes: EdgesLike = None,
+    ):
+        if self.closed:
+            raise ClosedGraphError(
+                "Unable to set input data.", node=self._node, input=self
+            )
+        if self.own_data is not None:
+            raise AllocationError(
+                "Input already has data.", node=self._node, input=self
+            )
+
+        self._own_data = data
+        self._owns_buffer = owns_buffer
+        self.own_dd.dtype = data.dtype
+        self.own_dd.shape = data.shape
+        self.own_dd.axes_edges = axes_edges
+        self.own_dd.axes_nodes = axes_nodes
+
+    @property
+    def closed(self):
+        return self._node.closed if self.node else False
+
+    def set_child_output(
+        self, child_output: Output, force: bool = False
+    ) -> None:
+        if not self.closed:
+            return self._set_child_output(child_output, force)
+        raise ClosedGraphError(input=self, node=self.node, output=child_output)
+
+    def _set_child_output(
+        self, child_output: Output, force: bool = False
+    ) -> None:
+        if self.child_output and not force:
+            raise ReconnectionError(output=self.child_output, node=self.node)
+        self._child_output = child_output
+        child_output.parent_input = self
+
+    def set_parent_output(
+        self, parent_output: Output, force: bool = False
+    ) -> None:
+        if not self.closed:
+            return self._set_parent_output(parent_output, force)
+        raise ClosedGraphError(
+            input=self, node=self.node, output=parent_output
+        )
+
+    def _set_parent_output(
+        self, parent_output: Output, force: bool = False
+    ) -> None:
+        if self.connected() and not force:
+            raise ReconnectionError(output=self._parent_output, node=self.node)
+        self._parent_output = parent_output
+
+    @property
+    def name(self) -> str:
+        return self._name
+
+    @name.setter
+    def name(self, name) -> None:
+        self._name = name
+
+    @property
+    def node(self) -> NodeT:
+        return self._node
+
+    @property
+    def parent_node(self) -> NodeT:
+        return self._parent_output.node
+
+    @property
+    def logger(self):
+        return self._node.logger
+
+    @property
+    def child_output(self) -> Optional[Output]:
+        return self._child_output
+
+    @property
+    def invalid(self) -> bool:
+        """Checks validity of the parent output data"""
+        return self._parent_output.invalid
+
+    @property
+    def has_data(self) -> bool:
+        return self._own_data is not None
+
+    @property
+    def allocatable(self) -> bool:
+        return self._allocatable
+
+    @property
+    def debug(self) -> bool:
+        return self._debug
+
+    @invalid.setter
+    def invalid(self, invalid) -> None:
+        """Sets the validity of the current node"""
+        self._node.invalid = invalid
+
+    @property
+    def parent_output(self) -> Output:
+        return self._parent_output
+
+    @property
+    def data(self):
+        # NOTE: if the node is being evaluated, we must touch the node
+        #       (trigger deep evaluation), else we get the data directly
+        if self.node.being_evaluated:
+            return self._parent_output.data
+        return self._parent_output.get_data_unsafe()
+
+    def get_data_unsafe(self):
+        return self._parent_output.get_data_unsafe()
+
+    @property
+    def dd(self):
+        return self._parent_output.dd
+
+    @property
+    def tainted(self) -> bool:
+        return self._parent_output.tainted
+
+    def touch(self):
+        return self._parent_output.touch()
+
+    def taint(self, **kwargs) -> None:
+        self._node.taint(caller=self, **kwargs)
+
+    def taint_type(self, *args, **kwargs) -> None:
+        self._node.taint_type(*args, **kwargs)
+
+    def connected(self) -> bool:
+        return bool(self._parent_output)
+
+    def deep_iter_inputs(self, disconnected_only=False):
+        if disconnected_only and self.connected():
+            return iter(tuple())
+        raise StopNesting(self)
+
+    def deep_iter_child_outputs(self):
+        if self._child_output:
+            raise StopNesting(self._child_output)
+        return iter(tuple())
+
+    def __lshift__(self, other):
+        """
+        self << other
+        """
+        return lshift(self, other)
+
+    def __rrshift__(self, other):
+        """
+        other >> self
+        """
+        return lshift(self, other)
+
+    def allocate(self, **kwargs) -> bool:
+        if not self._allocatable or self.has_data:
+            return True
+
+        if self.own_dd.shape is None or self.own_dd.dtype is None:
+            raise AllocationError(
+                "No shape/type information provided for the Input",
+                node=self._node,
+                output=self,
+            )
+        try:
+            self._own_data = zeros(self.own_dd.shape, self.own_dd.dtype, **kwargs)
+        except Exception as exc:
+            raise AllocationError(
+                f"Input: {exc.args[0]}", node=self._node, input=self
+            ) from exc
+
+        return True
+
+
+class Inputs(EdgeContainer):
+    _dtype = Input
+
+    def __init__(self, iterable=None):
+        super().__init__(iterable)
+
+    def __str__(self):
+        return f"→[{tuple(obj.name for obj in self)}]○"
+
+    def deep_iter_inputs(
+        self, disconnected_only: bool = False
+    ) -> Iterator[Input]:
+        for input in self:
+            if disconnected_only and input.connected():
+                continue
+            yield input
+
+    def deep_iter_child_outputs(self) -> Iterator[Union[Input, Output]]:
+        for input in self:
+            yield input.child_output
+
+    def touch(self) -> None:
+        for input in self:
+            input.touch()
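An allocation sketch for a standalone input (no node attached): dtype and shape come from the constructor, and `allocate()` zero-fills the buffer.

```python
from dagflow.input import Input

inp = Input('x', None, allocatable=True, dtype='d', shape=(3,))
inp.allocate()
print(inp.own_data)     # [0. 0. 0.]
print(inp.connected())  # False: no parent output yet
```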
diff --git a/subtrees/dagflow/dagflow/input_extra.py b/subtrees/dagflow/dagflow/input_extra.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb71207f024ed96ef50cec8121cb739173f9188e
--- /dev/null
+++ b/subtrees/dagflow/dagflow/input_extra.py
@@ -0,0 +1,163 @@
+from typing import Optional, Union
+
+class SimpleFormatter():
+    _base: str
+    _numfmt: str
+    def __init__(self, base: str, numfmt: str = '_{:02d}'):
+        self._base = base
+        self._numfmt = numfmt
+
+    @staticmethod
+    def from_string(string: str):
+        if '{' in string:
+            return string
+
+        return SimpleFormatter(string)
+
+    def format(self, num: int) -> str:
+        if num>0:
+            return self._base+self._numfmt.format(num)
+
+        return self._base
+
+
+class MissingInputHandler:
+    """
+    Handler to implement behaviour when output
+    is connected to the missing input with >>/<<
+    """
+
+    _node = None
+
+    def __init__(self, node=None):
+        self.node = node
+
+    @property
+    def node(self):
+        return self._node
+
+    @node.setter
+    def node(self, node):
+        self._node = node
+
+    def __call__(self, idx=None, scope=None):
+        pass
+
+
+class MissingInputFail(MissingInputHandler):
+    """Default missing input handler: issues and exception"""
+
+    def __init__(self, node=None):
+        super().__init__(node)
+
+    def __call__(self, idx=None, scope=None):
+        raise RuntimeError(
+            "Unable to iterate inputs further. "
+            "No additional inputs may be created"
+        )
+
+
+class MissingInputAdd(MissingInputHandler):
+    """Adds an input for each output in >> operator"""
+
+    input_fmt: Union[str,SimpleFormatter] = SimpleFormatter("input", "_{:02d}")
+    input_kws: dict
+    output_fmt: Union[str,SimpleFormatter] = SimpleFormatter("output", "_{:02d}")
+    output_kws: dict
+
+    def __init__(
+        self,
+        node=None,
+        *,
+        input_fmt: Optional[Union[str,SimpleFormatter]] = None,
+        input_kws: Optional[dict] = None,
+        output_fmt: Optional[Union[str,SimpleFormatter]] = None,
+        output_kws: Optional[dict] = None,
+    ):
+        if input_kws is None:
+            input_kws = {}
+        if output_kws is None:
+            output_kws = {}
+        super().__init__(node)
+        self.input_kws = input_kws
+        self.output_kws = output_kws
+        if input_fmt is not None:
+            self.input_fmt = SimpleFormatter.from_string(input_fmt)
+        if output_fmt is not None:
+            self.output_fmt = SimpleFormatter.from_string(output_fmt)
+
+    def __call__(self, idx=None, scope=None, **kwargs):
+        kwargs_combined = dict(self.input_kws, **kwargs)
+        return self.node._add_input(
+            self.input_fmt.format(
+                idx if idx is not None else len(self.node.inputs)
+            ),
+            **kwargs_combined,
+        )
+
+
+class MissingInputAddPair(MissingInputAdd):
+    """
+    Adds an input for each output in >> operator.
+    Adds an output for each new input
+    """
+
+    def __init__(self, node=None, **kwargs):
+        super().__init__(node, **kwargs)
+
+    def __call__(self, idx=None, scope=None):
+        idx_out = len(self.node.outputs)
+        out = self.node._add_output(
+            self.output_fmt.format(idx_out), **self.output_kws
+        )
+        return super().__call__(idx, child_output=out, scope=scope)
+
+
+class MissingInputAddOne(MissingInputAdd):
+    """
+    Adds an input for each output in >> operator.
+    Adds only one output if needed
+    """
+
+    add_child_output = False
+
+    def __init__(self, node=None, *, add_child_output: bool = False, **kwargs):
+        super().__init__(node, **kwargs)
+        self.add_child_output = add_child_output
+
+    def __call__(self, idx=None, scope=None):
+        if (idx_out := len(self.node.outputs)) == 0:
+            out = self.node._add_output(
+                self.output_fmt.format(idx_out), **self.output_kws
+            )
+        else:
+            out = self.node.outputs[-1]
+        if self.add_child_output:
+            return super().__call__(idx, child_output=out, scope=scope)
+        return super().__call__(idx, scope=scope)
+
+
+class MissingInputAddEach(MissingInputAdd):
+    """
+    Adds an output for each block (for each >> operation)
+    """
+
+    add_child_output = False
+    scope = 0
+
+    def __init__(self, node=None, *, add_child_output=False, **kwargs):
+        super().__init__(node, **kwargs)
+        self.add_child_output = add_child_output
+
+    def __call__(self, idx=None, scope=None):
+        if scope == self.scope != 0:
+            out = self.node.outputs[-1]
+        else:
+            out = self.node._add_output(
+                self.output_fmt.format(len(self.node.outputs)),
+                **self.output_kws,
+            )
+            self.scope = scope
+        if self.add_child_output:
+            return super().__call__(idx, child_output=out, scope=scope)
+        return super().__call__(idx, scope=scope)
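The formatter behaviour in isolation:

```python
from dagflow.input_extra import SimpleFormatter

fmt = SimpleFormatter('input')
print(fmt.format(0))  # 'input'    (the first leg keeps the bare name)
print(fmt.format(2))  # 'input_02' (later legs get a numeric suffix)
```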
diff --git a/subtrees/dagflow/dagflow/iter.py b/subtrees/dagflow/dagflow/iter.py
new file mode 100644
index 0000000000000000000000000000000000000000..09eaef8d6ee13f497a4b1fe1a4d75fb32e0a9f1e
--- /dev/null
+++ b/subtrees/dagflow/dagflow/iter.py
@@ -0,0 +1,14 @@
+from collections.abc import Iterable
+from itertools import islice
+
+class StopNesting(Exception):
+    def __init__(self, object):
+        self.object = object
+
+def IsIterable(obj):
+    return isinstance(obj, Iterable) and not isinstance(obj, str)
+
+def nth(iterable, n):
+    "Returns the nth item or a default value"
+    return next(islice(iterable, n, None)) if n > -1 else tuple(iterable)[n]
+
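+# Example (sketch, plain Python):
+#
+#     nth(iter(range(10)), 3)   # -> 3; consumes the iterator up to index 3
+#     nth(range(10), -1)        # -> 9; negative n materializes a tuple
+#     IsIterable("abc")         # -> False; strings are deliberately excluded
+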
diff --git a/subtrees/dagflow/dagflow/iterators.py b/subtrees/dagflow/dagflow/iterators.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a4b59c0d75f851d037269125447e23ebd1806ce
--- /dev/null
+++ b/subtrees/dagflow/dagflow/iterators.py
@@ -0,0 +1,42 @@
+
+from .iter import IsIterable, StopNesting
+
+
+def get_proper_iterator(obj, methodname, onerror, **kwargs):
+    if methodname:
+        if method := getattr(obj, methodname, None):
+            return method(**kwargs)
+    if IsIterable(obj):
+        return obj
+    raise RuntimeError(
+        f"Do not know how to get an iterator for '{onerror}'! "
+        f"{obj=}, {type(obj)=}"
+    )
+
+
+def deep_iterate(obj, methodname, onerror, **kwargs):
+    try:
+        iterable = get_proper_iterator(obj, methodname, onerror, **kwargs)
+        if isinstance(iterable, dict):
+            raise StopNesting(iterable)
+        for element in iterable:
+            yield from deep_iterate(element, methodname, onerror, **kwargs)
+    except StopNesting as sn:
+        yield sn.object
+
+
+def iter_inputs(inputs, disconnected_only=False):
+    return deep_iterate(
+        inputs,
+        "deep_iter_inputs",
+        "inputs",
+        disconnected_only=disconnected_only,
+    )
+
+
+def iter_outputs(outputs):
+    return deep_iterate(outputs, "deep_iter_outputs", "outputs")
+
+
+def iter_child_outputs(inputs):
+    return deep_iterate(inputs, "deep_iter_child_outputs", "child_outputs")
diff --git a/subtrees/dagflow/dagflow/legs.py b/subtrees/dagflow/dagflow/legs.py
new file mode 100644
index 0000000000000000000000000000000000000000..91c1e204e6d3000dd840af983e8d865ee78f99b0
--- /dev/null
+++ b/subtrees/dagflow/dagflow/legs.py
@@ -0,0 +1,107 @@
+
+from . import input_extra
+from .input import Inputs
+from .output import Outputs
+from .shift import lshift, rshift
+from .iter import StopNesting
+
+class Legs:
+    inputs: Inputs
+    outputs: Outputs
+    def __init__(self, inputs=None, outputs=None, missing_input_handler=None):
+        self._missing_input_handler = missing_input_handler
+        self.inputs = Inputs(inputs)
+        self.outputs = Outputs(outputs)
+
+    @property
+    def _missing_input_handler(self):
+        return self.__missing_input_handler
+
+    @_missing_input_handler.setter
+    def _missing_input_handler(self, handler):
+        if handler:
+            if isinstance(handler, str):
+                sethandler = getattr(input_extra, handler)(self)
+            elif isinstance(handler, type):
+                sethandler = handler(self)
+            else:
+                sethandler = handler
+                sethandler.node = self
+        elif hasattr(self, 'missing_input_handler'):
+            sethandler = self.missing_input_handler
+        else:
+            sethandler = input_extra.MissingInputFail(self)
+        self.__missing_input_handler = sethandler
+
+    def __getitem__(self, key):
+        if isinstance(key, (int, slice, str)):
+            return self.outputs[key]
+        if (y := len(key)) != 2:
+            raise ValueError(f"Legs key should be of length 2, but given {y}!")
+        ikey, okey = key
+        if ikey and okey:
+            if isinstance(ikey, (int, str)):
+                ikey = (ikey,)
+            if isinstance(okey, (int, str)):
+                okey = (okey,)
+            return Legs(
+                self.inputs[ikey],
+                self.outputs[okey],
+                missing_input_handler=self.__missing_input_handler,
+            )
+        if ikey:
+            return self.inputs[ikey]
+        if okey:
+            return self.outputs[okey]
+        raise ValueError("Empty keys specified")
+
+    def get(self, key, default=None):
+        try:
+            return self.__getitem__(key)
+        except Exception:
+            return default
+
+    def __str__(self) -> str:
+        return f"→[{len(self.inputs)}],[{len(self.outputs)}]→"
+
+    def __repr__(self) -> str:
+        return self.__str__()
+
+    def deep_iter_outputs(self):
+        return iter(self.outputs)
+
+    def deep_iter_inputs(self, disconnected_only=False):
+        return iter(self.inputs)
+
+    def deep_iter_child_outputs(self):
+        raise StopNesting(self)
+
+    def print(self):
+        for i, input in enumerate(self.inputs):
+            print(i, input)
+        for i, output in enumerate(self.outputs):
+            print(i, output)
+
+    def __rshift__(self, other):
+        """
+        self >> other
+        """
+        return rshift(self, other)
+
+    def __rlshift__(self, other):
+        """
+        other << self
+        """
+        return rshift(self, other)
+
+    def __lshift__(self, other):
+        """
+        self << other
+        """
+        return lshift(self, other)
+
+    def __rrshift__(self, other):
+        """
+        other >> self
+        """
+        return lshift(self, other)
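+
+
+# Indexing conventions for Legs.__getitem__ (sketch; names illustrative):
+#
+#     legs["out"]         # a single key addresses the outputs
+#     legs["in", "out"]   # both keys -> a new Legs view with those legs
+#     legs["in", ()]      # only the input key -> legs.inputs["in"]
+#     legs[(), "out"]     # only the output key -> legs.outputs["out"]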
diff --git a/subtrees/dagflow/dagflow/lib/Array.py b/subtrees/dagflow/dagflow/lib/Array.py
new file mode 100644
index 0000000000000000000000000000000000000000..da6cdeaa30807834472a4ef1456d28ebde7bb575
--- /dev/null
+++ b/subtrees/dagflow/dagflow/lib/Array.py
@@ -0,0 +1,65 @@
+from numpy import array
+
+from ..nodes import FunctionNode
+from ..output import Output
+from ..exception import InitializationError
+
+from numpy.typing import ArrayLike, NDArray
+from typing import Optional
+
+class Array(FunctionNode):
+    """Creates a node with a single data output with predefined array"""
+
+    _mode: str
+    _data: NDArray
+    _output: Output
+    def __init__(self, name, arr, *,
+        mode: str="store",
+        outname="array",
+        mark: Optional[str]=None,
+        **kwargs
+    ):
+        super().__init__(name, **kwargs)
+        self._mode = mode
+        if mark is not None:
+            self._mark = mark
+        self._data = array(arr, copy=True)
+
+        if mode=='store':
+            self._output = self._add_output(outname, data=self._data)
+        elif mode=='store_weak':
+            self._output = self._add_output(outname, data=self._data, owns_buffer=False)
+        elif mode=='fill':
+            self._output = self._add_output(outname, dtype=self._data.dtype, shape=self._data.shape)
+        else:
+            raise InitializationError(f'Array: invalid mode "{mode}"', node=self)
+
+        self._functions.update({
+                "store": self._fcn_store,
+                "store_weak": self._fcn_store,
+                "fill": self._fcn_fill
+                })
+        self.fcn = self._functions[self._mode]
+
+        if mode=='store':
+            self.close()
+
+    def _fcn_store(self, *args):
+        return self._data
+
+    def _fcn_fill(self, *args):
+        data = self._output._data
+        data[:] = self._data
+        return data
+
+    def _typefunc(self) -> None:
+        pass
+
+    def _post_allocate(self) -> None:
+        if self._mode=='fill':
+            return
+
+        self._data = self._output._data
+
+    def set(self, data: ArrayLike, check_taint: bool=False) -> bool:
+        return self._output.set(data, check_taint)
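+
+
+# Example (sketch; assumes a Graph context as used elsewhere in this package):
+#
+#     a = Array("a", [1.0, 2.0, 3.0])               # mode="store": closes itself
+#     b = Array("b", [1.0, 2.0, 3.0], mode="fill")  # allocated and filled on close
+#     a.set([3.0, 2.0, 1.0])                        # overwrite the stored data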
diff --git a/subtrees/dagflow/dagflow/lib/Cholesky.py b/subtrees/dagflow/dagflow/lib/Cholesky.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4c3c12bd4121b529a3a7d4963c54501d7633596
--- /dev/null
+++ b/subtrees/dagflow/dagflow/lib/Cholesky.py
@@ -0,0 +1,60 @@
+from ..input_extra import MissingInputAddPair
+from ..nodes import FunctionNode
+from ..typefunctions import (
+    check_has_inputs,
+    copy_input_to_output,
+    check_input_square_or_diag
+)
+from scipy.linalg import cholesky
+from numpy import sqrt
+
+class Cholesky(FunctionNode):
+    """Compute the Cholesky decomposition of a matrix V=LL̃ᵀ
+    1d input is considered to be a diagonal of square matrix"""
+    _mark: str = 'V→L'
+    def __init__(self, *args, **kwargs):
+        kwargs.setdefault(
+                "missing_input_handler", MissingInputAddPair(input_fmt='matrix', output_fmt='L')
+        )
+        super().__init__(*args, **kwargs)
+
+        self._functions.update({
+                "square": self._fcn_square,
+                "diagonal": self._fcn_diagonal
+            })
+
+    def _fcn_square(self, _, inputs, outputs):
+        """Compute Cholesky decomposition using `scipy.linalg.cholesky`
+        NOTE: inplace computation (`overwrite_a=True`) works only for
+        the F-based arrays. As soon as by default C-arrays are used,
+        transposition produces an F-array (view). Transposition with
+        `lower=False` produces a lower matrix in the end.
+        """
+        inputs.touch()
+
+        for input, output in zip(inputs.iter_data(), outputs.iter_data()):
+            output[:] = input
+            cholesky(output.T, overwrite_a=True, lower=False) # produces L (!) inplace
+            # output[:]=cholesky(input, lower=True)
+
+    def _fcn_diagonal(self, _, inputs, outputs):
+        """Compute "Cholesky" decomposition using of a diagonal of a square matrix.
+        Elementwise sqrt is used.
+        """
+        inputs.touch()
+
+        for input, output in zip(inputs.iter_data(), outputs.iter_data()):
+            sqrt(input, out=output)
+
+    def _typefunc(self) -> None:
+        check_has_inputs(self)
+        ndim = check_input_square_or_diag(self, slice(None))
+        copy_input_to_output(self, slice(None), slice(None))
+
+        if ndim==2:
+            self.fcn = self._functions["square"]
+            self._mark = 'V→L'
+        else:
+            self.fcn = self._functions["diagonal"]
+            self._mark = 'sqrt(Vᵢ)'
+
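+# Sanity check of the stored convention in plain numpy/scipy (sketch, outside
+# the node machinery, for a positive-definite matrix V): the "square" branch
+# keeps a lower-triangular L with V = LLᵀ.
+#
+#     from numpy import allclose
+#     from scipy.linalg import cholesky
+#     L = cholesky(V, lower=True)
+#     assert allclose(L @ L.T, V)
+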
diff --git a/subtrees/dagflow/dagflow/lib/Concatenation.py b/subtrees/dagflow/dagflow/lib/Concatenation.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca9215db22da0e04bad8af40d43066715754c9c6
--- /dev/null
+++ b/subtrees/dagflow/dagflow/lib/Concatenation.py
@@ -0,0 +1,28 @@
+from ..input_extra import MissingInputAddOne
+from ..nodes import FunctionNode
+from ..typefunctions import (
+    check_has_inputs,
+    combine_inputs_shape_to_output,
+    eval_output_dtype,
+)
+
+
+class Concatenation(FunctionNode):
+    """Creates a node with a single data output from all the inputs data"""
+
+    def __init__(self, *args, **kwargs):
+        kwargs.setdefault(
+            "missing_input_handler", MissingInputAddOne(output_fmt="result")
+        )
+        super().__init__(*args, **kwargs)
+
+    def _typefunc(self) -> None:
+        """A output takes this function to determine the dtype and shape"""
+        check_has_inputs(self)
+        combine_inputs_shape_to_output(self, slice(None), "result")
+        eval_output_dtype(self, slice(None), "result")
+
+    def _fcn(self, _, inputs, outputs):
+        res = outputs["result"].data
+        res[:] = [inp.data for inp in inputs]  # a list, not a generator, for numpy assignment
+        return res
diff --git a/subtrees/dagflow/dagflow/lib/CovmatrixFromCormatrix.py b/subtrees/dagflow/dagflow/lib/CovmatrixFromCormatrix.py
new file mode 100644
index 0000000000000000000000000000000000000000..61d2834db8af4103e53313866485d490beed813f
--- /dev/null
+++ b/subtrees/dagflow/dagflow/lib/CovmatrixFromCormatrix.py
@@ -0,0 +1,39 @@
+from ..nodes import FunctionNode
+from ..typefunctions import (
+    check_input_square,
+    copy_input_to_output,
+    check_input_dimension,
+    check_inputs_multiplicable_mat
+)
+
+from numpy import multiply
+
+class CovmatrixFromCormatrix(FunctionNode):
+    """Compute covariance matrix from correlation matrix:
+        Vₖₘ=Cₖₘσₖσₘ
+    """
+
+    _mode: str
+    _mark: str = 'C→V'
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        self._add_pair("matrix", "matrix", output_kws={'positional': True})
+        self._add_input("sigma", positional=False)
+
+    def _fcn(self, _, inputs, outputs):
+        inputs.touch()
+        C = inputs["matrix"].data
+        sigma = inputs["sigma"].data
+
+        V = outputs["matrix"].data
+
+        multiply(C, sigma[None,:], out=V)
+        multiply(V, sigma[:,None], out=V)
+
+    def _typefunc(self) -> None:
+        check_input_square(self, 'matrix')
+        check_input_dimension(self, 'sigma', 1)
+        check_inputs_multiplicable_mat(self, 'matrix', 'sigma')
+        copy_input_to_output(self, slice(None), slice(None))
+
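+# The two in-place multiplications are equivalent to the dense numpy
+# expression below (sketch):
+#
+#     from numpy import outer
+#     V = C * outer(sigma, sigma)    # Vₖₘ = Cₖₘ σₖ σₘ
+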
diff --git a/subtrees/dagflow/dagflow/lib/Division.py b/subtrees/dagflow/dagflow/lib/Division.py
new file mode 100644
index 0000000000000000000000000000000000000000..b93aef1359294e9fd692a2a0fea2d05df1364384
--- /dev/null
+++ b/subtrees/dagflow/dagflow/lib/Division.py
@@ -0,0 +1,32 @@
+from numpy import copyto
+
+from ..input_extra import MissingInputAddOne
+from ..nodes import FunctionNode
+from ..typefunctions import (
+    check_has_inputs,
+    eval_output_dtype,
+    copy_input_shape_to_output,
+)
+
+class Division(FunctionNode):
+    """Division of all the inputs together"""
+
+    def __init__(self, *args, **kwargs):
+        kwargs.setdefault(
+            "missing_input_handler", MissingInputAddOne(output_fmt="result")
+        )
+        super().__init__(*args, **kwargs)
+
+    def _fcn(self, _, inputs, outputs):
+        out = outputs[0].data
+        copyto(out, inputs[0].data)  # copyto already copies; no temporary needed
+        if len(inputs) > 1:
+            for input in inputs[1:]:
+                out /= input.data
+        return out
+
+    def _typefunc(self):
+        """A output takes this function to determine the dtype and shape"""
+        check_has_inputs(self)
+        copy_input_shape_to_output(self, 0, "result")
+        eval_output_dtype(self, slice(None), "result")
diff --git a/subtrees/dagflow/dagflow/lib/ElSumSq.py b/subtrees/dagflow/dagflow/lib/ElSumSq.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef46e7f896bcf559781d809985fac5fe5d1b8749
--- /dev/null
+++ b/subtrees/dagflow/dagflow/lib/ElSumSq.py
@@ -0,0 +1,43 @@
+from numpy import ndarray
+from numpy.typing import NDArray
+
+from numba import njit
+from ..input_extra import MissingInputAddOne
+from ..nodes import FunctionNode
+from ..typefunctions import (
+    check_has_inputs,
+    eval_output_dtype,
+    check_inputs_same_dtype,
+    AllPositionals
+)
+
+@njit(cache=True)
+def _sumsq(data: NDArray, out: NDArray):
+    sm = 0.0
+    for v in data:
+        sm+=v*v
+    out[0]+=sm
+
+class ElSumSq(FunctionNode):
+    """Sum of the squared of all the inputs"""
+
+    _buffer: ndarray
+    def __init__(self, *args, **kwargs):
+        kwargs.setdefault(
+            "missing_input_handler", MissingInputAddOne(output_fmt="result")
+        )
+        super().__init__(*args, **kwargs)
+
+    def _fcn(self, _, inputs, outputs):
+        out = outputs["result"].data
+        out[0] = 0.0
+        for input in inputs:
+            _sumsq(input.data, out)
+        return out
+
+    def _typefunc(self) -> None:
+        """A output takes this function to determine the dtype and shape"""
+        check_has_inputs(self)
+        check_inputs_same_dtype(self)
+        eval_output_dtype(self, AllPositionals, "result")
+        self.outputs[0].dd.shape=(1,)
diff --git a/subtrees/dagflow/dagflow/lib/Integrator.py b/subtrees/dagflow/dagflow/lib/Integrator.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa0597af9b9e68ae379457cc3026adfa9e9eb1bb
--- /dev/null
+++ b/subtrees/dagflow/dagflow/lib/Integrator.py
@@ -0,0 +1,172 @@
+from typing import Literal
+
+from numba import njit
+from numpy import floating, integer, issubdtype, multiply, zeros
+from numpy.typing import NDArray
+
+from ..exception import InitializationError, TypeFunctionError
+from ..input_extra import MissingInputAddEach
+from ..nodes import FunctionNode
+from ..typefunctions import (
+    check_has_inputs,
+    check_input_dimension,
+    check_input_dtype,
+    check_input_shape,
+)
+
+
+@njit(cache=True)
+def _integrate1d(data: NDArray, weighted: NDArray, ordersX: NDArray):
+    """
+    Sums up `weighted` within `ordersX` and puts the result into `data`.
+    The 1-dimensional version of the integration.
+    """
+    iprev = 0
+    for i, order in enumerate(ordersX):
+        inext = iprev + order
+        data[i] = weighted[iprev:inext].sum()
+        iprev = inext
+
+
+@njit(cache=True)
+def _integrate2d(
+    data: NDArray, weighted: NDArray, ordersX: NDArray, ordersY: NDArray
+):
+    """
+    Sums up `weighted` within `ordersX` and `ordersY` and then
+    puts the result into `data`. The 2-dimensional version of the integration.
+    """
+    iprev = 0
+    for i, orderx in enumerate(ordersX):
+        inext = iprev + orderx
+        jprev = 0
+        for j, ordery in enumerate(ordersY):
+            jnext = jprev + ordery
+            data[i, j] = weighted[iprev:inext, jprev:jnext].sum()
+            jprev = jnext
+        iprev = inext
+
+
+class Integrator(FunctionNode):
+    """
+    The `Integrator` node performs integration (summation)
+    of every input using the `weights`, `ordersX` and `ordersY` (for `2d` mode) inputs.
+
+    The `Integrator` has two modes: `1d` and `2d`.
+    The `mode` must be set in the constructor, while the `precision=dtype`
+    of the integration is chosen *automatically* in the type function.
+
+    For `2d` integration the `ordersY` input must be connected.
+
+    Note that the `Integrator` preallocates a temporary buffer.
+    For the integration algorithm the `Numba`_ package is used.
+
+    .. _Numba: https://numba.pydata.org
+    """
+
+    def __init__(self, *args, mode: Literal["1d", "2d"], **kwargs):
+        kwargs.setdefault("missing_input_handler", MissingInputAddEach())
+        super().__init__(*args, **kwargs)
+        if mode not in {"1d", "2d"}:
+            raise InitializationError(
+                f"Argument `mode` must be '1d' or '2d', but given '{mode}'!",
+                node=self,
+            )
+        self._mode = mode
+        if self._mode == "2d":
+            self._add_input("ordersY", positional=False)
+        self._add_input("weights", positional=False)
+        self._add_input("ordersX", positional=False)
+        self._functions.update({"1d": self._fcn_1d, "2d": self._fcn_2d})
+
+    @property
+    def mode(self) -> str:
+        return self._mode
+
+    def _typefunc(self) -> None:
+        """
+        The function to determine the dtype and shape.
+        Checks the inputs' dimensions, selects an integration algorithm,
+        and determines the dtype and shape of the outputs
+        """
+        check_has_inputs(self)
+        check_has_inputs(self, ("ordersX", "weights"))
+        input0 = self.inputs[0]
+        ndim = len(input0.dd.shape)
+        if ndim != int(self.mode[:1]):
+            raise TypeFunctionError(
+                f"The Integrator works only with {self.mode} inputs, but one has ndim={ndim}!",
+                node=self,
+            )
+        check_input_dimension(self, (slice(None), "weights"), ndim)
+        check_input_dimension(self, "ordersX", 1)
+        check_input_shape(self, (slice(None), "weights"), input0.dd.shape)
+        ordersX = self.inputs["ordersX"]
+        if not issubdtype(ordersX.dd.dtype, integer):
+            raise TypeFunctionError(
+                "The `ordersX` must be array of integers, but given '{ordersX.dd.dtype}'!",
+                node=self,
+                input=ordersX,
+            )
+        dtype = input0.dd.dtype
+        if not issubdtype(dtype, floating):
+            raise TypeFunctionError(
+                "The Integrator works only within `float` or `double` "
+                f"precision, but given '{dtype}'!",
+                node=self,
+            )
+        check_input_dtype(self, (slice(None), "weights"), dtype)
+        if sum(ordersX.data) != input0.dd.shape[0]:
+            raise TypeFunctionError(
+                "ordersX must be consistent with inputs shape, "
+                f"but given {ordersX.data=} and {input0.dd.shape=}!",
+                node=self,
+                input=ordersX,
+            )
+        if self.mode == "2d":
+            check_has_inputs(self, "ordersY")
+            check_input_dimension(self, "ordersY", 1)
+            ordersY = self.inputs["ordersY"]
+            if not issubdtype(ordersY.dd.dtype, integer):
+                raise TypeFunctionError(
+                    "The `ordersY` must be array of integers, but given '{ordersY.dd.dtype}'!",
+                    node=self,
+                    input=ordersY,
+                )
+            if sum(ordersY.data) != input0.dd.shape[1]:
+                raise TypeFunctionError(
+                    "ordersY must be consistent with inputs shape, "
+                    f"but given {ordersY.data=} and {input0.dd.shape=}!",
+                    node=self,
+                    input=ordersY,
+                )
+        self.fcn = self._functions[self.mode]
+        for output in self.outputs:
+            output.dd.dtype = dtype
+            output.dd.shape = input0.dd.shape
+
+    def _post_allocate(self):
+        """Allocates the `buffer` within `weights`"""
+        weights = self.inputs["weights"]
+        self.__buffer = zeros(shape=weights.dd.shape, dtype=weights.dd.dtype)
+
+    def _fcn_1d(self, _, inputs, outputs):
+        """1d version of integration function"""
+        weights = inputs["weights"].data
+        ordersX = inputs["ordersX"].data
+        for input, output in zip(inputs.iter_data(), outputs.iter_data()):
+            multiply(input, weights, out=self.__buffer)
+            _integrate1d(output, self.__buffer, ordersX)
+        if self.debug:
+            return list(outputs.iter_data())
+
+    def _fcn_2d(self, _, inputs, outputs):
+        """2d version of integration function"""
+        weights = inputs["weights"].data
+        ordersX = inputs["ordersX"].data
+        ordersY = inputs["ordersY"].data
+        for input, output in zip(inputs.iter_data(), outputs.iter_data()):
+            multiply(input, weights, out=self.__buffer)
+            _integrate2d(output, self.__buffer, ordersX, ordersY)
+        if self.debug:
+            return list(outputs.iter_data())
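+
+
+# Example of the orders semantics (sketch, plain numpy): with ordersX=[2, 3]
+# a weighted array of length 5 is reduced to two bins:
+#
+#     from numpy import array, empty
+#     weighted = array([1.0, 2.0, 3.0, 4.0, 5.0])
+#     out = empty(2)
+#     _integrate1d(out, weighted, array([2, 3]))
+#     # out -> [3., 12.]  (1+2 and 3+4+5)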
diff --git a/subtrees/dagflow/dagflow/lib/NormalizeCorrelatedVars.py b/subtrees/dagflow/dagflow/lib/NormalizeCorrelatedVars.py
new file mode 100644
index 0000000000000000000000000000000000000000..05c9126195d88421502391acb83780586eaa77ae
--- /dev/null
+++ b/subtrees/dagflow/dagflow/lib/NormalizeCorrelatedVars.py
@@ -0,0 +1,92 @@
+from ..input_extra import MissingInputAddPair
+from ..nodes import FunctionNode
+from ..typefunctions import (
+    check_has_inputs,
+    check_input_square_or_diag,
+    copy_input_to_output,
+    check_input_dimension,
+    check_inputs_equivalence,
+    check_inputs_multiplicable_mat
+)
+from ..exception import InitializationError
+
+from scipy.linalg import solve_triangular
+from numpy import matmul, subtract, divide, multiply, add
+
+class NormalizeCorrelatedVars(FunctionNode):
+    """Normalize correlated variables or correlate normal variables with linear expression
+
+    If x is a vector of values, μ are the central values and L is a cholesky decomposition
+    of the covariance matrix V=LLáµ€ then
+    z = L⁻¹(x - μ)
+    x = Lz + μ
+    """
+
+    _mode: str
+    def __init__(self, *args, mode='forward', **kwargs):
+        if mode=='forward':
+            self._mark = 'c→u'
+        elif mode=='backward':
+            self._mark = 'u→c'
+        else:
+            raise InitializationError(f'Invalid NormalizeCorrelatedVars mode={mode}. Expect "forward" or "backward"',node=self)
+
+        self._mode = mode
+
+        super().__init__(*args, missing_input_handler=MissingInputAddPair(), **kwargs)
+
+        self._add_input("matrix", positional=False)
+        self._add_input("central", positional=False)
+
+        self._functions.update({
+                "forward_2d":  self._fcn_forward_2d,
+                "backward_2d": self._fcn_backward_2d,
+                "forward_1d":  self._fcn_forward_1d,
+                "backward_1d": self._fcn_backward_1d
+                })
+
+    def _fcn_forward_2d(self, _, inputs, outputs):
+        inputs.touch()
+        L = inputs["matrix"].data
+        central = inputs["central"].data
+        for input, output in zip(inputs.iter_data(), outputs.iter_data()):
+            subtract(input, central, out=output)
+            solve_triangular(L, output, lower=True, overwrite_b=True, check_finite=False)
+
+    def _fcn_backward_2d(self, _, inputs, outputs):
+        inputs.touch()
+        L = inputs["matrix"].data
+        central = inputs["central"].data
+        for input, output in zip(inputs.iter_data(), outputs.iter_data()):
+            matmul(L, input, out=output)
+            add(output, central, out=output)
+
+    def _fcn_forward_1d(self, _, inputs, outputs):
+        inputs.touch()
+        Ldiag = inputs["matrix"].data
+        central = inputs["central"].data
+        for input, output in zip(inputs.iter_data(), outputs.iter_data()):
+            subtract(input, central, out=output)
+            divide(output, Ldiag, out=output)
+
+    def _fcn_backward_1d(self, _, inputs, outputs):
+        inputs.touch()
+        Ldiag = inputs["matrix"].data
+        central = inputs["central"].data
+        for input, output in zip(inputs.iter_data(), outputs.iter_data()):
+            multiply(Ldiag, input, out=output)
+            add(output, central, out=output)
+
+    def _typefunc(self) -> None:
+        check_has_inputs(self)
+        ndim = check_input_square_or_diag(self, 'matrix')
+        check_input_dimension(self, 'central', 1)
+        check_inputs_equivalence(self, ('central', slice(None)))
+        check_inputs_multiplicable_mat(self, 'matrix', slice(None))
+        copy_input_to_output(self, slice(None), slice(None))
+
+        key = f"{self._mode}_{ndim}d"
+        try:
+            self.fcn = self._functions[key]
+        except KeyError as exc:
+            raise InitializationError(
+                f'Invalid mode "{key}". Expected one of: {list(self._functions)}'
+            ) from exc
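+
+
+# The forward/backward pair in plain numpy/scipy terms (sketch; x, mu and a
+# positive-definite V are assumed to be given):
+#
+#     from numpy.linalg import cholesky
+#     from scipy.linalg import solve_triangular
+#     L = cholesky(V)                              # lower L with V = LLᵀ
+#     z = solve_triangular(L, x - mu, lower=True)  # forward:  z = L⁻¹(x-μ)
+#     x_back = L @ z + mu                          # backward: x = Lz + μ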
diff --git a/subtrees/dagflow/dagflow/lib/NormalizeCorrelatedVars2.py b/subtrees/dagflow/dagflow/lib/NormalizeCorrelatedVars2.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d6cb853c751935170f1e15be7bb739517fb558b
--- /dev/null
+++ b/subtrees/dagflow/dagflow/lib/NormalizeCorrelatedVars2.py
@@ -0,0 +1,140 @@
+from ..nodes import FunctionNode
+from ..node import Input, Output
+from ..typefunctions import (
+    check_has_inputs,
+    check_input_square_or_diag,
+    copy_input_to_output,
+    check_input_dimension,
+    check_inputs_equivalence,
+    check_inputs_multiplicable_mat
+)
+
+from scipy.linalg import solve_triangular
+from numpy import matmul, subtract, divide, multiply, add, zeros, copyto
+
+class NormalizeCorrelatedVars2(FunctionNode):
+    """Normalize correlated variables or correlate normal variables with linear expression
+
+    If x is a vector of values, μ are the central values and L is a cholesky decomposition
+    of the covariance matrix V=LLáµ€ then
+    z = L⁻¹(x - μ)
+    x = Lz + μ
+    """
+
+    _mark: str = 'c↔u'
+
+    _input_value: Input
+    _input_normvalue: Input
+    _output_value: Output
+    _output_normvalue: Output
+
+    _ndim: str
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        self._add_input("matrix", positional=False)
+        self._add_input("central", positional=False)
+
+        self._input_value, self._output_value = self._add_pair(
+            "value", "value",
+            input_kws={'allocatable': True},
+            output_kws={'forbid_reallocation': True, 'allocatable': False},
+        )
+        self._input_normvalue, self._output_normvalue = self._add_pair(
+            "normvalue", "normvalue",
+            input_kws={'allocatable': True},
+            output_kws={'forbid_reallocation': True, 'allocatable': False},
+        )
+
+        self._functions.update({
+                "forward_2d":  self._fcn_forward_2d,
+                "forward_1d":  self._fcn_forward_1d,
+                "backward_2d":  self._fcn_backward_2d,
+                "backward_1d":  self._fcn_backward_1d,
+                })
+
+    def _fcn_forward_2d(self, _, inputs, outputs):
+        inputs.touch()
+        L = inputs["matrix"].data
+        central = inputs["central"].data
+
+        input_value = inputs["value"].data
+        output_value = outputs["value"].data
+        output_normvalue = outputs["normvalue"].data
+
+        subtract(input_value, central, out=output_normvalue)
+        solve_triangular(L, output_normvalue, lower=True, overwrite_b=True, check_finite=False)
+        copyto(output_value, input_value)
+
+    def _fcn_backward_2d(self, _, inputs, outputs):
+        inputs.touch()
+        L = inputs["matrix"].data
+        central = inputs["central"].data
+
+        input_normvalue = inputs["normvalue"].data
+        output_normvalue = outputs["normvalue"].data
+        output_value = outputs["value"].data
+
+        matmul(L, input_normvalue, out=output_value)
+        add(output_value, central, out=output_value)
+        copyto(output_normvalue, input_normvalue)
+
+    def _fcn_forward_1d(self, _, inputs, outputs):
+        inputs.touch()
+        Ldiag = inputs["matrix"].data
+        central = inputs["central"].data
+
+        input_value = inputs["value"].data
+        output_value = outputs["value"].data
+        output_normvalue = outputs["normvalue"].data
+
+        subtract(input_value, central, out=output_normvalue)
+        divide(output_normvalue, Ldiag, out=output_normvalue)
+        copyto(output_value, input_value)
+
+    def _fcn_backward_1d(self, _, inputs, outputs):
+        inputs.touch()
+        Ldiag = inputs["matrix"].data
+        central = inputs["central"].data
+
+        input_normvalue = inputs["normvalue"].data
+        output_normvalue = outputs["normvalue"].data
+        output_value = outputs["value"].data
+
+        multiply(Ldiag, input_normvalue, out=output_value)
+        add(output_value, central, out=output_value)
+        copyto(output_normvalue, input_normvalue)
+
+    def _on_taint(self, caller: Input) -> None:
+        """Choose the function to call based on the modified input:
+            - if normvalue is modified, the value should be updated
+            - if value is modified, the normvalue should be updated
+            - if sigma or central is modified, the normvalue should be updated
+
+            TODO:
+                - implement partial taintflag propagation
+                - value should not be tainted on sigma/central modification
+        """
+        if caller is self._input_normvalue:
+            self.fcn = self._functions[f"backward_{self._ndim}"]
+        else:
+            self.fcn = self._functions[f"forward_{self._ndim}"]
+
+    def _typefunc(self) -> None:
+        check_has_inputs(self)
+        ndim = check_input_square_or_diag(self, 'matrix')
+        check_input_dimension(self, 'central', 1)
+        check_inputs_equivalence(self, ('central', slice(None)))
+        check_inputs_multiplicable_mat(self, 'matrix', slice(None))
+        copy_input_to_output(self, slice(None), slice(None))
+
+        self._ndim=f"{ndim}d"
+        self.fcn = self._functions[f"forward_{self._ndim}"]
+
+        self._valuedata = zeros(shape=self._input_value.dd.shape, dtype=self._input_value.dd.dtype)
+        self._normvaluedata = zeros(shape=self._input_normvalue.dd.shape, dtype=self._input_normvalue.dd.dtype)
+        self._input_value.set_own_data(self._valuedata, owns_buffer=False)
+        self._input_normvalue.set_own_data(self._normvaluedata, owns_buffer=False)
+        self._output_value._set_data(self._valuedata, owns_buffer=False, forbid_reallocation=True)
+        self._output_normvalue._set_data(self._normvaluedata, owns_buffer=False, forbid_reallocation=True)
diff --git a/subtrees/dagflow/dagflow/lib/Product.py b/subtrees/dagflow/dagflow/lib/Product.py
new file mode 100644
index 0000000000000000000000000000000000000000..967ed1641a4444d71fa37671e01a3bb1f1ece264
--- /dev/null
+++ b/subtrees/dagflow/dagflow/lib/Product.py
@@ -0,0 +1,35 @@
+from numpy import copyto
+
+from ..input_extra import MissingInputAddOne
+from ..nodes import FunctionNode
+from ..typefunctions import (
+    check_has_inputs,
+    eval_output_dtype,
+    copy_input_shape_to_output,
+    check_inputs_equivalence,
+    AllPositionals
+)
+
+class Product(FunctionNode):
+    """Product of all the inputs together"""
+
+    def __init__(self, *args, **kwargs):
+        kwargs.setdefault(
+            "missing_input_handler", MissingInputAddOne(output_fmt="result")
+        )
+        super().__init__(*args, **kwargs)
+
+    def _fcn(self, _, inputs, outputs):
+        out = outputs["result"].data
+        copyto(out, inputs[0].data)
+        if len(inputs) > 1:
+            for input in inputs[1:]:
+                out *= input.data
+        return out
+
+    def _typefunc(self) -> None:
+        """A output takes this function to determine the dtype and shape"""
+        check_has_inputs(self)
+        copy_input_shape_to_output(self, 0, "result")
+        check_inputs_equivalence(self)
+        eval_output_dtype(self, AllPositionals, "result")
diff --git a/subtrees/dagflow/dagflow/lib/Sum.py b/subtrees/dagflow/dagflow/lib/Sum.py
new file mode 100644
index 0000000000000000000000000000000000000000..4735728a501310c800de304e8952e01c84e03c8f
--- /dev/null
+++ b/subtrees/dagflow/dagflow/lib/Sum.py
@@ -0,0 +1,35 @@
+from numpy import copyto, add
+
+from ..input_extra import MissingInputAddOne
+from ..nodes import FunctionNode
+from ..typefunctions import (
+    check_has_inputs,
+    eval_output_dtype,
+    copy_input_shape_to_output,
+    check_inputs_equivalence,
+    AllPositionals
+)
+
+class Sum(FunctionNode):
+    """Sum of all the inputs together"""
+
+    def __init__(self, *args, **kwargs):
+        kwargs.setdefault(
+            "missing_input_handler", MissingInputAddOne(output_fmt="result")
+        )
+        super().__init__(*args, **kwargs)
+
+    def _fcn(self, _, inputs, outputs):
+        out = outputs["result"].data
+        copyto(out, inputs[0].data)
+        if len(inputs) > 1:
+            for input in inputs[1:]:
+                add(out, input.data, out=out)
+        return out
+
+    def _typefunc(self) -> None:
+        """A output takes this function to determine the dtype and shape"""
+        check_has_inputs(self)
+        copy_input_shape_to_output(self, 0, "result")
+        check_inputs_equivalence(self)
+        eval_output_dtype(self, AllPositionals, "result")
diff --git a/subtrees/dagflow/dagflow/lib/SumMatOrDiag.py b/subtrees/dagflow/dagflow/lib/SumMatOrDiag.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd3ed648c87fb69615c96ff1391b3936e6449bee
--- /dev/null
+++ b/subtrees/dagflow/dagflow/lib/SumMatOrDiag.py
@@ -0,0 +1,81 @@
+from numpy import copyto, add
+from numpy.typing import NDArray
+from numba import njit
+
+from ..input_extra import MissingInputAddOne
+from ..nodes import FunctionNode
+from ..typefunctions import (
+    check_has_inputs,
+    eval_output_dtype,
+    copy_input_shape_to_output,
+    check_inputs_square_or_diag,
+    check_inputs_same_dtype,
+    AllPositionals
+)
+
+@njit(cache=True)
+def _settodiag1(inarray: NDArray, outmatrix: NDArray):
+    for i in range(inarray.size):
+        outmatrix[i, i] = inarray[i]
+
+@njit(cache=True)
+def _addtodiag(inarray: NDArray, outmatrix: NDArray):
+    for i in range(inarray.size):
+        outmatrix[i, i] += inarray[i]
+
+class SumMatOrDiag(FunctionNode):
+    """Sum of all the inputs together. Inputs are square matrices or diagonals of square matrices"""
+
+    _ndim: int = 0
+    def __init__(self, *args, **kwargs):
+        kwargs.setdefault(
+            "missing_input_handler", MissingInputAddOne(output_fmt="result")
+        )
+        super().__init__(*args, **kwargs)
+
+        self._functions.update({
+                "2d":  self._fcn2d,
+                "1d":  self._fcn1d,
+                })
+
+    def _fcn2d(self, _, inputs, outputs):
+        out = outputs["result"].data
+        inp = inputs[0].data
+        if len(inp.shape)==1:
+            _settodiag1(inp, out)
+        else:
+            out[:] = inp
+        if len(inputs) > 1:
+            for input in inputs[1:]:
+                if len(input.dd.shape)==1:
+                    _addtodiag(input.data, out)
+                else:
+                    add(input.data, out, out=out)
+        return out
+
+    def _fcn1d(self, _, inputs, outputs):
+        out = outputs["result"].data
+        copyto(out, inputs[0].data)
+        if len(inputs) > 1:
+            for input in inputs[1:]:
+                add(out, input.data, out=out)
+        return out
+
+    def _typefunc(self) -> None:
+        """A output takes this function to determine the dtype and shape"""
+        check_has_inputs(self)
+        copy_input_shape_to_output(self, 0, "result")
+        self._ndim = check_inputs_square_or_diag(self)
+        check_inputs_same_dtype(self)
+        eval_output_dtype(self, AllPositionals, "result")
+
+        size = self.inputs[0].dd.shape[0]
+        output = self.outputs[0]
+        if self._ndim==2:
+            output.dd.shape = size, size
+        elif self._ndim==1:
+            output.dd.shape = size,
+        else:
+            assert False
+
+        self.fcn = self._functions[f"{self._ndim}d"]
diff --git a/subtrees/dagflow/dagflow/lib/SumSq.py b/subtrees/dagflow/dagflow/lib/SumSq.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b9ba01e6e5fc9fdea9accbc69548ccc6cdb0a08
--- /dev/null
+++ b/subtrees/dagflow/dagflow/lib/SumSq.py
@@ -0,0 +1,40 @@
+from numpy import add, square, ndarray, empty_like
+
+from ..input_extra import MissingInputAddOne
+from ..nodes import FunctionNode
+from ..typefunctions import (
+    check_has_inputs,
+    eval_output_dtype,
+    copy_input_shape_to_output,
+    check_inputs_equivalence,
+    AllPositionals
+)
+
+class SumSq(FunctionNode):
+    """Sum of the squared of all the inputs"""
+
+    _buffer: ndarray
+    def __init__(self, *args, **kwargs):
+        kwargs.setdefault(
+            "missing_input_handler", MissingInputAddOne(output_fmt="result")
+        )
+        super().__init__(*args, **kwargs)
+
+    def _fcn(self, _, inputs, outputs):
+        out = outputs["result"].data
+        square(inputs[0].data, out=out)
+        if len(inputs) > 1:
+            for input in inputs[1:]:
+                square(input.data, out=self._buffer)
+                add(self._buffer, out, out=out)
+        return out
+
+    def _typefunc(self) -> None:
+        """A output takes this function to determine the dtype and shape"""
+        check_has_inputs(self)
+        copy_input_shape_to_output(self, 0, "result")
+        check_inputs_equivalence(self)
+        eval_output_dtype(self, AllPositionals, "result")
+
+    def _post_allocate(self) -> None:
+        self._buffer = empty_like(self.inputs[0].get_data_unsafe())
diff --git a/subtrees/dagflow/dagflow/lib/View.py b/subtrees/dagflow/dagflow/lib/View.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbb8425a976aca7cd971fec210fa2f3bdd474227
--- /dev/null
+++ b/subtrees/dagflow/dagflow/lib/View.py
@@ -0,0 +1,32 @@
+from ..nodes import FunctionNode
+from ..typefunctions import (
+    copy_input_dtype_to_output,
+    copy_input_shape_to_output,
+)
+
+class View(FunctionNode):
+    """Creates a node with a single data output which is a view on the input"""
+
+    def __init__(self, name, outname="view", **kwargs):
+        super().__init__(name, **kwargs)
+        output = self._add_output(
+            outname, allocatable=False, forbid_reallocation=True
+        )
+        self._add_input("input", child_output=output)
+
+    def _fcn(self, _, inputs, outputs):
+        return self.inputs[0].data
+
+    def _typefunc(self) -> None:
+        """A output takes this function to determine the dtype and shape"""
+        copy_input_dtype_to_output(self, 0, 0)
+        copy_input_shape_to_output(self, 0, 0)
+
+    def _post_allocate(self) -> None:
+        input = self.inputs[0]
+        output = self.outputs[0]
+        output._set_data(
+            input.parent_output._data,
+            owns_buffer=False,
+            forbid_reallocation=True,
+        )
diff --git a/subtrees/dagflow/dagflow/lib/ViewConcat.py b/subtrees/dagflow/dagflow/lib/ViewConcat.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ec8326916ca4bcb9ca278d61b14054c0a18add6
--- /dev/null
+++ b/subtrees/dagflow/dagflow/lib/ViewConcat.py
@@ -0,0 +1,57 @@
+from typing import List, Optional
+
+from numpy import zeros
+
+from ..nodes import FunctionNode
+from ..output import Output
+from ..typefunctions import check_input_dimension, check_input_dtype
+
+
+class ViewConcat(FunctionNode):
+    """Creates a node with a single data output which is a concatenated memory of the inputs"""
+
+    _output: Output
+    _offsets: List[int]
+
+    def __init__(self, name, outname="concat", **kwargs):
+        super().__init__(name, **kwargs)
+        self._output = self._add_output(
+            outname, allocatable=False, forbid_reallocation=True
+        )
+        self._offsets = []
+
+    def missing_input_handler(
+        self, idx: Optional[int] = None, scope: Optional[int] = None
+    ):
+        icount = len(self.inputs)
+        idx = idx if idx is not None else icount
+        iname = "input_{:02d}".format(idx)
+
+        kwargs = {"child_output": self._output}
+        return self._add_input(iname, allocatable=True, **kwargs)
+
+    def _fcn(self, _, inputs, outputs):
+        self.inputs.touch()
+        return self._output._data
+
+    def _typefunc(self) -> None:
+        """A output takes this function to determine the dtype and shape"""
+        size = 0
+        self._offsets = []
+        cdtype = self.inputs[0].dd.dtype
+        check_input_dtype(self, slice(None), cdtype)
+        check_input_dimension(self, slice(None), 1)
+        for input in self.inputs:
+            self._offsets.append(size)
+            size += input.dd.shape[0]
+
+        output = self.outputs[0]
+        output.dd.dtype = cdtype
+        output.dd.shape = (size,)
+        data = zeros(shape=size, dtype=cdtype)
+        output._set_data(data, owns_buffer=True)
+
+        for offset, input in zip(self._offsets, self.inputs):
+            size = input.dd.shape[0]
+            idata = data[offset : offset + size]
+            input.set_own_data(idata, owns_buffer=False)
diff --git a/subtrees/dagflow/dagflow/lib/WeightedSum.py b/subtrees/dagflow/dagflow/lib/WeightedSum.py
new file mode 100644
index 0000000000000000000000000000000000000000..dcf929a11c289f5e083965cffd79b28e975ba932
--- /dev/null
+++ b/subtrees/dagflow/dagflow/lib/WeightedSum.py
@@ -0,0 +1,73 @@
+from numpy import copyto
+
+from ..exception import TypeFunctionError
+from ..input_extra import MissingInputAddOne
+from ..nodes import FunctionNode
+from ..typefunctions import (
+    check_has_inputs,
+    eval_output_dtype,
+    copy_input_shape_to_output,
+)
+
+
+class WeightedSum(FunctionNode):
+    """Weighted sum of all the inputs together"""
+
+    def __init__(self, *args, **kwargs):
+        kwargs.setdefault(
+            "missing_input_handler", MissingInputAddOne(output_fmt="result")
+        )
+        super().__init__(*args, **kwargs)
+        self._add_input("weight", positional=False)
+        self._functions.update(
+            {"number": self._fcn_number, "iterable": self._fcn_iterable}
+        )
+
+    def _typefunc(self) -> None:
+        """A output takes this function to determine the dtype and shape"""
+        check_has_inputs(self)
+        weight = self.inputs["weight"]
+        shape = weight.dd.shape[0]
+        leninp = len(self.inputs)
+        if shape == 0:
+            raise TypeFunctionError(
+                "Cannot use WeightedSum with empty 'weight'!"
+            )
+        elif shape == 1:
+            self.fcn = self._functions["number"]
+        elif shape == leninp:
+            self.fcn = self._functions["iterable"]
+        else:
+            raise TypeFunctionError(
+                f"The number of weights (={shape}) must coincide "
+                f"with the number of inputs (={leninp})!"
+            )
+        copy_input_shape_to_output(self, 0, "result")
+        eval_output_dtype(self, slice(None), "result")
+
+    def _fcn_number(self, _, inputs, outputs):
+        """
+        The function for one weight for all inputs:
+        `len(weight) == 1`
+        """
+        out = outputs[0].data
+        weight = self.inputs["weight"].data
+        copyto(out, inputs[0].data)  # copyto already copies; no temporary needed
+        if len(inputs) > 1:
+            for input in inputs[1:]:
+                out += input.data
+        out *= weight
+        return out
+
+    def _fcn_iterable(self, _, inputs, outputs):
+        """
+        The function for one weight for every input:
+        `len(weight) == len(inputs)`
+        """
+        out = outputs[0].data
+        weights = self.inputs["weight"].data
+        copyto(out, inputs[0].data * weights[0])
+        if len(inputs) > 1:
+            for input, weight in zip(inputs[1:], weights[1:]):
+                out += input.data * weight
+        return out
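+
+
+# The two branches in formulas (sketch):
+#
+#     len(weight) == 1:           result = w * (x₁ + x₂ + ...)    (_fcn_number)
+#     len(weight) == len(inputs): result = w₁x₁ + w₂x₂ + ...      (_fcn_iterable)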
diff --git a/subtrees/dagflow/dagflow/lib/__init__.py b/subtrees/dagflow/dagflow/lib/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd731c0c80051ce8b37281492f01cfcacbd61dcb
--- /dev/null
+++ b/subtrees/dagflow/dagflow/lib/__init__.py
@@ -0,0 +1,6 @@
+from .Array import Array
+from .Sum import Sum
+from .Product import Product
+from .Division import Division
+from .Concatenation import Concatenation
+from .WeightedSum import WeightedSum
diff --git a/subtrees/dagflow/dagflow/logger.py b/subtrees/dagflow/dagflow/logger.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d9e91a0186590e9c340ce1c1713eed2fbf004de
--- /dev/null
+++ b/subtrees/dagflow/dagflow/logger.py
@@ -0,0 +1,42 @@
+from logging import (
+    DEBUG,
+    INFO,
+    FileHandler,
+    Formatter,
+    Logger,
+    StreamHandler,
+    getLogger,
+)
+from typing import Optional
+
+# To avoid a creation of duplicates save an instance
+_loggers = {}
+
+
+def get_logger(
+    filename: Optional[str] = None, debug: bool = False, **kwargs
+) -> Logger:
+    name = kwargs.pop("name", "PyGNA")
+    if logger := _loggers.get(name):
+        return logger
+    logger = getLogger("PyGNA")
+    formatstr = (
+        fmtstr
+        if (fmtstr := kwargs.pop("formatstr", False))
+        else "%(asctime)s - %(levelname)s - %(message)s"
+    )
+    level = DEBUG if debug else INFO
+    logger.setLevel(level)
+    formatter = Formatter(formatstr)
+    if filename:
+        fh = FileHandler(filename)
+        fh.setLevel(level)
+        fh.setFormatter(formatter)
+        logger.addHandler(fh)
+    if kwargs.pop("console", True):
+        ch = StreamHandler()
+        ch.setLevel(level)
+        ch.setFormatter(formatter)
+        logger.addHandler(ch)
+    _loggers[name] = logger
+    return logger
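+
+
+# Example (sketch):
+#
+#     logger = get_logger(filename="dag.log", debug=True)
+#     logger.debug("graph closed")   # goes to the console and to dag.log
+#     get_logger() is logger         # True: instances are cached by name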
diff --git a/subtrees/dagflow/dagflow/membernode.py b/subtrees/dagflow/dagflow/membernode.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0705fe3dd43f5ab352fb5acf54ce704dfed2c7b
--- /dev/null
+++ b/subtrees/dagflow/dagflow/membernode.py
@@ -0,0 +1,106 @@
+from .graph import Graph
+from .node import Node
+
+from typing import Optional
+
+class MemberNodesHolder:
+    _graph: Optional[Graph] = None
+
+    def __init__(self, graph: Optional[Graph] = None):
+        self.graph = graph
+        for key in dir(self):
+            val = getattr(self, key)
+            if isinstance(val, Node):
+                val.obj = self
+                val.graph = self._graph
+
+    @property
+    def graph(self):
+        return self._graph
+
+    @graph.setter
+    def graph(self, graph):
+        if self._graph:
+            raise ValueError("Graph is already set")
+        if graph is True:
+            self._graph = Graph()
+        elif isinstance(graph, str):
+            self._graph = Graph(label=graph)
+        elif isinstance(graph, dict):
+            self._graph = Graph(**graph)
+        elif graph:
+            self._graph = graph
+
+
+class MemberNode(Node):
+    """Function signature: fcn(master, node, inputs, outputs)"""
+
+    _obj = None
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    def _eval(self):
+        self._being_evaluated = True
+        ret = self._fcn(self._obj, self, self.inputs, self.outputs)
+        self._being_evaluated = False
+        return ret
+
+    @property
+    def obj(self):
+        return self._obj
+
+    @obj.setter
+    def obj(self, obj):
+        self._obj = obj
+
+    def _stash_fcn(self):
+        prev_fcn = self._fcn
+        self._fcn_chain.append(prev_fcn)
+        return lambda node, inputs, outputs: prev_fcn(
+            node._obj, node, inputs, outputs
+        )
+
+    def _make_wrap(self, prev_fcn, wrap_fcn):
+        def wrapped_fcn(master, node, inputs, outputs):
+            wrap_fcn(prev_fcn, node, inputs, outputs)
+
+        return wrapped_fcn
+
+
+class StaticMemberNode(Node):
+    """Function signature: fcn(self)"""
+
+    _obj = None
+    _touch_inputs = True
+
+    def __init__(self, *args, **kwargs):
+        self._touch_inputs = kwargs.pop("touch_inputs", True)
+        super().__init__(*args, **kwargs)
+
+    def _eval(self):
+        self._being_evaluated = True
+        if self._touch_inputs:
+            self.inputs.touch()
+        ret = self._fcn(self._obj)
+        self._being_evaluated = False
+        return ret
+
+    @property
+    def obj(self):
+        return self._obj
+
+    @obj.setter
+    def obj(self, obj):
+        self._obj = obj
+
+    def _stash_fcn(self):
+        prev_fcn = self._fcn
+        self._fcn_chain.append(prev_fcn)
+        return lambda node, inputs, outputs: prev_fcn(node._obj)
+
+    def _make_wrap(self, prev_fcn, wrap_fcn):
+        def wrapped_fcn(master):
+            wrap_fcn(prev_fcn, self, self.inputs, self.outputs)
+
+        return wrapped_fcn
diff --git a/subtrees/dagflow/dagflow/node.py b/subtrees/dagflow/dagflow/node.py
new file mode 100644
index 0000000000000000000000000000000000000000..322491035a297f91768b25d10088cec999e93c54
--- /dev/null
+++ b/subtrees/dagflow/dagflow/node.py
@@ -0,0 +1,512 @@
+from .exception import (
+    AllocationError,
+    CriticalError,
+    ClosedGraphError,
+    ClosingError,
+    OpeningError,
+    DagflowError,
+    ReconnectionError,
+    UnclosedGraphError,
+    InitializationError,
+)
+from .input import Input
+from .legs import Legs
+from .logger import Logger, get_logger
+from .output import Output
+from .iter import IsIterable
+from .types import GraphT
+from typing import Optional, List, Dict, Union, Callable, Any, Tuple
+
+class Node(Legs):
+    _name: str
+    _mark: Optional[str] = None
+    _label: Dict[str, str]
+    _graph: Optional[GraphT] = None
+    _fcn: Optional[Callable] = None
+    _fcn_chain = None
+    _exception: Optional[str] = None
+
+    # Taintflag and status
+    _tainted: bool = True
+    _frozen: bool = False
+    _frozen_tainted: bool = False
+    _invalid: bool = False
+    _closed: bool = False
+    _allocated: bool = False
+    _being_evaluated: bool = False
+
+    _types_tainted: bool = True
+
+    # Options
+    _debug: bool = False
+    _auto_freeze: bool = False
+    _immediate: bool = False
+    # _always_tainted: bool = False
+
+    def __init__(
+        self, name,
+        *,
+        label: Union[str, dict, None]=None,
+        graph: Optional[GraphT] = None,
+        fcn: Optional[Callable] = None,
+        typefunc: Optional[Callable] = None,
+        debug: Optional[bool] = None,
+        logger: Optional[Any] = None,
+        missing_input_handler: Optional[Callable] = None,
+        immediate: bool = False,
+        auto_freeze: bool = False,
+        frozen: bool = False,
+        **kwargs
+    ):
+        super().__init__(missing_input_handler=missing_input_handler)
+        self._name = name
+        if fcn is not None:
+            self._fcn = fcn
+        if typefunc is False:
+            self._typefunc = lambda: None
+        elif typefunc is not None:
+            self._typefunc = typefunc
+
+        self._fcn_chain = []
+        if graph is None:
+            from .graph import Graph
+            self.graph = Graph.current()
+        else:
+            self.graph = graph
+
+        if debug is None and self.graph is not None:
+            self._debug = self.graph.debug
+        else:
+            self._debug = bool(debug)
+
+        if isinstance(label, str):
+            self._label = {'text': label}
+        elif isinstance(label, dict):
+            self._label = label
+        else:
+            self._label = {'text': name}
+
+        if logger is not None:
+            self._logger = logger
+        elif self.graph is not None:
+            self._logger = self.graph.logger
+        else:
+            self._logger = get_logger()
+
+        self._immediate = immediate
+        self._auto_freeze = auto_freeze
+        self._frozen = frozen
+
+        if kwargs:
+            raise InitializationError(f"Unparsed arguments: {kwargs}!")
+
+    def __str__(self):
+        return f"{{{self.name}}} {super().__str__()}"
+
+    #
+    # Properties
+    #
+    @property
+    def name(self):
+        return self._name
+
+    @name.setter
+    def name(self, name):
+        self._name = name
+
+    @property
+    def mark(self):
+        return self._mark
+
+    @property
+    def exception(self):
+        return self._exception
+
+    @property
+    def logger(self) -> Logger:
+        return self._logger
+
+    @property
+    def tainted(self) -> bool:
+        return self._tainted
+
+    @property
+    def types_tainted(self) -> bool:
+        return self._types_tainted
+
+    @property
+    def frozen_tainted(self) -> bool:
+        return self._frozen_tainted
+
+    @property
+    def frozen(self) -> bool:
+        return self._frozen
+
+    @property
+    def auto_freeze(self) -> bool:
+        return self._auto_freeze
+
+    # @property
+    # def always_tainted(self) -> bool:
+    # return self._always_tainted
+
+    @property
+    def closed(self) -> bool:
+        return self._closed
+
+    @property
+    def debug(self) -> bool:
+        return self._debug
+
+    @property
+    def being_evaluated(self) -> bool:
+        return self._being_evaluated
+
+    @property
+    def allocated(self) -> bool:
+        return self._allocated
+
+    @property
+    def immediate(self) -> bool:
+        return self._immediate
+
+    @property
+    def invalid(self) -> bool:
+        return self._invalid
+
+    @invalid.setter
+    def invalid(self, invalid) -> None:
+        if invalid:
+            self.invalidate_self()
+        elif any(input.invalid for input in self.inputs.iter_all()):
+            return
+        else:
+            self.invalidate_self(False)
+        for output in self.outputs:
+            output.invalid = invalid
+
+    def invalidate_self(self, invalid=True) -> None:
+        self._invalid = bool(invalid)
+        self._frozen_tainted = False
+        self._frozen = False
+        self._tainted = True
+
+    def invalidate_children(self) -> None:
+        for output in self.outputs:
+            output.invalid = True
+
+    def invalidate_parents(self) -> None:
+        for input in self.inputs.iter_all():
+            node = input.parent_node
+            node.invalidate_self()
+            node.invalidate_parents()
+
+    @property
+    def graph(self):
+        return self._graph
+
+    @graph.setter
+    def graph(self, graph):
+        if graph is None:
+            return
+        if self._graph is not None:
+            raise DagflowError("Graph is already defined")
+        self._graph = graph
+        self._graph.register_node(self)
+
+    #
+    # Methods
+    #
+    def __call__(self, name, child_output: Optional[Output]=None):
+        self.logger.debug(f"Node '{self.name}': Get input '{name}'")
+        inp = self.inputs.get(name, None)
+        if inp is None:
+            if self.closed:
+                raise ClosedGraphError(node=self)
+            return self._add_input(name, child_output=child_output)
+        elif inp.connected and (output := inp.parent_output):
+            raise ReconnectionError(input=inp, node=self, output=output)
+        return inp
+
+    def label(self, source):
+        # if self._label:
+        #     kwargs.setdefault("name", self._name)
+        #     return self._label.format(*args, **kwargs)
+        label = self._label.get(source, None)
+        if label is None:
+            return self._label['text']
+
+        return label
+
+    def add_input(self, name, **kwargs) -> Union[Input, Tuple[Input]]:
+        if not self.closed:
+            return self._add_input(name, **kwargs)
+        raise ClosedGraphError(node=self)
+
+    def _add_input(self, name, **kwargs) -> Union[Input, Tuple[Input]]:
+        if IsIterable(name):
+            return tuple(self._add_input(n, **kwargs) for n in name)
+        self.logger.debug(f"Node '{self.name}': Add input '{name}'")
+        if name in self.inputs:
+            raise ReconnectionError(input=name, node=self)
+        positional = kwargs.pop("positional", True)
+        keyword = kwargs.pop("keyword", True)
+        inp = Input(name, self, **kwargs)
+        self.inputs.add(inp, positional=positional, keyword=keyword)
+
+        if self._graph:
+            self._graph._add_input(inp)
+        return inp
+
+    def add_output(self, name, **kwargs) -> Union[Output, Tuple[Output]]:
+        if not self.closed:
+            return self._add_output(name, **kwargs)
+        raise ClosedGraphError(node=self)
+
+    def _add_output(self, name, *, keyword: bool=True, positional: bool=True, **kwargs) -> Union[Output, Tuple[Output]]:
+        if IsIterable(name):
+            return tuple(
+                self._add_output(n, keyword=keyword, positional=positional, **kwargs)
+                for n in name
+            )
+        self.logger.debug(f"Node '{self.name}': Add output '{name}'")
+        if isinstance(name, Output):
+            if name.name in self.outputs or name.node:
+                raise ReconnectionError(output=name, node=self)
+            name._node = self
+            return self.__add_output(
+                name,
+                positional=positional,
+                keyword=keyword
+            )
+        if name in self.outputs:
+            raise ReconnectionError(output=name, node=self)
+
+        return self.__add_output(
+            Output(name, self, **kwargs),
+            positional=positional,
+            keyword=keyword
+        )
+
+    def __add_output(self, out, positional: bool = True, keyword: bool = True) -> Union[Output, Tuple[Output]]:
+        self.outputs.add(out, positional=positional, keyword=keyword)
+        if self._graph:
+            self._graph._add_output(out)
+        return out
+
+    def add_pair(self, iname: str, oname: str, **kwargs) -> Tuple[Input, Output]:
+        if not self.closed:
+            return self._add_pair(iname, oname, **kwargs)
+        raise ClosedGraphError(node=self)
+
+    def _add_pair(self, iname: str, oname: str, input_kws: Optional[dict]=None, output_kws: Optional[dict]=None) -> Tuple[Input, Output]:
+        input_kws = input_kws or {}
+        output_kws = output_kws or {}
+        output = self._add_output(oname, **output_kws)
+        input = self._add_input(iname, child_output=output, **input_kws)
+        return input, output
+
+    def _wrap_fcn(self, wrap_fcn, *other_fcns):
+        prev_fcn = self._stash_fcn()
+        self._fcn = self._make_wrap(prev_fcn, wrap_fcn)
+        if other_fcns:
+            self._wrap_fcn(*other_fcns)
+
+    def _unwrap_fcn(self):
+        if not self._fcn_chain:
+            raise DagflowError("Unable to unwrap bare function")
+        self._fcn = self._fcn_chain.pop()
+
+    def _stash_fcn(self):
+        raise DagflowError(
+            "Unimplemented method: use FunctionNode, StaticNode or MemberNode"
+        )
+
+    def _make_wrap(self, prev_fcn, wrap_fcn):
+        raise DagflowError(
+            "Unimplemented method: use FunctionNode, StaticNode or MemberNode"
+        )
+
+    def touch(self, force=False):
+        if self._frozen:
+            return
+        if not self._tainted and not force:
+            return
+        self.logger.debug(f"Node '{self.name}': Touch")
+        ret = self.eval()
+        self._tainted = False  # self._always_tainted
+        if self._auto_freeze:
+            self._frozen = True
+        return ret
+
+    def _eval(self):
+        raise CriticalError(
+            "Unimplemented method: use FunctionNode, StaticNode or MemberNode"
+        )
+
+    def eval(self):
+        if not self._closed:
+            raise UnclosedGraphError("Cannot evaluate the node!", node=self)
+        self._being_evaluated = True
+        try:
+            ret = self._eval()
+            self.logger.debug(f"Node '{self.name}': Evaluated return={ret}")
+        finally:
+            self._being_evaluated = False
+        return ret
+
+    def freeze(self):
+        if self._frozen:
+            return
+        self.logger.debug(f"Node '{self.name}': Freeze")
+        if self._tainted:
+            raise CriticalError("Unable to freeze tainted node!", node=self)
+        self._frozen = True
+        self._frozen_tainted = False
+
+    def unfreeze(self, force: bool = False):
+        if not self._frozen and not force:
+            return
+        self.logger.debug(f"Node '{self.name}': Unfreeze")
+        self._frozen = False
+        if self._frozen_tainted:
+            self._frozen_tainted = False
+            self.taint(force=True)
+
+    def taint(self, *, caller: Optional[Input] = None, force: bool = False):
+        self.logger.debug(f"Node '{self.name}': Taint...")
+        if self._tainted and not force:
+            return
+        if self._frozen:
+            self._frozen_tainted = True
+            return
+        self._tainted = True
+        self._on_taint(caller)
+        ret = self.touch() if self._immediate else None
+        self.taint_children(force=force)
+        return ret
+
+    def taint_children(self, **kwargs):
+        for output in self.outputs:
+            output.taint_children(**kwargs)
+
+    def taint_type(self, force: bool = False):
+        self.logger.debug(f"Node '{self.name}': Taint types...")
+        if self._closed:
+            raise ClosedGraphError("Unable to taint type", node=self)
+        if self._types_tainted and not force:
+            return
+        self._types_tainted = True
+        self._tainted = True
+        self._frozen = False
+        for output in self.outputs:
+            output.taint_children_type(force)
+
+    def print(self):
+        print(
+            f"Node {self._name}: →[{len(self.inputs)}],[{len(self.outputs)}]→"
+        )
+        for i, input in enumerate(self.inputs):
+            print("  ", i, input)
+        for i, output in enumerate(self.outputs):
+            print("  ", i, output)
+
+    def _typefunc(self) -> bool:
+        """A output takes this function to determine the dtype and shape"""
+        raise DagflowError(
+            "Unimplemented method: the method must be overridden!"
+        )
+
+    def _fcn(self, _, inputs, outputs):
+        pass
+
+    def _on_taint(self, caller: Input):
+        """A node method to be called on taint"""
+        pass
+
+    def _post_allocate(self):
+        pass
+
+    def update_types(self, recursive: bool = True) -> bool:
+        if not self._types_tainted:
+            return True
+        if recursive:
+            self.logger.debug(f"Node '{self.name}': Trigger recursive update types...")
+            for input in self.inputs.iter_all():
+                input.parent_node.update_types(recursive)
+        self.logger.debug(f"Node '{self.name}': Update types...")
+        self._typefunc()
+        self._types_tainted = False
+        return True
+
+    def allocate(self, recursive: bool = True):
+        if self._allocated:
+            return True
+        if recursive:
+            self.logger.debug(f"Node '{self.name}': Trigger recursive memory allocation...")
+            if not all(
+                input.parent_node.allocate(recursive) for input in self.inputs.iter_all()
+            ):
+                return False
+        self.logger.debug(f"Node '{self.name}': Allocate memory on inputs")
+        if not self.inputs.allocate():
+            raise AllocationError(
+                "Cannot allocate memory for inputs!", node=self
+            )
+        self.logger.debug(f"Node '{self.name}': Allocate memory on outputs")
+        if not self.outputs.allocate():
+            raise AllocationError(
+                "Cannot allocate memory for outputs!", node=self
+            )
+        self.logger.debug(f"Node '{self.name}': Post allocate")
+        self._post_allocate()
+        self._allocated = True
+        return True
+
+    def close(self, recursive: bool = True, together: List['Node'] = []) -> bool:
+        # Caution: the `together` list must not be modified!
+
+        if self._closed:
+            return True
+        if self.invalid:
+            raise ClosingError("Cannot close an invalid node!", node=self)
+        self.logger.debug(f"Node '{self.name}': Trigger recursive close")
+        for node in [self]+together:
+            node.update_types(recursive=recursive)
+        for node in [self]+together:
+            node.allocate(recursive=recursive)
+        if recursive and not all(
+            input.parent_node.close(recursive) for input in self.inputs.iter_all()
+        ):
+            return False
+        for node in together:
+            if not node.close(recursive=recursive):
+                return False
+        self.logger.debug(f"Node '{self.name}': Close")
+        self._closed = self._allocated
+        if not self._closed:
+            raise ClosingError(node=self)
+        return self._closed
+
+    def open(self, force: bool = False) -> bool:
+        if not self._closed and not force:
+            return True
+        self.logger.debug(f"Node '{self.name}': Open")
+        if not all(
+            input.node.open(force)
+            for output in self.outputs
+            for input in output.child_inputs
+        ):
+            raise OpeningError(node=self)
+        self.unfreeze()
+        self.taint()
+        self._closed = False
+        return not self._closed
+
+    #
+    # Accessors
+    #
+    def get_data(self, key=0):
+        return self.outputs[key].data
+
+    def get_input_data(self, key):
+        return self.inputs[key].data()
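+
+
+# A hedged usage sketch (not part of the original file; MyNode is a
+# placeholder Node subclass): calling an open node returns an existing
+# input or creates a missing one, and add_pair creates a linked pair.
+#
+#     node = MyNode("demo")
+#     inp = node("x")                         # get-or-create input 'x'
+#     inp2, out = node.add_pair("in", "out")  # input bound to child output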
diff --git a/subtrees/dagflow/dagflow/node_group.py b/subtrees/dagflow/dagflow/node_group.py
new file mode 100644
index 0000000000000000000000000000000000000000..1993bf983865db6a59d50f8ed3ce9530ce950f7e
--- /dev/null
+++ b/subtrees/dagflow/dagflow/node_group.py
@@ -0,0 +1,44 @@
+from .shift import lshift
+
+
+class NodeGroup:
+    _nodes: list = None
+
+    def __init__(self, *args):
+        self._nodes = list(args)
+
+    def register_node(self, node):
+        self._nodes.append(node)
+
+    def _wrap_fcns(self, *args):
+        for node in self._nodes:
+            node._wrap_fcn(*args)
+
+    def _unwrap_fcns(self):
+        for node in self._nodes:
+            node._unwrap_fcn()
+
+    def print(self):
+        print(f"Group of {len(self._nodes)} nodes:")
+        for node in self._nodes:
+            node.print()
+
+    def __lshift__(self, other):
+        """
+        self << other
+        """
+        return lshift(self, other)
+
+    def __rrshift__(self, other):
+        """
+        other >> self
+        """
+        return lshift(self, other)
+
+    def __iter__(self):
+        """
+        iterate the nodes
+
+        To be used with the >>/<< operators, which reach the disconnected inputs through the nodes
+        """
+        return iter(self._nodes)
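+
+
+# Illustrative only (node_a and node_b are placeholders): a NodeGroup
+# forwards the shift operators to its nodes, so one expression connects all.
+#
+#     group = NodeGroup(node_a, node_b)
+#     group.print()          # summary of every registered node
+#     some_output >> group   # connect the output to the grouped nodes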
diff --git a/subtrees/dagflow/dagflow/nodes.py b/subtrees/dagflow/dagflow/nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..668976652d021486cdbcc2fd5c6cb3cff0e6fcf7
--- /dev/null
+++ b/subtrees/dagflow/dagflow/nodes.py
@@ -0,0 +1,89 @@
+from dagflow.exception import CriticalError
+from .node import Node
+
+
+class FunctionNode(Node):
+    """Function signature: fcn(node, inputs, outputs)
+
+    Note: _fcn should be a static function with signature (node, inputs, outputs)
+
+    - Function defined as an instance attribute will become a static method:
+        class Node(...):
+            def __init__(self):
+                self._fcn = ...
+        node = Node()
+        node.fcn() # will have NO self provided as first argument
+
+    - Function defined in a nested class with staticmethod:
+        class Other(Node):
+            @staticmethod
+            def _fcn():
+                ...
+
+        node = Other()
+        node.fcn() # will have NO self provided as first argument
+
+    - [deprecated] Function defined as a class attribute will become a bound method:
+        class Node(...):
+            _fcn = ...
+        node = Node()
+        node.fcn() # will have self provided as first argument
+
+    - [deprecated] Function defined via the staticmethod decorator as a class attribute will become a static method:
+        class Node(...):
+            _fcn = staticmethod(...)
+        node = Node()
+        node.fcn() # will have NO self provided as first argument
+    """
+
+    fcn = None
+
+    def __init__(self, name, **kwargs):
+        super().__init__(name, **kwargs)
+        if self.fcn is None:
+            self._functions = {"default": self._fcn}
+            self.fcn = self._functions["default"]
+        else:
+            self._functions = {"default": self.fcn}
+
+    def _stash_fcn(self):
+        self._fcn_chain.append(self.fcn)
+        return self.fcn
+
+    def _make_wrap(self, prev_fcn, wrap_fcn):
+        def wrapped_fcn(node, inputs, outputs):
+            wrap_fcn(prev_fcn, node, inputs, outputs)
+
+        return wrapped_fcn
+
+    def _eval(self):
+        return self.fcn(self, self.inputs, self.outputs)
+
+
+class StaticNode(Node):
+    """Function signature: fcn()"""
+
+    _touch_inputs = True
+
+    def __init__(self, *args, **kwargs):
+        self._touch_inputs = kwargs.pop("touch_inputs", True)
+        super().__init__(*args, **kwargs)
+
+    def _eval(self):
+        self._being_evaluated = True
+        if self._touch_inputs:
+            self.inputs.touch()
+        ret = self._fcn()
+        self._being_evaluated = False
+        return ret
+
+    def _stash_fcn(self):
+        prev_fcn = self._fcn
+        self._fcn_chain.append(prev_fcn)
+        return lambda node, inputs, outputs: prev_fcn()
+
+    def _make_wrap(self, prev_fcn, wrap_fcn):
+        def wrapped_fcn():
+            wrap_fcn(prev_fcn, self, self.inputs, self.outputs)
+
+        return wrapped_fcn
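+
+
+# A minimal sketch (assumption, not in the original file) of a concrete
+# FunctionNode following the fcn(node, inputs, outputs) convention:
+#
+#     class Echo(FunctionNode):
+#         @staticmethod
+#         def _fcn(node, inputs, outputs):
+#             outputs[0].data[:] = inputs[0].data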
diff --git a/subtrees/dagflow/dagflow/output.py b/subtrees/dagflow/dagflow/output.py
new file mode 100644
index 0000000000000000000000000000000000000000..8147fa035c554b5dda8dcf17f91a781eefbe49bb
--- /dev/null
+++ b/subtrees/dagflow/dagflow/output.py
@@ -0,0 +1,363 @@
+from itertools import cycle
+from typing import List, Optional, Tuple
+
+from numpy import zeros
+from numpy.typing import ArrayLike, DTypeLike, NDArray
+
+from .edges import EdgeContainer
+from .exception import (
+    ClosedGraphError,
+    CriticalError,
+    InitializationError,
+    AllocationError,
+    ConnectionError,
+    UnclosedGraphError,
+)
+from .shift import lshift, rshift
+from .iter import StopNesting
+from .types import EdgesLike, InputT, NodeT, ShapeLike
+from .datadescriptor import DataDescriptor
+
+
+class Output:
+    _data: Optional[NDArray] = None
+    _dd: DataDescriptor
+
+    _node: Optional[NodeT]
+    _name: Optional[str]
+
+    _child_inputs: List[InputT]
+    _parent_input: Optional[InputT] = None
+    _allocating_input: Optional[InputT] = None
+
+    _allocatable: bool = True
+    _owns_buffer: bool = False
+    _forbid_reallocation: bool = False
+
+    _debug: bool = False
+
+    def __init__(
+        self,
+        name: Optional[str],
+        node: Optional[NodeT],
+        *,
+        debug: Optional[bool] = None,
+        allocatable: Optional[bool] = None,
+        data: Optional[NDArray] = None,
+        owns_buffer: Optional[bool] = None,
+        dtype: DTypeLike = None,
+        shape: Optional[ShapeLike] = None,
+        axes_edges: Optional[Tuple[EdgesLike]] = None,
+        axes_nodes: Optional[Tuple[EdgesLike]] = None,
+        forbid_reallocation: bool = False,
+    ):
+        self._name = name
+        self._node = node
+        self._child_inputs = []
+        self._debug = (
+            debug if debug is not None else node.debug if node else False
+        )
+        self._forbid_reallocation = forbid_reallocation
+
+        self._dd = DataDescriptor(dtype, shape, axes_edges, axes_nodes)
+
+        if data is None:
+            self._allocatable = True if allocatable is None else allocatable
+        else:
+            if owns_buffer is None:
+                owns_buffer = True
+            self._allocatable = not owns_buffer
+            self._set_data(data, owns_buffer=owns_buffer)
+
+            if allocatable or dtype is not None or shape is not None:
+                raise InitializationError(output=self, node=node)
+
+    def __str__(self):
+        return f"●→ {self._name}" if self.owns_buffer else f"○→ {self._name}"
+
+    def __repr__(self):
+        return self.__str__()
+
+    @property
+    def name(self):
+        return self._name
+
+    @name.setter
+    def name(self, name):
+        self._name = name
+
+    @property
+    def allocatable(self):
+        return self._allocatable
+
+    @property
+    def has_data(self) -> bool:
+        return self._data is not None
+
+    @property
+    def node(self):
+        return self._node
+
+    @property
+    def child_inputs(self):
+        return self._child_inputs
+
+    @property
+    def parent_input(self):
+        return self._parent_input
+
+    @parent_input.setter
+    def parent_input(self, input):
+        self._parent_input = input
+
+    @property
+    def logger(self):
+        return self._node.logger
+
+    @property
+    def invalid(self):
+        """Checks the validity of the current node"""
+        return self._node.invalid
+
+    @invalid.setter
+    def invalid(self, invalid):
+        """Sets the validity of the following nodes"""
+        for input in self.child_inputs:
+            input.invalid = invalid
+
+    @property
+    def data(self):
+        if self.node.being_evaluated:
+            return self._data
+        if not self.closed:
+            raise UnclosedGraphError(
+                "Unable to get the output data from unclosed graph!",
+                node=self._node,
+                output=self,
+            )
+        try:
+            self.touch()
+            return self.get_data_unsafe()
+        except Exception as exc:
+            raise CriticalError(
+                "An exception occured during touching of the parent node!",
+                node=self._node,
+                output=self,
+            ) from exc
+
+    def _set_data(
+        self,
+        data,
+        *,
+        owns_buffer: bool,
+        override: bool = False,
+        forbid_reallocation: Optional[bool] = None,
+    ):
+        if self.closed:
+            raise ClosedGraphError(
+                "Unable to set output data.", node=self._node, output=self
+            )
+        if self._data is not None and not override:
+            # TODO: this will fail during reallocation
+            raise AllocationError(
+                "Output already has data.", node=self._node, output=self
+            )
+        if owns_buffer:
+            forbid_reallocation = True
+        elif forbid_reallocation is None:
+            forbid_reallocation = owns_buffer
+
+        forbid_reallocation |= self._forbid_reallocation
+        if forbid_reallocation and self._allocating_input:
+            raise AllocationError(
+                "Output is connected to allocating input, but reallocation is forbidden",
+                node=self._node,
+                output=self,
+            )
+
+        self._data = data
+        self.dd.dtype = data.dtype
+        self.dd.shape = data.shape
+        self._owns_buffer = owns_buffer
+        self._forbid_reallocation = forbid_reallocation
+
+    @property
+    def dd(self) -> Optional[DataDescriptor]:
+        return self._dd
+
+    @property
+    def owns_buffer(self):
+        return self._owns_buffer
+
+    @property
+    def forbid_reallocation(self):
+        return self._forbid_reallocation
+
+    @property
+    def closed(self):
+        return self.node.closed if self.node else False
+
+    @property
+    def tainted(self) -> bool:
+        return self._node.tainted
+
+    @property
+    def debug(self) -> bool:
+        return self._debug
+
+    def get_data_unsafe(self):
+        return self._data
+
+    def connect_to(self, input) -> InputT:
+        if not self.closed and input.closed:
+            raise ConnectionError(
+                "Cannot connect an output to a closed input!",
+                node=self.node,
+                output=self,
+                input=input,
+            )
+        if self.closed and input.allocatable:
+            raise ConnectionError(
+                "Cannot connect a closed output to an allocatable input!",
+                node=self.node,
+                output=self,
+                input=input,
+            )
+        return self._connect_to(input)
+
+    def _connect_to(self, input) -> InputT:
+        if input.allocatable:
+            if self._allocating_input:
+                raise ConnectionError(
+                    "Output has multiple allocatable/allocated child inputs",
+                    node=self._node,
+                    output=self,
+                )
+            if self._forbid_reallocation:
+                raise ConnectionError(
+                    "Output forbids reallocation and may not connect to allocating inputs",
+                    node=self._node,
+                    output=self,
+                )
+            self._allocating_input = input
+        self._child_inputs.append(input)
+        input._set_parent_output(self)
+        return input
+
+    def __rshift__(self, other):
+        return rshift(self, other)
+
+    def __rlshift__(self, other):
+        return lshift(self, other)
+
+    def taint_children(self, **kwargs) -> None:
+        for input in self._child_inputs:
+            input.taint(**kwargs)
+
+    def taint_children_type(self, **kwargs) -> None:
+        for input in self._child_inputs:
+            input.taint_type(**kwargs)
+
+    def touch(self):
+        return self._node.touch()
+
+    def connected(self):
+        return bool(self._child_inputs)
+
+    def deep_iter_outputs(self, disconnected_only=False):
+        if disconnected_only and self.connected():
+            return iter(tuple())
+        raise StopNesting(self)
+
+    def deep_iter_child_outputs(self):
+        raise StopNesting(self)
+
+    def repeat(self):
+        return RepeatedOutput(self)
+
+    def allocate(self, **kwargs):
+        if not self._allocatable:
+            return True
+
+        if self._allocating_input:
+            input = self._allocating_input
+            input.allocate(recursive=False)
+            if input.has_data:
+                idata = input._own_data
+                if idata.shape != self.dd.shape or idata.dtype != self.dd.dtype:
+                    raise AllocationError(
+                        "Input's data shape/type is inconsistent",
+                        node=self._node,
+                        output=self,
+                        input=input,
+                    )
+
+                if self._data is not idata:
+                    if self._data is not None:
+                        idata[:] = self._data
+                    self._set_data(idata, owns_buffer=False, override=True)
+                return True
+
+        if self.has_data:
+            return True
+
+        if self.dd.shape is None or self.dd.dtype is None:
+            raise AllocationError(
+                "No shape/type information provided for the Output",
+                node=self._node,
+                output=self,
+            )
+        try:
+            data = zeros(self.dd.shape, self.dd.dtype, **kwargs)
+            self._set_data(data, owns_buffer=True)
+        except Exception as exc:
+            raise AllocationError(
+                f"Output: {exc.args[0]}", node=self._node, output=self
+            ) from exc
+
+        return True
+
+    def set(
+        self, data: ArrayLike, check_taint: bool = False, force: bool = False
+    ) -> bool:
+        if self.node._frozen and not force:
+            return False
+
+        tainted = True
+        if check_taint:
+            tainted = (self._data != data).any()
+
+        if tainted:
+            self._data[:] = data
+            self.taint_children()
+            self.node.invalidate_parents()
+            self.node._tainted = False
+
+        return tainted
+
+
+class RepeatedOutput:
+    def __init__(self, output):
+        self._output = output
+
+    def __iter__(self):
+        return cycle((self._output,))
+
+    def __rshift__(self, other):
+        return rshift(self, other)
+
+    def __rlshift__(self, other):
+        return lshift(self, other)
+
+
+class Outputs(EdgeContainer):
+    _dtype = Output
+
+    def __init__(self, iterable=None) -> None:
+        super().__init__(iterable)
+
+    def __str__(self) -> str:
+        return f"○[{tuple(obj.name for obj in self)}]→"
+
+    def __repr__(self) -> str:
+        return self.__str__()
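+
+
+# Hedged note (illustrative): reading Output.data touches the parent node
+# first, while get_data_unsafe() returns the buffer without recomputation:
+#
+#     value = some_output.data              # may trigger node.touch()
+#     raw = some_output.get_data_unsafe()   # no touch, possibly stale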
diff --git a/subtrees/dagflow/dagflow/printl.py b/subtrees/dagflow/dagflow/printl.py
new file mode 100644
index 0000000000000000000000000000000000000000..459eb59e9c4b045facb4735b9f2eb451681292a3
--- /dev/null
+++ b/subtrees/dagflow/dagflow/printl.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+
+
+printlevel = 0
+singlemargin = "    "
+marginflag = False
+prefix_function = lambda: ""
+
+
+def set_prefix_function(f):
+    global prefix_function
+    prefix_function = f
+
+
+class next_level:
+    def __enter__(self):
+        global printlevel
+        printlevel += 1
+
+    def __exit__(self, *args, **kwargs):
+        global printlevel
+        printlevel -= 1
+
+
+def current_level():
+    return printlevel
+
+
+def print_margin(kwargs):
+    global marginflag
+    prefix = kwargs.pop("prefix", prefix_function())
+    postfix = kwargs.pop("postfix", None)
+    prefixopts = kwargs.pop("prefixopts", dict(end=""))
+    postfixopts = kwargs.pop("postfixopts", dict(end=" "))
+    if marginflag:
+        return
+
+    if prefix:
+        print(*prefix, **prefixopts)
+
+    print(singlemargin * printlevel, sep="", end="")
+
+    if postfix:
+        print(*postfix, **postfixopts)
+
+    marginflag = True
+
+
+def reset_margin_flag(*args, **kwargs):
+    global marginflag
+
+    for arg in args + (kwargs.pop("sep", ""), kwargs.pop("end", "\n")):
+        if "\n" in str(arg):
+            marginflag = False
+            return
+
+
+def printl(*args, **kwargs):
+    print_margin(kwargs)
+    print(*args, **kwargs)
+    reset_margin_flag(*args, **kwargs)
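+
+
+# Illustrative usage (assumption): each nested next_level() adds one margin.
+#
+#     printl("evaluate")
+#     with next_level():
+#         printl("nested step")   # printed with one extra indent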
diff --git a/subtrees/dagflow/dagflow/shift.py b/subtrees/dagflow/dagflow/shift.py
new file mode 100644
index 0000000000000000000000000000000000000000..693c422063d1bc2ff4447d428316a454edf2aeba
--- /dev/null
+++ b/subtrees/dagflow/dagflow/shift.py
@@ -0,0 +1,50 @@
+from itertools import zip_longest
+
+from .exception import ConnectionError
+from .iterators import iter_child_outputs, iter_inputs, iter_outputs
+
+_rshift_scope_id = 0
+
+
+def rshift_scope_id():
+    global _rshift_scope_id
+    ret = _rshift_scope_id
+    _rshift_scope_id += 1
+    return ret
+
+
+def rshift(outputs, inputs):
+    """`>>` operator"""
+    scope_id = rshift_scope_id()
+
+    for output, inp in zip_longest(
+        iter_outputs(outputs),
+        iter_inputs(inputs, True),
+        fillvalue=None,
+    ):
+        if not output:
+            raise ConnectionError("Unable to connect mismatching lists!")
+        if isinstance(output, dict):
+            if inp:
+                raise ConnectionError(
+                    f"Cannot perform a binding from dict={output} due to "
+                    f"non-empty input={inp}!"
+                )
+            for key, val in output.items():
+                val >> inputs(key)
+            continue
+        if not inp:
+            missing_input_handler = getattr(
+                inputs, "_missing_input_handler", lambda *args, **kwargs: None
+            )
+            if not (inp := missing_input_handler(scope=scope_id)):
+                break
+        output.connect_to(inp)
+
+    child_outputs = tuple(iter_child_outputs(inputs))
+    return child_outputs[0] if len(child_outputs) == 1 else child_outputs
+
+
+def lshift(inputs, outputs):
+    """`<<` operator"""
+    return rshift(outputs, inputs)
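+
+
+# Hedged example (a and b are placeholders with outputs/inputs): `a >> b`
+# resolves to rshift(a, b) and `b << a` to lshift(b, a); both connect a's
+# outputs to b's (possibly auto-created) inputs.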
diff --git a/subtrees/dagflow/dagflow/tools/__init__.py b/subtrees/dagflow/dagflow/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/subtrees/dagflow/dagflow/tools/schema.py b/subtrees/dagflow/dagflow/tools/schema.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4c452feff550561cc586d6072f2223c93d98def
--- /dev/null
+++ b/subtrees/dagflow/dagflow/tools/schema.py
@@ -0,0 +1,77 @@
+from contextlib import suppress
+from os import R_OK, access
+from typing import Any, Callable, Optional, Union
+
+from schema import Schema, SchemaError
+from yaml import Loader, load
+
+from dictwrapper.dictwrapper import DictWrapper
+
+def IsReadable(filename: str):
+    """Returns True if the file is readable"""
+    return access(filename, R_OK)
+
+def IsFilewithExt(*exts: str):
+    """Returns a function that retunts True if the file extension is consistent"""
+    def checkfilename(filename: str):
+        return any(filename.endswith(f'.{ext}' for ext in exts))
+    return checkfilename
+
+def LoadFileWithExt(*, key: Optional[str]=None, **kwargs: Callable):
+    """Returns a function that loads a file with the loader matching its extension"""
+    def loadfile(filename: Union[str, dict]):
+        if key is not None:
+            filename = filename[key]
+        for ext, loader in kwargs.items():
+            if filename.endswith(f'.{ext}'):
+                return loader(filename)
+        return False
+    return loadfile
+
+def LoadYaml(fname: str):
+    with open(fname, 'r') as file:
+        return load(file, Loader)
+
+class NestedSchema(object):
+    __slots__ = ('_schema', '_processdicts')
+    _schema: Union[Schema,object]
+    _processdicts: bool
+
+    def __init__(self, /, schema: Union[Schema,object], *, processdicts: bool=False):
+        self._schema = schema
+        self._processdicts = processdicts
+
+    def validate(self, data: Any) -> Any:
+        if not isinstance(data, dict):
+            return self._schema.validate(data)
+
+        if self._processdicts:
+            return {
+                key: self._process_dict((key,), subdata) for key, subdata in data.items()
+            }
+
+        dtin = DictWrapper(data)
+        dtout = DictWrapper({})
+        for key, subdata in dtin.walkitems():
+            dtout[key] = self._process_element(key, subdata)
+
+        return dtout.object
+
+    def _process_element(self, key, subdata: Any) -> Any:
+        try:
+            return self._schema.validate(subdata, _is_event_schema=False)
+        except SchemaError as err:
+            key = ".".join(str(k) for k in key)
+            raise SchemaError(f'Key "{key}" has invalid value "{subdata}":\n{err.args[0]}') from err
+
+    def _process_dict(self, key, data: Any) -> Any:
+        if not isinstance(data, dict):
+            return self._schema.validate(data)
+
+        with suppress(SchemaError):
+            return self._schema.validate(data, _is_event_schema=False)
+
+        return {
+            subkey: self._process_dict(key+(subkey,), subdata) for subkey, subdata in data.items()
+        }
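+
+
+# A usage sketch (assumption: schema>=0.7.5, whose Schema.validate accepts
+# extra keyword arguments such as _is_event_schema; the data is made up):
+#
+#     from schema import Schema
+#     validate = NestedSchema(Schema(int)).validate
+#     validate({"a": {"b": 1}})   # validates each leaf -> {"a": {"b": 1}}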
diff --git a/subtrees/dagflow/dagflow/typefunctions.py b/subtrees/dagflow/dagflow/typefunctions.py
new file mode 100644
index 0000000000000000000000000000000000000000..f923f66710769f577aacd3e9461374b535b91c14
--- /dev/null
+++ b/subtrees/dagflow/dagflow/typefunctions.py
@@ -0,0 +1,301 @@
+from collections.abc import Sequence
+from typing import Union
+
+from numpy import result_type
+from itertools import repeat
+
+from .exception import TypeFunctionError
+from .types import NodeT
+
+AllPositionals = slice(None)
+
+try:
+    zip((), (), strict=True)
+except TypeError:
+    # provide a replacement for the strict zip from Python 3.10
+    # to be deprecated at some point
+    from itertools import zip_longest
+
+    def zip(*iterables, strict: bool = False):
+        sentinel = object()
+        for combo in zip_longest(*iterables, fillvalue=sentinel):
+            if strict and sentinel in combo:
+                raise ValueError("Iterables have different lengths")
+            yield combo
+
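+# Example (illustrative): list(zip((1, 2), (3,), strict=True)) raises
+# ValueError, matching the Python >= 3.10 builtin behaviour.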
+
+def check_has_inputs(
+    node: NodeT, inputkey: Union[str, int, slice, Sequence, None] = None
+) -> None:
+    """Checking if the node has inputs"""
+    if inputkey is None or inputkey == AllPositionals:
+        try:
+            node.inputs[0]
+        except Exception as exc:
+            raise TypeFunctionError(
+                "The node must have at lease one input!", node=node
+            ) from exc
+    else:
+        try:
+            node.inputs[inputkey]
+        except Exception as exc:
+            raise TypeFunctionError(
+                f"The node must have the input '{inputkey}'!", node=node
+            ) from exc
+
+
+def eval_output_dtype(
+    node: NodeT,
+    inputkey: Union[str, int, slice, Sequence] = AllPositionals,
+    outputkey: Union[str, int, slice, Sequence] = AllPositionals,
+) -> None:
+    """Automatic calculation and setting dtype for the output"""
+    inputs = node.inputs.iter(inputkey)
+    outputs = node.outputs.iter(outputkey)
+
+    dtype = result_type(*(inp.dd.dtype for inp in inputs))
+    for output in outputs:
+        output.dd.dtype = dtype
+
+
+def copy_input_to_output(
+    node: NodeT,
+    inputkey: Union[str, int, slice, Sequence] = 0,
+    outputkey: Union[str, int, slice, Sequence] = AllPositionals,
+    dtype: bool = True,
+    shape: bool = True,
+) -> None:
+    """Coping input dtype and setting for the output"""
+    inputs = tuple(node.inputs.iter(inputkey))
+    outputs = tuple(node.outputs.iter(outputkey))
+
+    if dtype and shape:
+
+        def cpy(input, output):
+            output.dd.dtype = input.dd.dtype
+            output.dd.shape = input.dd.shape
+
+    elif dtype:
+
+        def cpy(input, output):
+            output.dd.dtype = input.dd.dtype
+
+    elif shape:
+
+        def cpy(input, output):
+            output.dd.shape = input.dd.shape
+
+    else:
+        return
+
+    if len(inputs) == 1:
+        inputs = repeat(inputs[0], len(outputs))
+
+    for input, output in zip(inputs, outputs, strict=True):
+        cpy(input, output)
+
+
+def copy_input_dtype_to_output(
+    node: NodeT,
+    inputkey: Union[str, int, slice, Sequence] = 0,
+    outputkey: Union[str, int, slice, Sequence] = AllPositionals,
+) -> None:
+    """Coping input dtype and setting for the output"""
+    inputs = tuple(node.inputs.iter(inputkey))
+    outputs = tuple(node.outputs.iter(outputkey))
+
+    if len(inputs) == 1:
+        inputs = repeat(inputs[0], len(outputs))
+
+    for input, output in zip(inputs, outputs, strict=True):
+        output.dd.dtype = input.dd.dtype
+
+
+def copy_input_shape_to_output(
+    node: NodeT,
+    inputkey: Union[str, int] = 0,
+    outputkey: Union[str, int, slice, Sequence] = AllPositionals,
+) -> None:
+    """Coping input shape and setting for the output"""
+    inputs = tuple(node.inputs.iter(inputkey))
+    outputs = tuple(node.outputs.iter(outputkey))
+
+    if len(inputs) == 1:
+        inputs = repeat(inputs[0], len(outputs))
+
+    for input, output in zip(inputs, outputs, strict=True):
+        output.dd.shape = input.dd.shape
+
+
+def combine_inputs_shape_to_output(
+    node: NodeT,
+    inputkey: Union[str, int, slice, Sequence] = AllPositionals,
+    outputkey: Union[str, int, slice, Sequence] = AllPositionals,
+) -> None:
+    """Combine all the inputs shape and setting for the output"""
+    inputs = node.inputs.iter(inputkey)
+    shape = tuple(inp.dd.shape for inp in inputs)
+    for output in node.outputs.iter(outputkey):
+        output.dd.shape = shape
+
+
+def check_input_dimension(
+    node: NodeT, inputkey: Union[str, int, slice, Sequence], ndim: int
+):
+    """Checking the dimension of the input"""
+    for input in node.inputs.iter(inputkey):
+        dim = len(input.dd.shape)
+        if ndim != dim:
+            raise TypeFunctionError(
+                f"The node supports only {ndim}d inputs. Got {dim}d!",
+                node=node,
+                input=input,
+            )
+
+
+def check_input_square(
+    node: NodeT,
+    inputkey: Union[str, int, slice, Sequence],
+):
+    """Checking input is a square matrix"""
+    for input in node.inputs.iter(inputkey):
+        shape = input.dd.shape
+        dim = len(shape)
+        if dim != 2 or shape[0] != shape[1]:
+            raise TypeFunctionError(
+                f"The node supports only square inputs. Got {shape}!",
+                node=node,
+                input=input,
+            )
+
+
+def check_input_square_or_diag(
+    node: NodeT,
+    inputkey: Union[str, int, slice, Sequence],
+) -> int:
+    """Check if input is a square matrix or diagonal (1d) of a square matrix.
+    Returns the maximal dimension."""
+    dim_max = 0
+    for input in node.inputs.iter(inputkey):
+        shape = input.dd.shape
+        dim = len(shape)
+        dim_max = max(dim, dim_max)
+        if not (dim == 1 or (dim == 2 and shape[0] == shape[1])):
+            raise TypeFunctionError(
+                f"The node supports only square inputs (or 1d as diagonal). Got {shape}!",
+                node=node,
+                input=input,
+            )
+    return dim_max
+
+def check_input_shape(
+    node: NodeT, inputkey: Union[str, int, slice, Sequence], shape: tuple
+):
+    """Checking the shape equivalence for inputs"""
+    for input in node.inputs.iter(inputkey):
+        sshape = input.dd.shape
+        if sshape != shape:
+            raise TypeFunctionError(
+                f"The node supports only inputs with shape={shape}. Got {sshape}!",
+                node=node,
+                input=input,
+            )
+
+
+def check_input_dtype(
+    node: NodeT, inputkey: Union[str, int, slice, Sequence], dtype
+):
+    """Checking the dtype equivalence for inputs"""
+    for input in node.inputs.iter(inputkey):
+        dtt = input.dd.dtype
+        if dtt != dtype:
+            raise TypeFunctionError(
+                f"The node supports only input types {dtype}. Got {dtt}!",
+                node=node,
+                input=input,
+            )
+
+
+def check_inputs_equivalence(
+    node: NodeT, inputkey: Union[str, int, slice, Sequence] = AllPositionals
+):
+    """Checking the equivalence of the dtype and shape of all the inputs"""
+    inputs = tuple(node.inputs.iter(inputkey))
+    input0, inputs = inputs[0], inputs[1:]
+
+    dtype, shape = input0.dd.dtype, input0.dd.shape
+    for input in inputs:
+        if input.dd.dtype != dtype or input.dd.shape != shape:
+            raise TypeFunctionError(
+                f"Input data {input.dtype} [{input.shape}] is inconsistent with {dtype} [{shape}]",
+                node=node,
+                input=input,
+            )
+
+def check_inputs_square_or_diag(
+    node: NodeT,
+    inputkey: Union[str, int, slice, Sequence] = AllPositionals,
+) -> int:
+    """Check if inputs are square matrices or diagonals (1d) of a square matrices of the same size.
+    Returns the maximal dimension."""
+    inputs = tuple(node.inputs.iter(inputkey))
+
+    dim_max = 0
+    shape0 = inputs[0].dd.shape[0]
+
+    for input in inputs:
+        shape = input.dd.shape
+        dim = len(shape)
+        dim_max = max(dim, dim_max)
+        if shape0 != shape[0] or not (dim == 1 or (dim == 2 and shape[0] == shape[1])):
+            raise TypeFunctionError(
+                f"The node supports only square inputs (or 1d as diagonal) of size {shape0}x{shape0}. Got {shape}!",
+                node=node,
+                input=input,
+            )
+    return dim_max
+
+
+def check_inputs_same_dtype(
+    node: NodeT, inputkey: Union[str, int, slice, Sequence] = AllPositionals
+):
+    """Checking dtypes of all the inputs are same"""
+    inputs = tuple(node.inputs.iter(inputkey))
+    input0, inputs = inputs[0], inputs[1:]
+
+    dtype = input0.dd.dtype
+    for input in inputs:
+        if input.dd.dtype != dtype:
+            raise TypeFunctionError(
+                f"Input data {input.dd.dtype} is inconsistent with {dtype}",
+                node=node,
+                input=input,
+            )
+
+
+def check_inputs_multiplicable_mat(
+    node: NodeT,
+    inputkey1: Union[str, int, slice, Sequence],
+    inputkey2: Union[str, int, slice, Sequence],
+):
+    """Checking that inputs from key1 and key2 may be multiplied (matrix)"""
+    inputs1 = tuple(node.inputs.iter(inputkey1))
+    inputs2 = tuple(node.inputs.iter(inputkey2))
+
+    len1, len2 = len(inputs1), len(inputs2)
+    if len1 == len2:
+        pass
+    elif len1 == 1:
+        inputs1 = repeat(inputs1[0], len2)
+    elif len2 == 1:
+        inputs2 = repeat(inputs2[0], len1)
+
+    for input1, input2 in zip(inputs1, inputs2, strict=True):
+        shape1 = input1.dd.shape
+        shape2 = input2.dd.shape
+        if shape1[-1] != shape2[0]:
+            raise TypeFunctionError(
+                f"Inputs {shape1} and {shape2} are not multiplicable",
+                node=node,
+                input=input1,
+            )
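+
+
+# Hedged sketch (assumption, mirroring the usage in example/example.py):
+# a node's _typefunc typically chains these helpers, e.g.
+#
+#     def _typefunc(self) -> None:
+#         check_has_inputs(self)
+#         copy_input_to_output(self, 0, AllPositionals)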
diff --git a/subtrees/dagflow/dagflow/types.py b/subtrees/dagflow/dagflow/types.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ee0295bd890c7554446ffc0eba3ecd2b10e2382
--- /dev/null
+++ b/subtrees/dagflow/dagflow/types.py
@@ -0,0 +1,11 @@
+from typing import Tuple, TypeVar
+
+GraphT = TypeVar("GraphT", bound="Graph")
+NodeT = TypeVar("NodeT", bound="Node")
+InputT = TypeVar("InputT", bound="Input")
+InputsT = TypeVar("InputsT", bound="Inputs")
+OutputT = TypeVar("OutputT", bound="Output")
+OutputsT = TypeVar("OutputsT", bound="Outputs")
+
+ShapeLike = Tuple[int, ...]
+EdgesLike = Tuple[OutputT]
diff --git a/subtrees/dagflow/dagflow/variable.py b/subtrees/dagflow/dagflow/variable.py
new file mode 100644
index 0000000000000000000000000000000000000000..d565d9f9abe9b2170ea221ce52acab2f6d5c5529
--- /dev/null
+++ b/subtrees/dagflow/dagflow/variable.py
@@ -0,0 +1,145 @@
+from .node import Node, Output
+from .exception import InitializationError
+from .lib.NormalizeCorrelatedVars2 import NormalizeCorrelatedVars2
+from .lib.Cholesky import Cholesky
+from .lib.Array import Array
+from .lib.CovmatrixFromCormatrix import CovmatrixFromCormatrix
+
+from numpy import zeros_like, array
+from numpy.typing import DTypeLike
+from typing import Optional, Dict
+
+class Parameters(object):
+    value: Output
+    _value_node: Node
+
+    def __init__(self, value: Node):
+        self._value_node = value
+        self.value = value.outputs[0]
+
+    @staticmethod
+    def from_numbers(*, dtype: DTypeLike='d', **kwargs) -> 'Parameters':
+        sigma = kwargs['sigma']
+        if sigma is not None:
+            return GaussianParameters.from_numbers(dtype=dtype, **kwargs)
+
+        label: Dict[str, str] = kwargs.get('label')
+        if label is None:
+            label = {'text': 'parameter'}
+        else:
+            label = dict(label)
+        name: str = label.setdefault('name', 'parameter')
+        value = kwargs['value']
+        return Parameters(
+            Array(
+                name,
+                array((value,), dtype=dtype),
+                label = label,
+                mode='store_weak'
+            )
+        )
+
+class GaussianParameters(Parameters):
+    central: Output
+    sigma: Output
+    normvalue: Output
+
+    _central_node: Node
+    _sigma_node: Node
+    _normvalue_node: Node
+
+    _cholesky_node: Optional[Node] = None
+    _covariance_node: Optional[Node] = None
+    _correlation_node: Optional[Node] = None
+    _sigma_total_node: Optional[Node] = None
+
+    _forward_node: Node
+    _backward_node: Node
+
+    def __init__(self, value: Node, central: Node, *, sigma: Optional[Node]=None, covariance: Optional[Node]=None, correlation: Optional[Node]=None):
+        super().__init__(value)
+        self._central_node = central
+
+        if sigma is not None and covariance is not None:
+            raise InitializationError('GaussianParameters: got both "sigma" and "covariance" as arguments')
+        if correlation is not None and sigma is None:
+            raise InitializationError('GaussianParameters: got "correlation", but no "sigma" as arguments')
+
+        if correlation is not None:
+            self._correlation_node = correlation
+            self._covariance_node = CovmatrixFromCormatrix(f"V({value.name})")
+            self._cholesky_node = Cholesky(f"L({value.name})")
+            self._sigma_total_node = sigma
+            self._sigma_node = self._cholesky_node
+
+            self._sigma_total_node >> self._covariance_node.inputs['sigma']
+            correlation >> self._covariance_node
+            self._covariance_node >> self._cholesky_node
+        elif sigma is not None:
+            self._sigma_node = sigma
+        elif covariance is not None:
+            self._cholesky_node = Cholesky(f"L({value.name})")
+            self._sigma_node = self._cholesky_node
+            self._covariance_node = covariance
+
+            covariance >> self._cholesky_node
+        else:
+            # TODO: no sigma/covariance AND central means normalized=value?
+            raise InitializationError('GaussianParameters: got no "sigma" and no "covariance" arguments')
+
+        self.central = self._central_node.outputs[0]
+        self.sigma = self._sigma_node.outputs[0]
+
+        self._normvalue_node = Array(
+            f'Normalized {value.name}',
+            zeros_like(self.central._data),
+            mark = f'norm({value.mark})',
+            mode='store_weak'
+        )
+        self.normvalue = self._normvalue_node.outputs[0]
+
+        self._norm_node = NormalizeCorrelatedVars2(f"Normalize {value.name}", immediate=True)
+        self.central >> self._norm_node.inputs['central']
+        self.sigma >> self._norm_node.inputs['matrix']
+        (self.value, self.normvalue) >> self._norm_node
+
+        self._norm_node.close(recursive=True)
+        self._norm_node.touch()
+
+    @staticmethod
+    def from_numbers(
+        value: float,
+        *,
+        central: float,
+        sigma: float,
+        label: Optional[Dict[str,str]]=None,
+        dtype: DTypeLike='d',
+        **_
+    ) -> 'GaussianParameters':
+        if label is None:
+            label = {'text': 'gaussian parameter'}
+        else:
+            label = dict(label)
+        name = label.setdefault('name', 'parameter')
+        node_value = Array(
+            name,
+            array((value,), dtype=dtype),
+            label = label,
+            mode='store_weak'
+        )
+
+        node_central = Array(
+            f'{name}_central',
+            array((central,), dtype=dtype),
+            label = {k: f'central: {v}' for k,v in label.items()},
+            mode='store_weak'
+        )
+
+        node_sigma = Array(
+            f'{name}_sigma',
+            array((sigma,), dtype=dtype),
+            label = {k: f'sigma: {v}' for k,v in label.items()},
+            mode='store_weak'
+        )
+
+        return GaussianParameters(value=node_value, central=node_central, sigma=node_sigma)
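+
+# Illustrative only (the numbers are made up): a scalar Gaussian parameter
+# built from plain numbers; normvalue holds the normalized deviation.
+#
+#     par = GaussianParameters.from_numbers(value=1.0, central=0.0, sigma=1.0)
+#     par.normvalue.data   # expected (value - central) / sigma = [1.0]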
diff --git a/subtrees/dagflow/dagflow/wrappers.py b/subtrees/dagflow/dagflow/wrappers.py
new file mode 100644
index 0000000000000000000000000000000000000000..18dec84a76b0475df33406825e6f20827b4177a5
--- /dev/null
+++ b/subtrees/dagflow/dagflow/wrappers.py
@@ -0,0 +1,35 @@
+
+from .printl import next_level, printl
+
+
+def printer(fcn, node, inputs, outputs):
+    printl(f"Evaluate {node.name}")
+    with next_level():
+        fcn(node, inputs, outputs)
+    printl(f"... done with {node.name}")
+
+
+def before_printer(fcn, node, inputs, outputs):
+    printl(f"Evaluate {node.name}: {node.label()}")
+    with next_level():
+        fcn(node, inputs, outputs)
+
+
+def after_printer(fcn, node, inputs, outputs):
+    with next_level():
+        fcn(node, inputs, outputs)
+    printl(f"Evaluate {node.name}: {node.label()}")
+
+
+def dataprinter(fcn, node, inputs, outputs):
+    fcn(node, inputs, outputs)
+    for i, output in enumerate(outputs):
+        printl("{: 2d} {}: {!s}".format(i, output.name, output._data))
+
+
+def toucher(fcn, node, inputs, outputs):
+    for i, input in enumerate(inputs):
+        printl("touch input {: 2d} {}.{}".format(i, node.name, input.name))
+        with next_level():
+            input.touch()
+    fcn(node, inputs, outputs)
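+
+
+# Hedged usage (node is a placeholder): wrappers are installed with
+# Node._wrap_fcn and removed with Node._unwrap_fcn, e.g.
+#
+#     node._wrap_fcn(printer)   # every evaluation prints enter/leave lines
+#     node.touch(force=True)
+#     node._unwrap_fcn()        # restore the original function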
diff --git a/subtrees/dagflow/dictwrapper b/subtrees/dagflow/dictwrapper
new file mode 120000
index 0000000000000000000000000000000000000000..3ff36458e776b6f68c096e68ceac44280b392e97
--- /dev/null
+++ b/subtrees/dagflow/dictwrapper
@@ -0,0 +1 @@
+subtrees/dictwrapper/dictwrapper
\ No newline at end of file
diff --git a/subtrees/dagflow/example/dagflow_example.png b/subtrees/dagflow/example/dagflow_example.png
new file mode 100644
index 0000000000000000000000000000000000000000..a1b7b06a4a56ff7129eba31bb1690a6923f87d09
Binary files /dev/null and b/subtrees/dagflow/example/dagflow_example.png differ
diff --git a/subtrees/dagflow/example/example.py b/subtrees/dagflow/example/example.py
new file mode 100755
index 0000000000000000000000000000000000000000..be2288fc6c9538e6d0c565ab25d05529d215870c
--- /dev/null
+++ b/subtrees/dagflow/example/example.py
@@ -0,0 +1,128 @@
+from numpy import arange, copyto, result_type
+
+from dagflow.graph import Graph
+from dagflow.graphviz import savegraph
+from dagflow.input_extra import MissingInputAddEach
+from dagflow.lib import Array, Product, Sum, WeightedSum
+from dagflow.nodes import FunctionNode
+
+array = arange(3, dtype="d")
+debug = False
+
+
+class ThreeInputsOneOutput(FunctionNode):
+    def __init__(self, *args, **kwargs):
+        kwargs.setdefault("missing_input_handler", MissingInputAddEach())
+        super().__init__(*args, **kwargs)
+
+    def _fcn(self, _, inputs, outputs):
+        for i, output in enumerate(outputs):
+            out = output.data
+            copyto(out, inputs[3 * i].data)
+            for input in inputs[3 * i + 1 : (i + 1) * 3]:
+                out += input.data
+        return out
+
+    @property
+    def result(self):
+        return [out.data for out in self.outputs]
+
+    def _typefunc(self) -> None:
+        """A output takes this function to determine the dtype and shape"""
+        for i, output in enumerate(self.outputs):
+            inputs = self.inputs[3 * i : 3 * (i + 1)]
+            output.dd.shape = inputs[0].dd.shape
+            output.dd.dtype = result_type(*(inp.dd.dtype for inp in inputs))
+        self.logger.debug(
+            f"Node '{self.name}': dtype={tuple(out.dd.dtype for out in self.outputs)}, "
+            f"shape={tuple(out.dd.shape for out in self.outputs)}"
+        )
+
+
+# Check predefined Array, Sum and Product
+with Graph(debug=debug) as graph:
+    (in1, in2, in3, in4) = (
+        Array(name, array) for name in ("n1", "n2", "n3", "n4")
+    )
+    s = Sum("sum")
+    m = Product("product")
+
+    (in1, in2, in3) >> s
+    (in4, s) >> m
+    graph.close()
+
+    print("Result:", m.outputs["result"].data)
+    savegraph(graph, "dagflow_example_1a.png")
+
+# Check predefined Array, Sum and Product once again
+with Graph(debug=debug) as graph:
+    (in1, in2, in3, in4) = (
+        Array(name, array) for name in ("n1", "n2", "n3", "n4")
+    )
+    s = Sum("sum")
+    m = Product("product")
+
+    (in1, in2, in3) >> s
+    (in4, s) >> m
+    graph.close()
+
+    print("Result:", m.outputs["result"].data)
+    savegraph(graph, "dagflow_example_1b.png")
+
+# Check predefined Array, two Sum's and Product
+with Graph(debug=debug) as graph:
+    (in1, in2, in3, in4) = (
+        Array(name, array) for name in ("n1", "n2", "n3", "n4")
+    )
+    s = Sum("sum")
+    s2 = Sum("sum")
+    m = Product("product")
+
+    (in1, in2) >> s
+    (in3, in4) >> s2
+    (s, s2) >> m
+    graph.close()
+
+    print("Result:", m.outputs["result"].data)
+    savegraph(graph, "dagflow_example_2.png")
+
+# Check predefined Array, Sum, WeightedSum and Product
+with Graph(debug=debug) as graph:
+    (in1, in2, in3, in4) = (
+        Array(name, array) for name in ("n1", "n2", "n3", "n4")
+    )
+    weight = Array("weight", (2, 3))
+    # The same result with other weight
+    # weight = makeArray(5)("weight")
+    s = Sum("sum")
+    ws = WeightedSum("weightedsum")
+    m = Product("product")
+
+    (in1, in2) >> s  # [0,2,4]
+    (in3, in4) >> ws
+    {"weight": weight} >> ws  # [0,1,2] * 2 + [0,1,2] * 3 = [0,5,10]
+    # NOTE: also it is possible to use the old style binding:
+    #weight >> ws("weight")
+    (s, ws) >> m  # [0,2,4] * [0,5,10] = [0,10,40]
+    graph.close()
+
+    print("Result:", m.outputs["result"].data)
+    savegraph(graph, "dagflow_example_3.png")
+
+
+with Graph(debug=debug) as graph:
+    (in1, in2, in3) = (Array(name, array) for name in ("n1", "n2", "n3"))
+    (in4, in5, in6) = (
+        Array(name, (1, 0, 0)) for name in ("n4", "n5", "n6")
+    )
+    (in7, in8, in9) = (
+        Array(name, (3, 3, 3)) for name in ("n7", "n8", "n9")
+    )
+    s = ThreeInputsOneOutput("3to1")
+    (in1, in2, in3) >> s
+    (in4, in5, in6) >> s
+    (in7, in8, in9) >> s
+    graph.close()
+
+    print("Result:", s.result)
+    savegraph(graph, "dagflow_example_4.png")
diff --git a/subtrees/dagflow/example/graph_evaluation.gif b/subtrees/dagflow/example/graph_evaluation.gif
new file mode 100644
index 0000000000000000000000000000000000000000..36b4b7d3f7a3d4a685531c0dc3f5f5287a853992
Binary files /dev/null and b/subtrees/dagflow/example/graph_evaluation.gif differ
diff --git a/subtrees/dagflow/pytest.ini b/subtrees/dagflow/pytest.ini
new file mode 100755
index 0000000000000000000000000000000000000000..0f91ae46aaea5c6922d3a8f583bc520c4224ede9
--- /dev/null
+++ b/subtrees/dagflow/pytest.ini
@@ -0,0 +1,3 @@
+[pytest]
+testpaths=test/
+; addopts= --cov-report term --cov=./ --cov-report xml:cov.xml
diff --git a/subtrees/dagflow/requirements.txt b/subtrees/dagflow/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..63a493e7e59c09a6a189eb7ffc5eecdf9dadf4f8
--- /dev/null
+++ b/subtrees/dagflow/requirements.txt
@@ -0,0 +1,8 @@
+contextlib2
+coverage
+numba
+numpy
+pygraphviz
+pytest
+pytest-cov
+schema
diff --git a/subtrees/dagflow/storage b/subtrees/dagflow/storage
new file mode 120000
index 0000000000000000000000000000000000000000..79ecdb34cd8f2191cb528038042137a9de8c82d4
--- /dev/null
+++ b/subtrees/dagflow/storage
@@ -0,0 +1 @@
+subtrees/dictwrapper/storage
\ No newline at end of file
diff --git a/subtrees/dagflow/subtrees/dictwrapper/.envrc b/subtrees/dagflow/subtrees/dictwrapper/.envrc
new file mode 100644
index 0000000000000000000000000000000000000000..e780e09108ca62a21880576ed949d9b5d515ac90
--- /dev/null
+++ b/subtrees/dagflow/subtrees/dictwrapper/.envrc
@@ -0,0 +1 @@
+export PYTHONPATH=$PWD
diff --git a/subtrees/dagflow/subtrees/dictwrapper/.gitignore b/subtrees/dagflow/subtrees/dictwrapper/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..4906f4c4df53b47c9f99b611f1d4fcf3a85e209a
--- /dev/null
+++ b/subtrees/dagflow/subtrees/dictwrapper/.gitignore
@@ -0,0 +1,145 @@
+
+# Created by https://www.toptal.com/developers/gitignore/api/python
+# Edit at https://www.toptal.com/developers/gitignore?templates=python
+
+### Python ###
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+#   For a library or package, you might want to ignore these files since the code is
+#   intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+#   However, in case of collaboration, if having platform-specific dependencies or dependencies
+#   having no cross-platform support, pipenv may install dependencies that don't work, or not
+#   install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# End of https://www.toptal.com/developers/gitignore/api/python
diff --git a/subtrees/dagflow/subtrees/dictwrapper/.gitlab-ci.yml b/subtrees/dagflow/subtrees/dictwrapper/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..05a935a95d9c71b73f11b9832bdfcfe5c68de3ca
--- /dev/null
+++ b/subtrees/dagflow/subtrees/dictwrapper/.gitlab-ci.yml
@@ -0,0 +1,21 @@
+stages:
+    - tests
+
+tests:
+    image: git.jinr.ru:5005/gna/gna-base-docker-image:latest
+    stage: tests
+
+    script:
+    - python3 -m pip install -r requirements.txt
+    - coverage run --source=. -m pytest
+    - coverage report
+    - coverage xml
+    coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
+    artifacts:
+        reports:
+            coverage_report:
+                coverage_format: cobertura
+                path: coverage.xml
+    only:
+        - master
+        - merge_requests
diff --git a/subtrees/dagflow/subtrees/dictwrapper/README.md b/subtrees/dagflow/subtrees/dictwrapper/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7516994f16ea61a017bb6fb999a98a5191079895
--- /dev/null
+++ b/subtrees/dagflow/subtrees/dictwrapper/README.md
@@ -0,0 +1,11 @@
+# Summary
+
+[![python](https://img.shields.io/badge/python-3.10-purple.svg)](https://www.python.org/)
+[![pipeline](https://git.jinr.ru/dag-computing/dictwrapper.py/badges/master/pipeline.svg)](https://git.jinr.ru/dag-computing/dictwrapper.py/commits/master)
+[![coverage report](https://git.jinr.ru/dag-computing/dictwrapper.py/badges/master/coverage.svg)](https://git.jinr.ru/dag-computing/dictwrapper.py/-/commits/master)
+<!--- Uncomment here after adding docs!
+[![pages](https://img.shields.io/badge/pages-link-white.svg)](http://dag-computing.pages.jinr.ru/dictwrapper.py)
+-->
+
+* `DictWrapper` is a tool for working with nested dictionaries
+* `Storage` is a map-like class that supports a list/set as a key and does not distinguish the order of key elements
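+
+A minimal usage sketch (based on the tests in `test/`; see them for the full behaviour):
+
+```python
+from dictwrapper import DictWrapper
+from storage import Storage
+
+dw = DictWrapper({"a": {"b": 1}}, sep=".")
+assert dw["a.b"] == 1          # string keys are split by `sep`
+assert dw[("a", "b")] == 1     # tuple keys address nested dictionaries
+assert dw._.a.b == 1           # attribute-style access via `._`
+
+st = Storage()
+st["b", "a"] = 1
+assert st["a", "b"] == 1       # the order of key elements does not matter
+```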
diff --git a/subtrees/dagflow/subtrees/dictwrapper/dictwrapper/__init__.py b/subtrees/dagflow/subtrees/dictwrapper/dictwrapper/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..08d58d7a49f476d4fb48d92b2756353a6dc09b4d
--- /dev/null
+++ b/subtrees/dagflow/subtrees/dictwrapper/dictwrapper/__init__.py
@@ -0,0 +1 @@
+from .dictwrapper import DictWrapper, DictWrapperAccess
diff --git a/subtrees/dagflow/subtrees/dictwrapper/dictwrapper/classwrapper.py b/subtrees/dagflow/subtrees/dictwrapper/dictwrapper/classwrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec06b6468799c8bf7f6a04f775ac17ab2befff81
--- /dev/null
+++ b/subtrees/dagflow/subtrees/dictwrapper/dictwrapper/classwrapper.py
@@ -0,0 +1,47 @@
+from typing import Any
+
+class ClassWrapper(object):
+    _object: Any
+    _types: Any
+    def __init__(self, obj, *, types=None):
+        self._object = obj
+        self._types = type(obj) if types is None else types
+        self._wrapper_class = type(self)
+
+    @property
+    def object(self) -> Any:
+        return self._object
+
+    def __str__(self):
+        return str(self._object)
+
+    def __repr__(self):
+        return repr(self._object)
+
+    def __dir__(self):
+        return dir(self._object)
+
+    def __len__(self):
+        return len(self._object)
+
+    def __bool__(self):
+        return bool(self._object)
+
+    def __contains__(self, v):
+        return v in self._object
+
+    def __eq__(self, other):
+        if isinstance(other, ClassWrapper):
+            return self._object==other._object
+
+        return self._object==other
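+    # NB: defining __eq__ without __hash__ makes instances unhashable
+    # (Python sets __hash__ to None), so wrappers may not be used as dict keys.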
+
+    def _wrap(self, obj, **kwargs):
+        if isinstance(obj, ClassWrapper):
+            return obj
+
+        if isinstance(obj, self._types):
+            return self._wrapper_class(obj, **kwargs)
+
+        return obj
+
diff --git a/subtrees/dagflow/subtrees/dictwrapper/dictwrapper/dictwrapper.py b/subtrees/dagflow/subtrees/dictwrapper/dictwrapper/dictwrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..e76223a808fff96e5369c49e849573e20ea7a08b
--- /dev/null
+++ b/subtrees/dagflow/subtrees/dictwrapper/dictwrapper/dictwrapper.py
@@ -0,0 +1,289 @@
+from .classwrapper import ClassWrapper
+from .visitor import MakeDictWrapperVisitor
+from .dictwrapperaccess import DictWrapperAccess
+
+from collections.abc import Sequence, MutableMapping
+from typing import Any, Optional
+
+class DictWrapper(ClassWrapper):
+    """Dictionary wrapper managing nested dictionaries
+        The following functionality is implemented:
+        - Tuple keys are treated to access nested dictionaries ('key1', 'key2', 'key3')
+        - Optionally sep symbol may be set to automatically split string keys into tuple keys:
+          'key1.key2.key3' will be treated as a nested key if '.' is set for the sep symbol
+        - self._ may be used to access nested dictionaries via attributes: dw.key1.key2.key3
+    """
+    _sep: Optional[str] = None
+    _parent: Any = None
+    _types: Any = dict
+    _not_recursive_to_others: bool = True
+    def __new__(cls, dic, *args, parent=None, sep=None, recursive_to_others=None):
+        if not isinstance(dic, (MutableMapping, DictWrapper)):
+            return dic
+        return ClassWrapper.__new__(cls)
+
+    def __init__(self, dic, *, sep: Optional[str]=None, parent=None, recursive_to_others: bool=False):
+        if isinstance(dic, DictWrapper):
+            if sep is None:
+                sep = dic._sep
+            recursive_to_others = not dic._not_recursive_to_others
+            dic = dic._object
+        super().__init__(dic, types=type(dic))
+
+        self._sep = sep
+        self._not_recursive_to_others = not recursive_to_others
+        if parent:
+            if sep and sep!=parent._sep:
+                raise ValueError(f'Inconsistent separators: {sep} (self) and {parent._sep} (parent)')
+
+            self._parent = parent
+            self._sep = parent._sep
+            self._types = parent._types
+            self._not_recursive_to_others = parent._not_recursive_to_others
+
+    @property
+    def _(self):
+        return DictWrapperAccess(self)
+
+    def parent(self):
+        return self._parent
+
+    def child(self, key):
+        try:
+            ret = self[key]
+        except KeyError:
+            ret = self[key]=self._types()
+            return self._wrap(ret, parent=self)
+
+        if not isinstance(ret, self._wrapper_class):
+            raise KeyError('Child {!s} is not DictWrapper'.format(key))
+
+        return ret
+
+    def keys(self):
+        return self._object.keys()
+
+    def iterkey(self, key):
+        if isinstance(key, str):
+            if self._sep:
+                yield from key.split(self._sep)
+            else:
+                yield key
+        elif isinstance(key, Sequence):
+            for sk in key:
+                yield from self.iterkey(sk)
+        else:
+            yield key
+
+    def splitkey(self, key):
+        it = self.iterkey(key)
+        try:
+            return next(it), tuple(it)
+        except StopIteration:
+            return None, None
+
+    def get(self, key, *args, **kwargs):
+        if key==():
+            return self
+        key, rest=self.splitkey(key)
+
+        if not rest:
+            ret = self._object.get(key, *args, **kwargs)
+            return self._wrap(ret, parent=self)
+
+        sub = self._wrap(self._object.get(key), parent=self)
+        if sub is None:
+            if args:
+                return args[0]
+            raise KeyError(f"No nested key '{key}'")
+
+        if self._not_recursive_to_others and not isinstance(sub, DictWrapper):
+            raise TypeError(f"Nested value for '{key}' has wrong type")
+
+        return sub.get(rest, *args, **kwargs)
+
+    def __getitem__(self, key):
+        if key==():
+            return self
+        key, rest=self.splitkey(key)
+
+        sub = self._object.__getitem__(key)
+        sub = self._wrap(sub, parent=self)
+        if not rest:
+            return sub
+
+        if sub is None:
+            raise KeyError( f"No nested key '{key}'" )
+
+        if self._not_recursive_to_others and not isinstance(sub, DictWrapper):
+            raise TypeError(f"Nested value for '{key}' has wrong type")
+
+        return sub[rest]
+
+    def __delitem__(self, key):
+        if key==():
+            raise ValueError('May not delete itself')
+        key, rest=self.splitkey(key)
+
+        sub = self._wrap(self._object.__getitem__(key), parent=self)
+        if not rest:
+            del self._object[key]
+            return
+
+        if self._not_recursive_to_others and not isinstance(sub, DictWrapper):
+            raise TypeError(f"Nested value for '{key}' has wrong type")
+
+        del sub[rest]
+
+    def setdefault(self, key, value):
+        key, rest=self.splitkey(key)
+
+        if not rest:
+            ret=self._object.setdefault(key, value)
+            return self._wrap(ret, parent=self)
+
+        if key in self:
+            sub = self._wrap(self._object.get(key), parent=self)
+        else:
+            sub = self._object[key] = self._types()
+            sub = self._wrap(sub, parent=self)
+
+        if self._not_recursive_to_others and not isinstance(sub, DictWrapper):
+            raise TypeError(f"Nested value for '{key}' has wrong type")
+
+        return sub.setdefault(rest, value)
+
+    def set(self, key, value):
+        key, rest=self.splitkey(key)
+
+        if not rest:
+            self._object[key] = value
+            return value
+
+        if key in self:
+            sub = self._wrap(self._object.get(key), parent=self)
+        else:
+            sub = self._object[key] = self._types()
+            sub = self._wrap(sub, parent=self)
+
+        if self._not_recursive_to_others and not isinstance(sub, DictWrapper):
+            raise TypeError(f"Nested value for '{key}' has wrong type")
+
+        return sub.set(rest, value)
+
+    __setitem__ = set
+
+    def __contains__(self, key):
+        if key==():
+            return True
+        key, rest=self.splitkey(key)
+
+        if key not in self._object:
+            return False
+
+        if rest:
+            sub = self._wrap(self._object.get(key), parent=self)
+
+            if self._not_recursive_to_others and not isinstance(sub, DictWrapper):
+                raise TypeError(f"Nested value for '{key}' has wrong type")
+
+            return rest in sub
+
+        return True
+
+    def items(self):
+        for k, v in self._object.items():
+            yield k, self._wrap(v, parent=self)
+
+    def values(self):
+        for v in self._object.values():
+            yield self._wrap(v, parent=self)
+
+    def deepcopy(self):
+        new = DictWrapper(self._types(), parent=self._parent, sep=self._sep, recursive_to_others=not self._not_recursive_to_others)
+        for k, v in self.items():
+            k = k,
+            if isinstance(v, self._wrapper_class):
+                new[k] = v.deepcopy()._object
+            else:
+                new[k] = v
+
+        new._sep = self._sep
+
+        return new
+
+    def walkitems(self, startfromkey=(), *, appendstartkey=False, maxdepth=None):
+        v0 = self[startfromkey]
+        k0 = tuple(self.iterkey(startfromkey))
+
+        if maxdepth is None:
+            nextdepth=None
+        else:
+            nextdepth=max(maxdepth-len(k0)-1, 0)
+
+        if maxdepth==0 or not isinstance(v0, self._wrapper_class):
+            if appendstartkey:
+                yield k0, v0
+            else:
+                yield (), v0
+            return
+
+        if not appendstartkey:
+            k0 = ()
+
+        for k, v in v0.items():
+            k = k0+(k,)
+            if isinstance(v, self._wrapper_class):
+                for k1, v1 in v.walkitems(maxdepth=nextdepth):
+                    yield k+k1, v1
+            elif not self._not_recursive_to_others and isinstance(v, MutableMapping):
+                for k1, v1 in v.items():
+                    if isinstance(k1, tuple):
+                        yield k+k1, v1
+                    else:
+                        yield k+(k1,), v1
+            else:
+                yield k, v
+
+    def walkdicts(self):
+        yieldself=True
+        for k, v in self.items():
+            k = k,
+            if isinstance(v, self._wrapper_class):
+                yieldself=False
+                for k1, v1 in v.walkdicts():
+                    yield k+k1, v1
+        if yieldself:
+            yield (), self
+
+    def walkkeys(self, *args, **kwargs):
+        for k, _ in self.walkitems(*args, **kwargs):
+            yield k
+
+    def walkvalues(self, *args, **kwargs):
+        for _, v in self.walkitems(*args, **kwargs):
+            yield v
+
+    def visit(self, visitor, parentkey=()):
+        visitor = MakeDictWrapperVisitor(visitor)
+
+        if not parentkey:
+            visitor.start(self)
+
+        visitor.enterdict(parentkey, self)
+        for k, v in self.items():
+            key = parentkey + (k,)
+            if isinstance(v, self._wrapper_class):
+                v.visit(visitor, parentkey=key)
+            else:
+                visitor.visit(key, v)
+
+        visitor.exitdict(parentkey, self)
+
+        if not parentkey:
+            visitor.stop(self)
diff --git a/subtrees/dagflow/subtrees/dictwrapper/dictwrapper/dictwrapperaccess.py b/subtrees/dagflow/subtrees/dictwrapper/dictwrapper/dictwrapperaccess.py
new file mode 100644
index 0000000000000000000000000000000000000000..76274d633787eda2ebd1602ff7625c224743f2dc
--- /dev/null
+++ b/subtrees/dagflow/subtrees/dictwrapper/dictwrapper/dictwrapperaccess.py
@@ -0,0 +1,25 @@
+class DictWrapperAccess(object):
+    '''DictWrapper wrapper. Enables attribute-based access to nested dictionaries'''
+    _ = None
+    def __init__(self, dct):
+        self.__dict__['_'] = dct
+
+    def __call__(self, key):
+        return self._.child(key)._
+
+    def __getattr__(self, key):
+        ret = self._[key]
+
+        if isinstance(ret, self._._wrapper_class):
+            return ret._
+
+        return ret
+
+    def __setattr__(self, key, value):
+        self._[key]=value
+
+    def __delattr__(self, key):
+        del self._[key]
+
+    def __dir__(self):
+        return list(self._.keys())
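+
+# Sketch: given dw = DictWrapper({'a': {'b': 1}}), dw._.a.b evaluates to 1,
+# and the assignment dw._.a.b = 2 writes through to the underlying dict.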
diff --git a/subtrees/dagflow/subtrees/dictwrapper/dictwrapper/visitor.py b/subtrees/dagflow/subtrees/dictwrapper/dictwrapper/visitor.py
new file mode 100644
index 0000000000000000000000000000000000000000..5600451fd9b4c02c1e919ad69d31268c9c70b70a
--- /dev/null
+++ b/subtrees/dagflow/subtrees/dictwrapper/dictwrapper/visitor.py
@@ -0,0 +1,59 @@
+class DictWrapperVisitor(object):
+    def start(self, dct):
+        pass
+
+    def enterdict(self, k, v):
+        pass
+
+    def visit(self, k, v):
+        pass
+
+    def exitdict(self, k, v):
+        pass
+
+    def stop(self, dct):
+        pass
+
+def MakeDictWrapperVisitor(fcn):
+    if isinstance(fcn, DictWrapperVisitor):
+        return fcn
+
+    if not callable(fcn):
+        raise TypeError(f'Expected a callable, got {type(fcn).__name__}')
+
+    ret=DictWrapperVisitor()
+    ret.visit = fcn
+    return ret
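+
+# Any two-argument callable may be passed wherever a visitor is expected: it
+# is wrapped into a DictWrapperVisitor with the callable as its `visit`
+# method. Sketch: dw.visit(lambda key, value: print(key, value))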
+
+class DictWrapperVisitorDemonstrator(DictWrapperVisitor):
+    fmt = '{action:7s} {depth!s:>5s} {key!s:<{keylen}s} {vtype!s:<{typelen}s} {value}'
+    opts = dict(keylen=20, typelen=15)
+    def typestring(self, v):
+        return type(v).__name__
+
+    def start(self, d):
+        v = object.__repr__(d.object)
+        print('Start printing dictionary:', v)
+        self._print('Action', 'Key', 'Value', 'Type', depth='Depth')
+
+    def stop(self, _):
+        print('Done printing dictionary')
+
+    def enterdict(self, k, d):
+        d = d.object
+        v = object.__repr__(d)
+        self._print('Enter', k, v, self.typestring(d))
+
+    def exitdict(self, k, d):
+        d = d.object
+        v = object.__repr__(d)
+        self._print('Exit', k, v, self.typestring(d))
+
+    def visit(self, k, v):
+        self._print('Visit', k, v, self.typestring(v))
+
+    def _print(self, action, k, v, vtype, *, depth=None):
+        if depth is None:
+            depth = len(k)
+        print(self.fmt.format(action=action, depth=depth, key=k, vtype=vtype, value=v, **self.opts))
+
diff --git a/subtrees/dagflow/subtrees/dictwrapper/requirements.txt b/subtrees/dagflow/subtrees/dictwrapper/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..755c43c771ca6569f968a4c78c66331e92ca2496
--- /dev/null
+++ b/subtrees/dagflow/subtrees/dictwrapper/requirements.txt
@@ -0,0 +1,3 @@
+pytest
+pytest-cov
+coverage
diff --git a/subtrees/dagflow/subtrees/dictwrapper/storage/__init__.py b/subtrees/dagflow/subtrees/dictwrapper/storage/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f838374ff32bc47a2f2d5228a46df22263aa9e2
--- /dev/null
+++ b/subtrees/dagflow/subtrees/dictwrapper/storage/__init__.py
@@ -0,0 +1 @@
+from .storage import Storage
diff --git a/subtrees/dagflow/subtrees/dictwrapper/storage/storage.py b/subtrees/dagflow/subtrees/dictwrapper/storage/storage.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d168d23430a3f5e879688a82103183924b56409
--- /dev/null
+++ b/subtrees/dagflow/subtrees/dictwrapper/storage/storage.py
@@ -0,0 +1,83 @@
+from __future__ import annotations
+
+from collections import UserDict
+from collections.abc import Sequence
+from typing import Any, Callable, Generator, Optional
+
+class Storage(UserDict):
+    _protect: bool = False
+
+    def __init__(*args, protect: bool = False, **kwargs) -> None:
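+        # NB: 'self' is taken positionally from args[0] (the UserDict idiom)
+        # so that keyword arguments named 'self' are not shadowed.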
+        self = args[0]
+        self._protect = protect
+        UserDict.__init__(*args, **kwargs)
+
+    def _process_key(self, key: Any) -> tuple | frozenset:
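+        # Sequence keys (tuples, lists, ...) are sorted into a canonical tuple
+        # so lookups are independent of key order; non-sequence keys are
+        # wrapped into a single-element frozenset. NB: plain strings are
+        # sequences too, so a string key is split into its sorted characters.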
+        if isinstance(key, Sequence):
+            return tuple(sorted(key))
+        else:
+            return frozenset((key,))
+
+    def __getitem__(self, key: Any) -> Any:
+        key = self._process_key(key)
+        return super().__getitem__(key)
+
+    def __setitem__(self, key: Any, val: Any) -> None:
+        key = self._process_key(key)
+        if self._protect and key in self:
+            raise AttributeError(
+                f"Reassigning of the existed key '{key}' is restricted, "
+                "due to the protection!"
+            )
+        super().__setitem__(key, val)
+
+    def __contains__(self, key: Any) -> bool:
+        key = self._process_key(key)
+        return super().__contains__(key)
+
+    def values(self, *, keys: tuple = (), **kwargs) -> Generator:
+        for _, val in self.items(*keys, **kwargs):
+            yield val
+
+    def keys(self, *args, **kwargs) -> Generator:
+        for key, _ in self.items(*args, **kwargs):
+            yield key
+
+    def items(
+        self,
+        *args,
+        filterkey: Optional[Callable[[Any], bool]] = None,
+        filterkeyelem: Optional[Callable[[Any], bool]] = None,
+    ) -> Generator:
+        """
+        Yields the items whose keys contain every element of `args`.
+        If `args` is empty, yields all items; `filterkey` and
+        `filterkeyelem` further filter by the whole key or its elements.
+        """
+        res = super().items()
+        if args:
+            args = set(args)
+            res = (elem for elem in res if args.issubset(elem[0]))
+        if filterkey:
+            res = (elem for elem in res if filterkey(elem[0]))
+        if filterkeyelem:
+            res = (
+                elem
+                for elem in res
+                if all(filterkeyelem(key) for key in elem[0])
+            )
+
+        yield from res
+
+    def slice(self, *args, **kwargs) -> Storage:
+        """
+        Returns new `Storage` with keys containing `args`.
+        It is possible to filter elements by `filterkey` and `filterkeyelem`.
+        """
+        return Storage(
+            self.items(
+                *args,
+                filterkey=kwargs.pop("filterkey", None),
+                filterkeyelem=kwargs.pop("filterkeyelem", None),
+            ),  # type: ignore
+            **kwargs,
+        )
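+
+# Usage sketch (see test/test_storage.py):
+#   st = Storage(protect=True)
+#   st['a', 'b'] = 1     # stored under the canonical key ('a', 'b')
+#   st['b', 'a']         # -> 1; key order is irrelevant
+#   st['b', 'a'] = 2     # AttributeError: protected keys may not be reassigned
+#   st.slice('a')        # new Storage with the keys containing 'a'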
diff --git a/subtrees/dagflow/subtrees/dictwrapper/test/test_dictwrapper.py b/subtrees/dagflow/subtrees/dictwrapper/test/test_dictwrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..c396e143682af2ae9a67acd3a0d8c17d67a2983c
--- /dev/null
+++ b/subtrees/dagflow/subtrees/dictwrapper/test/test_dictwrapper.py
@@ -0,0 +1,336 @@
+from dictwrapper.dictwrapper import DictWrapper
+import pytest
+
+def test_dictwrapper_01():
+    dw = DictWrapper({})
+
+    assert not dw
+    assert len(dw)==0
+
+def test_dictwrapper_02():
+    dw = DictWrapper(dict(a=1))
+
+    assert dw
+    assert len(dw)==1
+
+def test_dictwrapper_03():
+    d = dict(a=1, b=2, c=3)
+    dw = DictWrapper(d)
+
+    assert dw.get('a')==1
+    assert dw.get('b')==2
+    assert dw.get('c')==3
+    assert dw.get('d') is None
+    assert dw.get('d.e') is None
+
+    assert tuple(dw.keys())==('a','b','c')
+
+@pytest.mark.parametrize('sep', [None, '.'])
+def test_dictwrapper_04(sep):
+    dct = dict(a=1, b=2, c=3, d=dict(e=4), f=dict(g=dict(h=5)))
+    dct['z.z.z'] = 0
+    print(dct)
+    dw = DictWrapper(dct, sep=sep)
+
+    #
+    # Test self access
+    #
+    assert dw.get(()).object is dct
+    assert dw[()].object is dct
+
+    #
+    # Test wrapping
+    #
+    assert isinstance(dw.get('d'), DictWrapper)
+    assert isinstance(dw.get(('f', 'g')), DictWrapper)
+
+    #
+    # Test get tuple
+    #
+    assert dw.get(('d', 'e'))==4
+    assert dw.get(('d', 'e1')) is None
+    assert dw.get(('f', 'g', 'h'))==5
+    try:
+        dw.get(('z', 'z', 'z'))
+        assert False
+    except KeyError:
+        pass
+
+    #
+    # Test getitem tuple
+    #
+    assert dw[('d', 'e')]==4
+    try:
+        dw[('d', 'e1')]
+        assert False
+    except KeyError:
+        pass
+    assert dw[('f', 'g', 'h')]==5
+
+    try:
+        dw[('z', 'z', 'z')]
+        assert False
+    except KeyError:
+        pass
+
+    #
+    # Test get sep
+    #
+    if sep:
+        assert dw.get('d.e')==4
+    else:
+        assert dw.get('d.e') is None
+
+    if sep:
+        try:
+            dw.get('z.z.z')
+            assert False
+        except KeyError:
+            pass
+    else:
+        assert dw.get('z.z.z')==0
+
+    #
+    # Test getitem sep
+    #
+    try:
+        assert dw['d.e']==4
+        assert sep is not None
+    except KeyError:
+        pass
+
+    try:
+        assert dw['f.g.h']==5
+        assert dw[('f.g', 'h')]==5
+        assert sep is not None
+    except KeyError:
+        pass
+
+    if sep:
+        try:
+            dw['z.z.z']
+            assert False
+        except KeyError:
+            pass
+    else:
+        assert dw['z.z.z']==0
+
+    #
+    # Test contains
+    #
+    assert 'a' in dw
+    assert 'a1' not in dw
+    assert 'd' in dw
+
+    #
+    # Test contains tuple
+    #
+    assert ('d', 'e') in dw
+    assert ('k', 'e') not in dw
+    assert ('f', 'g', 'h') in dw
+    assert ('f.g.h' in dw) == bool(sep)
+    assert ('z.z.z' in dw) == bool(not sep)
+
+    #
+    # Test parents
+    #
+    g = dw.get(('f', 'g'))
+    assert g.parent().parent() is dw
+
+    #
+    # Test children
+    #
+    m=dw.child(('k', 'l', 'm'))
+    assert dw.get(('k', 'l', 'm')).object is m.object
+
+    #
+    # Test recursive setitem
+    #
+    dw[('k', 'l', 'm', 'n')] = 5
+    try:
+        dw.child(tuple('klmn'))
+        assert False
+    except KeyError:
+        pass
+    assert dw.get(('k', 'l', 'm', 'n')) == 5
+
+    dw['o.l.m.n'] = 6
+    assert dw['o.l.m.n'] == 6
+    if not sep:
+        assert dw.object['o.l.m.n'] == 6
+
+    #
+    # Test attribute access
+    #
+    assert dw._.a==1
+    assert dw._.b==2
+    assert dw._.c==3
+    assert dw._.d.e==4
+    assert dw._.f.g.h==5
+
+    dw._.f.g.h=6
+    assert dw._.f.g.h==6
+    assert dw._._ is dw
+
+def test_dictwrapper_06_inheritance():
+    dct = dict([('a', 1), ('b', 2), ('c', 3), ('d', dict(e=4)), ('f', dict(g=dict(h=5, i=6)))])
+    dct['z.z.z'] = 0
+
+    class DictWrapperA(DictWrapper):
+        def count(self):
+            return len(tuple(self.walkitems()))
+
+        def depth(self):
+            return max([len(k) for k in self.walkkeys()])
+
+    dw = DictWrapperA(dct, sep='.')
+    assert dw.count()==7
+    assert dw['d'].count()==1
+    assert dw['f'].count()==2
+    assert dw['f.g'].count()==2
+    assert dw._.f._.count()==2
+
+    assert dw.depth()==3
+    assert dw['d'].depth()==1
+    assert dw['f'].depth()==2
+
+def test_dictwrapper_07_delete():
+    dct = dict([('a', 1), ('b', 2), ('c', 3), ('d', dict(e=4)), ('f', dict(g=dict(h=5)))])
+    dct['z.z.z'] = 0
+    dw = DictWrapper(dct)
+
+    assert 'a' in dw
+    del dw['a']
+    assert 'a' not in dw
+
+    assert ('d', 'e') in dw
+    del dw[('d', 'e')]
+    assert ('d', 'e') not in dw
+
+    assert ('f', 'g', 'h') in dw
+    del dw._.f.g.h
+    assert ('f', 'g', 'h') not in dw
+    assert ('f', 'g') in dw
+
+def test_dictwrapper_08_create():
+    dct = dict([('a', 1), ('b', 2), ('c', 3), ('d', dict(e=4)), ('f', dict(g=dict(h=5)))])
+    dct['z.z.z'] = 0
+    dw = DictWrapper(dct, sep='.')
+
+    dw._('i.k').l=3
+    assert dw._.i.k.l==3
+
+    child = dw.child('child')
+    assert child.object=={}
+
+def test_dictwrapper_09_dictcopy():
+    dct = dict([('a', 1), ('b', 2), ('c', 3), ('d', dict(e=4)), ('f', dict(g=dict(h=5)))])
+    dct['z'] = {}
+    dw = DictWrapper(dct, sep='.')
+
+    dw1 = dw.deepcopy()
+    for i, (k, v) in enumerate(dw1.walkdicts()):
+        # print(i, k)
+        assert k in dw
+        assert v._object==dw[k]._object
+        assert v._object is not dw[k]._object
+        assert type(v._object) is type(dw[k]._object)
+    assert i==2
+
+def test_dictwrapper_09_walkitems():
+    dct = {
+        'a': 1,
+        'b': 2,
+        'c': 3,
+        'c1': {
+            'i': {
+                'j': {
+                    'k': {
+                        'l': 6
+                    }
+                }
+            }
+        },
+        'd': {
+            'e': 4
+        },
+        'f': {
+            'g': {
+                'h': 5
+            }
+        }
+    }
+    dct['z'] = {}
+    dw = DictWrapper(dct, sep='.')
+
+    imaxlist=[5, 0, 6, 5, 5, 5, 5, 5, 5]
+    for imax, maxdepth in zip(imaxlist, [None]+list(range(len(imaxlist)))):
+        i=0
+        print(f'{imax=}, {maxdepth=}')
+        maxk = -1
+        for i, (k, v) in enumerate(dw.walkitems(maxdepth=maxdepth)):
+            print(i, k, v)
+            assert maxdepth is None or len(k)<=maxdepth
+            maxk=max(maxk, len(k))
+        print(f'{maxk=}')
+        print()
+        assert i==imax
+
+def test_dictwrapper_09_walk():
+    dct = dict([('a', 1), ('b', 2), ('c', 3), ('d', dict(e=4)), ('f', dict(g=dict(h=5)))])
+    dw = DictWrapper(dct)
+
+    keys0 = [ ('a',), ('b', ), ('c',), ('d', 'e'), ('f', 'g', 'h') ]
+    keys = [k for k, v in dw.walkitems()]
+    assert keys==keys0
+
+    assert [(k,v) for k, v in dw.walkitems('a', appendstartkey=True)] == [(('a',), 1)]
+    assert [(k,v) for k, v in dw.walkitems('a', appendstartkey=False)] == [((), 1)]
+    assert [(k,v) for k, v in dw.walkitems('d', appendstartkey=True)] == [(('d','e'), 4)]
+    assert [(k,v) for k, v in dw.walkitems('d', appendstartkey=False)] == [(('e',), 4)]
+    assert [(k,v) for k, v in dw.walkitems(('f','g'), appendstartkey=True)] == [(('f','g', 'h'), 5)]
+    assert [(k,v) for k, v in dw.walkitems(('f','g'), appendstartkey=False)] == [(('h',), 5)]
+
+def test_dictwrapper_10_iterkey():
+    d = dict(a=1, b=2, c=3)
+    dw = DictWrapper(d)
+
+    assert ['a']==list(dw.iterkey('a'))
+    assert ['a.b']==list(dw.iterkey('a.b'))
+    assert ['a', 'b']==list(dw.iterkey(('a', 'b')))
+    assert [1]==list(dw.iterkey(1))
+    assert [1.0]==list(dw.iterkey(1.0))
+
+def test_dictwrapper_11_iterkey():
+    d = dict(a=1, b=2, c=3)
+    dw = DictWrapper(d,  sep='.')
+
+    assert ['a']==list(dw.iterkey('a'))
+    assert ['a', 'b']==list(dw.iterkey('a.b'))
+    assert ['a', 'b']==list(dw.iterkey(('a', 'b')))
+    assert [1]==list(dw.iterkey(1))
+    assert [1.0]==list(dw.iterkey(1.0))
+
+def test_dictwrapper_setdefault_01():
+    d = dict(a=dict(b=dict(key='value')))
+    dw = DictWrapper(d)
+
+    newdict = dict(newkey='newvalue')
+
+    sd1 = dw.setdefault(('a','b'), newdict)
+    assert isinstance(sd1, DictWrapper)
+    assert sd1._object==d['a']['b']
+
+    sd2 = dw.setdefault(('a','c'), newdict)
+    assert isinstance(sd2, DictWrapper)
+    assert sd2._object==newdict
+
+def test_dictwrapper_eq_01():
+    d = dict(a=dict(b=dict(key='value')))
+    dw = DictWrapper(d)
+
+    assert dw['a']==d['a']
+    assert d['a']==dw['a']
+    assert dw['a']!=d
+    assert dw['a']==dw['a']
+    assert dw['a'] is not dw['a']
diff --git a/subtrees/dagflow/subtrees/dictwrapper/test/test_dictwrapper_storage.py b/subtrees/dagflow/subtrees/dictwrapper/test/test_dictwrapper_storage.py
new file mode 100644
index 0000000000000000000000000000000000000000..13a8c461c8eb8bd5fae31fae6745cd8fc92a1841
--- /dev/null
+++ b/subtrees/dagflow/subtrees/dictwrapper/test/test_dictwrapper_storage.py
@@ -0,0 +1,99 @@
+from dictwrapper.dictwrapper import DictWrapper
+from storage.storage import Storage
+from pytest import raises
+
+def test_dictwrapper_storage():
+    storage = Storage({
+        ('a1', 'b1', 'c1'): 'v1',
+        ('a2', 'b2', 'c2'): 'v2',
+        })
+    dct = {'root': {
+        'subfolder1': {
+            'key1': 'value1',
+            'key2': 'value2'
+        },
+        'subfolder2': {
+            'key1': 'value1',
+            'key2': 'value2',
+            'st': storage
+        },
+        'key0': 'value0'
+    }}
+    dw = DictWrapper(dct, recursive_to_others=True)
+    dws = DictWrapper(dct, sep='.', recursive_to_others=True)
+    dwerror = DictWrapper(dct, recursive_to_others=False)
+
+    objects = (dw, dws, dwerror)
+    objectsok = (dw, dws)
+
+    assert storage['a1', 'b1', 'c1']=='v1'
+    assert storage['b1', 'a1', 'c1']=='v1'
+    assert storage['c1', 'a1', 'b1']=='v1'
+
+    for obj in objects:
+        st1 = obj['root', 'subfolder2', 'st']
+        assert st1 is storage
+
+    for obj in objectsok:
+        assert obj['root', 'subfolder2', 'st', 'a1', 'b1', 'c1']=='v1'
+        assert obj['root', 'subfolder2', 'st', 'b1', 'a1', 'c1']=='v1'
+        assert obj['root', 'subfolder2', 'st', 'c1', 'a1', 'b1']=='v1'
+        assert obj['root', 'subfolder2', 'st', 'a2', 'b2', 'c2']=='v2'
+        assert obj['root', 'subfolder2', 'st', 'b2', 'a2', 'c2']=='v2'
+        assert obj['root', 'subfolder2', 'st', 'c2', 'a2', 'b2']=='v2'
+
+        assert ('root', 'subfolder2', 'st', 'a1', 'b1', 'c1') in obj
+        assert ('root', 'subfolder2', 'st', 'b1', 'a1', 'c1') in obj
+        assert ('root', 'subfolder2', 'st', 'c1', 'a1', 'b1') in obj
+        assert ('root', 'subfolder2', 'st', 'a2', 'b2', 'c2') in obj
+        assert ('root', 'subfolder2', 'st', 'b2', 'a2', 'c2') in obj
+        assert ('root', 'subfolder2', 'st', 'c2', 'a2', 'b2') in obj
+
+    assert dws['root.subfolder2.st.a1.b1.c1']=='v1'
+    assert dws['root.subfolder2.st.b1.a1.c1']=='v1'
+    assert dws['root.subfolder2.st.c1.a1.b1']=='v1'
+    assert dws['root.subfolder2.st.a2.b2.c2']=='v2'
+    assert dws['root.subfolder2.st.b2.a2.c2']=='v2'
+    assert dws['root.subfolder2.st.c2.a2.b2']=='v2'
+
+    assert 'root.subfolder2.st.a1.b1.c1' in dws
+    assert 'root.subfolder2.st.b1.a1.c1' in dws
+    assert 'root.subfolder2.st.c1.a1.b1' in dws
+    assert 'root.subfolder2.st.a2.b2.c2' in dws
+    assert 'root.subfolder2.st.b2.a2.c2' in dws
+    assert 'root.subfolder2.st.c2.a2.b2' in dws
+
+    assert 'root.subfolder3.st.c2.a2.b2' not in dws
+    assert 'root.subfolder2.st.c3.a2.b2' not in dws
+
+    with raises(KeyError):
+        dws['root.subfolder2.st.a1.b2.c1']
+
+    with raises(KeyError):
+        dws['root.subfolder1.st.a1.b1.c1']
+
+    with raises(TypeError):
+        dwerror['root', 'subfolder2', 'st', 'a1', 'b1', 'c1']
+
+    with raises(TypeError):
+        dwerror.get(('root', 'subfolder2', 'st', 'a1', 'b1', 'c1'))
+
+    with raises(TypeError):
+        dwerror.get(('root', 'subfolder2', 'st', 'a1', 'b1', 'c1'), 'default')
+
+    with raises(TypeError):
+        del dwerror['root', 'subfolder2', 'st', 'a1', 'b1', 'c1']
+
+    with raises(TypeError):
+        dwerror.setdefault(('root', 'subfolder2', 'st', 'a1', 'b1', 'c1'), 'default')
+
+    with raises(TypeError):
+        dwerror.set(('root', 'subfolder2', 'st', 'a1', 'b1', 'c1'), 'default')
+
+    with raises(TypeError):
+        ('root', 'subfolder2', 'st', 'a1', 'b1', 'c1') in dwerror
+
+    # Walks
+    for k, v in dw.walkitems():
+        print(k, v)
+
diff --git a/subtrees/dagflow/subtrees/dictwrapper/test/test_storage.py b/subtrees/dagflow/subtrees/dictwrapper/test/test_storage.py
new file mode 100644
index 0000000000000000000000000000000000000000..c27818dd2f96272f2754dde50ac15e1a72381b8e
--- /dev/null
+++ b/subtrees/dagflow/subtrees/dictwrapper/test/test_storage.py
@@ -0,0 +1,62 @@
+from itertools import permutations
+
+from storage.storage import Storage
+from pytest import raises
+
+
+def test_getset():
+    storage = Storage()
+    safestorage = Storage(protect=True)
+    val = "val"
+    val2 = ["val", "lav"]
+    storage["a", "b", "c"] = val
+    safestorage["a", "b", "c"] = val
+    for key in permutations(("a", "b", "c")):
+        assert storage[tuple(key)] == val
+        assert safestorage[tuple(key)] == val
+    storage["c", "b", "a"] = val2
+    for key in permutations(("a", "b", "c")):
+        assert storage[tuple(key)] == val2
+        with raises(AttributeError):
+            safestorage[tuple(key)] = val2
+    safestorage._protect = False
+    for key in permutations(("a", "b", "c")):
+        safestorage[tuple(key)] = val
+
+
+def test_slice_filter():
+    storage = Storage()
+    storage["a", "b"] = 1
+    storage["a", "b", "c"] = 2
+    storage["a", "c", "d", "b"] = 3
+    assert all(
+        len(tuple(x)) == 3
+        for x in (storage.items(), storage.items("a"), storage.items("a", "b"))
+    )
+    assert len(tuple(storage.items("a", "b", "c"))) == 2
+    assert len(tuple(storage.items("a", "b", "d", "c"))) == 1
+    assert isinstance(storage.slice("a"), Storage)
+    assert all(
+        x == storage
+        for x in (
+            storage.slice("a"),
+            storage.slice("a", "b"),
+            storage.slice(
+                filterkey=lambda key: all(elem in "abcd" for elem in key)
+            ),
+            storage.slice(filterkeyelem=lambda key: key in "abcd")
+        )
+    )
+    assert storage.slice("a", "b", "c") == {
+        ("a", "b", "c"): 2,
+        ("a", "b", "c", "d"): 3,
+    }
+    assert storage.slice("a", "b", "c", "d") == {
+        ("a", "b", "c", "d"): 3,
+    }
+    assert storage.slice(
+        filterkey=lambda key: all(elem != "d" for elem in key)
+    ) == {
+        ("a", "b", "c"): 2,
+        ("a", "b"): 1,
+    }
diff --git a/subtrees/dagflow/subtrees/dictwrapper/test/test_visitor.py b/subtrees/dagflow/subtrees/dictwrapper/test/test_visitor.py
new file mode 100644
index 0000000000000000000000000000000000000000..49bf0ee13106bcb8ac01966fd789a7da97c0f1ee
--- /dev/null
+++ b/subtrees/dagflow/subtrees/dictwrapper/test/test_visitor.py
@@ -0,0 +1,32 @@
+from dictwrapper.dictwrapper import DictWrapper
+from dictwrapper.visitor import DictWrapperVisitorDemonstrator
+
+def test_dictwrapper_04_visitor():
+    dct = dict([('a', 1), ('b', 2), ('c', 3), ('d', dict(e=4)), ('f', dict(g=dict(h=5)))])
+    dct['z.z.z'] = 0
+    dw = DictWrapper(dct)
+
+    keys0 = (('a',) , ('b',) , ('c',) , ('d', 'e'), ('f', 'g', 'h'), ('z.z.z', ))
+    values0 = (1, 2, 3, 4, 5, 0)
+
+    keys = tuple(dw.walkkeys())
+    values = tuple(dw.walkvalues())
+    assert keys==keys0
+    assert values==values0
+
+    class Visitor(object):
+        keys, values = (), ()
+        def __call__(self, k, v):
+            self.keys+=k,
+            self.values+=v,
+    v = Visitor()
+    dw.visit(v)
+    assert v.keys==keys0
+    assert v.values==values0
+
+def test_dictwrapper_05_visitor():
+    dct = dict([('a', 1), ('b', 2), ('c', 3), ('d', dict(e=4)), ('f', dict(g=dict(h=5)))])
+    dct['z.z.z'] = 0
+    dw = DictWrapper(dct)
+
+    dw.visit(DictWrapperVisitorDemonstrator())
diff --git a/subtrees/dagflow/test/core/allocation.py b/subtrees/dagflow/test/core/allocation.py
new file mode 100644
index 0000000000000000000000000000000000000000..ccf8baf0f098447cca22abd7d663dc65f81fb95f
--- /dev/null
+++ b/subtrees/dagflow/test/core/allocation.py
@@ -0,0 +1,34 @@
+import numpy as np
+
+from dagflow.graph import Graph
+
+def test_output_allocation_1():
+    data = np.arange(12, dtype='d').reshape(3,4)
+    with Graph(close=True) as graph:
+        n1 = graph.add_node("node1", typefunc=False)
+        n2 = graph.add_node("node2", typefunc=False)
+
+        out1 = n1._add_output("o1", data=data, allocatable=False)
+        in1 = n2._add_input("i1")
+
+        out1 >> in1
+
+    assert (data==out1.data).all()
+
+def test_output_allocation_2():
+    data = np.arange(12, dtype='d').reshape(3,4)
+    with Graph(close=True) as graph:
+        n1 = graph.add_node("node1", typefunc=False)
+        n2 = graph.add_node("node2", typefunc=False)
+
+        out1 = n1._add_output("o1", dtype=data.dtype, shape=data.shape)
+        in1 = n2._add_input("i1", data=data)
+
+        out1 >> in1
+
+    assert (data==out1.data).all()
+    assert (data==in1.data).all()
+    assert (data==in1._own_data).all()
+    assert data.dtype==out1.data.dtype
+    assert data.dtype==in1.data.dtype
+    assert data.dtype==in1._own_data.dtype
diff --git a/subtrees/dagflow/test/core/outputs.py b/subtrees/dagflow/test/core/outputs.py
new file mode 100644
index 0000000000000000000000000000000000000000..37263af43e94d38742de00c88f09fd8627187607
--- /dev/null
+++ b/subtrees/dagflow/test/core/outputs.py
@@ -0,0 +1,84 @@
+from dagflow.lib.Array import Array
+from dagflow.lib.Sum import Sum
+from dagflow.graph import Graph
+from dagflow.output import SettableOutput
+from dagflow.exception import CriticalError
+import pytest
+
+def test_SettableOutput_01():
+    value = 123.
+    array_in = (value, )
+    array_alt = (value+1, )
+    with Graph() as g:
+        va = Array('test', array_in)
+        s = Sum('add')
+        va >> s
+    g.close()
+
+    va.taint()
+    newout = SettableOutput.take_over(va.outputs.array)
+    newout.set(array_alt)
+
+    assert va.outputs.array is newout
+    assert s.inputs[0].parent_output is newout
+    assert s.outputs.result.data==array_alt
+
+def test_SettableOutput_02():
+    """Test SettableOutput, Node.invalidate_parents()"""
+    value = 123.
+    array_in = (value, )
+    array_alt = (value+1, )
+    with Graph() as g:
+        va = Array('test', array_in)
+        sm1 = Sum('add 1')
+        sm2 = Sum('add 2')
+        va >> sm1 >> sm2
+    g.close()
+
+    output1 = va.outputs[0]
+    output2 = sm1.outputs[0]
+    output3 = sm2.outputs[0]
+
+    assert va.tainted==True
+    assert sm1.tainted==True
+    assert sm2.tainted==True
+    assert va.invalid==False
+    assert sm1.invalid==False
+    assert sm2.invalid==False
+    assert output3.data==array_in
+    assert va.tainted==False
+    assert sm1.tainted==False
+    assert sm2.tainted==False
+
+    newout = SettableOutput.take_over(sm1.outputs[0])
+    assert va.tainted==False
+    assert sm1.tainted==False
+    assert sm2.tainted==False
+    assert va.invalid==False
+    assert sm1.invalid==False
+    assert sm2.invalid==False
+    assert output3.data==array_in
+
+    newout.set(array_alt)
+    assert va.tainted==True
+    assert sm1.tainted==False
+    assert sm2.tainted==True
+    assert va.invalid==True
+    assert sm1.invalid==False
+    assert sm2.invalid==False
+    assert output2.data==array_alt
+    assert output3.data==array_alt
+    with pytest.raises(CriticalError):
+        output1.data==array_alt
+
+    va.invalid = False
+    assert va.tainted==True
+    assert sm1.tainted==True
+    assert sm2.tainted==True
+    assert va.invalid==False
+    assert sm1.invalid==False
+    assert sm2.invalid==False
+    assert output3.data==array_in
+    assert output2.data==array_in
+    assert output1.data==array_in
+
diff --git a/subtrees/dagflow/test/nodes/test_Array.py b/subtrees/dagflow/test/nodes/test_Array.py
new file mode 100644
index 0000000000000000000000000000000000000000..46f86032c2cb92b9628687d1b4b45e57fc6781b5
--- /dev/null
+++ b/subtrees/dagflow/test/nodes/test_Array.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+
+from dagflow.graph import Graph
+from dagflow.lib.Array import Array
+from dagflow.lib.Sum import Sum
+from dagflow.graphviz import savegraph
+
+from numpy import arange
+import pytest
+
+debug = False
+
+@pytest.mark.parametrize('dtype', ('d', 'f'))
+def test_Array_00(dtype):
+    array = arange(12.0, dtype=dtype).reshape(3,4)
+    with Graph(close=True) as graph:
+        arr1 = Array('array: store', array, mode='store')
+        arr2 = Array('array: store (weak)', array, mode='store_weak')
+        arr3 = Array('array: fill', array, mode='fill')
+
+    assert arr1.tainted==True
+    assert arr2.tainted==True
+    assert arr3.tainted==True
+
+    out1 = arr1.outputs['array']
+    out2 = arr2.outputs['array']
+    out3 = arr3.outputs['array']
+
+    assert out1.owns_buffer == True
+    assert out2.owns_buffer == False
+    assert out3.owns_buffer == True
+
+    assert out1.allocatable == False
+    assert out2.allocatable == True
+    assert out3.allocatable == True
+
+    assert (out1._data==array).all()
+    assert (out2._data==array).all()
+    assert (out3._data==0.0).all()
+
+    result1 = arr1.get_data(0)
+    result2 = arr2.get_data(0)
+    result3 = arr3.get_data(0)
+
+    assert (result1==array).all()
+    assert (result2==array).all()
+    assert (result3==array).all()
+    assert arr1.tainted==False
+    assert arr2.tainted==False
+    assert arr3.tainted==False
+
+    savegraph(graph, f"output/test_array_00_{dtype}.png")
+
+def test_Array_01_set():
+    value = 123.
+    array_in = (value, )
+    array_alt = (value+1, )
+    va = Array('test', array_in)
+    sm = Sum('sum')
+    va >> sm
+    va.close()
+    sm.close()
+
+    output = va.outputs[0]
+    output2 = sm.outputs[0]
+
+    assert va.tainted==True
+    assert sm.tainted==True
+    assert output.data[0]==value
+    assert output2.data[0]==value
+    assert va.tainted==False
+    assert sm.tainted==False
+
+    assert va.set(array_in, check_taint=True)==False
+    assert va.tainted==False
+    assert sm.tainted==False
+    assert (output.data==array_in).all()
+    assert (output2.data==array_in).all()
+
+    assert va.set(array_in)==True
+    assert va.tainted==False
+    assert sm.tainted==True
+    assert (output.data==array_in).all()
+    assert (output2.data==array_in).all()
+    assert va.tainted==False
+    assert sm.tainted==False
+
+    assert va.set(array_alt, check_taint=True)==True
+    assert va.tainted==False
+    assert sm.tainted==True
+    assert (output.data==array_alt).all()
+    assert (output2.data==array_alt).all()
+    assert va.tainted==False
+    assert sm.tainted==False
diff --git a/subtrees/dagflow/test/nodes/test_Cholesky.py b/subtrees/dagflow/test/nodes/test_Cholesky.py
new file mode 100644
index 0000000000000000000000000000000000000000..328f5985e9d3a3ac72e58be9f1033d65d955031f
--- /dev/null
+++ b/subtrees/dagflow/test/nodes/test_Cholesky.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+
+from dagflow.exception import TypeFunctionError
+from dagflow.graph import Graph
+from dagflow.lib.Array import Array
+from dagflow.lib.Cholesky import Cholesky
+import numpy as np
+import scipy.linalg
+from pytest import raises
+from dagflow.graphviz import savegraph
+
+import pytest
+
+@pytest.mark.parametrize("dtype", ('d', 'f'))
+def test_Cholesky_00(dtype):
+    inV = np.array([[10, 2,   1], [ 2, 12,  3], [ 1,  3, 13]], dtype=dtype)
+    inV2 = inV@inV
+    inD = np.diag(inV)
+    inL2d1 = scipy.linalg.cholesky(inV, lower=True)
+    inL2d2 = scipy.linalg.cholesky(inV2, lower=True)
+    inL1d = np.sqrt(inD)
+
+    with Graph(close=True) as graph:
+        V1 = Array('V1', inV, mode='store')
+        V2 = Array('V2', inV2, mode='store')
+        D = Array('D', (inD), mode='store')
+        chol2d = Cholesky('Cholesky 2d')
+        chol1d = Cholesky('Cholesky 1d')
+        (V1, V2) >> chol2d
+        D >> chol1d
+
+    assert V1.tainted==True
+    assert V2.tainted==True
+    assert chol2d.tainted==True
+    assert chol1d.tainted==True
+
+    result2d1 = chol2d.get_data(0)
+    result2d2 = chol2d.get_data(1)
+    result1d = chol1d.get_data(0)
+    assert V1.tainted==False
+    assert V2.tainted==False
+    assert D.tainted==False
+    assert chol2d.tainted==False
+    assert chol1d.tainted==False
+
+    assert np.allclose(inL2d1, result2d1, atol=0, rtol=0)
+    assert np.allclose(inL2d2, result2d2, atol=0, rtol=0)
+    assert np.allclose(inL1d, result1d, atol=0, rtol=0)
+
+    savegraph(graph, f"output/test_Cholesky_00_{dtype}.png")
+
+def test_Cholesky_01_typefunctions():
+    inV = np.array([
+        [10, 2,   1],
+        [ 2, 12,  3],
+        ], dtype='d')
+
+    with Graph() as g1:
+        V1 = Array('V1', inV, mode='store')
+        chol1 = Cholesky('Cholesky')
+        V1 >> chol1
+
+    with Graph() as g2:
+        V2 = Array('V2', inV[0], mode='store')
+        chol2 = Cholesky('Cholesky')
+        V2 >> chol1
+
+    with raises(TypeFunctionError):
+        g1.close()
+
+    with raises(TypeFunctionError):
+        g2.close()
diff --git a/subtrees/dagflow/test/nodes/test_CovmatrixFromCormatrix.py b/subtrees/dagflow/test/nodes/test_CovmatrixFromCormatrix.py
new file mode 100644
index 0000000000000000000000000000000000000000..67478f1745a8a24c308c68456ee929b7e9a87262
--- /dev/null
+++ b/subtrees/dagflow/test/nodes/test_CovmatrixFromCormatrix.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+from numpy import arange
+
+from dagflow.graph import Graph
+from dagflow.graphviz import savegraph
+from dagflow.lib.Array import Array
+from dagflow.lib.CovmatrixFromCormatrix import CovmatrixFromCormatrix
+
+from numpy import array, allclose, tril
+import pytest
+
+@pytest.mark.parametrize('dtype', ('d', 'f'))
+def test_CovmatrixFromCormatrix_00(dtype):
+    inSigma = arange(1.0, 4.0, dtype=dtype)
+    inC = array([
+        [1.0, 0.5, 0.0],
+        [0.5, 1.0, 0.9],
+        [0.0, 0.9, 1.0],
+        ],
+        dtype=dtype)
+    with Graph(close=True) as graph:
+        matrix = Array('matrix', inC)
+        sigma = Array('sigma', inSigma)
+        cov = CovmatrixFromCormatrix('covariance')
+
+        sigma >> cov.inputs['sigma']
+        matrix >> cov
+
+    inV = inC * inSigma[:,None] * inSigma[None,:]
+    V = cov.get_data()
+
+    assert allclose(inV, V, atol=0, rtol=0)
+    assert allclose(tril(V), tril(V.T), atol=0, rtol=0)
+
+    savegraph(graph, f"output/test_CovmatrixFromCormatrix_00_{dtype}.png", show=['all'])
+
diff --git a/subtrees/dagflow/test/nodes/test_ElSumSq.py b/subtrees/dagflow/test/nodes/test_ElSumSq.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3e0f3c30906226a9d6f94078ffeed0a300f5fe8
--- /dev/null
+++ b/subtrees/dagflow/test/nodes/test_ElSumSq.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+from dagflow.graph import Graph
+from dagflow.lib.Array import Array
+from dagflow.lib.ElSumSq import ElSumSq
+from dagflow.graphviz import savegraph
+
+from numpy import arange, sum
+import pytest
+
+debug = False
+
+@pytest.mark.parametrize('dtype', ('d', 'f'))
+def test_ElSumSq_01(dtype):
+    arrays_in = tuple(arange(12, dtype=dtype)*i for i in (1, 2, 3))
+    arrays2_in = tuple(a**2 for a in arrays_in)
+
+    with Graph(close=True) as graph:
+        arrays = tuple(Array('test', array_in) for array_in in arrays_in)
+        sm = ElSumSq('sumsq')
+        arrays >> sm
+
+    output = sm.outputs[0]
+
+    res = sum(arrays2_in)
+    assert sm.tainted==True
+    assert all(output.data==res)
+    assert sm.tainted==False
+
+    arrays2_in = (arrays2_in[1],) + arrays2_in[1:]
+    res = sum(arrays2_in)
+    assert arrays[0].set(arrays[1].get_data())
+    assert sm.tainted==True
+    assert all(output.data==res)
+    assert sm.tainted==False
+
+    savegraph(graph, f"output/test_SumSq_00_{dtype}.png", show='all')
diff --git a/subtrees/dagflow/test/nodes/test_Integrator.py b/subtrees/dagflow/test/nodes/test_Integrator.py
new file mode 100644
index 0000000000000000000000000000000000000000..1463982a4278ad6febae798d86bb106b759079a5
--- /dev/null
+++ b/subtrees/dagflow/test/nodes/test_Integrator.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+from dagflow.exception import TypeFunctionError
+from dagflow.graph import Graph
+from dagflow.lib.Array import Array
+from dagflow.lib.Integrator import Integrator
+
+from pytest import raises
+
+
+def test_Integrator_00(debug_graph):
+    with Graph(debug=debug_graph, close=True):
+        arr1 = Array("array", [1.0, 2.0, 3.0])
+        arr2 = Array("array", [3.0, 2.0, 1.0])
+        weights = Array("weights", [2.0, 2.0, 2.0])
+        ordersX = Array("ordersX", [1, 1, 1])
+        integrator = Integrator("integrator", mode="1d")
+        arr1 >> integrator
+        arr2 >> integrator
+        weights >> integrator("weights")
+        ordersX >> integrator("ordersX")
+    assert (integrator.outputs[0].data == [2, 4, 6]).all()
+    assert (integrator.outputs[1].data == [6, 4, 2]).all()
+
+
+def test_Integrator_01(debug_graph):
+    with Graph(debug=debug_graph, close=True):
+        arr1 = Array("array", [1.0, 2.0, 3.0])
+        arr2 = Array("array", [3.0, 2.0, 1.0])
+        weights = Array("weights", [2.0, 2.0, 2.0])
+        ordersX = Array("ordersX", [2, 0, 1])
+        integrator = Integrator("integrator", mode="1d")
+        arr1 >> integrator
+        arr2 >> integrator
+        weights >> integrator("weights")
+        ordersX >> integrator("ordersX")
+    assert (integrator.outputs[0].data == [6, 0, 6]).all()
+    assert (integrator.outputs[1].data == [10, 0, 2]).all()
+
+
+def test_Integrator_02(debug_graph):
+    arr123 = [1.0, 2.0, 3.0]
+    with Graph(debug=debug_graph, close=True):
+        arr1 = Array("array", [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
+        arr2 = Array("array", [arr123, arr123])
+        weights = Array("weights", [[1.0, 1.0, 1.0], arr123])
+        ordersX = Array("ordersX", [1, 1])
+        ordersY = Array("ordersY", [1, 1, 1])
+        integrator = Integrator("integrator", mode="2d")
+        arr1 >> integrator
+        arr2 >> integrator
+        weights >> integrator("weights")
+        ordersX >> integrator("ordersX")
+        ordersY >> integrator("ordersY")
+    assert (integrator.outputs[0].data == [[1, 1, 1], [1, 2, 3]]).all()
+    assert (integrator.outputs[1].data == [[1, 2, 3], [1, 4, 9]]).all()
+
+
+def test_Integrator_03(debug_graph):
+    arr123 = [1.0, 2.0, 3.0]
+    with Graph(debug=debug_graph, close=True):
+        arr1 = Array("array", [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
+        arr2 = Array("array", [arr123, arr123])
+        weights = Array("weights", [[1.0, 1.0, 1.0], arr123])
+        ordersX = Array("ordersX", [1, 1])
+        ordersY = Array("ordersY", [1, 2, 0])
+        integrator = Integrator("integrator", mode="2d")
+        arr1 >> integrator
+        arr2 >> integrator
+        weights >> integrator("weights")
+        ordersX >> integrator("ordersX")
+        ordersY >> integrator("ordersY")
+    assert (integrator.outputs[0].data == [[1, 2, 0], [1, 5, 0]]).all()
+    assert (integrator.outputs[1].data == [[1, 5, 0], [1, 13, 0]]).all()
+
+
+def test_Integrator_04(debug_graph):
+    arr123 = [1.0, 2.0, 3.0]
+    with Graph(debug=debug_graph, close=True):
+        arr1 = Array("array", [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
+        arr2 = Array("array", [arr123, arr123])
+        weights = Array("weights", [[1.0, 1.0, 1.0], arr123])
+        ordersX = Array("ordersX", [0, 2])
+        ordersY = Array("ordersY", [1, 1, 1])
+        integrator = Integrator("integrator", mode="2d")
+        arr1 >> integrator
+        arr2 >> integrator
+        weights >> integrator("weights")
+        ordersX >> integrator("ordersX")
+        ordersY >> integrator("ordersY")
+    assert (integrator.outputs[0].data == [[0, 0, 0], [2, 3, 4]]).all()
+    assert (integrator.outputs[1].data == [[0, 0, 0], [2, 6, 12]]).all()
+
+
+def test_Integrator_05(debug_graph):
+    unity = [1.0, 1.0, 1.0]
+    with Graph(debug=debug_graph, close=True):
+        arr1 = Array(
+            "array", [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
+        )
+        arr2 = Array("array", [unity, unity, unity])
+        weights = Array("weights", [unity, unity, unity])
+        ordersX = Array("ordersX", [1, 1, 1])
+        ordersY = Array("ordersY", [1, 0, 2])
+        integrator = Integrator("integrator", mode="2d")
+        arr1 >> integrator
+        arr2 >> integrator
+        weights >> integrator("weights")
+        ordersX >> integrator("ordersX")
+        ordersY >> integrator("ordersY")
+    assert (
+        integrator.outputs[0].data == [[1, 0, 0], [0, 0, 1], [0, 0, 1]]
+    ).all()
+    assert (
+        integrator.outputs[1].data == [[1, 0, 2], [1, 0, 2], [1, 0, 2]]
+    ).all()
+
+
+# test wrong ordersX: sum(ordersX) != shape
+def test_Integrator_06(debug_graph):
+    arr = [1.0, 2.0, 3.0]
+    with Graph(debug=debug_graph):
+        arr1 = Array("array", arr)
+        weights = Array("weights", arr)
+        ordersX = Array("ordersX", [1, 2, 3])
+        integrator = Integrator("integrator", mode="1d")
+        arr1 >> integrator
+        weights >> integrator("weights")
+        ordersX >> integrator("ordersX")
+    with raises(TypeFunctionError):
+        integrator.close()
+
+
+# test wrong ordersX: sum(ordersX[i]) != shape[i]
+def test_Integrator_07(debug_graph):
+    arr = [1.0, 2.0, 3.0]
+    with Graph(debug=debug_graph):
+        arr1 = Array("array", [arr, arr])
+        weights = Array("weights", [arr, arr])
+        ordersX = Array("ordersX", [1, 3])
+        ordersY = Array("ordersY", [1, 0, 0])
+        integrator = Integrator("integrator", mode="2d")
+        arr1 >> integrator
+        weights >> integrator("weights")
+        ordersX >> integrator("ordersX")
+        ordersY >> integrator("ordersY")
+    with raises(TypeFunctionError):
+        integrator.close()
+
+
+# test wrong shape
+def test_Integrator_08(debug_graph):
+    with Graph(debug=debug_graph, close=False):
+        arr1 = Array("array", [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
+        arr2 = Array("array", [[1.0, 2.0, 3.0, 4.0], [1.0, 2.0, 3.0, 4.0]])
+        weights = Array("weights", [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
+        ordersX = Array("ordersX", [0, 2])
+        ordersY = Array("ordersY", [1, 1, 1, 3])
+        integrator = Integrator("integrator", mode="2d")
+        arr1 >> integrator
+        arr2 >> integrator
+        weights >> integrator("weights")
+        ordersX >> integrator("ordersX")
+        ordersY >> integrator("ordersY")
+    with raises(TypeFunctionError):
+        integrator.close()
diff --git a/subtrees/dagflow/test/nodes/test_NormalizeCorrelatedVars.py b/subtrees/dagflow/test/nodes/test_NormalizeCorrelatedVars.py
new file mode 100644
index 0000000000000000000000000000000000000000..234b3e233d4d7b7d22b88d43349753bfc96d4f98
--- /dev/null
+++ b/subtrees/dagflow/test/nodes/test_NormalizeCorrelatedVars.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python
+
+from dagflow.exception import TypeFunctionError
+
+from dagflow.graph import Graph
+from dagflow.graphviz import savegraph
+from dagflow.lib.Array import Array
+from dagflow.lib.NormalizeCorrelatedVars import NormalizeCorrelatedVars
+from dagflow.lib.Cholesky import Cholesky
+
+from numpy import array, arange, allclose, sqrt
+from scipy.linalg import solve_triangular, cholesky
+
+import pytest
+from pytest import raises
+
+debug = False
+
+@pytest.mark.parametrize('dtype', ('d', 'f'))
+def test_NormalizeCorrelatedVars_00(dtype):
+    inCentral = arange(3.0, dtype=dtype)*100.0
+    inV = array([[10, 2,   1], [ 2, 12,  3], [ 1,  3, 13]], dtype=dtype)
+    inD = inV.diagonal()
+    inL = cholesky(inV, lower=True)
+    inLd = sqrt(inD)
+    inOffset = array((-10.0, 20.0, 30.0), dtype=dtype)
+    inVec = inCentral + inOffset
+    with Graph(close=True) as graph:
+        matrix = Array('matrix', inV)
+        diag = Array('diag', inD)
+        Lmatrix = Cholesky('cholesky 2d')
+        Ldiag = Cholesky('cholesky 1d')
+        central = Array('central', inCentral)
+        vec = Array('vec', inVec)
+        norm1d_fwd = NormalizeCorrelatedVars('norm1d fwd')
+        norm2d_fwd = NormalizeCorrelatedVars('norm2d fwd')
+
+        norm1d_bwd = NormalizeCorrelatedVars('norm1d bwd', mode='backward')
+        norm2d_bwd = NormalizeCorrelatedVars('norm2d bwd', mode='backward')
+
+        central >> norm1d_fwd.inputs['central']
+        central >> norm2d_fwd.inputs['central']
+        central >> norm1d_bwd.inputs['central']
+        central >> norm2d_bwd.inputs['central']
+
+        matrix >> Lmatrix
+        diag   >> Ldiag
+
+        Lmatrix >> norm2d_fwd.inputs['matrix']
+        Ldiag   >> norm1d_fwd.inputs['matrix']
+        Lmatrix >> norm2d_bwd.inputs['matrix']
+        Ldiag   >> norm1d_bwd.inputs['matrix']
+
+        vec >> norm1d_fwd >> norm1d_bwd
+        vec >> norm2d_fwd >> norm2d_bwd
+
+    nodes = (
+        matrix, diag,
+        Lmatrix, Ldiag,
+        central, vec,
+        norm1d_fwd, norm2d_fwd,
+        norm1d_bwd, norm2d_bwd,
+    )
+
+    assert all(node.tainted==True for node in nodes)
+    back_matrix = norm2d_bwd.get_data(0)
+    back_diag = norm1d_bwd.get_data(0)
+
+    assert all(node.tainted==False for node in nodes)
+
+    result_matrix = norm2d_fwd.get_data(0)
+    result_diag = norm1d_fwd.get_data(0)
+
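+    # Reference values: a triangular solve with the full Cholesky factor for the
+    # correlated case, element-wise scaling by sqrt(diag) for the uncorrelated one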
+    norm1 = solve_triangular(inL, inOffset, lower=True)
+    norm2 = inOffset/inLd
+
+    if debug:
+        print('V:', inV)
+        print('Vdiag:', inD)
+        print('L:', inL)
+        print('Ldiag:', inLd)
+        print('Central:', inCentral)
+        print('In:', inVec)
+        print('Offset:', inOffset)
+        print('Norm 1:', norm1)
+        print('Norm 2:', norm2)
+        print('Rec 1:', back_matrix)
+        print('Rec 2:', back_diag)
+        print('Diff 1:', inVec-back_matrix)
+        print('Diff 2:', inVec-back_diag)
+
+    assert allclose(norm1, result_matrix, atol=0, rtol=0)
+    assert allclose(norm2, result_diag, atol=0, rtol=0)
+    assert allclose(inVec, back_matrix, atol=1.e-14, rtol=0)
+    assert allclose(inVec, back_diag, atol=0, rtol=0)
+
+    savegraph(graph, f"output/test_NormalizeCorrelatedVars_00_{dtype}.png")
+
+def test_NormalizeCorrelatedVars_01(dtype='d'):
+    inVec = arange(4.0, dtype=dtype)*100.0
+    inV = array([[10, 2,   1], [ 2, 12,  3], [ 1,  3, 13]], dtype=dtype)
+    inD = inV.diagonal()
+    with Graph() as graph1:
+        diag = Array('diag', inD)
+        vec = Array('vec', inVec)
+        norm1d_fwd = NormalizeCorrelatedVars('norm1d fwd')
+
+        vec  >> norm1d_fwd.inputs['central']
+        diag >> norm1d_fwd.inputs['matrix']
+
+    with Graph() as graph2:
+        matrix = Array('matrix', inV)
+        vec = Array('vec', inVec)
+        norm2d_fwd = NormalizeCorrelatedVars('norm2d fwd')
+
+        vec >> norm2d_fwd.inputs['central']
+        matrix >> norm2d_fwd.inputs['matrix']
+
+    with raises(TypeFunctionError):
+        graph1.close()
+
+    with raises(TypeFunctionError):
+        graph2.close()
+
diff --git a/subtrees/dagflow/test/nodes/test_NormalizeCorrelatedVars2.py b/subtrees/dagflow/test/nodes/test_NormalizeCorrelatedVars2.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5ee5172762873563c125d991fb7d4c53eec3aed
--- /dev/null
+++ b/subtrees/dagflow/test/nodes/test_NormalizeCorrelatedVars2.py
@@ -0,0 +1,254 @@
+#!/usr/bin/env python
+
+from numpy import arange
+from dagflow.exception import TypeFunctionError
+
+from dagflow.graph import Graph
+from dagflow.graphviz import savegraph
+from dagflow.lib.Array import Array
+from dagflow.lib.NormalizeCorrelatedVars2 import NormalizeCorrelatedVars2
+from dagflow.lib.Cholesky import Cholesky
+
+from numpy import array, arange, allclose, sqrt, full_like, zeros_like, ones_like, finfo
+from scipy.linalg import solve_triangular, cholesky
+
+import pytest
+from pytest import raises
+
+debug = False
+
+@pytest.mark.parametrize('dtype', ('d', 'f'))
+def test_NormalizeCorrelatedVars2_00(dtype):
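+    # Round-trip comparisons below allow two units of the dtype's resolution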
+    fp_tolerance = finfo(dtype).resolution*2
+
+    inCentral = arange(3.0, dtype=dtype)*100.0
+    inV = array([[10, 2,   1], [ 2, 12,  3], [ 1,  3, 13]], dtype=dtype)
+    inD = inV.diagonal()
+    inL = cholesky(inV, lower=True)
+    inLd = sqrt(inD)
+    inOffset = array((-10.0, 20.0, 30.0), dtype=dtype)
+    inVec = inCentral + inOffset
+    inNorm = full_like(inVec, -100)
+    with Graph(close=True) as graph:
+        var_matrix = Array('var_matrix', inV)
+        var_diag = Array('var_diag', inD)
+        Lmatrix = Cholesky('cholesky 2d')
+        Ldiag = Cholesky('cholesky 1d')
+        central = Array('central', inCentral)
+        value1d = Array('vec 1d', inVec, mode='store_weak')
+        normvalue1d = Array('normvalue 1d', inNorm, mode='store_weak')
+        value2d = Array('vec 2d', inVec, mode='store_weak')
+        normvalue2d = Array('normvalue 2d', inNorm, mode='store_weak')
+        norm1d = NormalizeCorrelatedVars2('norm1d')
+        norm2d = NormalizeCorrelatedVars2('norm2d')
+
+        central >> norm1d.inputs['central']
+        central >> norm2d.inputs['central']
+
+        var_matrix >> Lmatrix
+        var_diag   >> Ldiag
+
+        Lmatrix >> norm2d.inputs['matrix']
+        Ldiag   >> norm1d.inputs['matrix']
+
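+        # Connect the (value, normvalue) pairs; the node keeps them consistent
+        # in both directions, as exercised by the set() calls below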
+        (value1d, normvalue1d) >> norm1d
+        (value2d, normvalue2d) >> norm2d
+
+
+    nodes = (
+        var_matrix, var_diag,
+        Lmatrix, Ldiag,
+        central,
+        value1d, normvalue1d,
+        value2d, normvalue2d,
+        norm1d, norm2d,
+    )
+
+    assert all(node.tainted==True for node in nodes)
+    norm_matrix = norm2d.get_data(1)
+    norm_diag = norm1d.get_data(1)
+    back_matrix = norm2d.get_data(0)
+    back_diag = norm1d.get_data(0)
+
+    assert all(node.tainted==False for node in nodes)
+    assert all(inNorm!=norm_matrix)
+    assert all(inNorm!=norm_diag)
+
+    norm1 = solve_triangular(inL, inOffset, lower=True)
+    norm2 = inOffset/inLd
+
+    if debug:
+        print('V:', inV)
+        print('Vdiag:', inD)
+        print('L:', inL)
+        print('Ldiag:', inLd)
+        print('Central:', inCentral)
+        print('In:', inVec)
+        print('Offset:', inOffset)
+        print('Norm 1:', norm1)
+        print('Norm 2:', norm2)
+        print('Rec 1:', back_matrix)
+        print('Rec 2:', back_diag)
+        print('Diff 1:', inVec-back_matrix)
+        print('Diff 2:', inVec-back_diag)
+
+    assert allclose(norm1, norm_matrix, atol=0, rtol=0)
+    assert allclose(norm2, norm_diag, atol=0, rtol=0)
+    assert allclose(inVec, back_matrix, atol=0, rtol=0)
+    assert allclose(inVec, back_diag, atol=0, rtol=0)
+
+    #
+    # Set norm value to zeros
+    #
+    inZeros = zeros_like(inVec)
+    normvalue1d.set(inZeros)
+    normvalue2d.set(inZeros)
+    back_matrix = norm2d.get_data(0)
+    back_diag = norm1d.get_data(0)
+    norm_matrix = norm2d.get_data(1)
+    norm_diag = norm1d.get_data(1)
+    assert allclose(inZeros, norm_matrix, atol=0, rtol=0)
+    assert allclose(inZeros, norm_diag, atol=0, rtol=0)
+    assert allclose(inCentral, back_matrix, atol=0, rtol=0)
+    assert allclose(inCentral, back_diag, atol=0, rtol=0)
+
+    #
+    # Set norm value to ones
+    #
+    inOnes = ones_like(inVec)
+    normvalue1d.set(inOnes)
+    normvalue2d.set(inOnes)
+    back_matrix = norm2d.get_data(0)
+    back_diag = norm1d.get_data(0)
+    norm_matrix = norm2d.get_data(1)
+    norm_diag = norm1d.get_data(1)
+    checkDiagOnes = inCentral + inLd
+    checkMatrixOnes = inCentral + inL@inOnes
+    assert allclose(inOnes, norm_matrix, atol=0, rtol=0)
+    assert allclose(inOnes, norm_diag, atol=0, rtol=0)
+    assert allclose(checkMatrixOnes, back_matrix, atol=0, rtol=0)
+    assert allclose(checkDiagOnes, back_diag, atol=0, rtol=0)
+
+    #
+    # Set value (with immediate flag)
+    #
+    norm2d._immediate = True
+    norm1d._immediate = True
+    value1d.set(inCentral)
+    value2d.set(inCentral)
+    norm_matrix = norm2d.outputs[1]._data
+    norm_diag = norm1d.outputs[1]._data
+    back_matrix = norm2d.outputs[0]._data
+    back_diag = norm1d.outputs[0]._data
+    assert allclose(inZeros, norm_matrix, atol=0, rtol=0)
+    assert allclose(inZeros, norm_diag, atol=0, rtol=0)
+    assert allclose(inCentral, back_matrix, atol=0, rtol=0)
+    assert allclose(inCentral, back_diag, atol=0, rtol=0)
+
+    #
+    # Set normvalue (with immediate flag)
+    #
+    norm2d._immediate = True
+    norm1d._immediate = True
+    normvalue1d.set(inOnes)
+    normvalue2d.set(inOnes)
+    norm_matrix = norm2d.outputs[1]._data
+    norm_diag = norm1d.outputs[1]._data
+    back_matrix = norm2d.outputs[0]._data
+    back_diag = norm1d.outputs[0]._data
+    assert allclose(inOnes, norm_matrix, atol=0, rtol=0)
+    assert allclose(inOnes, norm_diag, atol=0, rtol=0)
+    assert allclose(checkMatrixOnes, back_matrix, atol=0, rtol=0)
+    assert allclose(checkDiagOnes, back_diag, atol=0, rtol=0)
+
+    #
+    # Set central
+    #
+    norm2d._immediate = False
+    norm1d._immediate = False
+    central.set(-inOnes)
+    back_matrix = norm2d.get_data(0)
+    back_diag = norm1d.get_data(0)
+    norm_matrix = norm2d.get_data(1)
+    norm_diag = norm1d.get_data(1)
+    assert all(norm_matrix!=inOnes)
+    assert all(norm_diag!=inOnes)
+    assert allclose(checkMatrixOnes, back_matrix, atol=0, rtol=0)
+    assert allclose(checkDiagOnes, back_diag, atol=0, rtol=0)
+
+    #
+    # Revert central
+    #
+    central.set(inCentral)
+    back_matrix = norm2d.get_data(0)
+    back_diag = norm1d.get_data(0)
+    norm_matrix = norm2d.get_data(1)
+    norm_diag = norm1d.get_data(1)
+    assert allclose(inOnes, norm_matrix, atol=fp_tolerance, rtol=0)
+    assert allclose(inOnes, norm_diag, atol=fp_tolerance, rtol=0)
+    assert allclose(checkMatrixOnes, back_matrix, atol=0, rtol=0)
+    assert allclose(checkDiagOnes, back_diag, atol=0, rtol=0)
+
+    #
+    # Set sigma
+    #
+    var_matrix.set(inV*2)
+    var_diag.set(inD*2)
+    back_matrix = norm2d.get_data(0)
+    back_diag = norm1d.get_data(0)
+    norm_matrix = norm2d.get_data(1)
+    norm_diag = norm1d.get_data(1)
+    assert all(norm_matrix!=inOnes)
+    assert all(norm_diag!=inOnes)
+    assert allclose(checkMatrixOnes, back_matrix, atol=0, rtol=0)
+    assert allclose(checkDiagOnes, back_diag, atol=0, rtol=0)
+
+    #
+    # Revert sigma
+    #
+    var_matrix.set(inV)
+    var_diag.set(inD)
+    back_matrix = norm2d.get_data(0)
+    back_diag = norm1d.get_data(0)
+    norm_matrix = norm2d.get_data(1)
+    norm_diag = norm1d.get_data(1)
+    assert allclose(inOnes, norm_matrix, atol=fp_tolerance, rtol=0)
+    assert allclose(inOnes, norm_diag, atol=fp_tolerance, rtol=0)
+    assert allclose(checkMatrixOnes, back_matrix, atol=0, rtol=0)
+    assert allclose(checkDiagOnes, back_diag, atol=0, rtol=0)
+
+    savegraph(graph, f"output/test_NormalizeCorrelatedVars2_00_{dtype}.png", show=['all'])
+
+def test_NormalizeCorrelatedVars2_01(dtype='d'):
+    inVec = arange(4.0, dtype=dtype)*100.0
+    inNorm = full_like(inVec, -100)
+    inV = array([[10, 2,   1], [ 2, 12,  3], [ 1,  3, 13]], dtype=dtype)
+    inD = inV.diagonal()
+    with Graph() as graph1:
+        var_diag = Array('var_diag', inD)
+        vec = Array('vec', inVec, mode='store_weak')
+        nvec = Array('nvec', inNorm, mode='store_weak')
+        norm1d = NormalizeCorrelatedVars2('norm1d')
+
+        vec  >> norm1d.inputs['central']
+        var_diag >> norm1d.inputs['matrix']
+
+        (vec, nvec) >> norm1d
+
+    with Graph() as graph2:
+        var_matrix = Array('var_matrix', inV)
+        vec = Array('vec', inVec, mode='store_weak')
+        nvec = Array('nvec', inNorm, mode='store_weak')
+        norm2d = NormalizeCorrelatedVars2('norm2d')
+
+        vec >> norm2d.inputs['central']
+        var_matrix >> norm2d.inputs['matrix']
+
+        (vec, nvec) >> norm2d
+
+    with raises(TypeFunctionError):
+        graph1.close()
+
+    with raises(TypeFunctionError):
+        graph2.close()
+
diff --git a/subtrees/dagflow/test/nodes/test_Sum.py b/subtrees/dagflow/test/nodes/test_Sum.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ab8897d58c6beda0f9679dc2f6a9279ac022ab4
--- /dev/null
+++ b/subtrees/dagflow/test/nodes/test_Sum.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+from dagflow.graph import Graph
+from dagflow.lib.Array import Array
+from dagflow.lib.Sum import Sum
+from dagflow.graphviz import savegraph
+
+from numpy import arange, sum
+import pytest
+
+debug = False
+
+@pytest.mark.parametrize('dtype', ('d', 'f'))
+def test_Sum_01(dtype):
+    arrays_in = tuple(arange(12, dtype=dtype)*i for i in (1, 2, 3))
+
+    with Graph(close=True) as graph:
+        arrays = tuple(Array('test', array_in) for array_in in arrays_in)
+        sm = Sum('sum')
+        arrays >> sm
+
+    output = sm.outputs[0]
+
+    res = sum(arrays_in, axis=0)
+
+    assert sm.tainted==True
+    assert all(output.data==res)
+    assert sm.tainted==False
+
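+    # Overwrite the first addend with a copy of the second and check that the
+    # node is retainted and recomputes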
+    arrays_in = (arrays_in[1],) + arrays_in[1:]
+    res = sum(arrays_in, axis=0)
+    assert arrays[0].set(arrays[1].get_data())
+    assert sm.tainted==True
+    assert all(output.data==res)
+    assert sm.tainted==False
+
+    savegraph(graph, f"output/test_sum_00_{dtype}.png")
diff --git a/subtrees/dagflow/test/nodes/test_SumMatOrDiag.py b/subtrees/dagflow/test/nodes/test_SumMatOrDiag.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b8bacfe276436e5d3730ab936987c00117b7543
--- /dev/null
+++ b/subtrees/dagflow/test/nodes/test_SumMatOrDiag.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+
+from dagflow.graph import Graph
+from dagflow.lib.Array import Array
+from dagflow.lib.SumMatOrDiag import SumMatOrDiag
+from dagflow.graphviz import savegraph
+from dagflow.exception import TypeFunctionError
+
+from numpy import arange, diag, allclose
+import pytest
+from pytest import raises
+
+debug = False
+
+@pytest.mark.parametrize('dtype', ('d', 'f'))
+def test_SumMatOrDiag_01(dtype):
+    for size in (5, 4):
+        array1  = arange(size, dtype=dtype)+1.0
+        array2  = arange(size, dtype=dtype)*3
+        matrix1 = arange(size*size, dtype=dtype).reshape(size, size)+1.0
+        matrix2 = arange(size*size, dtype=dtype).reshape(size, size)*2.5
+        arrays_in = (array1, array2, matrix1, matrix2)
+
+        combinations = ((0,), (2,), (0, 1), (0, 2), (2, 0), (0, 1, 2), (2, 3), (0, 1, 2, 3))
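+        # Combinations mix 1d (diagonal) and 2d (matrix) inputs of matching size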
+
+        sms = []
+
+        with Graph(close=True) as graph:
+            arrays = tuple(Array(f'test {i}', array_in) for i, array_in in enumerate(arrays_in))
+
+            for cmb in combinations:
+                sm = SumMatOrDiag(f'sum {cmb}')
+                tuple(arrays[i] for i in cmb) >> sm
+                sms.append(sm)
+
+        for cmb, sm in zip(combinations, sms):
+            res = 0.0
+            all1d = True
+            for i in cmb:
+                array_in = arrays_in[i]
+                if len(array_in.shape)==1:
+                    array_in = diag(array_in)
+                else:
+                    all1d = False
+                res += array_in
+
+            if all1d:
+                res = diag(res)
+
+            assert sm.tainted==True
+            output = sm.outputs[0]
+            assert allclose(output.data, res, rtol=0, atol=0)
+            assert sm.tainted==False
+
+        savegraph(graph, f"output/test_SumMatOrDiag_00_{dtype}_{size}.png", show='all')
+
+def test_SumMatOrDiag_02(dtype='d'):
+    size = 5
+    in_array1  = arange(size, dtype=dtype)                                      # 0
+    in_array2  = arange(size+1, dtype=dtype)                                    # 1
+    in_matrix1 = arange(size*size, dtype=dtype).reshape(size, size)             # 2
+    in_matrix2 = arange(size*(size+1), dtype=dtype).reshape(size, size+1)       # 3
+    in_matrix3 = arange((size+1)*(size+1), dtype=dtype).reshape(size+1, size+1) # 4
+    arrays_in = (in_array1, in_array2, in_matrix1, in_matrix2, in_matrix3)
+
+    combinations = (
+            (0, 1), (0, 3), (0, 4),
+            (3, 0), (4, 0),
+            (2, 3), (2, 4)
+            )
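+    # Each pair has incompatible shapes, so closing the node must fail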
+    with Graph(close=False):
+        arrays = tuple(Array(f'test {i}', array_in) for i, array_in in enumerate(arrays_in))
+
+        for i1, i2 in combinations:
+            sm = SumMatOrDiag(f'sum {i1} {i2}')
+            (arrays[i1], arrays[i2]) >> sm
+
+            with raises(TypeFunctionError):
+                sm.close()
+
diff --git a/subtrees/dagflow/test/nodes/test_SumSq.py b/subtrees/dagflow/test/nodes/test_SumSq.py
new file mode 100644
index 0000000000000000000000000000000000000000..d70a48113e2f57d0ceca9f028c669c36f1cfd8b0
--- /dev/null
+++ b/subtrees/dagflow/test/nodes/test_SumSq.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+
+from dagflow.graph import Graph
+from dagflow.lib.Array import Array
+from dagflow.lib.SumSq import SumSq
+from dagflow.graphviz import savegraph
+
+from numpy import arange, sum
+import pytest
+
+debug = False
+
+@pytest.mark.parametrize('dtype', ('d', 'f'))
+def test_SumSq_01(dtype):
+    arrays_in = tuple(arange(12, dtype=dtype)*i for i in (1, 2, 3))
+    arrays2_in = tuple(a**2 for a in arrays_in)
+
+    with Graph(close=True) as graph:
+        arrays = tuple(Array('test', array_in) for array_in in arrays_in)
+        sm = SumSq('sumsq')
+        arrays >> sm
+
+    output = sm.outputs[0]
+
+    res = sum(arrays2_in, axis=0)
+
+    assert sm.tainted==True
+    assert all(output.data==res)
+    assert sm.tainted==False
+
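+    # Overwrite the first addend with a copy of the second and check that the
+    # node is retainted and recomputes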
+    arrays2_in = (arrays2_in[1],) + arrays2_in[1:]
+    res = sum(arrays2_in, axis=0)
+    assert arrays[0].set(arrays[1].get_data())
+    assert sm.tainted==True
+    assert all(output.data==res)
+    assert sm.tainted==False
+
+    savegraph(graph, f"output/test_SumSq_00_{dtype}.png")
diff --git a/subtrees/dagflow/test/nodes/test_View.py b/subtrees/dagflow/test/nodes/test_View.py
new file mode 100644
index 0000000000000000000000000000000000000000..6150da820ac9fe1271e50bab0b91c47c6fca3b10
--- /dev/null
+++ b/subtrees/dagflow/test/nodes/test_View.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+
+from numpy import arange
+
+from dagflow.graph import Graph
+from dagflow.graphviz import savegraph
+from dagflow.lib.View import View
+from dagflow.lib.Array import Array
+
+debug = False
+
+def test_View_00():
+    """Create four nodes: sum up three of them, multiply the result by the fourth
+    Use graph context to create the graph.
+    Use one-line code for connecting the nodes
+    """
+    array = arange(5.0)
+    with Graph(close=True) as graph:
+        initial = Array('array', array)
+        view = View("view")
+        view2 = View("view2")
+
+        initial >> view >> view2
+
+    assert initial.tainted==True
+    assert view.tainted==True
+    assert view2.tainted==True
+
+    result = view.get_data()
+    result2 = view2.get_data()
+    assert (result==array).all()
+    assert (result2==array).all()
+    assert view.tainted==False
+    assert view2.tainted==False
+    assert initial.tainted==False
+
+    d1=initial.outputs[0]._data
+    d2=view.outputs[0]._data
+    d3=view2.outputs[0]._data
+    assert (d1==d2).all()
+    assert (d1==d3).all()
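+    # The views are zero-copy: writing through the source buffer is visible in both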
+    d1[:]=-1
+    assert (d2==-1).all()
+    assert (d3==-1).all()
+
+    initial.taint()
+    assert initial.tainted==True
+    assert view.tainted==True
+    assert view2.tainted==True
+
+    view2.touch()
+    savegraph(graph, "output/test_View_00.png")
diff --git a/subtrees/dagflow/test/nodes/test_ViewConcat.py b/subtrees/dagflow/test/nodes/test_ViewConcat.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9703cb99dae4f80ee9169687e4d722ba3de7472
--- /dev/null
+++ b/subtrees/dagflow/test/nodes/test_ViewConcat.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+
+from pytest import raises
+import numpy as np
+
+from dagflow.graph import Graph
+from dagflow.graphviz import savegraph
+from dagflow.lib.ViewConcat import ViewConcat
+from dagflow.lib.View import View
+from dagflow.lib.NormalizeCorrelatedVars2 import NormalizeCorrelatedVars2
+from dagflow.lib.Array import Array
+from dagflow.exception import ConnectionError
+
+import pytest
+
+debug = False
+
+@pytest.mark.parametrize('closemode', ['graph', 'recursive'])
+def test_ViewConcat_00(closemode):
+    """Create four nodes: sum up three of them, multiply the result by the fourth
+    Use graph context to create the graph.
+    Use one-line code for connecting the nodes
+    """
+    closegraph = closemode=='graph'
+
+    array1 = np.arange(5.0)
+    array2 = np.ones(shape=10, dtype='d')
+    array3 = np.zeros(shape=12, dtype='d')-1
+    array = np.concatenate((array1, array2, array3))
+    arrays = (array1, array2, array3)
+    n1, n2, _ = (a.size for a in arrays)
+    with Graph(debug=debug, close=closegraph) as graph:
+        inputs = [Array('array', array, mode='fill') for array in arrays]
+        concat = ViewConcat("concat")
+        view = View("view")
+
+        inputs >> concat >> view
+
+    if not closegraph:
+        view.close()
+
+    graph.print()
+
+    assert all(initial.tainted==True for initial in inputs)
+    assert concat.tainted==True
+    assert view.tainted==True
+
+    result = concat.get_data()
+    result_view = view.get_data()
+    assert (result==array).all()
+    assert (result_view==array).all()
+    assert concat.tainted==False
+    assert view.tainted==False
+    assert all(i.tainted==False for i in inputs)
+
+    data1, data2, data3 = (i.get_data(0) for i in inputs)
+    datac = concat.get_data(0)
+    datav = view.get_data(0)
+    assert all(data1==datac[:data1.size])
+    assert all(data2==datac[n1:n1+data2.size])
+    assert all(data3==datac[n1+n2:n1+n2+data3.size])
+
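+    # Writes through the input buffers must show up in the concatenated output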
+    data1[2]=-1
+    data2[:]=-1
+    data3[::2]=-2
+    assert all(data1==datac[:data1.size])
+    assert all(data2==datac[n1:n1+data2.size])
+    assert all(data3==datac[n1+n2:n1+n2+data3.size])
+    assert all(data1==datav[:data1.size])
+    assert all(data2==datav[n1:n1+data2.size])
+    assert all(data3==datav[n1+n2:n1+n2+data3.size])
+
+    inputs[1].taint()
+    assert concat.tainted==True
+    assert view.tainted==True
+
+    view.touch()
+    savegraph(graph, "output/test_ViewConcat_00.png")
+
+def test_ViewConcat_01():
+    with Graph() as graph:
+        concat = ViewConcat("concat")
+        concat2 = ViewConcat("concat 2")
+        view = View('view')
+        normnode = NormalizeCorrelatedVars2('normvars')
+
+        with raises(ConnectionError):
+            view >> concat
+
+        with raises(ConnectionError):
+            normnode.outputs[0] >> concat
+
+        with raises(ConnectionError):
+            concat >> normnode.inputs[0]
+
+        with raises(ConnectionError):
+            concat >> concat2
+
+    savegraph(graph, "output/test_ViewConcat_01.png")
diff --git a/subtrees/dagflow/test/test_class.py b/subtrees/dagflow/test/test_class.py
new file mode 100755
index 0000000000000000000000000000000000000000..041591dba0fe942c362eaf1ec40e0d91ab105936
--- /dev/null
+++ b/subtrees/dagflow/test/test_class.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+
+from numpy import arange
+
+from dagflow.graph import Graph
+from dagflow.graphviz import savegraph
+from dagflow.lib.Array import Array
+from dagflow.lib.Product import Product
+from dagflow.lib.Sum import Sum
+from dagflow.printl import current_level, printl, set_prefix_function
+from dagflow.wrappers import *
+
+set_prefix_function(lambda: "{:<2d} ".format(current_level()))
+debug = False
+
+
+def test_00():
+    """Create four nodes: sum up three of them, multiply the result by the fourth
+    Use graph context to create the graph.
+    Use one-line code for connecting the nodes
+    """
+    array = arange(5)
+    names = "n1", "n2", "n3", "n4"
+    with Graph(debug=debug) as graph:
+        initials = [Array(name, array) for name in names]
+        s = Sum("add")
+        m = Product("mul")
+
+    (initials[3], (initials[:3] >> s)) >> m
+
+    graph._wrap_fcns(dataprinter, printer)
+    graph.close()
+
+    s.print()
+    m.print()
+
+    result = m.outputs["result"].data
+    printl(result)
+
+    savegraph(graph, "output/class_00.pdf")
diff --git a/subtrees/dagflow/test/test_close_open.py b/subtrees/dagflow/test/test_close_open.py
new file mode 100644
index 0000000000000000000000000000000000000000..dcb936ff95f05659d6ee8370e06426fdc11d78f0
--- /dev/null
+++ b/subtrees/dagflow/test/test_close_open.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+
+from dagflow.graph import Graph
+from dagflow.lib import Array, Product, Sum, WeightedSum
+from numpy import arange, array
+
+
+def test_00(debug_graph):
+    with Graph(debug=debug_graph, close=True):
+        arr = Array("arr", arange(3, dtype="d"))  # [0, 1, 2]
+        ws = WeightedSum("weightedsum")
+        (arr, arr) >> ws
+        Array("weight", (2, 3)) >> ws("weight")
+    assert ws.closed
+    assert (ws.outputs["result"].data == [0, 5, 10]).all()
+    assert arr.open()
+    assert not ws.inputs["weight"].closed
+    assert not arr.closed
+
+
+def test_01(debug_graph):
+    with Graph(debug=debug_graph, close=True):
+        arr1 = Array("arr1", arange(3, dtype="d"))  # [0, 1, 2]
+        arr2 = Array("arr2", array((3, 2, 1), dtype="d"))
+        sum = Sum("sum")
+        (arr1, arr2) >> sum
+    assert sum.closed
+    assert (sum.outputs["result"].data == [3, 3, 3]).all()
+    assert sum.open()
+    assert all((not sum.closed, arr1.closed, arr2.closed))
+    assert arr1.open()
+    assert all((not sum.closed, not arr1.closed, arr2.closed))
+    assert arr2.open()
+    assert all((not sum.closed, not arr1.closed, not arr2.closed))
+
+
+def test_02(debug_graph):
+    with Graph(debug=debug_graph, close=True):
+        arr1 = Array("arr1", arange(3, dtype="d"))  # [0, 1, 2]
+        arr2 = Array("arr2", array((3, 2, 1), dtype="d"))
+        arr3 = Array("unity", array((1, 1, 1), dtype="d"))
+        sum1 = Sum("sum1")
+        sum2 = Sum("sum2")
+        prod = Product("product")
+        (arr1, arr2, arr3) >> sum1  # [4, 4, 4]
+        (arr3, sum1) >> prod  # [4, 4, 4]
+        (arr1, prod) >> sum2  # [4, 5, 6]
+    assert sum2.closed
+    assert (sum2.outputs["result"].data == [4, 5, 6]).all()
+    assert arr1.open()
+    assert arr2.closed
+    assert arr3.closed
+    assert not arr1.closed
+    assert not prod.closed
+    assert not sum1.closed
+
+
+def test_03(debug_graph):
+    with Graph(debug=debug_graph, close=False):
+        arr1 = Array("arr1", arange(3, dtype="d"))  # [0, 1, 2]
+        arr2 = Array("arr2", array((3, 2, 1), dtype="d"))
+        arr3 = Array("unity", array((1, 1, 1), dtype="d"))
+        sum1 = Sum("sum1")
+        sum2 = Sum("sum2")
+        prod = Product("product")
+        (arr1, arr2, arr3) >> sum1  # [4, 4, 4]
+        (arr3, sum1) >> prod  # [4, 4, 4]
+        (arr1, prod) >> sum2  # [4, 5, 6]
+
+    with Graph(debug=debug_graph, close=True):
+        arr4 = Array("arr1", arange(3, dtype="d"))  # [0, 1, 2]
+        sum3 = Sum("sum3")
+        (sum2, arr4) >> sum3  # [4, 6, 8]
+    assert arr1.closed
+    assert arr2.closed
+    assert arr3.closed
+    assert arr4.closed
+    assert sum2.closed
+    assert sum3.closed
+    assert (sum3.outputs["result"].data == [4, 6, 8]).all()
+    assert arr1.open()
+    assert arr2.closed
+    assert arr3.closed
+    assert arr4.closed
+    assert not arr1.closed
+    assert not prod.closed
+    assert not sum1.closed
+    assert not sum2.closed
diff --git a/subtrees/dagflow/test/test_connection.py b/subtrees/dagflow/test/test_connection.py
new file mode 100755
index 0000000000000000000000000000000000000000..d23dfb49b91776e67f9c4fafa859727cf79ac39c
--- /dev/null
+++ b/subtrees/dagflow/test/test_connection.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python
+
+from dagflow.exception import ClosedGraphError, UnclosedGraphError
+from dagflow.graph import Graph
+from dagflow.input import Input
+from dagflow.nodes import FunctionNode
+from dagflow.output import Output
+from dagflow.wrappers import *
+from pytest import raises
+
+nodeargs = {"typefunc": lambda: True}
+
+
+def test_01():
+    i = Input("input", None)
+    o = Output("output", None)
+
+    o >> i
+
+
+def test_02():
+    n1 = FunctionNode("node1")
+    n2 = FunctionNode("node2")
+
+    n1.add_output("o1")
+    n1.add_output("o2")
+
+    n2.add_input("i1")
+    n2.add_input("i2")
+    n2.add_output("o1")
+
+    n1 >> n2
+
+
+def test_03():
+    n1 = FunctionNode("node1")
+    n2 = FunctionNode("node2")
+
+    out = n1.add_output("o1")
+
+    n2.add_input("i1")
+    n2.add_output("o1")
+
+    out >> n2
+
+
+def test_04():
+    n1 = FunctionNode("node1")
+    n2 = FunctionNode("node2")
+
+    out = n1.add_output("o1")
+
+    n2.add_pair("i1", "o1")
+
+    final = out >> n2
+
+
+def test_05():
+    n1 = FunctionNode("node1", **nodeargs)
+    n2 = FunctionNode("node2", **nodeargs)
+
+    out1 = n1.add_output("o1", allocatable=False)
+    out2 = n1.add_output("o2", allocatable=False)
+
+    _, final = n2.add_pair("i1", "o1", output_kws={"allocatable": False})
+    n2.add_input("i2")
+
+    (out1, out2) >> n2
+
+    n2.close()
+    assert n2.closed
+    assert n1.closed
+    with raises(ClosedGraphError):
+        n2.add_input("i3")
+    with raises(ClosedGraphError):
+        n1.add_output("o3")
+    final.data
+
+
+def test_06():
+    n1 = FunctionNode("node1", **nodeargs)
+    n2 = FunctionNode("node2", **nodeargs)
+
+    out1 = n1._add_output("o1", allocatable=False)
+    out2 = n1._add_output("o2", allocatable=False)
+
+    _, final = n2._add_pair("i1", "o1", output_kws={"allocatable": False})
+    n2._add_input("i2")
+
+    (out1, out2) >> n2
+
+    n1.close(recursive=False)
+    assert n1.closed
+    assert not n2.closed
+    n2.close(recursive=False)
+    assert n2.closed
+    with raises(ClosedGraphError):
+        n2.add_input("i3")
+    with raises(ClosedGraphError):
+        n1.add_output("o3")
+    final.data
+
+
+def test_07():
+    g = Graph()
+    n1 = g.add_node("node1", **nodeargs)
+    n2 = g.add_node("node2", **nodeargs)
+    g._wrap_fcns(toucher, printer)
+
+    out1 = n1._add_output("o1", allocatable=False)
+    out2 = n1._add_output("o2", allocatable=False)
+
+    _, final = n2._add_pair("i1", "o1", output_kws={"allocatable": False})
+    n2._add_input("i2")
+
+    (out1, out2) >> n2
+
+    with raises(UnclosedGraphError):
+        final.data
+    g.close()
+    with raises(ClosedGraphError):
+        n2.add_input("i3")
+    with raises(ClosedGraphError):
+        n1.add_output("o3")
+    final.data
+
+
+def test_08():
+    g = Graph()
+    n1 = g.add_node("node1", **nodeargs)
+    n2 = g.add_node("node2", **nodeargs)
+    n3 = g.add_node("node3", **nodeargs)
+    g._wrap_fcns(toucher, printer)
+
+    out1 = n1._add_output("o1", allocatable=False)
+    out2 = n1._add_output("o2", allocatable=False)
+
+    _, out3 = n2._add_pair("i1", "o1", output_kws={"allocatable": False})
+    n2._add_input("i2")
+
+    _, final = n3._add_pair("i1", "o1", output_kws={"allocatable": False})
+
+    (out1, out2) >> n2
+    out3 >> n3
+
+    with raises(UnclosedGraphError):
+        final.data
+    g.close()
+    with raises(ClosedGraphError):
+        n2.add_input("i3")
+    with raises(ClosedGraphError):
+        n1.add_output("o3")
+    with raises(ClosedGraphError):
+        n3.add_pair("i3", "o3")
+    final.data
+
+    print()
+    final.data
+
+    print("Taint n2")
+    n2.taint()
+    final.data
+
+    print("Taint n3")
+    n3.taint()
+    final.data
diff --git a/subtrees/dagflow/test/test_containers.py b/subtrees/dagflow/test/test_containers.py
new file mode 100755
index 0000000000000000000000000000000000000000..1e19a94a0bafd98c6d983f251266c49c22475585
--- /dev/null
+++ b/subtrees/dagflow/test/test_containers.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+import contextlib
+
+from dagflow.input import Input, Inputs
+from dagflow.legs import Legs
+from dagflow.output import Output
+
+
+def test_01():
+    inputs = Inputs()
+
+    input1 = Input("i1", None)
+    input2 = Input("i2", None)
+    input3 = Input("i3", None)
+
+    inputs.add( (input1, input2) )
+    inputs.add( input3 )
+
+    print(inputs)
+
+    print(inputs[0])
+    print(inputs[1])
+    print(inputs[2])
+
+    try:
+        print(inputs[3])
+    except IndexError:
+        pass
+    else:
+        raise RuntimeError("fail")
+
+    print(inputs["i1"])
+    print(inputs["i2"])
+    print(inputs[("i1", "i3")])
+
+    print(inputs["i1"])
+    print(inputs["i2"])
+    print(inputs["i3"])
+    with contextlib.suppress(KeyError):
+        print(inputs["i4"])
+
+
+def test_02():
+    inputs = Inputs()
+    print(inputs)
+
+    output1 = Output("o1", None)
+
+    try:
+        inputs.add( output1 )
+    except Exception:
+        pass
+    else:
+        raise RuntimeError("fail")
+
+
+def test_03():
+    print("test3")
+    input1 = Input("i1", None)
+    input2 = Input("i2", None)
+    input3 = Input("i3", None)
+
+    output1 = Output("o1", None)
+    output2 = Output("o2", None)
+
+    legs = Legs((input1, input2, input3), (output1, output2))
+    print(legs)
+    legs.print()
+    print()
+
+    legs1 = legs[None, "o1"]
+    print(legs1)
+    # legs1.print()
+    print()
+
+    legs2 = legs[:, "o1"]
+    print(legs2)
+    legs2.print()
+    print()
+
+    legs3 = legs[("i1", "i3"), "o1"]
+    print(legs3)
+    legs3.print()
+    print()
diff --git a/subtrees/dagflow/test/test_function_switch.py b/subtrees/dagflow/test/test_function_switch.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d700799e736fbcfeb99c77bec9a172375902dfa
--- /dev/null
+++ b/subtrees/dagflow/test/test_function_switch.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+
+from dagflow.graph import Graph
+from dagflow.input_extra import MissingInputAddOne
+from dagflow.lib import Array
+from dagflow.nodes import FunctionNode
+from numpy import arange, array, copyto, result_type
+
+
+class SumIntProductFloatElseNothing(FunctionNode):
+    def __init__(self, name, **kwargs):
+        kwargs.setdefault(
+            "missing_input_handler", MissingInputAddOne(output_fmt="result")
+        )
+        super().__init__(name, **kwargs)
+        self._functions.update(
+            {"int": self._fcn_int, "float": self._fcn_float}
+        )
+
+    def _fcn(self, _, inputs, outputs):
+        return outputs[0].data
+
+    def _fcn_int(self, _, inputs, outputs):
+        out = outputs[0].data
+        copyto(out, inputs[0].data.copy())
+        if len(inputs) > 1:
+            for input in inputs[1:]:
+                out += input.data
+        return out
+
+    def _fcn_float(self, _, inputs, outputs):
+        out = outputs[0].data
+        copyto(out, inputs[0].data.copy())
+        if len(inputs) > 1:
+            for input in inputs[1:]:
+                out *= input.data
+        return out
+
+    def _typefunc(self) -> bool:
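+        # Choose the evaluation function from the first input's dtype: integers
+        # are summed, floats are multiplied, anything else keeps the default
+        # function, which leaves the output untouched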
+        if self.inputs[0].dd.dtype == "i":
+            self.fcn = self._functions.get("int")
+        elif self.inputs[0].dd.dtype == "d":
+            self.fcn = self._functions.get("float")
+        self.outputs["result"].dd.shape = self.inputs[0].dd.shape
+        self.outputs["result"].dd.dtype = result_type(
+            *tuple(inp.dd.dtype for inp in self.inputs)
+        )
+        self.logger.debug(
+            f"Node '{self.name}': dtype={self.outputs['result'].dd.dtype}, "
+            f"shape={self.outputs['result'].dd.shape}, function={self.fcn.__name__}"
+        )
+        return True
+
+
+def test_00(debug_graph):
+    with Graph(debug=debug_graph, close=True):
+        arr = Array("arr", array(("1", "2", "3")))
+        node = SumIntProductFloatElseNothing("node")
+        (arr, arr) >> node
+    assert (node.outputs["result"].data == ["", "", ""]).all()
+
+
+def test_01(debug_graph):
+    with Graph(debug=debug_graph, close=True):
+        arr = Array("arr", arange(3, dtype="i"))  # [0, 1, 2]
+        node = SumIntProductFloatElseNothing("node")
+        (arr, arr) >> node
+    assert (node.outputs["result"].data == [0, 2, 4]).all()
+
+
+def test_02(debug_graph):
+    with Graph(debug=debug_graph, close=True):
+        arr = Array("arr", arange(3, dtype="d"))  # [0, 1, 2]
+        node = SumIntProductFloatElseNothing("node")
+        (arr, arr) >> node
+    assert (node.outputs["result"].data == [0, 1, 4]).all()
diff --git a/subtrees/dagflow/test/test_graph.py b/subtrees/dagflow/test/test_graph.py
new file mode 100755
index 0000000000000000000000000000000000000000..eb070b2fde9fb230c6e78a37cd35fd20468ac5ed
--- /dev/null
+++ b/subtrees/dagflow/test/test_graph.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+from dagflow.graph import Graph
+from dagflow.graphviz import GraphDot
+from dagflow.printl import current_level, set_prefix_function
+from dagflow.wrappers import *
+
+set_prefix_function(
+    lambda: "{:<2d} ".format(current_level()),
+)
+nodeargs = dict(typefunc=lambda: True)
+
+
+def test_01():
+    """Simple test of the graph plotter"""
+    g = Graph()
+    n1 = g.add_node("node1", **nodeargs)
+    n2 = g.add_node("node2", **nodeargs)
+    n3 = g.add_node("node3", **nodeargs)
+    g._wrap_fcns(toucher, printer)
+
+    out1 = n1._add_output("o1", allocatable=False)
+    out2 = n1._add_output("o2", allocatable=False)
+
+    _, out3 = n2._add_pair("i1", "o1", output_kws={"allocatable": False})
+    n2._add_input("i2")
+    n3._add_pair("i1", "o1", output_kws={"allocatable": False})
+
+    print(f"{out1=}, {out2=}")
+    (out1, out2) >> n2
+    out3 >> n3
+    g.close()
+
+    d = GraphDot(g)
+    d.savegraph("output/test1_00.png")
+
+
+def test_02():
+    """Simple test of the graph plotter"""
+    g = Graph()
+    n1 = g.add_node("node1", **nodeargs)
+    n2 = g.add_node("node2", **nodeargs)
+    n3 = g.add_node("node3", **nodeargs)
+    g._wrap_fcns(toucher, printer)
+
+    out1 = n1._add_output("o1", allocatable=False)
+    out2 = n1._add_output("o2", allocatable=False)
+
+    _, out3 = n2._add_pair("i1", "o1", output_kws={"allocatable": False})
+    n2._add_input("i2")
+
+    _, final = n3._add_pair("i1", "o1", output_kws={"allocatable": False})
+
+    (out1, out2) >> n2
+    out3 >> n3
+    g.close()
+
+    d = GraphDot(g)
+    d.savegraph("output/test2_00.png")
+
+    final.data
+    d = GraphDot(g)
+    d.savegraph("output/test2_01.png")
+
+
+def test_02a():
+    """Simple test of the graph plotter"""
+    g = Graph()
+    n1 = g.add_node("node1", **nodeargs)
+    n2 = g.add_node("node2", **nodeargs)
+    n3 = g.add_node("node3", **nodeargs)
+    n4 = g.add_node("node4", **nodeargs)
+    g._wrap_fcns(toucher, printer)
+
+    out1 = n1._add_output("o1", allocatable=False)
+
+    in2, out2 = n2._add_pair("i1", "o1", output_kws={"allocatable": False})
+    in3, out3 = n3._add_pair("i1", "o1", output_kws={"allocatable": False})
+    in4, out4 = n4._add_pair("i1", "o1", output_kws={"allocatable": False})
+
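+    # repeat() fans a single output out to several inputs in one statement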
+    out1.repeat() >> (in2, in3, in4)
+    g.close()
+
+    d = GraphDot(g)
+    d.savegraph("output/test2a_00.png")
+
+    print(out4.data)
+    d = GraphDot(g)
+    d.savegraph("output/test2a_01.png")
diff --git a/subtrees/dagflow/test/test_graph_big.py b/subtrees/dagflow/test/test_graph_big.py
new file mode 100755
index 0000000000000000000000000000000000000000..71a71ef43a02d28270d00a0e80a06908bd0af77c
--- /dev/null
+++ b/subtrees/dagflow/test/test_graph_big.py
@@ -0,0 +1,164 @@
+#!/usr/bin/env python
+from dagflow.graph import Graph
+from dagflow.graphviz import GraphDot
+from dagflow.printl import current_level, set_prefix_function
+from dagflow.wrappers import *
+
+set_prefix_function(lambda: "{:<2d} ".format(current_level()))
+
+counter = 0
+nodeargs = dict(typefunc=lambda: True)
+
+
+def test_graph_big_01():
+    """Create a graph of nodes and test evaluation features"""
+    g = Graph()
+    label = None
+
+    def plot(suffix=""):
+        global counter
+        d = GraphDot(g)
+        newlabel = label + suffix if label else suffix
+        if newlabel is not None:
+            d.set_label(newlabel)
+        d.savegraph("output/test_graph_big_{:03d}.png".format(counter))
+        counter += 1
+
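+    # Wrap each node function to plot the graph before and after every evaluation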
+    def plotter(fcn, node, inputs, outputs):
+        plot(f"[start evaluating {node.name}]")
+        fcn(node, inputs, outputs)
+        plot(f"[done evaluating {node.name}]")
+
+    A1 = g.add_node("A1", **nodeargs)
+    A2 = g.add_node("A2", auto_freeze=True, label="{name}|frozen", **nodeargs)
+    A3 = g.add_node("A3", immediate=True, label="{name}|immediate", **nodeargs)
+    B = g.add_node("B", **nodeargs)
+    C1 = g.add_node("C1", **nodeargs)
+    C2 = g.add_node("C2", **nodeargs)
+    D = g.add_node("D", **nodeargs)
+    E = g.add_node("E", **nodeargs)
+    F = g.add_node("F", **nodeargs)
+    H = g.add_node("H", **nodeargs)
+    P = g.add_node("P", immediate=True, label="{name}|immediate", **nodeargs)
+
+    g._wrap_fcns(toucher, printer, plotter)
+
+    A1._add_output("o1", allocatable=False)
+    A2._add_output("o1", allocatable=False)
+    P._add_output("o1", allocatable=False)
+    A3._add_pair("i1", "o1", output_kws={"allocatable": False})
+    B._add_pair(
+        ("i1", "i2", "i3", "i4"),
+        ("o1", "o2"),
+        output_kws={"allocatable": False},
+    )
+    C1._add_output("o1", allocatable=False)
+    C2._add_output("o1", allocatable=False)
+    D._add_pair("i1", "o1", output_kws={"allocatable": False})
+    D._add_pair("i2", "o2", output_kws={"allocatable": False})
+    H._add_pair("i1", "o1", output_kws={"allocatable": False})
+    _, other = F._add_pair("i1", "o1", output_kws={"allocatable": False})
+    _, final = E._add_pair("i1", "o1", output_kws={"allocatable": False})
+
+    (A1, A2, (P >> A3), D[:1]) >> B >> (E, H)
+    ((C1, C2) >> D[:, 1]) >> F
+
+    g.print()
+    g.close()
+
+    label = "Initial graph state."
+    plot()
+
+    label = "Read E..."
+    plot()
+    plot()
+    plot()
+    final.data
+    label = "Done reading E."
+    plot()
+
+    label = "Taint D."
+    plot()
+    plot()
+    plot()
+    D.taint()
+    plot()
+    label = "Read F..."
+    other.data
+    label = "Done reading F."
+    plot()
+
+    label = "Read E..."
+    plot()
+    plot()
+    plot()
+    final.data
+    label = "Done reading E."
+    plot()
+
+    label = "Taint A2."
+    plot()
+    plot()
+    plot()
+    A2.taint()
+    plot()
+    label = "Read E..."
+    plot()
+    final.data
+    label = "Done reading E."
+    plot()
+
+    label = "Unfreeze A2 (tainted)."
+    plot()
+    plot()
+    plot()
+    A2.unfreeze()
+    plot()
+    label = "Read E..."
+    plot()
+    final.data
+    label = "Done reading E."
+    plot()
+
+    label = "Unfreeze A2 (not tainted)."
+    plot()
+    plot()
+    plot()
+    A2.unfreeze()
+    plot()
+    label = "Read E..."
+    plot()
+    final.data
+    label = "Done reading E."
+    plot()
+
+    label = "Taint P"
+    plot()
+    plot()
+    plot()
+    P.taint()
+    plot()
+    label = "Read E..."
+    plot()
+    final.data
+    label = "Done reading E."
+    plot()
+
+    label = "Invalidate P"
+    plot()
+    plot()
+    plot()
+    P.invalid = True
+    plot()
+
+    label = "Validate P"
+    plot()
+    plot()
+    plot()
+    P.invalid = False
+    plot()
+    label = "Read E..."
+    plot()
+    final.data
+    label = "Done reading E."
+    plot()
diff --git a/subtrees/dagflow/test/test_hooks.py b/subtrees/dagflow/test/test_hooks.py
new file mode 100755
index 0000000000000000000000000000000000000000..5678da9ecfb42add4173f9db008b85f064b5c378
--- /dev/null
+++ b/subtrees/dagflow/test/test_hooks.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+
+from numpy import arange, copyto, result_type
+from pytest import raises
+
+from dagflow.exception import (
+    CriticalError,
+    ReconnectionError,
+    UnclosedGraphError,
+)
+from dagflow.graph import Graph
+from dagflow.input_extra import MissingInputAddOne
+from dagflow.lib.Array import Array
+from dagflow.lib.WeightedSum import WeightedSum
+from dagflow.nodes import FunctionNode
+
+
+class ThreeInputsSum(FunctionNode):
+    def __init__(self, *args, **kwargs):
+        kwargs.setdefault(
+            "missing_input_handler", MissingInputAddOne(output_fmt="result")
+        )
+        super().__init__(*args, **kwargs)
+
+    def _fcn(self, _, inputs, outputs):
+        out = outputs["result"].data
+        copyto(out, inputs[0].data.copy())
+        for input in inputs[1:3]:
+            out += input.data
+        return out
+
+    def _typefunc(self) -> None:
+        """A output takes this function to determine the dtype and shape"""
+        if (y := len(self.inputs)) != 3:
+            raise CriticalError(
+                f"The node must have only 3 inputs, but given {y}: {self.inputs}!"
+            )
+        self.outputs["result"].dd.shape = self.inputs[0].dd.shape
+        self.outputs["result"].dd.dtype = result_type(
+            *tuple(inp.dd.dtype for inp in self.inputs)
+        )
+        self.logger.debug(
+            f"Node '{self.name}': dtype={self.outputs['result'].dd.dtype}, "
+            f"shape={self.outputs['result'].dd.shape}"
+        )
+
+
+def test_00(debug_graph):
+    with Graph(debug=debug_graph, close=True):
+        arr = Array("arr", arange(3, dtype="i"))  # [0, 1, 2]
+        node = ThreeInputsSum("threesum")
+        for _ in range(3):
+            # Error while evaluating before len(inputs) == 3
+            with raises(UnclosedGraphError):
+                node.eval()
+            arr >> node
+    assert (node.outputs["result"].data == [0, 3, 6]).all()
+
+
+def test_01(debug_graph):
+    with Graph(debug=debug_graph, close=True):
+        arr = Array("arr", arange(3, dtype="i"))  # [0, 1, 2]
+        ws = WeightedSum("weightedsum")
+        (arr, arr) >> ws
+        # Error while eval before setting the weight input
+        with raises(UnclosedGraphError):
+            ws.eval()
+        # multiply the first input by 2 and the second one by 3
+        Array("weight", (2, 3)) >> ws("weight")
+    with raises(ReconnectionError):
+        Array("weight", (2, 3)) >> ws("weight")
+    assert (ws.outputs["result"].data == [0, 5, 10]).all()
diff --git a/subtrees/dagflow/test/test_input_handler.py b/subtrees/dagflow/test/test_input_handler.py
new file mode 100755
index 0000000000000000000000000000000000000000..d8a763aa96e2e858fcb7ddea637861dc027c8b31
--- /dev/null
+++ b/subtrees/dagflow/test/test_input_handler.py
@@ -0,0 +1,266 @@
+#!/usr/bin/env python
+"""Test missing input handlers"""
+
+from contextlib import suppress
+
+from dagflow.graph import Graph
+from dagflow.graphviz import savegraph
+from dagflow.input_extra import *
+from dagflow.wrappers import *
+
+nodeargs = dict(typefunc=lambda: True)
+
+
+def test_00():
+    """Test default handler: fail on connect"""
+    graph = Graph()
+
+    in1 = graph.add_node("n1", **nodeargs)
+    in2 = graph.add_node("n2", **nodeargs)
+    in3 = graph.add_node("n3", **nodeargs)
+    in4 = graph.add_node("n4", **nodeargs)
+    for node in (in1, in2, in3, in4):
+        node.add_output("o1", allocatable=False)
+
+    s = graph.add_node(
+        "add", missing_input_handler=MissingInputFail, **nodeargs
+    )
+    graph.close()
+
+    with suppress(Exception):
+        (in1, in2, in3) >> s
+    savegraph(
+        graph, "output/missing_input_handler_00.pdf", label="Fail on connect"
+    )
+
+
+def test_01():
+    """Test InputAdd handler: add new input on each new connect"""
+    graph = Graph()
+
+    in1 = graph.add_node("n1", **nodeargs)
+    in2 = graph.add_node("n2", **nodeargs)
+    in3 = graph.add_node("n3", **nodeargs)
+    in4 = graph.add_node("n4", **nodeargs)
+    for node in (in1, in2, in3, in4):
+        node.add_output("o1", allocatable=False)
+
+    s = graph.add_node(
+        "add",
+        missing_input_handler=MissingInputAdd(
+            output_kws={"allocatable": False}
+        ),
+        **nodeargs
+    )
+
+    (in1, in2, in3) >> s
+    in4 >> s
+
+    print()
+    print("test 01")
+    s.print()
+    graph.close()
+
+    savegraph(
+        graph, "output/missing_input_handler_01.pdf", label="Add only inputs"
+    )
+
+
+def test_02():
+    """
+    Test InputAddPair handler: add new input on each new connect
+    and connect them as inputs to another input
+    """
+    graph = Graph()
+
+    in1 = graph.add_node("n1", **nodeargs)
+    in2 = graph.add_node("n2", **nodeargs)
+    in3 = graph.add_node("n3", **nodeargs)
+    in4 = graph.add_node("n4", **nodeargs)
+    for node in (in1, in2, in3, in4):
+        node.add_output("o1", allocatable=False)
+
+    s = graph.add_node(
+        "add",
+        missing_input_handler=MissingInputAddPair(
+            output_kws={"allocatable": False}
+        ),
+        **nodeargs
+    )
+
+    (in1, in2, in3) >> s
+    in4 >> s
+
+    print()
+    print("test 02")
+    s.print()
+
+    for input, output in zip(s.inputs, s.outputs):
+        assert input.child_output is output
+    graph.close()
+
+    savegraph(
+        graph,
+        "output/missing_input_handler_02.pdf",
+        label="Add inputs and an output for each input",
+    )
+
+
+def test_03():
+    """
+    Test InputAddOne handler: add new input on each new connect and
+    add an output if needed
+    """
+    graph = Graph()
+
+    in1 = graph.add_node("n1", **nodeargs)
+    in2 = graph.add_node("n2", **nodeargs)
+    in3 = graph.add_node("n3", **nodeargs)
+    in4 = graph.add_node("n4", **nodeargs)
+    for node in (in1, in2, in3, in4):
+        node.add_output("o1", allocatable=False)
+
+    s = graph.add_node(
+        "add",
+        missing_input_handler=MissingInputAddOne(
+            output_kws={"allocatable": False}
+        ),
+        **nodeargs
+    )
+
+    (in1, in2, in3) >> s
+    in4 >> s
+
+    print()
+    print("test 03")
+    s.print()
+    graph.close()
+
+    savegraph(
+        graph,
+        "output/missing_input_handler_03.pdf",
+        label="Add only inputs and only one output",
+    )
+
+
+def test_04():
+    """
+    Test InputAddOne handler: add new input on each new connect and
+    add an output if needed.
+    This version also sets the input for each input
+    """
+    graph = Graph()
+
+    in1 = graph.add_node("n1", **nodeargs)
+    in2 = graph.add_node("n2", **nodeargs)
+    in3 = graph.add_node("n3", **nodeargs)
+    in4 = graph.add_node("n4", **nodeargs)
+    for node in (in1, in2, in3, in4):
+        node.add_output("o1", allocatable=False)
+
+    s = graph.add_node(
+        "add",
+        missing_input_handler=MissingInputAddOne(
+            add_child_output=True, output_kws={"allocatable": False}
+        ),
+        **nodeargs
+    )
+
+    (in1, in2, in3) >> s
+    in4 >> s
+
+    print()
+    print("test 04")
+    s.print()
+
+    output = s.outputs[0]
+    for input in s.inputs:
+        assert input.child_output is output
+    graph.close()
+
+    savegraph(
+        graph,
+        "output/missing_input_handler_04.pdf",
+        label="Add inputs and only one output",
+    )
+
+
+def test_05():
+    """
+    Test InputAddEach handler: add new input on each new connect and
+    add an output for each >> group
+    """
+    graph = Graph()
+
+    in1 = graph.add_node("n1", **nodeargs)
+    in2 = graph.add_node("n2", **nodeargs)
+    in3 = graph.add_node("n3", **nodeargs)
+    in4 = graph.add_node("n4", **nodeargs)
+    for node in (in1, in2, in3, in4):
+        node.add_output("o1", allocatable=False)
+
+    s = graph.add_node(
+        "add",
+        missing_input_handler=MissingInputAddEach(
+            add_child_output=False, output_kws={"allocatable": False}
+        ),
+        **nodeargs
+    )
+
+    (in1, in2, in3) >> s
+    in4 >> s
+
+    print()
+    print("test 05")
+    s.print()
+    graph.close()
+
+    savegraph(
+        graph,
+        "output/missing_input_handler_05.pdf",
+        label="Add inputs and an output for each block",
+    )
+
+
+def test_06():
+    """
+    Test InputAddEach handler: add new input on each new connect and
+    add an output for each >> group.
+    This version also sets the child_output for each input
+    """
+    graph = Graph()
+
+    in1 = graph.add_node("n1", **nodeargs)
+    in2 = graph.add_node("n2", **nodeargs)
+    in3 = graph.add_node("n3", **nodeargs)
+    in4 = graph.add_node("n4", **nodeargs)
+    for node in (in1, in2, in3, in4):
+        node.add_output("o1", allocatable=False)
+
+    s = graph.add_node(
+        "add",
+        missing_input_handler=MissingInputAddEach(
+            add_child_output=True, output_kws={"allocatable": False}
+        ),
+        **nodeargs
+    )
+
+    (in1, in2, in3) >> s
+    in4 >> s
+
+    print()
+    print("test 06")
+    s.print()
+
+    o1, o2 = s.outputs
+    for input in s.inputs[:3]:
+        assert input.child_output is o1
+    for input in s.inputs[3:]:
+        assert input.child_output is o2
+    graph.close()
+
+    savegraph(
+        graph,
+        "output/missing_input_handler_06.pdf",
+        label="Add inputs and an output for each block",
+    )
diff --git a/subtrees/dagflow/test/variables/test_load_variables.py b/subtrees/dagflow/test/variables/test_load_variables.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb84ef918336076614c8e1ea96af75fc581184f9
--- /dev/null
+++ b/subtrees/dagflow/test/variables/test_load_variables.py
@@ -0,0 +1,104 @@
+from dagflow.graphviz import savegraph
+from dagflow.graph import Graph
+from dagflow.bundles.load_variables import load_variables
+
+cfg1 = {
+        'variables': {
+            'var1': 1.0,
+            'var2': 1.0,
+            'sub1': {
+                'var3': 2.0
+                }
+            },
+        'format': 'value',
+        'labels': {
+            'var1': {
+                'text': 'text label 1',
+                'latex': r'\LaTeX label 1',
+                'name': 'v1-1'
+                },
+            'var2': 'simple label 2',
+            },
+        }
+
+cfg2 = {
+        'variables': {
+            'var1': (1.0, 1.0, 0.1),
+            'var2': (1.0, 2.0, 0.1),
+            'sub1': {
+                'var3': (2.0, 1.0, 0.1)
+                }
+            },
+        'format': ('value', 'central', 'sigma_absolute'),
+        'labels': {
+            'var1': {
+                'text': 'text label 1',
+                'latex': r'\LaTeX label 1',
+                'name': 'v1-2'
+                },
+            'var2': 'simple label 2'
+            },
+        }
+
+cfg3 = {
+        'variables': {
+            'var1': (1.0, 1.0, 0.1),
+            'var2': (1.0, 2.0, 0.1),
+            'sub1': {
+                'var3': (2.0, 3.0, 0.1)
+                }
+            },
+        'labels': {
+            'var1': {
+                'text': 'text label 1',
+                'latex': r'\LaTeX label 1',
+                'name': 'v1-3'
+                },
+            'var2': 'simple label 2'
+            },
+        'format': ('value', 'central', 'sigma_relative')
+        }
+
+cfg4 = {
+        'variables': {
+            'var1': (1.0, 1.0, 10),
+            'var2': (1.0, 2.0, 10),
+            'sub1': {
+                'var3': (2.0, 3.0, 10)
+                }
+            },
+        'labels': {
+            'var1': {
+                'text': 'text label 1',
+                'latex': r'\LaTeX label 1',
+                },
+            'var2': 'simple label 2'
+            },
+        'format': ('value', 'central', 'sigma_percent')
+        }
+
+cfg5 = {
+        'variables': {
+            'var1': (1.0, 10),
+            'var2': (2.0, 10),
+            'sub1': {
+                'var3': (3.0, 10)
+                }
+            },
+        'labels': {
+            'var1': {
+                'text': 'text label 1',
+                'latex': r'\LaTeX label 1',
+                },
+            'var2': 'simple label 2'
+            },
+        'format': ('central', 'sigma_percent')
+        }
+
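+# The configs exercise the supported 'format' variants: a plain value and
+# (value, central, sigma) tuples with absolute, relative and percent errors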
+def test_load_variables_v01():
+    cfgs = (cfg1, cfg2, cfg3, cfg4, cfg5)
+    with Graph(close=True) as g:
+        for cfg in cfgs:
+            load_variables(cfg)
+
+    savegraph(g, 'output/test_load_variables.pdf', show='all')
diff --git a/subtrees/dagflow/test/variables/test_variables.py b/subtrees/dagflow/test/variables/test_variables.py
new file mode 100644
index 0000000000000000000000000000000000000000..149ac3c566d33f1d2c96dc5bd6c86181f822035f
--- /dev/null
+++ b/subtrees/dagflow/test/variables/test_variables.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+
+from dagflow.lib import Array
+from dagflow.variable import GaussianParameters
+from dagflow.graph import Graph
+from dagflow.graphviz import savegraph
+from dagflow.exception import CriticalError
+
+from numpy import square, allclose
+import pytest
+
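+# Parametrized uncertainty modes: 'single' is a one-element parameter with a
+# sigma vector, 'uncorr' uses per-parameter sigmas, 'cov' a full covariance
+# matrix and 'cov1d' a diagonal covariance given as a 1-d variance array.
+# A 'cor' branch (sigma plus correlation matrix) exists below but is not
+# included in the parametrized modes.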
+@pytest.mark.parametrize('mode', ('single', 'uncorr', 'cov', 'cov1d'))
+def test_variables_00_variable(mode) -> None:
+    value_in    = [1.1, 1.8, 5.0]
+    central_in  = [1.0, 2.0, 3.0]
+    sigma_in    = [1.0, 0.5, 2.0]
+    corrs_in    = [-0.1, 0.5, -0.9] # correlation coefficients for pairs (0,1), (0,2), (1,2)
+    variance_in = square(sigma_in)
+    zeros_in    = [0.0, 0.0, 0.0]
+
+    if mode=='single':
+        value_in = value_in[:1]
+        central_in = central_in[:1]
+        sigma_in = sigma_in[:1]
+        zeros_in = zeros_in[:1]
+
+    with Graph(debug=False, close=False) as graph:
+        value   = Array("variable", value_in, mode='store_weak', mark='v')
+        central = Array("central",  central_in, mark='v₀')
+
+        if mode in ('single', 'uncorr', 'cor'):
+            sigma = Array("sigma", sigma_in, mark='σ')
+
+        if mode in ('single', 'uncorr'):
+            gp = GaussianParameters(value, central, sigma=sigma)
+        elif mode=='cov':
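+            # Full covariance matrix: V[i,j] = corr_ij * sigma_i * sigma_j,
+            # with the variances on the diagonal.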
+            covariance = Array("covariance", [
+                    [variance_in[0],                      corrs_in[0]*sigma_in[0]*sigma_in[1], corrs_in[1]*sigma_in[0]*sigma_in[2]],
+                    [corrs_in[0]*sigma_in[0]*sigma_in[1], variance_in[1],                      corrs_in[2]*sigma_in[1]*sigma_in[2]],
+                    [corrs_in[1]*sigma_in[0]*sigma_in[2], corrs_in[2]*sigma_in[1]*sigma_in[2], variance_in[2]]
+                                ],
+                               mark='V')
+            gp = GaussianParameters(value, central, covariance=covariance)
+        elif mode=='cov1d':
+            covariance = Array("covariance", variance_in, mark='diag(V)')
+            gp = GaussianParameters(value, central, covariance=covariance)
+        elif mode=='cor':
+            correlation = Array("correlation", [
+                [1.0,         corrs_in[0], corrs_in[1]],
+                [corrs_in[0], 1.0,         corrs_in[2]],
+                [corrs_in[1], corrs_in[2], 1.0],
+                ], mark='C')
+            gp = GaussianParameters(value, central, sigma=sigma, correlation=correlation)
+        else:
+            raise RuntimeError(f"Invalid mode {mode}")
+
+    try:
+        graph.close()
+    except CriticalError:
+        savegraph(graph, f"output/test_variables_00_{mode}.png")
+        raise
+
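+    # After closing the graph the values must match the input and the
+    # normalized values must be nonzero, since value differs from central.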
+    value_out0 = gp.value.data.copy()
+    normvalue_out0 = gp.normvalue.data
+    assert allclose(value_in, value_out0, atol=0, rtol=0)
+    assert all(normvalue_out0!=0)
+
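+    # Zeroing the normalized value must pull the value back to the central one.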
+    gp.normvalue.set(zeros_in)
+    value_out1 = gp.value.data
+    normvalue_out1 = gp.normvalue.data
+    assert allclose(central_in, value_out1, atol=0, rtol=0)
+    assert allclose(normvalue_out1, 0.0, atol=0, rtol=0)
+
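+    # Round trip: restoring the original values must reproduce the
+    # original normalized values.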
+    gp.value.set(value_out0)
+    value_out2 = gp.value.data
+    normvalue_out2 = gp.normvalue.data
+    assert allclose(value_in, value_out2, atol=0, rtol=0)
+    assert allclose(normvalue_out2, normvalue_out0, atol=0, rtol=0)
+
+    savegraph(graph, f"output/test_variables_00_{mode}.png", show=['all'])
+    savegraph(graph, f"output/test_variables_00_{mode}.pdf", show=['all'])
+